def BinaryToDecimal(binary):
    return int(binary, base=2)

test_str = """
Lately I'm feeling lonely,
I am getting bored of torturing primates all of the time.
At least I'm doing the world a favor my punishing them.
Just remember we are good. Not Cruel
"""

# encode each character as an 8-bit binary string
res = ''.join('{0:08b}'.format(ord(i)) for i in test_str)
print(res)

# decode the binary string back to text, 8 bits at a time
str_data = ''
for i in range(0, len(res), 8):
    temp_data = res[i:i + 8]  # int(..., base=2) handles leading zeros, so no stripping needed
    decimal_data = BinaryToDecimal(temp_data)
    str_data += chr(decimal_data)
print(str_data)
|
N = int(input())
if N == 3:
print("* . .\n. . *\n. . .")
exit()
mat = [['.' for i in range(N)] for j in range(N)]
for i in range(N // 2):
mat[i][2 * i] = '*'
for i in range(N // 2):
if i + N // 2 == N - 1: break
mat[1 + i + N // 2][1 + 2 * i] = '*'
for row in mat:
print(*row) |
import random
count = 10**6
print(count)
for i in range(count):
print(random.randint(5 * 10**5, 10**6), end = ' ')
print()
for i in range(1, count):
print(random.randint(1, i), end = ' ')
print() |
import calendar
import datetime
import time
print(calendar.weekheader(3))
print()
print(calendar.firstweekday())
print()
print(calendar.month(2020, 3, 3))
print()
print(calendar.monthcalendar(2020, 3))
print()
print(calendar.calendar(2020))
day_of_the_week = calendar.weekday(2020, 9, 22)
print(day_of_the_week)
is_leap = calendar.isleap(2020)
print(is_leap)
how_many_leap_days = calendar.leapdays(2000, 2005)
print(how_many_leap_days) |
# unittest skip
# conditional skipping
import unittest
def divide(la, lb):
return la/lb
class TestAdd(unittest.TestCase):
data = 10
datb = 30
    def test_add(self):
        # the test passes only if ZeroDivisionError is raised, so divide by zero here;
        # with the original arguments (30 / 10) no exception occurs and the test fails
        self.assertRaises(ZeroDivisionError, divide, self.datb, 0)
'''
@unittest.skip("will be tested later")
def test_add_two(self):
self.assertEqual (sum([6, 4, 3]), 16, "expecting 13")
def test_add_three(self):
self.assertEqual (sum([6, 4, 3]), 16, "expecting 13")
@unittest.skipIf(datb>data, "skip if")
def test_add_four(self):
self.assertEqual (sum([6, 4, 3]), 16, "expecting 13")
@unittest.expectedFailure
def test_add_five(self):
self.assertEqual (sum([6, 4, 3]), 16, "expecting 13")
'''
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import time
# In[2]:
def funca():
i = 0
for i in range(-9, 1):
print('A will take {} seconds more'.format(i))
time.sleep(1)
def funcb():
i = 0
for i in range(-3, 1):
print('B will take {} seconds more'.format(i))
time.sleep(1)
# In[3]:
funca()
# In[6]:
def maintask():
print("main task")
i = 0
funca()
funcb()
for i in range(-6, 1):
print("main will take {} seconds more".format(i))
time.sleep(1)
print("main exits")
# In[7]:
maintask()
# In[8]:
import multiprocessing
from multiprocessing import Process
import time
def funca():
i = 0
for i in range(-9, 1):
print('A will take {} seconds more'.format(i))
time.sleep(1)
def funcb():
i = 0
for i in range(-3, 1):
print('B will take {} seconds more'.format(i))
time.sleep(1)
def maintask():
print("main task")
pa = Process(target = funca)
pb = Process(target = funcb)
pa.start()
pb.start()
for i in range(-6, 1):
print("main will take {} seconds more".format(i))
time.sleep(1)
print("main exits")
# In[9]:
maintask()
# In[10]:
maintask()
# In[11]:
import multiprocessing
from multiprocessing import Process
import time
def funca():
i = 0
for i in range(-9, 1):
print('A will take {} seconds more'.format(i))
time.sleep(1)
def funcb():
i = 0
for i in range(-3, 1):
print('B will take {} seconds more'.format(i))
time.sleep(1)
def maintask():
print("main task")
pa = Process(target = funca)
pb = Process(target = funcb)
pa.start()
pb.start()
for i in range(-6, 1):
print("main will take {} seconds more".format(i))
time.sleep(1)
pa.join()
pb.join()
print("main exits")
# In[12]:
maintask()
# In[17]:
x = 0
y = 0
import threading
def activity():
global x
x = x + 1
def taska():
global y
y = y+1
print('starting task', y)
for var in range(1000000):
activity()
print('ending task', y)
def taskb():
global y
y = y + 1
print('starting task', y)
for var in range(1000000):
activity()
print('ending task', y)
def main_task():
ta = threading.Thread(target=taska)
tb = threading.Thread(target=taskb)
ta.start()
tb.start()
ta.join()
tb.join()
global x
print(x)
print("main task exits")
# In[20]:
main_task()
# In[16]:
main_task()
# In[23]:
x = 0
y = 0
import threading
lock = threading.Lock()
def activity():
global x
x = x + 1
def taska():
global y
print('task a needs the lock')
global lock
lock.acquire()
y = y+1
print('starting task', y)
for var in range(100000000):
activity()
print('ending task', y)
print('a will release the lock')
lock.release()
def taskb():
global y
print('task b needs the lock')
global lock
lock.acquire()
y = y + 1
print('starting task', y)
for var in range(100000000):
activity()
print('ending task', y)
print('b will release the lock')
lock.release()
def main_task():
ta = threading.Thread(target=taska)
tb = threading.Thread(target=taskb)
ta.start()
tb.start()
ta.join()
tb.join()
global x
print(x)
print("main task exits")
# In[24]:
main_task()
# In[25]:
help(threading.Thread)
# In[ ]:
|
#!/usr/bin/env python
"""This script uses a trained convtransformer model to translate a
sentence from the source to the target language.
It expects two arguments:
- The path to the trained model
- The sentence to be translated
The beam width used in the beam search can be specified by using the
flag --beam-width.
Example usage:
python translate.py --beam-width 5 model.pt "Je parle allemand."
"""
import argparse
import os
from typing import List, Optional, Tuple
import torch
import torch.optim as optim
from alphabet import Alphabet
from convtransformer import ConvTransformerModel
DEFAULT_BEAM_WIDTH = 3
def translate(
sentence: str,
alph: Alphabet,
model: ConvTransformerModel,
device: torch.device,
beam_width: int = DEFAULT_BEAM_WIDTH,
) -> Tuple[str, str]:
"""Translates a sentence using the passed model.
We use beam search with beam width specified by 'beam_width'.
Args:
sentence: The sentence to be translated.
alph: An instance of the class 'Alphabet'.
model: A trained convtransformer model.
device: The torch device to use.
beam_width: The beam width in the beam search.
Returns:
The (possibly altered) source sentence and its translation.
"""
if beam_width <= 0:
raise ValueError("The beam width must be positive!")
if beam_width > alph.size:
raise ValueError("The beam width cannot be larger than the alphabet size!")
# Turn the given sentence into a tensor
sentence_idx = alph.prepare_sentence(sentence[: model.max_len - 2])
src = torch.tensor(sentence_idx, device=device).unsqueeze(0)
# In the list 'intermediate_results' we save all unfinished
# translations considered at a given moment. Its elements are pairs,
# where the first coefficient holds the log-probability of the
# translation, and the second coefficient the translation itself.
intermediate_results = [
(torch.zeros(1).to(device), torch.tensor([alph.idx_start]).to(device))
]
# In the list 'final_results' we save all finished translations. We
# stop the search once we have found 'beam_width'**2 translations.
final_results: List[Tuple[torch.Tensor, torch.Tensor]] = []
model.eval()
for current_tgt_length in range(1, model.max_len):
# In the list 'next_sentences' we save all candidate
# translations gathered in this round.
next_sentences: List[Tuple[torch.Tensor, torch.Tensor]] = []
for log_prob, tgt in intermediate_results:
# Given an unfinished translation 'tgt', get the
# log-probability distribution for the next character
with torch.no_grad():
distr = model(src, tgt.unsqueeze(0))[1][0][-1]
log_prob_topk, indices_topk = distr.topk(beam_width)
# Add the best candidates either to 'next_sentences' (if
# the translation is unfinished), or to 'final_results'
# (if we encounter an 'end-'character).
for i in range(beam_width):
next_log_prob = log_prob + log_prob_topk[i]
next_tgt = torch.cat([tgt, indices_topk[i].unsqueeze(0)])
if (
indices_topk[i].item() == alph.idx_end
or current_tgt_length == model.max_len - 1
):
next_log_prob /= current_tgt_length + 1
final_results.append((next_log_prob, next_tgt))
else:
next_sentences.append((next_log_prob, next_tgt))
# Move the best candidate translations to 'intermediate_results'
next_sentences.sort(key=lambda x: -x[0].item())
intermediate_results = next_sentences[:beam_width]
# Stop once we have enough finished translations
if len(final_results) >= beam_width ** 2:
break
# Choose the translation with the highest (normalized) probability
final_results.sort(key=lambda x: -x[0].item())
translation_idx = final_results[0][1].tolist()
# Turn the sentences back to strings
sentence = alph.idx_to_str(sentence_idx)[1:-1]
translation = alph.idx_to_str(translation_idx)[1:-1]
return sentence, translation
if __name__ == "__main__":
# Configure the argument parser
parser = argparse.ArgumentParser(
description="Translate a sentence using a trained convtransformer model."
)
parser.add_argument("model", help="path to the model")
parser.add_argument("sentence", help="sentence to be translated")
parser.add_argument(
"--beam-width",
type=int,
help="set beam width (default: {})".format(DEFAULT_BEAM_WIDTH),
)
args = parser.parse_args()
path_model = args.model
original_sentence = args.sentence
if args.beam_width:
beam_width = args.beam_width
else:
beam_width = DEFAULT_BEAM_WIDTH
# Load the model configuration states from the specified path
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
if not os.path.isfile(path_model):
print("Model not found!")
quit()
states = torch.load(path_model, map_location=device)
conf_alph = states["conf_alph"]
conf_model = states["conf_model"]
model_state_dict = states["model_state_dict"]
# Initialize the alphabet and the convtransformer model
alph = Alphabet(**conf_alph)
model = ConvTransformerModel(**conf_model).to(device)
model.load_state_dict(model_state_dict)
# Translate the given sentence
sentence, translation = translate(
original_sentence, alph, model, device, beam_width
)
print("Original: {}".format(sentence))
print("Translation: {}".format(translation))
|
# 2520 is the smallest number that can be divided by each of the numbers from 1 to 10
# without any remainder.
# What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
def smallest_number():
    i = 2520
    max_num = 20
    while True:
        # the answer must be a multiple of 20, so stepping by 20 keeps the brute force feasible
        i += 20
        # test whether i is divisible by every number from 11 to 20 with remainder 0
        # (divisibility by 11..20 already implies divisibility by 1..10)
        divisible = True
        for x in range(11, max_num + 1):
            divisible = (i % x == 0)
            if not divisible:
                break
        if divisible:
            print("Found: %d" % i)
            break
        else:
            pass
            # print("%d not div by %d" % (i, x))
smallest_number()
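
# For comparison, a hedged sketch of the direct solution: the answer is just the least
# common multiple of 1..20 (math.lcm requires Python 3.9+; this is much faster than the
# brute-force search above).
import math
from functools import reduce
print(reduce(math.lcm, range(1, 21)))  # 232792560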
|
import numpy as np
import matplotlib.pyplot as plt
from random import choice

def unit_step(x):
    # threshold activation: fire if the weighted sum is positive
    if x > 0:
        return 1
    else:
        return 0

# input and output dataset (an OR gate; the constant 1 acts as a bias input)
training_data = [
    (np.array([0, 0, 1]), 0),
    (np.array([0, 1, 1]), 1),
    (np.array([1, 0, 1]), 1),
    (np.array([1, 1, 1]), 1),
]
w = np.random.rand(3)   # random initial weights
a = []                  # error history
mu = 0.2                # learning rate (raised from 0.001 so the demo converges)
n = 50                  # number of training iterations
for i in range(n):
    x, expected = choice(training_data)   # pick a random training sample
    result = np.dot(w, x)
    delta = expected - unit_step(result)  # signed error for this sample
    a.append(abs(delta))
    w += mu * delta * x                   # perceptron weight update
print("delta history\n", a)
plt.plot(a)
plt.show()
|
# Problem 2. From the text below, print only the text with all tags removed.
import re
def cleanhtml(raw_html):
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', raw_html)
return cleantext
s = """
<html>
<body style='background-color:#ffff'>
<h4>Click</h4>
<a href='http://www.python.org'>Here</a>
<p>
To connect to the most powerful tools in the world.
</p>
</body>
</html>
"""
print(cleanhtml(s))
|
# Write a function named readable_timedelta. The function should take one argument,
# an integer days, and return a string that says how many weeks and days that is.
# For example, calling the function and printing the result like this:
# print(readable_timedelta(10))
# should output the following:
# 1 week(s) and 3 day(s).
# write your function here
def readable_timedelta(days):
week = int(days // 7)
day = int(days % 7)
return f'{week} week(s) and {day} day(s).'
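
# An alternative sketch (not part of the original exercise): divmod performs the floor
# division and the remainder in a single call.
def readable_timedelta_divmod(days):
    weeks, remainder = divmod(days, 7)
    return f'{weeks} week(s) and {remainder} day(s).'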
# test your function
print(readable_timedelta(10)) |
# Quiz: Assign and Modify Variables
# Now it's your turn to work with variables. The comments in this quiz (the lines that begin with #)
# have instructions for creating and modifying variables. After each comment write a line of code
# that implements the instruction.
# Note that this code uses scientific notation to define large numbers.
# 4.445e8 is equal to 4.445 * 10 ** 8 which is equal to 444500000.0.
# Write your function here. Make sure to use "population_density" as the name of the function.
# So, the test below works.
def population_density(num1, num2):
return num1 / num2
# test cases for your function. Don't change anything below this comment.
test1 = population_density(10, 1)
expected_result1 = 10
print("expected result: {}, actual result: {}".format(expected_result1, test1))
test2 = population_density(864816, 121.4)
expected_result2 = 7123.6902801
print("expected result: {}, actual result: {}".format(expected_result2, test2)) |
#!/usr/bin/env python3
"""
command-line utility to convert date to day of year
"""
from argparse import ArgumentParser
from sciencedates import date2doy
p = ArgumentParser(description="convert date to day of year")
p.add_argument("date", help="yyyy-mm-dd")
P = p.parse_args()
doy, year = date2doy(P.date)
print(doy.item())
|
def pairCorrelationFunction_2D(x, y, S, rMax, dr):
"""Compute the two-dimensional pair correlation function, also known
as the radial distribution function, for a set of circular particles
contained in a square region of a plane. This simple function finds
reference particles such that a circle of radius rMax drawn around the
particle will fit entirely within the square, eliminating the need to
compensate for edge effects. If no such particles exist, an error is
returned. Try a smaller rMax...or write some code to handle edge effects! ;)
Arguments:
x an array of x positions of centers of particles
y an array of y positions of centers of particles
S length of each side of the square region of the plane
rMax outer diameter of largest annulus
dr increment for increasing radius of annulus
Returns a tuple: (g, radii, interior_indices)
g(r) a numpy array containing the correlation function g(r)
radii a numpy array containing the radii of the
annuli used to compute g(r)
reference_indices indices of reference particles
"""
from numpy import zeros, sqrt, where, pi, mean, arange, histogram
# Number of particles in ring/area of ring/number of reference particles/number density
# area of ring = pi*(r_outer**2 - r_inner**2)
# Find particles which are close enough to the box center that a circle of radius
# rMax will not cross any edge of the box
bools1 = x > rMax
bools2 = x < (S - rMax)
bools3 = y > rMax
bools4 = y < (S - rMax)
interior_indices, = where(bools1 * bools2 * bools3 * bools4)
num_interior_particles = len(interior_indices)
if num_interior_particles < 1:
raise RuntimeError ("No particles found for which a circle of radius rMax\
will lie entirely within a square of side length S. Decrease rMax\
or increase the size of the square.")
edges = arange(0., rMax + 1.1 * dr, dr)
num_increments = len(edges) - 1
g = zeros([num_interior_particles, num_increments])
radii = zeros(num_increments)
numberDensity = len(x) / S**2
# Compute pairwise correlation for each interior particle
for p in range(num_interior_particles):
index = interior_indices[p]
d = sqrt((x[index] - x)**2 + (y[index] - y)**2)
d[index] = 2 * rMax
        (result, bins) = histogram(d, bins=edges)  # raw counts per annulus ('normed' has been removed from numpy)
g[p, :] = result/numberDensity
# Average g(r) for all interior particles and compute radii
g_average = zeros(num_increments)
for i in range(num_increments):
radii[i] = (edges[i] + edges[i+1]) / 2.
rOuter = edges[i + 1]
rInner = edges[i]
g_average[i] = mean(g[:, i]) / (pi * (rOuter**2 - rInner**2))
return (g_average, radii, interior_indices)
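
# Hedged usage sketch (not part of the original module): scatter random points in a
# square box and compute g(r); for uniformly random points g(r) should hover around 1.
# The point count, box size, rMax and dr below are illustrative choices.
if __name__ == '__main__':
    import numpy as np
    _pts = np.random.uniform(0.0, 20.0, size=(500, 2))
    _g, _radii, _ = pairCorrelationFunction_2D(_pts[:, 0], _pts[:, 1], S=20.0, rMax=5.0, dr=0.25)
    print(_radii)
    print(_g)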
####
def pairCorrelationFunction_3D(x, y, z, S, rMax, dr):
"""Compute the three-dimensional pair correlation function for a set of
spherical particles contained in a cube with side length S. This simple
function finds reference particles such that a sphere of radius rMax drawn
around the particle will fit entirely within the cube, eliminating the need
to compensate for edge effects. If no such particles exist, an error is
returned. Try a smaller rMax...or write some code to handle edge effects! ;)
Arguments:
x an array of x positions of centers of particles
y an array of y positions of centers of particles
z an array of z positions of centers of particles
S length of each side of the cube in space
rMax outer diameter of largest spherical shell
dr increment for increasing radius of spherical shell
Returns a tuple: (g, radii, interior_indices)
g(r) a numpy array containing the correlation function g(r)
radii a numpy array containing the radii of the
spherical shells used to compute g(r)
reference_indices indices of reference particles
"""
from numpy import zeros, sqrt, where, pi, mean, arange, histogram
# Find particles which are close enough to the cube center that a sphere of radius
# rMax will not cross any face of the cube
bools1 = x > rMax
bools2 = x < (S - rMax)
bools3 = y > rMax
bools4 = y < (S - rMax)
bools5 = z > rMax
bools6 = z < (S - rMax)
interior_indices, = where(bools1 * bools2 * bools3 * bools4 * bools5 * bools6)
#print(interior_indices)
num_interior_particles = len(interior_indices)
if num_interior_particles < 1:
raise RuntimeError ("No particles found for which a sphere of radius rMax\
will lie entirely within a cube of side length S. Decrease rMax\
or increase the size of the cube.")
edges = arange(0., rMax + 1.1 * dr, dr)
num_increments = len(edges) - 1
g = zeros([num_interior_particles, num_increments])
radii = zeros(num_increments)
numberDensity = len(x) / S**3
# Compute pairwise correlation for each interior particle
for p in range(num_interior_particles):
index = interior_indices[p]
d = sqrt((x[index] - x)**2 + (y[index] - y)**2 + (z[index] - z)**2)
d[index] = 2 * rMax
        (result, bins) = histogram(d, bins=edges)  # raw counts per shell ('normed' has been removed from numpy)
g[p,:] = result / numberDensity
# Average g(r) for all interior particles and compute radii
g_average = zeros(num_increments)
for i in range(num_increments):
radii[i] = (edges[i] + edges[i+1]) / 2.
rOuter = edges[i + 1]
rInner = edges[i]
g_average[i] = mean(g[:, i]) / (4.0 / 3.0 * pi * (rOuter**3 - rInner**3))
return (g_average, radii, interior_indices)
# Number of particles in shell/total number of particles/volume of shell/number density
# shell volume = 4/3*pi(r_outer**3-r_inner**3)
####
################################################################
################################################################
def pairCorrelationFunction_3D_meng(x, y, z, lowx,highx,lowy,highy,lowz,highz,deltaZ, rMax, dr, bd):
"""Compute the three-dimensional pair correlation function for a set of
spherical particles contained in a cube with side length S. This simple
function finds reference particles such that a sphere of radius rMax drawn
around the particle will fit entirely within the cube, eliminating the need
to compensate for edge effects. If no such particles exist, an error is
returned. Try a smaller rMax...or write some code to handle edge effects! ;)
Arguments:
x an array of x positions of centers of particles
y an array of y positions of centers of particles
z an array of z positions of centers of particles
S length of each side of the cube in space
rMax outer diameter of largest spherical shell
dr increment for increasing radius of spherical shell
Returns a tuple: (g, radii, interior_indices)
g(r) a numpy array containing the correlation function g(r)
radii a numpy array containing the radii of the
spherical shells used to compute g(r)
reference_indices indices of reference particles
bd the bound of the box
"""
import numpy as np
from numpy import zeros, sqrt, where, pi, mean, arange, histogram
# Find particles which are close enough to the cube center that a sphere of radius
# rMax will not cross any face of the cube
Sx=highx-lowx
Sy=highy-lowy
Sz=deltaZ
#Sz=highz-lowz
bools1 = x > bd[0]
bools2 = x < bd[1]
bools3 = y > bd[2]
bools4 = y < bd[3]
bools5 = z > bd[4]
bools6 = z < bd[5]
interior_indices, = where(bools1 * bools2 * bools3 * bools4 * bools5 * bools6)
#print(interior_indices) #is a list of index of all particles
num_interior_particles = len(interior_indices)
    print('total number of particles: ', len(x))
    print('number of particles within the bound: ', num_interior_particles)
if num_interior_particles < 1:
print("No particles found for which a sphere of radius rMax\
will lie entirely within a cube of side length S. Decrease rMax\
or increase the size of the cube.")
return ([0],[0],[0])
edges = arange(0., rMax + 1.1 * dr, dr)
#print(edges)
#### [0. 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.....................6 4.7 4.8 4.9 5. 5.1] shell
num_increments = len(edges) - 1
g = zeros([num_interior_particles, num_increments]) # empty array for storing
#print(g)
#[[0. 0. 0. ... 0. 0. 0.] par1 num_interior_particles
# [0. 0. 0. ... 0. 0. 0.] par2
# [0. 0. 0. ... 0. 0. 0.].
# ...
# [0. 0. 0. ... 0. 0. 0.]
# [0. 0. 0. ... 0. 0. 0.]
# [0. 0. 0. ... 0. 0. 0.]]
# col differet shells num_increments
radii = zeros(num_increments)
    numberDensity = len(x) / (Sx * Sy * Sz)  ### all particles / box volume == mean density
    # Compute pairwise correlation for each interior particle
    for p in range(num_interior_particles):  # loop over the interior indices
        index = interior_indices[p]
        d = sqrt((x[index] - x)**2 + (y[index] - y)**2 + (z[index] - z)**2)  # distances from the current particle to all other points
        d[index] = 2 * rMax  ## the self-distance is zero; push it outside the histogram range
        (result, bins) = histogram(d, bins=edges)  ### counts per shell ('normed' has been removed from numpy)
#print('----')
#print(histogram(d, bins=edges, normed=False))
# (array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0,
# 8, 0, 0, 0, 0, 0, 6, 0, 0, 0, 24, 0, 0, 0, 24, 0, 0]), 51 results
# array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. , 1.1, 1.2,
# 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2. , 2.1, 2.2, 2.3, 2.4, 2.5,
# 2.6, 2.7, 2.8, 2.9, 3. , 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8,
# 3.9, 4. , 4.1, 4.2, 4.3, 4.4, 4.5, 4.6, 4.7, 4.8, 4.9, 5. , 5.1])) 52 bins
# base on the current particle z, calculate how much sphere volume outside the rectangle box, do adjustment
adjustlist=meng_calc_outsideVol(bins, z[index], bd[4], bd[5])
#print(adjustlist)
#print(result)
#print(len(adjustlist))
#print(1-np.array(adjustlist))
result_adj=result /(1-np.array(adjustlist))
#print(result)
g[p,:] = result_adj # store the count for current shell current particle into g row
#g[p,:] = result # no adjsutment
# Average g(r) for all interior particles and compute radii
g_average = zeros(num_increments) # empty array for storing stat for each shell
for i in range(num_increments):
radii[i] = (edges[i] + edges[i+1]) / 2.
rOuter = edges[i + 1]
rInner = edges[i]
        g_average[i] = mean(g[:, i]) / (4.0 / 3.0 * pi * (rOuter**3 - rInner**3)) / numberDensity  # shell density divided by the average density, as described below
###### result(count)/ (4/3 pi dr) - current shell density
######--------------------
###### numberDensity - avg density
return (g_average, radii, interior_indices)
# Number of particles in shell/total number of particles/volume of shell/number density
# shell volume = 4/3*pi(r_outer**3-r_inner**3)
####
def meng_calc_outsideVol(rlist,z, zlow,zhigh):
if z < zlow or z > zhigh:
print('z wrong --------------')
print(z)
print(zlow)
print(zhigh)
return
else:
outside_ratio=[]
for i in range(len(rlist)-1): # rlist 52 element, range 0-50
r = (rlist[i] + rlist[i+1]) / 2.
height_top=(z+r)-zhigh
outtop= calc_half_sphere(height_top ,r)
#print(outtop)
height_bot=zlow - (z-r)
outbot= calc_half_sphere(height_bot ,r)
#print(outbot)
#print('----')
outside_ratio.append(outtop + outbot)
return outside_ratio
###https://keisan.casio.com/exec/system/1223382199
def calc_half_sphere(h ,r):
import math
if h <=0:
return 0
else:
dome_area=2 * math.pi * r * h
sphera_total_area= 4 * math.pi * r**2
# c=math.sqrt(h*(2*r - h))
# v= math.pi / 6*h*(3*c**2 + h**2 )
return dome_area/sphera_total_area
############################################
################################################################
################################################################
################################################################
################################################################
################################################################
################################################################
################################################################
################################################################
################################################################
################################################################
################################################################
################################################################
################################################################
################################################################
def sphere(shape, radius, position):
import numpy as np
# assume shape and position are both a 3-tuple of int or float
# the units are pixels / voxels (px for short)
# radius is a int or float in px
semisizes = (radius,) * 3
#print(semisizes) ## (10, 10, 10)
# genereate the grid for the support points
# centered at the position indicated by position
grid = [slice(-x0, dim - x0) for x0, dim in zip(position, shape)]
position = np.ogrid[grid]
#ogrid[0:5,0:5]
#[array([[0],
# [1],
# [2],
# [3],
# [4]]), array([[0, 1, 2, 3, 4]])]
# calculate the distance of all points from `position` center
# scaled by the radius
arr = np.zeros(shape, dtype=float)
for x_i, semisize in zip(position, semisizes):
# this can be generalized for exponent != 2
# in which case `(x_i / semisize)`
# would become `np.abs(x_i / semisize)`
arr += (x_i / semisize) ** 2
# the inner part of the sphere will have distance below 1
return arr <= 1.0
def meng_calc_outsideVol2(xmin,xmax,ymin,ymax,zmin,zmax, rlist, ptx,pty,ptz): # rlist is a list of the shell radius
import numpy as np
# plot in 3D
## 100 z slice
print('input parameter:')
print(xmin,xmax,ymin,ymax,zmin,zmax, rlist, ptx,pty,ptz)
outside_ratio=[]
for i in range(len(rlist)-1): # rlist 52 element, range 0-50
r1=rlist[i]
r2=rlist[i+1]
if r1==0:
arr1=np.zeros((256,256,100), dtype=bool)
else:
arr1 = sphere((256,256, 100), r1, (ptx,pty,ptz)) ## , 00011000
arr2 = sphere((256,256, 100), r2, (ptx,pty,ptz)) ## 00111100
arr=(np.invert(arr1==arr2)) # 00100100 ## make the shell to be true in array
shellvol= np.count_nonzero(arr) #### the shell vol at specific radius
if shellvol==0:
outside_ratio.append(0)
print('ratio:',0)
else:
## put the rectangle cubic box in the volume
arr[int(xmin):int(xmax),int(ymin):int(ymax),int(zmin):int(zmax)]=False
shell_cut_vol= np.count_nonzero(arr) ### the shell vol at specific radius- substract the cubiod, which is the shell vol outside the box
print('shell', i, '/',len(rlist)-1, ' ratioo:', shell_cut_vol /shellvol ) ### if no cubic cut, it will be equal to 1
outside_ratio.append(shell_cut_vol /shellvol)
return outside_ratio
# z,x,y = arr.nonzero()
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(x, y, z, c= 'red')
#
def pairCorrelationFunction_3D_meng2(x, y, z, lowx,highx,lowy,highy,lowz,highz,deltaZ, rMax, dr):
"""Compute the three-dimensional pair correlation function for a set of
spherical particles contained in a cube with side length S. This simple
function finds reference particles such that a sphere of radius rMax drawn
around the particle will fit entirely within the cube, eliminating the need
to compensate for edge effects. If no such particles exist, an error is
returned. Try a smaller rMax...or write some code to handle edge effects! ;)
Arguments:
x an array of x positions of centers of particles
y an array of y positions of centers of particles
z an array of z positions of centers of particles
S length of each side of the cube in space
rMax outer diameter of largest spherical shell
dr increment for increasing radius of spherical shell
Returns a tuple: (g, radii, interior_indices)
g(r) a numpy array containing the correlation function g(r)
radii a numpy array containing the radii of the
spherical shells used to compute g(r)
reference_indices indices of reference particles
"""
import numpy as np
from numpy import zeros, sqrt, where, pi, mean, arange, histogram
# Find particles which are close enough to the cube center that a sphere of radius
# rMax will not cross any face of the cube
## high, low are the box boundary
Sx=highx-lowx
Sy=highy-lowy
Sz=deltaZ ### deltaz were used to calculate the avg density
# bools1 = x > lowx + rMax
# bools2 = x < lowx + (Sx - rMax)
# bools3 = y > lowy + rMax
# bools4 = y < lowy + (Sy - rMax)
# bools5 = z > lowz
# bools6 = z < highz
# interior_indices, = where(bools1 * bools2 * bools3 * bools4 * bools5 * bools6)
interior_indices, = where(x > 0) # all the x value must be >0, so it select all index
print('hah')
print(interior_indices) #is a list of index of all particles [ 0 1 2 ... 1164 1165 1166]
num_interior_particles = len(interior_indices)
print(num_interior_particles)
if num_interior_particles < 1:
raise RuntimeError ("No particles found for which a sphere of radius rMax\
will lie entirely within a cube of side length S. Decrease rMax\
or increase the size of the cube.")
edges = arange(0., rMax + 1.1 * dr, dr)
#print(edges)
#### [0. 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.....................6 4.7 4.8 4.9 5. 5.1] shell
num_increments = len(edges) - 1
g = zeros([num_interior_particles, num_increments]) # empty array for storing
#print(g)
#[[0. 0. 0. ... 0. 0. 0.] par1 num_interior_particles
# [0. 0. 0. ... 0. 0. 0.] par2
# [0. 0. 0. ... 0. 0. 0.].
# ...
# [0. 0. 0. ... 0. 0. 0.]
# [0. 0. 0. ... 0. 0. 0.]
# [0. 0. 0. ... 0. 0. 0.]]
# col differet shells num_increments
radii = zeros(num_increments)
    numberDensity = len(x) / (Sx * Sy * Sz)  ### all particles / box volume == mean density
    # Compute pairwise correlation for each interior particle
    for p in range(num_interior_particles):  # loop over the interior indices
        index = interior_indices[p]
        d = sqrt((x[index] - x)**2 + (y[index] - y)**2 + (z[index] - z)**2)  # distances from the current particle to all other points
        # d is a list of distances
        d[index] = 2 * rMax  ## the self-distance is zero; push it outside the histogram range
        (result, bins) = histogram(d, bins=edges)  ### counts per shell ('normed' has been removed from numpy)
#print(bins)
#print('----')
#print(histogram(d, bins=edges, normed=False))
# (array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0,
# 8, 0, 0, 0, 0, 0, 6, 0, 0, 0, 24, 0, 0, 0, 24, 0, 0]), 51 results, shell contained count
# array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. , 1.1, 1.2,
# 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2. , 2.1, 2.2, 2.3, 2.4, 2.5,
# 2.6, 2.7, 2.8, 2.9, 3. , 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8,
# 3.9, 4. , 4.1, 4.2, 4.3, 4.4, 4.5, 4.6, 4.7, 4.8, 4.9, 5. , 5.1])) 52 bins, shell radius
# base on the current particle z, calculate how much sphere volume outside the rectangle box, do adjustment
#adjustlist=meng_calc_outsideVol(bins, z[index], lowz, highz)
print('------------particle#', p , '/',num_interior_particles)
adjustlist=meng_calc_outsideVol2(lowx/11.68/2,
highx/11.68/2,
lowy/11.68/2,
highy/11.68/2,
lowz/11.68/2,
highz/11.68/2,
bins/11.68/2,
x[index]/11.68/2,
y[index]/11.68/2,
z[index]/11.68/2) # version two use the numeric np arry matrix volume calculation method
#print(adjustlist)
#print(result)
#print(len(adjustlist))
#print(1-np.array(adjustlist))
result_adj=result /(1-np.array(adjustlist)) # 1-adjust means the ratio of shell vol inside the cubiod over the total shell vol
#print(result)
g[p,:] = result_adj # store the count for current shell current particle into g row
#g[p,:] = result # no adjsutment
# Average g(r) for all interior particles and compute radii
g_average = zeros(num_increments) # empty array for storing stat for each shell
for i in range(num_increments):
radii[i] = (edges[i] + edges[i+1]) / 2.
rOuter = edges[i + 1]
rInner = edges[i]
        g_average[i] = mean(g[:, i]) / (4.0 / 3.0 * pi * (rOuter**3 - rInner**3)) / numberDensity  # shell density divided by the average density, as described below
###### result(count)/ (4/3 pi dr) - current shell density
######--------------------
###### numberDensity - avg density
return (g_average, radii, interior_indices)
# Number of particles in shell/total number of particles/volume of shell/number density
# shell volume = 4/3*pi(r_outer**3-r_inner**3)
####
|
print('printing correct sequence')
dipanjan = 2
dipanjan = dipanjan - 2
if dipanjan == 0:
    print('dipanjan0')
else:
    dipanjan = 1
    print(dipanjan)
extra = [2, 3, 4]
for x in extra:
    y = x + 1
    print(y)
|
import pandas as pd
df = pd.read_csv('Tech.csv')
print(df)
print(df.columns)
print(df.head())
for col in df.columns:
if col[:2] == 'nu':
df.rename(columns = { col: 'Employees'}, inplace = True)
print(df.head()) |
# Databricks notebook source
# MAGIC %md
# MAGIC #Titanic Survivor Analysis
# COMMAND ----------
# MAGIC %md
# MAGIC Let's read our dataset from s3, and explore provided variables
# COMMAND ----------
from pyspark.sql.functions import split, size
td = spark.read.csv('/mnt/ts/titanic.csv', header=True)
titanic_data = td.select(
td.pclass.cast('int'),
td.survived.cast('int'),
td.name,
td.sex,
td.age.cast('float'),
td.sibsp.cast('int'),
td.parch.cast('int'),
td.ticket,
td.fare.cast('float'),
td.cabin,
td.embarked,
td.boat,
td.body,
td.home_dest
)
titanic_data.registerTempTable('titanic')
titanic_data.cache()
display(titanic_data)
# COMMAND ----------
# MAGIC %md
# MAGIC Display the summary for provided data
# COMMAND ----------
display(titanic_data.describe())
# COMMAND ----------
# MAGIC %md
# MAGIC ##Handle missing values
# MAGIC
# MAGIC Handling missing values is a crucial part of feature engineering when working with data. There are many approaches to this issue, and given the dataset size we should definitely try to avoid removing complete rows or columns. We will try to impute missing values based on the data distribution and prediction.
# MAGIC In this notebook we will go through the steps taken to handle missing values in the Titanic Dataset. The following features with missing values were imputed in the dataset:
# MAGIC * Age
# MAGIC * Embarked
# MAGIC * Fare
# MAGIC The features _ticket, cabin, boat, body and home\_destination_ will unfortunately be removed from the dataset, since they contain many missing fields, and imputing them could introduce false information given the dataset size. So we are making the tough decision to work without these features.
# COMMAND ----------
# MAGIC %md
# MAGIC ####Impute AGE
# MAGIC First column that we will deal with is _age_. Titanic Dataset is a very well known Kaggle competition, and many people have provided very clever and interesting ideas to deal with the dataset. While dealing with missing values, we will use ideas shared by [Megan L. Risdal on Kaggle](https://www.kaggle.io/svf/924638/c05c7b2409e224c760cdfb527a8dcfc4/__results__.html), since this approach is quite interesting.
# MAGIC Since the dataset is small in size, we will do the age imputation in R, using [Mice](https://cran.r-project.org/web/packages/mice/index.html) package. We will impute missing age values without some of the columns that give us less useful information. The code for the R script is available in a file _ageImputation.R_.<br>
# MAGIC After running the code, we can compare the distribution of the age column before and after the imputation, just to make sure that we are not making any bad decisions:
# MAGIC 
# MAGIC
# MAGIC After making sure the distribution stays the same after the imputation phase, we can impute the missing age values (a rough PySpark sketch of a simpler alternative follows below).<br>
# COMMAND ----------
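# The age imputation itself was done in R with the 'mice' package (ageImputation.R, not
# included here). As an illustrative, much simpler stand-in -- an assumption, not the
# notebook's actual method -- one could fill missing ages with the median age per
# (pclass, sex) group in PySpark:
from pyspark.sql import functions as F
age_medians = titanic_data.groupBy('pclass', 'sex').agg(
    F.expr('percentile_approx(age, 0.5)').alias('median_age'))
age_imputed = (titanic_data.join(age_medians, ['pclass', 'sex'], 'left')
               .withColumn('age', F.coalesce('age', 'median_age'))
               .drop('median_age'))

# COMMAND ----------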
# MAGIC %md
# MAGIC ### Impute Embarked
# MAGIC Based on some general ideas on the columns of the Titanic Dataset, prior knowledge and provided information, we can assume that _fare_ and _passenger class_ information can help us retrieve the missing embarkment information. From the dataset, we can see that both passengers that are missing embarkment information paid $80 for the travel and that they belong to the pclass=1.
# MAGIC We can plot the information from which place other travelers with the same pclass and fare values usually embarked. The code for plotting the chart is available in a script _embarkedImputation.R_.
# MAGIC 
# MAGIC <br>As we can see from the chart, other passengers that paid $80 (the median fare) and belong to pclass=1 usually embarked from 'C'. Based on this information we can impute the missing embarkment values with 'C' (a short sketch follows below).
# COMMAND ----------
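# A short sketch of the imputation described above (the actual fix was applied in the
# R preprocessing step, so this is illustrative): fill the missing embarkment values with 'C'.
titanic_embarked_filled = titanic_data.fillna({'embarked': 'C'})

# COMMAND ----------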
# MAGIC %md
# MAGIC ### Impute Fare
# MAGIC The final column with a missing value that we are going to deal with is the _fare_ information. The passenger missing this information belongs to the third class and embarked at 'S'. We can plot the median and distribution of the _fare_ feature for passengers from the same class who departed from the same place. (The R code is available in the script _fareImputation.R_) <br>
# MAGIC  <br>
# MAGIC It seems reasonable, based on the plot, to replace the missing _fare_ value with the median for that class and embarkment, which is $8.05 (a short sketch follows below).
# COMMAND ----------
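# Likewise, a short sketch of the fare imputation described above (the actual fix was
# applied in the R preprocessing step, so this is illustrative): fill the missing fare
# with the $8.05 median for third-class passengers embarked at 'S'.
titanic_fare_filled = titanic_data.fillna({'fare': 8.05})

# COMMAND ----------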
# MAGIC %md
# MAGIC After dealing with the missing data in R, we are reading a final dataset, which contains all the imputed values.
# COMMAND ----------
td = spark.read.csv('/mnt/ts/titanic_final.csv', header=True)
# COMMAND ----------
# MAGIC %md
# MAGIC After reading the dataset, we will retrieve state information from _home\_dest_ column
# COMMAND ----------
titanic_data = td.select(
td.pclass.cast('int'),
td.survived.cast('int'),
td.name,
td.sex,
td.age.cast('float'),
td.sibsp.cast('int'),
td.parch.cast('int'),
td.ticket,
td.fare.cast('float'),
td.cabin,
td.embarked,
td.boat,
td.body,
td.home_dest,
split(split(td.home_dest, ('/'))[size(split(td.home_dest, ('/'))) -1], ',')[
size(split(split(td.home_dest, ('/'))[size(split(td.home_dest, ('/'))) -1], ',')) - 1
].alias('state')
)
# COMMAND ----------
# MAGIC %md
# MAGIC Let's register a temporary table so we can use SQL for creating required reports.
# COMMAND ----------
titanic_data.registerTempTable('titanic')
# COMMAND ----------
# MAGIC %sql
# MAGIC select pclass, count(1) as number_of_survivors
# MAGIC from titanic
# MAGIC where survived=1
# MAGIC group by pclass
# COMMAND ----------
# MAGIC %sql
# MAGIC select sex, count(1) as number_of_survivors
# MAGIC from titanic
# MAGIC where survived=1
# MAGIC group by sex
# COMMAND ----------
# MAGIC %sql
# MAGIC select age, count(1) as number_of_survivors
# MAGIC from titanic
# MAGIC where survived=1
# MAGIC group by age
# MAGIC order by age asc
# COMMAND ----------
# MAGIC %sql
# MAGIC select pclass, age, sex, count(*) as number_of_survivors
# MAGIC from titanic
# MAGIC where survived=1
# MAGIC group by pclass, age, sex
# MAGIC order by number_of_survivors desc
# COMMAND ----------
# MAGIC %sql
# MAGIC select state, count(*) as number_of_survivors
# MAGIC from titanic
# MAGIC where survived=1
# MAGIC group by state
# MAGIC order by number_of_survivors desc
# COMMAND ----------
# MAGIC %md
# MAGIC ### Feature Engineering
# MAGIC Let's create some more features for our predictive model. We can see that each passenger has a title in their name, so let's retrieve the titles for all our passengers and count the occurrences of each one.
# COMMAND ----------
titanic_data = titanic_data.withColumn(
'title',
split(split(titanic_data.name, ',')[1], ' ')[1]
)
display(titanic_data)
# COMMAND ----------
display(titanic_data.groupBy('title').count())
# COMMAND ----------
# MAGIC %md
# MAGIC We can see that some titles are more frequent than others, so we will create a categorical feature with the following possible values:
# MAGIC * Mr.
# MAGIC * Mrs.
# MAGIC * Miss.
# MAGIC * Master.
# MAGIC * Other (for less frequent titles)
# COMMAND ----------
from pyspark.sql import functions as F
titanic = titanic_data.withColumn('ptitle', F.when(titanic_data.title=='Mr.', 'Mr.').when(titanic_data.title=='Mrs.', 'Mrs.').when(titanic_data.title=='Miss.', 'Miss.').when(titanic_data.title=='Master.', 'Master.').otherwise('Other'))
titanic = titanic.drop('title').withColumnRenamed('ptitle', 'title')
# COMMAND ----------
display(titanic)
# COMMAND ----------
# MAGIC %md
# MAGIC We will stop with feature engineering at this point. There are many great ideas for deriving more features at the official [Kaggle page of the contest](https://www.kaggle.com/c/titanic), and I definitely recommend taking a peek there.
# COMMAND ----------
# MAGIC %md
# MAGIC # Predictive Modeling
# MAGIC Let's start building our predictive models. <br><br><br>
# MAGIC First step is to encode our categorical features and make feature vectors suitable for use in Spark ML pipelines and models.
# COMMAND ----------
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.tree import DecisionTree
from pyspark.ml.feature import OneHotEncoder, StringIndexer
# display(encoded)
for x in ["pclass", "embarked", "title", "sex"]:
indexer = StringIndexer(inputCol=x, outputCol=x+"Index").fit(titanic)
indexed = indexer.transform(titanic)
encoder = OneHotEncoder(dropLast=False, inputCol=x+"Index", outputCol=x+"Feature")
titanic = encoder.transform(indexed)
display(titanic)
# COMMAND ----------
# MAGIC %md
# MAGIC We won't use all the available features in our models; we will focus on the most descriptive ones:
# MAGIC * age
# MAGIC * sibsp
# MAGIC * fare
# MAGIC * pclass
# MAGIC * embarked
# MAGIC * title
# MAGIC * sex
# COMMAND ----------
from pyspark.ml.feature import VectorAssembler
assembler = VectorAssembler(
inputCols=["age", "sibsp", "parch", "fare", "pclassFeature", "embarkedFeature", "titleFeature", "sexFeature"],
outputCol="features")
transformed = assembler.transform(titanic)
display(transformed)
# COMMAND ----------
# MAGIC %md
# MAGIC ##Random Forest
# COMMAND ----------
# MAGIC %md
# MAGIC The first model that we will test is Random Forest. The full documentation for the PySpark ML implementation used here is available in the official [PySpark documentation](https://spark.apache.org/docs/latest/ml-classification-regression.html#random-forest-classifier).<br>
# MAGIC We will split our dataset into training and test datasets. <br>
# MAGIC The next step is building a Pipeline that indexes all the labels in the label column and then trains the Random Forest model. <br>
# MAGIC After building the model, we'll test it on our test dataset.
# COMMAND ----------
from pyspark.ml import Pipeline
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.feature import IndexToString, StringIndexer, VectorIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator, BinaryClassificationEvaluator
# Let's index our labels, and fit on whole dataset to include all labels in index.
labelIndexer = StringIndexer(inputCol="survived", outputCol="label").fit(transformed)
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = transformed.randomSplit([0.7, 0.3])
# Train a RandomForest model.
rf = RandomForestClassifier(labelCol="label", featuresCol="features", numTrees=10)
# Chain indexers and forest in a Pipeline
pipeline = Pipeline(stages=[labelIndexer, rf])
# Run labelIndexer and train the model.
model = pipeline.fit(trainingData)
# Let's Make predictions on our test data.
predictions = model.transform(testData)
display(predictions)
# COMMAND ----------
# MAGIC %md
# MAGIC For model evaluation we will use PySpark's BinaryClassificationEvaluator and the Area Under ROC Curve measure. The Area Under the ROC Curve (AUC) measures how well the model distinguishes between the two classes.
# COMMAND ----------
# Let's evaluate our model using BinaryClassificationEvaluator. We will use Area Under ROC Curve measure to evaluate our model.
evaluator = BinaryClassificationEvaluator(
labelCol="label", rawPredictionCol="rawPrediction", metricName="areaUnderROC")
areaUnderROC = evaluator.evaluate(predictions)
print("Area under ROC = %g" % (areaUnderROC))
# COMMAND ----------
# MAGIC %md
# MAGIC We can see that our model performs well even with a small number of trees (n=10).<br><br>
# MAGIC Let's visualize our results:
# COMMAND ----------
results = predictions.select(['probability', 'label'])
results_collect = results.collect()
results_list = [(float(i[0][0]), 1.0-float(i[1])) for i in results_collect]
# COMMAND ----------
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
fpr = dict()
tpr = dict()
roc_auc = dict()
y_test = [i[1] for i in results_list]
y_score = [i[0] for i in results_list]
fpr, tpr, _ = roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
figure = plt.figure()
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
display(figure)
# COMMAND ----------
# MAGIC %md
# MAGIC Based on this plot, we can say that our results are not really bad, since our ROC curve (blue line) is far from the diagonal dashed line. <br><br>
# MAGIC Let's see which features are the most important for our Random Forest model (we will use this information later on, in Feature Selection phase). <br>RandomForest.featureImportances computes, given a tree ensemble model, the importance of each feature.
# MAGIC
# MAGIC This generalizes the idea of "Gini" importance to other losses, following the explanation of Gini importance from "Random Forests" documentation by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn. The full explanation of the approach is documented in official [Spark documentation](https://spark.apache.org/docs/latest/api/python/pyspark.ml.html#pyspark.ml.classification.DecisionTreeClassificationModel.featureImportances).
# COMMAND ----------
model.stages[1].featureImportances
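# Hedged sketch (not in the original notebook): map each slot of the assembled feature
# vector back to a column name using the ML attribute metadata written by VectorAssembler,
# so the importances above are easier to read. Assumes the 'features' metadata is
# populated, which it normally is after VectorAssembler.
attrs = transformed.schema['features'].metadata['ml_attr']['attrs']
slot_names = sorted((a['idx'], a['name']) for group in attrs.values() for a in group)
importances = model.stages[1].featureImportances
for idx, name in slot_names:
    print(name, importances[idx])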
# COMMAND ----------
# MAGIC %md
# MAGIC ## Decision Trees
# MAGIC Next model that we are going to use for our classification task is [Decision Tree](https://spark.apache.org/docs/latest/ml-classification-regression.html#decision-trees).
# MAGIC We will take the similar steps as in Random Forest implementation, and we will use the same datasets for training and testing.
# COMMAND ----------
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# Initialize a Decision Tree Classifier
dt = DecisionTreeClassifier(labelCol="label", featuresCol="features")
# Make a Pipeline from Label Indexer and Decision Tree Model
pipeline = Pipeline(stages=[labelIndexer, dt])
# Run Label Indexer and train the model
model = pipeline.fit(trainingData)
# Make predictions.
predictions = model.transform(testData)
# COMMAND ----------
evaluator = BinaryClassificationEvaluator(
labelCol="label", rawPredictionCol="rawPrediction", metricName="areaUnderROC")
areaUnderROC = evaluator.evaluate(predictions)
print("Area under ROC = %g" % (areaUnderROC))
# COMMAND ----------
# MAGIC %md
# MAGIC Our area under the ROC curve for Decision Trees is not as good as with the Random Forest algorithm...
# COMMAND ----------
results1 = predictions.select(['probability', 'label'])
results_collect1 = results1.collect()
results_list1 = [(float(i[0][0]), 1.0-float(i[1])) for i in results_collect1]
# COMMAND ----------
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
fpr = dict()
tpr = dict()
roc_auc = dict()
y_test = [i[1] for i in results_list1]
y_score = [i[0] for i in results_list1]
fpr, tpr, _ = roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
figure = plt.figure()
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
display(figure)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Naive Bayes
# MAGIC Next model that we are going to test is [Naive Bayes](https://spark.apache.org/docs/latest/ml-classification-regression.html#naive-bayes), probabilistic classifiers based on applying Bayes’ theorem with strong (naive) independence assumptions between the features.
# MAGIC
# MAGIC We are going to use multinomial classification, although we are trying to predict one of the two possible outcomes.
# COMMAND ----------
from pyspark.ml.classification import NaiveBayes
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# create the trainer and set its parameters
nb = NaiveBayes(smoothing=1.0, modelType="multinomial")
pipeline = Pipeline(stages=[labelIndexer, nb])
# Run Label Indexer and train the model
model = pipeline.fit(trainingData)
# Make predictions.
predictions = model.transform(testData)
# COMMAND ----------
evaluator = BinaryClassificationEvaluator(
labelCol="survived", rawPredictionCol="rawPrediction", metricName="areaUnderROC")
areaUnderROC = evaluator.evaluate(predictions)
print("Area under ROC = %g" % (areaUnderROC))
# COMMAND ----------
# MAGIC %md
# MAGIC Naive Bayes is much less precise than Random Forest and Decision Trees.
# COMMAND ----------
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
# Recompute the score/label pairs from the Naive Bayes predictions
# (otherwise we would be plotting the Random Forest results again)
results = predictions.select(['probability', 'label'])
results_collect = results.collect()
results_list = [(float(i[0][0]), 1.0 - float(i[1])) for i in results_collect]
fpr = dict()
tpr = dict()
roc_auc = dict()
y_test = [i[1] for i in results_list]
y_score = [i[0] for i in results_list]
fpr, tpr, _ = roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
figure = plt.figure()
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
display(figure)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Logistic Regression
# COMMAND ----------
# MAGIC %md
# MAGIC The final algorithm that we are going to try is [Logistic Regression](https://spark.apache.org/docs/latest/ml-classification-regression.html#logistic-regression). Logistic Regression is one of the most well-known algorithms that is used to predict one of the two possible outcomes.
# COMMAND ----------
from pyspark.ml.classification import LogisticRegression
lr = LogisticRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8)
# Fit the model
lrModel = lr.fit(trainingData.select(trainingData.survived.alias('label'), trainingData.features))
# Print the coefficients and intercept for logistic regression
print("Coefficients: " + str(lrModel.coefficients))
print("Intercept: " + str(lrModel.intercept))
# COMMAND ----------
predictions = lrModel.transform(testData.select(testData.survived, testData.features))
display(predictions)
# COMMAND ----------
evaluator = BinaryClassificationEvaluator(
labelCol="survived", rawPredictionCol="rawPrediction", metricName="areaUnderROC")
areaUnderROC = evaluator.evaluate(predictions)
print("Area under ROC = %g" % (areaUnderROC))
# COMMAND ----------
# MAGIC %md
# MAGIC Logistic Regression also shows less powerful results than Random Forest.
# COMMAND ----------
display(predictions)
# COMMAND ----------
from pyspark.ml.classification import LogisticRegression
# Extract the summary from the returned LogisticRegressionModel instance trained
# in the earlier example
trainingSummary = lrModel.summary
# Obtain the objective per iteration
objectiveHistory = trainingSummary.objectiveHistory
print("objectiveHistory:")
for objective in objectiveHistory:
print(objective)
# COMMAND ----------
from pyspark.mllib.evaluation import BinaryClassificationMetrics as metric
results = predictions.select(['probability', 'survived'])
## prepare score-label set
results_collect = results.collect()
results_list = [(float(i[0][0]), 1.0-float(i[1])) for i in results_collect]
scoreAndLabels = sc.parallelize(results_list)
metrics = metric(scoreAndLabels)
print("The ROC score for Logistic Regression is: ", metrics.areaUnderROC)
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
fpr = dict()
tpr = dict()
roc_auc = dict()
y_test = [i[1] for i in results_list]
y_score = [i[0] for i in results_list]
fpr, tpr, _ = roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
figure = plt.figure()
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
display(figure)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Feature Selection
# COMMAND ----------
# MAGIC %md
# MAGIC After comparing the results of the four tested models, we can see that we are getting quite satisfying results with Random Forest, even for a small number of trees. Random Forest performs much better than the other tested algorithms. With that in mind, we will try to select only the most relevant features and lower the dimensionality of the problem. <br>
# MAGIC After evaluating the results of Random Forest, we have identified the importance of the used features:
# MAGIC SparseVector(17, {0: 0.0589, 1: 0.0454, 2: 0.028, 3: 0.0834, 4: 0.1145, 5: 0.0469, 6: 0.0257, 7: 0.019, 8: 0.0159, 9: 0.003, 10: 0.0059, 11: 0.0009, 12: 0.0524, 13: 0.0211, 14: 0.0021, 15: 0.2103, 16: 0.2664}).<br>
# MAGIC Here we are going to lower the dimensionality of the problem based on feature importance, using VectorSlicer feature selection. Let's test how our models behave with fewer features and try to build a more robust model from a smaller feature set.
# COMMAND ----------
from pyspark.ml.feature import VectorSlicer
from pyspark.ml.linalg import Vectors
from pyspark.sql.types import Row
slicer = VectorSlicer(inputCol="features", outputCol="selectedFeatures").setIndices([3, 4, 15, 16])
# We are using the same datasets as for the other algorithms
output = slicer.transform(transformed)
otestData = slicer.transform(testData)
otrainData = slicer.transform(trainingData)
# Let's make our model
rf = RandomForestClassifier(labelCol="label", featuresCol="selectedFeatures", numTrees=10)
# Chain indexers and forest in a Pipeline
pipeline = Pipeline(stages=[labelIndexer, rf])
# Train model. This also runs the indexers.
model = pipeline.fit(otrainData)
# Make predictions.
predictions = model.transform(otestData)
# Select example rows to display.
# display(predictions.select("prediction", "label", "features"))
# # Select (prediction, true label) and compute test error
evaluator = BinaryClassificationEvaluator(
labelCol="label", rawPredictionCol="rawPrediction", metricName="areaUnderROC")
areaUnderROC = evaluator.evaluate(predictions)
print("Area under ROC = %g" % (areaUnderROC))
# COMMAND ----------
# MAGIC %md We have selected 4 of the most predictive features, and the results are better than the results of the other algorithms. <br>
# MAGIC Let's see whether the performance grows significantly if we add one more feature.
# COMMAND ----------
from pyspark.ml.feature import VectorSlicer
from pyspark.ml.linalg import Vectors
from pyspark.sql.types import Row
# slicer = VectorSlicer(inputCol="features", outputCol="selectedFeatures").setIndices([3, 4, 15, 16])
slicer = VectorSlicer(inputCol="features", outputCol="selectedFeatures").setIndices([0, 3, 4, 15, 16])
output = slicer.transform(transformed)
otestData = slicer.transform(testData)
otrainData = slicer.transform(trainingData)
# display(output)
# train, test = output.randomSplit([0.7, 0.3])
# display(train)
rf = RandomForestClassifier(labelCol="label", featuresCol="selectedFeatures", numTrees=10)
# Convert indexed labels back to original labels.
# labelConverter = IndexToString(inputCol="prediction", outputCol="predictedLabel",
# labels=labelIndexer.labels)
# Chain indexers and forest in a Pipeline
pipeline = Pipeline(stages=[labelIndexer, rf])
# Train model. This also runs the indexers.
model = pipeline.fit(otrainData)
# Make predictions.
predictions = model.transform(otestData)
# Select example rows to display.
# display(predictions.select("prediction", "label", "features"))
# # Select (prediction, true label) and compute test error
evaluator = BinaryClassificationEvaluator(
labelCol="label", rawPredictionCol="rawPrediction", metricName="areaUnderROC")
areaUnderROC = evaluator.evaluate(predictions)
print("Area under ROC = %g" % (areaUnderROC))
# COMMAND ----------
# MAGIC %md
# MAGIC Our Random Forest results with the dimensionality reduced to 5 show almost the same performance as the model trained on all 17 features (the Area under ROC with 17 features was 0.869552, and we managed to achieve an Area under ROC of 0.86754 while using only the 5 most predictive features).
# MAGIC ### Final Conclusion
# MAGIC In this notebook we have analyzed the Titanic dataset and modeled the factors related to a passenger surviving the disaster. <br>
# MAGIC In the exploratory phase we used R to impute the missing values and describe our variables. <br><br>
# MAGIC For the purpose of predictive modeling, we tried various classification approaches available in the Spark 2.1 ML library. The Random Forest algorithm showed the best performance, so we decided to keep it as our predictive model. We use the Area under the ROC curve as our performance metric, which reflects the power of the model to distinguish between the two groups.<br>
# MAGIC After model exploration and selection, we addressed model robustness. We managed to represent the predictive model with 5 dimensions instead of the 17 we started with, while keeping the model performance. <br>
# MAGIC The deployed model is scalable and robust, and it can easily work with significantly larger datasets (although the dataset for this particular problem cannot grow significantly).<br><br>
# MAGIC Some possible improvements for the model might include creating more features from the original dataset, and trying to impute the other missing values or retrieve relevant information from these fields.
|
import math
# ask the user for the values of A, B and C
a = float(input("Informe o valor de A: "))
b = float(input("Informe o valor de B: "))
c = float(input("Informe o valor de C: "))
# compute the discriminant (delta)
delta = (b * b) - 4 * a * c
# check the conditions with if/elif
if delta > 0.0:
    # compute the two roots x1 and x2
x1 = (-b + math.sqrt(delta)) / (2 * a)
x2 = (-b - math.sqrt(delta)) / (2 * a)
print("Para a equação {}x² + {}x + {} = 0, obtivemos os seguintes valores: x1 = {} e x2 = {}".format(a,b,c,x1,x2))
elif delta == 0:
    # compute the single (repeated) root x
x = (-b + math.sqrt(delta)) / (2 * a)
print("Para a equação {}x² + {}x + {} = 0, obtivemos o seguinte valor: x = {}".format(a,b,c,x))
else:
    # show the message (there are no real roots)
print("Para a equação {}x² + {}x + {} = 0, não existem valores reais para x".format(a, b, c)) |
import json
# Python module that generates data in JSON format
# create a dictionary
contatos = {
"Clark Kent":
{"Celular":"123456",
"Email":"[email protected]"},
"Bruce Wayne":
{"Celular":"654321",
"Email":"[email protected]"}
}
# convert the dictionary to a JSON-formatted string
final = json.dumps(contatos, indent=4)
"""# show the converted string
print(final)"""
# WRITING A JSON FILE
# create the output file
arquivo = open("C:\\Users\\carol\\Documents\\Documentos\\tiworkspace\\python-fundamentos\\manipulaçao-arquivos\\agenda.json", "w")
# write the JSON into the file
arquivo.write(final)
# close the file
arquivo.close() |
# values out of order
valores = [1, 7, 7, 19, 3, 2, 11, 15, 6, 1, 5]
# display the list
print("A lista foi criada assim: {}".format(valores))
# count occurrences of an element
contagem = valores.count(7)
print("Nessa lista o número 7 aparece {} vezes".format(contagem))
# reverse the list
valores.reverse()
print("A lista agora está invertida: {}".format(valores))
# sort the list
valores.sort()
print("A lista agora está ordenada: {}".format(valores))
# length of the list
tamanho = len(valores)
print("A lista tem {} elementos".format(tamanho))
# sum of the elements
soma = sum(valores)
print("A soma dos elementos é {}".format(soma)) |
# use the open function to create a file object
arquivo = open("C:\\Users\\carol\\Documents\\Documentos\\tiworkspace\\python-fundamentos\\manipulaçao-arquivos\\arquivo_texto.txt")
# check the type of the file object
print(type(arquivo))
# read the content of the file object
#print(arquivo)
print(arquivo.readline())
# display one line at a time, using a for loop and the readlines() method
for linha in arquivo.readlines():
    print(linha)
# use the open function to create the file object again (the first object was fully consumed)
arquivo = open("C:\\Users\\carol\\Documents\\Documentos\\tiworkspace\\python-fundamentos\\manipulaçao-arquivos\\arquivo_texto.txt")
# load the file content into a list
linhas_do_arquivo = arquivo.readlines()
# confirm the type of the linhas_do_arquivo object
print("Ei! Eu consegui transformar meu arquivo em uma {} ".format(type(linhas_do_arquivo)))
# put the list in alphabetical order
linhas_do_arquivo.sort()
# display our list, now in alphabetical order
print(linhas_do_arquivo)
arquivo.close() |
def make_division_by(n):
    """This closure returns a function that returns the division
    of an x number by n
    """
    def division(x):
        return x / n
    return division
def run():
division_by_3 = make_division_by(3)
print(division_by_3(18)) # The expected output is 6
division_by_5 = make_division_by(5)
print(division_by_5(100)) # The expected output is 20
division_by_18 = make_division_by(18)
print(division_by_18(54)) # The expected output is 3
if __name__ == '__main__':
import unittest
class ClosureSuite(unittest.TestCase):
        def test_closure_make_division_by(self):
            division_by_4 = make_division_by(4)
            self.assertEqual(division_by_4(20), 5)
run()
|
# Functions are something we can use to
# give a name to a small piece of code
# that we can then reuse. If you have ever
# copy-pasted some code, you should probably
# have used a function instead.
# Exercise A - Can you find the places that have
# been copy-pasted? Write them as a function
# instead (a possible sketch follows the hint below).
# Hint:
# Here we have made a function that
# prints something when it is called.
def hej():
    print("Hej med dig")
# here we call the function
hej()
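# A possible refactor sketch for Exercise A (added for illustration, not part of
# the original exercise): the three identical drawing loops below could be
# replaced by calling this helper three times.
def draw_octagon(t, side=40):
    # draw one octagon with the given turtle
    for i in range(8):
        t.forward(side)
        t.left(45)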
import turtle
t = turtle.Turtle()
t.penup()
t.goto(-250, 0)
t.pendown()
for i in range(8):
t.forward(40)
t.left(45)
t.penup()
t.right(45)
t.forward(100)
t.pendown()
for i in range(8):
t.forward(40)
t.left(45)
t.penup()
t.left(90)
t.forward(200)
t.pendown()
for i in range(8):
t.forward(40)
t.left(45)
turtle.done() |
"""Instances generation.
An instance is a tuple of (pid, t_obs, t_end, t_evt, x), where
* pid : str - a patient ID
* t_obs : np.datetime64 - the datetime of the instance observation
* t_end : np.datetime64 - the datetime of the final obs. in the patient record
* t_evt : np.datetime64 or NaT - the date of the event or Not a Time
* x : np.ndarray[np.float, ndim=1] - the observation data
Instances are generated in the following steps:
+ Downsample a dataframe of patient curves by observation date
+ Fill in missing channel values
+ Annotate each observation with t_end and t_evt
Instance labels are generated by choosing a horizon datetime between t_obs and
t_end, and then recording if the horizon_time occurs before or after t_evt
(before is a False, after is a True) - in other words, given an arbitrary
datetime after the observation and before the end of the record, has the event
occurred by that time?
Instances are saved with these datetimes rather than the actual horizon and
label so that the method of choosing a horizon (and hence generating a label)
may be a model hyperparameter.
"""
import pandas as pd
import numpy as np
__all__ = [
'EVENT',
'ONE_YEAR',
'random_expand_horizons',
'channel_mask',
'collate',
'gen_instances',
'downsample',
'fill_channels',
]
EVENT = ('Event', 'event')
# The value of pd.Timedelta(1, 'Y').to_timedelta64()
ONE_YEAR = np.timedelta64(31556952000000000, 'ns')
def random_expand_horizons(t_obs, t_end, t_evt, X):
"""For each instance represented in X, we calculate a horizon time
randomly sampled from the instance observation date to the end of the
patient's record and label the instance with whether or not the event has
occurred (0 if not, 1 if so).
Arguments
---------
t_obs : np.ndarray[np.datetime64, ndim=1]
t_end : np.ndarray[np.datetime64, ndim=1]
t_evt : np.ndarray[np.datetime64, ndim=1]
X : np.ndarray[np.float64, ndim=2]
Returns
-------
X, y : np.ndarray, np.ndarray
X is an ndarray of shape (n instances, n features), where the last
feature is the horizon time expressed as a float64 denoting a number of
years between the observation date and the end of the patient record
y is an ndarray of shape (n instances, 1) of flags 0 or 1
indicating whether or not the event in question has occurred by the
time of the horizon time
"""
n = t_obs.shape[0]
h_spans = np.random.random_sample(n) * (t_end - t_obs)
h_dates = h_spans + t_obs
horizons = h_spans / ONE_YEAR
horizons = horizons.reshape(-1, 1)
X = np.hstack((X, horizons))
y = h_dates > t_evt
y = y.astype(np.float64).reshape(-1, 1)
return X, y
def channel_mask(channels, dropcols):
"""Produces a mask of len(channels) that will filter out dropcols"""
return np.array([ch not in dropcols for (_, ch) in channels])
def collate(instances):
"""Converts a list of instances into a tuple of ndarrays.
Arguments
---------
instances : list of instances
Cf. gen_instances
Returns
-------
(t_obs, t_end, t_evt, X) : (ndarray, ndarray, ndarray, ndarray)
The values of t_obs, t_end, t_evt, and x per instance stacked together
into a single numpy ndarray. Note that the first three are one-dim
arrays of a length equal to the first dimension of X.
"""
t_obs, t_end, t_evt, Xs = [], [], [], []
# Discard the ID; it isn't needed
for (_, obs, end, evt, x) in instances:
t_obs.append(obs)
t_end.append(end)
t_evt.append(evt)
Xs.append(x)
return np.array(t_obs), np.array(t_end), np.array(t_evt), np.array(Xs)
def gen_instances(curves, channels, fillvec, *,
rate=None, density=None, period=None, id_dates=None):
"""Runs the full instance pipeline for generating training instances.
Arguments
---------
For `curves` and each of `rate`, `density`, `period`, and `id_dates`
cf. `downsample`.
For `channels` and `fillvec` cf. `fill_channels`.
Returns
-------
A list of instances. Instances are tuples of (pid, t_obs, t_end, t_evt, x)
pid : str
A patient ID
t_obs : np.datetime64
The datetime of the instance observation
t_end : np.datetime64
The datetime of the final obs. in the patient record
t_evt : np.datetime64 or np.datetime64('NaT')
The date of the event, if any
x : np.ndarray[np.float, ndim=1]
The observation data
Notes
-----
Some of the routines called by this function delegate work to numpy or
pandas threads that are not hindered by the GIL; therefore, the CPU time of
this function is approximately 7-8x its wall clock time (according to some
    limited testing, for example as measured with IPython's %time magic).
"""
samples = downsample(curves, rate=rate, density=density, period=period, id_dates=id_dates)
# Calculate t_end, t_evt
dates = curves.index.get_level_values('date')
t_end = dates[-1].to_datetime64()
try:
evt_dates = dates[curves[EVENT] == 1]
t_evt = evt_dates[0].to_datetime64()
except IndexError:
t_evt = np.datetime64('NaT')
# Calculate x
samples = samples.sort_index(axis=1)
cols = samples.columns.values
vals = samples.values
X_i = samples.index.values
X = fill_channels(cols, vals, channels, fillvec)
instances = [(i, t_obs.to_datetime64(), t_end, t_evt, x)
for (i, t_obs), x in zip(X_i, X)]
return instances
def downsample(curves, *, rate=None, density=None, period=None, id_dates=None):
"""Generate cross sections from a dataframe of curves.
Arguments
---------
curves : pandas.DataFrame
A dataframe of curves. The columns are a multi-index with levels (mode,
channel). The index is a multi-index with levels ('id', 'date'). The
values are float64s giving the value of each channel's curve at the
given date for this patient.
Keyword Only Arguments
----------------------
rate : [float, pandas.Timedelta]
If a rate is provided, then it must be a two-element iterable, the
first of which is the rate, and the second determines the frequency. So
a rate of (50, pd.Timedelta(1, 'Y')) would indicate that you want 50
samples per year for which the patient record exists.
density : float
The random sampling density of cross sections as a fraction of rows in
the source dataframe. Sampling is guaranteed to produce at least one
sample, regardless of how small density is. Cannot be used together
with period.
period : str or pandas.DateOffset
A pandas DateOffset object giving the regular sampling period of the
cross sections. Setting more frequently than daily produces undefined
results. Cannot be used together with density.
id_dates: dict of str,pandas.MultiIndex
A dict containing patient id's as keys and multi-index objects as
elements. The multi-index objects contain the desired patient id's and
dates in a format to be located in curve dataframes. Cannot be used
together with rate, density, or period
Returns
-------
A pandas.DataFrame, the rows of which are a subset of `curves`
Raises
------
ValueError if more than one of `rate`, `density`, `period`, or
`id_dates` is not `None`.
Notes
-----
The time this function takes depends heavily upon the input arguments. As
an example, timing performance across 1000 random patient curves with
Ipython's %timeit magic gives:
%timeit [downsample(x, density=0.05) for x in curves]
845 ms ± 17 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
rate = (50, pd.Timedelta(1, 'Y'))
    %timeit [downsample(x, rate=rate) for x in curves]
2.46 s ± 294 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
%timeit [downsample(x, density=1) for x in curves]
5.92 s ± 531 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
%timeit [downsample(x, period='MS') for x in curves]
24.7 s ± 1.62 s per loop (mean ± std. dev. of 7 runs, 1 loop each)
The expected case is for smaller values of density (<0.2) or lower rates
when generating training instances. The ability to specify downsampling by
period exists to support generating prediction instances, which can be done
on a per-patient or subsample basis.
"""
if sum((rate is not None, density is not None,
period is not None, id_dates is not None)) != 1:
raise ValueError('Must specify exactly one of rate, density, period, '
'or id_dates')
if id_dates:
patient_id = curves.index.get_level_values('id')[0]
dates = id_dates[patient_id]
samples = curves.loc[dates]
elif period:
samples = curves.reset_index('id')
samples = samples.resample(period).first()
samples = samples.set_index('id', append=True)
samples = samples.reorder_levels(['id', 'date'])
else:
if density:
n_samples = density * len(curves)
else:
dates = curves.index.get_level_values('date')
interval = dates[-1] - dates[0]
n_samples = rate[0] * (interval / rate[1])
n_samples = max(1, round(n_samples))
samples = curves.sample(n=n_samples)
return samples
def fill_channels(cols, vals, channels, fillvec):
"""Fast filling in of missing feature values.
Arguments
---------
cols : numpy.ndarray
A 1d ndarray of tuples (as if from pd.MultiIndex.to_numpy())
representing the channels available in vals
vals : numpy.ndarray
A 2d ndarray with observations as rows and columns corresponding to the
channels outlined in cols
channels : numpy.ndarray
A 1d ndarray of tuples (as if from pd.MultiIndex.to_numpy())
representing the full set of channel names
fillvec : numpy.ndarray
A 1d ndarray of floats to use if a channel is not present in `X0`;
corresponds to the names in `channels`.
Returns
-------
An ndarray of shape (len(vals), len(channels)) that has missing channels
filled in from `fillvec`.
"""
_, copy_idx, _ = np.intersect1d(channels, cols,
assume_unique=True,
return_indices=True)
X = np.tile(fillvec, (vals.shape[0], 1))
X[:, copy_idx] = vals
return X
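if __name__ == '__main__':
    # A minimal, self-contained sketch (added for illustration, not part of the
    # original module) of the labeling scheme described in the module docstring:
    # a horizon is sampled between t_obs and t_end, and the label is 1 only if
    # that horizon date falls after t_evt. The toy dates and features below are
    # made up for the example.
    t_obs = np.array(['2010-01-01', '2011-06-01'], dtype='datetime64[ns]')
    t_end = np.array(['2015-01-01', '2012-06-01'], dtype='datetime64[ns]')
    t_evt = np.array(['2012-01-01', 'NaT'], dtype='datetime64[ns]')  # second record has no event
    X0 = np.arange(6, dtype=np.float64).reshape(2, 3)                # two instances, three features
    X, y = random_expand_horizons(t_obs, t_end, t_evt, X0)
    # X gains one extra column: the sampled horizon expressed in years
    print(X.shape, y.ravel())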
|
"""
From the code below create 3 students and add them to a list.
sort the list based on the students age.
Print out the Students names from the list (use a list comprehension for this)
"""
class Student:
def __init__(self, name, age):
self.name = name
self.age = age |
from collections import deque
def search(name):
    search_queue=deque()  # a queue
search_queue+=graph[name]
searched=[]
    while search_queue:  # while the queue is not empty
        person=search_queue.popleft()  # pop the next person off the queue (removing them from it)
if person not in searched:
if person_is_seller(person):
print(person+" is a mango seller!")
return True
else:
search_queue+=graph[person]
searched.append(person)
return False
def person_is_seller(name):
return name[-1]=="m"
if __name__=='__main__':
    graph = {}  # create a dictionary (hash table) to store the graph
graph["you"] = ["alice", "bob", "claire"]
graph["bob"] = ["anuj", "peggy"]
graph["alice"] = ["peggy"]
graph["claire"] = ["thom", "jonny" ]
graph["anuj"] = []
graph["peggy"] = []
graph["thom"] = []
graph["jonny"] = []
search("you")
|
def quicksort(array):
if len(array)<2:
return array
else:
pivot=array[0]
less=[i for i in array[1:] if i<=pivot]
greater=[i for i in array[1:] if i>pivot]
return quicksort(less)+[pivot]+quicksort(greater)
def mergeSort(array):
    # merge sort
if len(array)<2:
return array
else:
        mid=int(len(array)/2)  # split the list into two smaller halves
        # sort the left and right halves separately, each returning a sorted list
left=mergeSort(array[:mid])
right=mergeSort(array[mid:])
return merge(left,right)
def merge(left,right):  # merge two already-sorted lists, producing a new list
result=[]
i=0
j=0
while i <len(left) and j <len(right):
if left[i]<=right[j]:
result.append(left[i])
i+=1
else:
result.append(right[j])
j+=1
result+=left[i:]
result+=right[j:]
return result
# test run
print(quicksort([10,5,2,3]));
print(mergeSort([10,5,2,3]))
|
from assignment1 import assignment1
from assignment2 import assignment2
from assignment3 import assignment3
def main():
while True:
print("""\n\nEnter the assignment number that you want to run:-
1) Assignment 1
2) Assignment 2
3) Assignment 3
4) Assignment 4
5) Assignment 5
6) Assignment 6
0) Exit""")
option = input()
option = int(option)
if option == 1:
print("Assignment 1:-")
assignment1.main()
elif option == 2:
print("Assignment 2:-")
assignment2.main()
elif option == 3:
print("Assignment 3:-")
assignment3.main()
elif option == 4:
print("Assignment 4:-")
print("Run the assignment4.py for server and the client.py file for client. Run the send_chat_log.py to "
"send chat logs to email IDs")
elif option == 5:
print("Assignment 5:-")
print("Run the assignment5.py for flask server. Open index.html for UI.")
elif option == 6:
print("Assignment 6:-")
print("Run the assignment6.py file for graph.")
elif option == 0:
print("Exiting.")
break
else:
print("Wrong option, please try again")
del option
main()
|
n = 20
for i in range(1, n):
for j in range(1, n):
if j % 2 == 0:
print('/', i, '*', j, '=', i * j)
else:
print(i, '*', j, '=', i * j, end=' ')
if j == 19:
print() |
import matplotlib.pyplot as plt
import numpy as np
# the logistic map: returns a * x(t) * (1 - x(t))
def func(x,t,a):
return a * x[t] * (1 - x[t])
def set_a_range():
    # a_range = np.arange(3,4,0.001)  <- accumulates floating-point error, so build integers and divide by 1000 instead
a_range = np.arange(3000,4000,1)
return np.array(list(map(lambda x: x/1000, a_range)))
# 2D array of len(a_range) x len(a_range) (1000 * 1000 = 1000000)
def make_2d_array(range):
return np.array([[0 for i in range] for j in range], dtype = "float")
a_range = set_a_range()
_2d_array = make_2d_array(a_range)
for i,a in enumerate(a_range):
x = [0.2]
for t in range(1500):
x.append(func(x,t,a))
        # discard the transient range t < 500
if t >= 500:
_2d_array[i][t-500] = x[t]
# plot the graph
for i,a in enumerate(a_range):
y = [0] * len(a_range)
for j,_ in enumerate(a_range):
y[j] = _2d_array[j][i]
plt.scatter(a_range,y,s=5)
plt.xlabel("a")
plt.ylabel("x(t)")
plt.xlim(3.000,4.000)
plt.show()
|
import pygame
import random
import pygame_menu
pygame.init()
bg = pygame.image.load("snake.png")
FRAME_COLOR = (0, 25, 51)
APPLE_COLOR = (204, 0, 102)
RECT_COLOR = (0, 51, 102)
LIGHT_RECT_COLOR = (31, 63, 118)
HEADER_COLOR = (0, 21, 44)
SNAKE_COLOR = (0, 128, 255)
COLS = 25
BLOCK_SIZE = 20
MARGIN = 1
SIDES_MARGIN = 20
HEADER_MARGIN = 70
size = [(BLOCK_SIZE + MARGIN) * COLS + SIDES_MARGIN * 2,
(BLOCK_SIZE + MARGIN) * COLS + SIDES_MARGIN * 2 + HEADER_MARGIN]
screen = pygame.display.set_mode(size)
pygame.display.set_caption('Nas10ka Snake')
courier = pygame.font.SysFont('courier', 32)
timer = pygame.time.Clock()
def start_the_game():
timer_tick = float(1.0)
paused = False
class SnakeBlock:
def __init__(self, x, y):
self.x = x
self.y = y
def is_inside(self):
return 0 <= self.x < COLS and 0 <= self.y < COLS
def __eq__(self, other):
return isinstance(other, SnakeBlock) and self.x == other.x and self.y == other.y
def get_random_apple_block():
random.randint(0, COLS)
return SnakeBlock(
random.randint(0, COLS-1),
random.randint(0, COLS-1)
)
apple = get_random_apple_block()
snake_blocks = [SnakeBlock(8, 9), SnakeBlock(8, 10), SnakeBlock(8, 11)]
d_row = 1
d_col = 0
total = 0
def draw_block(col, row, color):
cols = (SIDES_MARGIN + MARGIN * col) + col * BLOCK_SIZE
rows = (SIDES_MARGIN + MARGIN * row) + row * BLOCK_SIZE + HEADER_MARGIN
pygame.draw.rect(screen, color, [cols, rows, BLOCK_SIZE, BLOCK_SIZE])
while not paused:
screen.fill(FRAME_COLOR)
pygame.draw.rect(screen, HEADER_COLOR, [0, 0, (BLOCK_SIZE + MARGIN) * COLS + SIDES_MARGIN * 2, HEADER_MARGIN])
text_total = courier.render(f"Total: {total * 10}", 0, SNAKE_COLOR)
speed_text = courier.render(f"Speed: {total + 1}", 0, SNAKE_COLOR)
screen.blit(text_total, (BLOCK_SIZE, BLOCK_SIZE))
screen.blit(speed_text, (BLOCK_SIZE + 220, BLOCK_SIZE))
for row in range(COLS):
for col in range(COLS):
if (row + col) % 2 == 0:
color = RECT_COLOR
else:
color = LIGHT_RECT_COLOR
draw_block(col, row, color)
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP and d_col != 0:
d_row = -1
d_col = 0
elif event.key == pygame.K_DOWN and d_col != 0:
d_row = 1
d_col = 0
elif event.key == pygame.K_LEFT and d_row != 0:
d_row = 0
d_col = -1
elif event.key == pygame.K_RIGHT and d_row != 0:
d_row = 0
d_col = 1
elif event.key == pygame.K_SPACE and paused:
paused = False
elif event.key == pygame.K_SPACE and not paused:
paused = True
# paused = not paused
head = snake_blocks[-1] # take the latest element in array of snake blocks
if head.x + d_col == apple.x and head.y + d_row== apple.y:
new_head = SnakeBlock(head.x + d_col, head.y + d_row)
            if not new_head.is_inside() or new_head in snake_blocks:
break
snake_blocks.append(new_head)
total += 1
timer_tick += 0.25
for block in snake_blocks:
draw_block(block.x, block.y, SNAKE_COLOR)
apple = get_random_apple_block() # create new apple
else:
new_head = SnakeBlock(head.x + d_col, head.y + d_row)
if not new_head.is_inside() or new_head in snake_blocks:
break
snake_blocks.append(new_head)
snake_blocks.pop(0)
for block in snake_blocks:
draw_block(block.x, block.y, SNAKE_COLOR)
draw_block(apple.x, apple.y, APPLE_COLOR)
pygame.display.flip()
timer.tick(timer_tick)
pass
# def set_difficulty(value, difficulty):
# # Do the job here !
# pass
mytheme = pygame_menu.themes.Theme(background_color=(0, 0, 0, 0), # transparent background
title_background_color=(4, 47, 126, 0),
title_font=pygame_menu.font.FONT_OPEN_SANS,
menubar_close_button=False)
menu = pygame_menu.Menu(250, 400, ' ', theme=mytheme)
menu.add_text_input('Nickname :', default='Player 1')
# menu.add_selector('Difficulty :', [('Hard', 1), ('Easy', 2)], onchange=set_difficulty)
menu.add_button('Play', start_the_game)
menu.add_button('Quit', pygame_menu.events.EXIT)
def draw_background():
screen.blit(bg, (0, 0))
while True:
draw_background()
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
exit()
if menu.is_enabled():
menu.update(events)
menu.draw(screen)
pygame.display.update() |
# Task 1 a: sort list by first element of sublists
l = [[4, 3], [2, 7], [1, 8], [9, 1], [5, 6]]
sorted_l = sorted(l)
print(sorted_l) |
# Task 1d: get counts statistics for the sequence
dna_seq = "GATTACA"
rna_seq = "GAUUACA"
permitted_letters = "ATCGU"
seq_list = [dna_seq, rna_seq]
for seq in seq_list:
print("Validating sequence:", seq)
seq = seq.upper()
for letter in seq:
if not letter in permitted_letters:
print("Sequence invalid, contains invalid letters!")
break
else:
if not ('U' in seq and 'T' in seq):
if 'U' in seq:
print("Sequence is RNA")
elif 'T' in seq:
print("Sequence is DNA")
else:
print("Impossible to determine whether sequence is RNA or DNA!")
continue
# This will stop further execution of the current iteration and jump to the next iteration of the containing loop
# (i.e. go to next sequence)
else:
print("Sequence invalid, contains both U and T!")
continue # same as above
# After passing the above checks, create statistics for each base
stats_dict = {} # Create an empty dictionary for the statistics
# As we want to treat both DNA and RNA, we better create this dictionary dynamically to fit the possible bases
for letter in seq: # Most simple approach (however it's quite redundant)
stats_dict[letter] = 0
# Additional approach that will only set the entries for each unique letter:
for letter in set(seq):
stats_dict[letter] = 0
# Third approach, using a comprehension, making the whole thing a one-liner
# for comprehensions, check LP part IV, chapter 20, super useful stuff - while not of relevance for the exam I suppose
stats_dict = {letter:0 for letter in set(seq)}
for letter in seq:
stats_dict[letter] += 1
sequence_length = len(seq) # The length of a string (or any other iterable, such as lists) can easily be determined using len()
print("Counts for each base:", stats_dict)
print("Total length of the sequence:", sequence_length)
|
# Task 1b: create index file for specified dinucleotide
# To get the data required for this script, download it from http://hgdownload.soe.ucsc.edu/goldenPath/hg19/chromosomes/chr21.fa.gz
# Decompress the file (e.g. using 7-zip) and store it in the 'data' subdir of 'exercises_4'.
# Step 1: Save the script parameter for the input fasta filename in the variable "input_filename". Save
# the script parameter for the output file name in the variable "output_filename". These parameters
# serve as placeholders for the arguments passed when calling the script.
import sys # Import sys module
input_filename = sys.argv[1] # First parsed argument is input
output_filename = sys.argv[2] # Second parsed argument is output
# Step 2: Read the dinucleotide from stdin and store it in the variable "dinucleotide".
dinucleotide = input("Enter the DNA dinucleotide to be indexed: ").upper() # Capitalize so we won't have to deal with the case bullshit
# Step 3: Check if "dinucleotide" is a valid DNA base dinucleotide.
permitted_bases = "AGCT"
if not (len(dinucleotide) == 2 and dinucleotide[0] in permitted_bases and dinucleotide[1] in permitted_bases):
print("Invalid dinucleotide.")
quit()
# Step 4: Read all lines from the fasta file and store them in a list.
with open(input_filename, mode='r') as input_file:
data = input_file.readlines()
# Step 5: Get the sequence name of the fasta sequence (hint: the first line of a fasta file always starts
# with a ">" followed by the sequence name).
seq_name = data.pop(0).lstrip('>').rstrip('\n') # .pop(0) also removes the first line from the list
# Step 6: Concatenate all lines and store them in a new variable "fasta_sequence".
# Step 7: Replace all newline characters ("\n") with "".
fasta_sequence = ''.join(data).upper().replace('\n', '') # Also capitalize whole sequence
# Step 8: Open the output file for writing.
with open(output_filename, mode='w') as output_file:
# Step 9: Iterate over the whole fasta sequence stored in "fasta_sequence" and determine for every
# position if it contains the given dinucleotide. If so, write the sequence name and the position into
# the output file (separated by a tab ("\t")). Hint: Fasta files consist of lower- and uppercase
# characters! -> Already took care of that by completely capitalizing it
pos = 0
while pos+1 < len(fasta_sequence): # This may take a while...
if fasta_sequence[pos:pos+2] == dinucleotide:
# Make a proper BED file, with columns for name, start, stop
# (with the stop position NOT included in the dinucleotide)
print(seq_name, pos, pos+2, sep='\t', file=output_file)
pos += 1
# Step 10: Close the fileobjects
# Already took care of that by using 'with open() as file_obj'!
|
# Beginners MNIST / Tensorflow website
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Adapted from https://www.tensorflow.org/get_started/mnist/beginners
# and https://www.tensorflow.org/get_started/mnist/pros
def train():
# Download MNIST data using input_data.read_data_sets, save it into a folder.
# Use one hot encoding (easier to deal with when using machine learning, explained in readme
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Placeholder used to allow input of any number of mnist images
# 784 dimensions (28 x 28), dimensions can be of any length
x = tf.placeholder(tf.float32, [None, 784])
# Weights and biases - learnable params, adjusted towards values that have correct output during training
# Low weight means input won't change output much, low bias generally means it won't learn as fast
# https://datascience.stackexchange.com/questions/19099/what-is-weight-and-bias-in-deep-learning, https://stackoverflow.com/questions/2480650/role-of-bias-in-neural-networks
# Variable class allows tensor to be used / modified
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# Create the model
# Softmax ideal for probabilities, gives a list of values between 0 and 1 (0-100%) that add up to 1 (100%)
# Matmul function multiplies matrices (mnist and weight)
y = tf.nn.softmax(tf.matmul(x, W) + b)
print(y)
def test():
print("Test working") |
from math import *
from random import randrange
def bozo(arr, direction=True):
narr = []
if isinstance(arr[0], list):
for li in arr:
narr.extend(li)
else:
narr = arr.copy()
len_narr = len(narr)
while not sort(narr, direction):
first, second = randrange(len_narr), randrange(len_narr)
narr[first], narr[second] = narr[second], narr[first]
return narr
def sort(arr, direction=True):
arr_len = len(arr)
if direction:
for i in range(1, arr_len):
if arr[i - 1] > arr[i]:
return False
else:
for i in range(1, arr_len):
if arr[i - 1] < arr[i]:
return False
return True
i = 0
k = 0
while (k==0):
try:
length = int(input('Введите число: '))
sqrt_len = sqrt(length)
list1 = [int(i) for i in input('Введите эл-ты массива через пробел: ').split()]
k = 1
except:
print("Неверный формат ввода.\n")
list2 = []
list3 = []
for el in list1:
list3.append(el)
i += 1
if i == sqrt_len:
list2.append(list3)
i = 0
list3 = []
del list3, i
els = []
quant = 3
for i in range(quant):
els.append(list1[i])
print(' '.join(map(str, bozo(list1, True))))
print(' '.join(map(str, bozo(list1, False))))
print(' '.join(map(str, bozo(list2, True))))
print(' '.join(map(str, bozo(list2, False))))
print(' '.join(map(str, bozo(els, True))))
print(' '.join(map(str, bozo(els, False)))) |
"""
Author: Lucky Adogun (zen_coder)
License: MIT
Date: 26-10-2020
PROBLEM: Given an array of integers and a target integer, find the position of the
integers that make up the target.
Example:
array = [1,2,3,4]
target = 7
result = [2,3]
"""
def solution_one(nums, target):
compliments = {}
result = []
for index, num in enumerate(nums):
if compliments.get(num) is None:
compliments[target-num] = index
else:
result = [compliments[num], index]
return result
def solution_two(nums, target):
"""
- Loop through the array with its index
    - find the complement by subtracting each iterated item from the target:
eg:
items = [1,2,3,4], target = 7
=> 7-1 = 6
=> 7-2 = 5
=> 7-3 = 4
        => 7-4 = 3
    - check if the complement is already in the array.
If YES:
- store the index of the item that produces it and the item in the hashmap (dictionary)
In the case of array with elements [1,2,3,4] and target 7
=> 7-3 is 4 which is already present in the list, therefore, the index of 3 needs to be stored.
=> 7-4 is 3 which is already present in the list, therefore, the index of 4 needs to be stored.
if NO:
- Don't store the index of the item in the hashmap (dictionary)
Return a list of the hashmap keys.
This algorithm runs on O(n) time complexity.
"""
myHash = {}
for index, item in enumerate(nums):
if target - item in nums:
myHash[index] = item
return list(myHash.keys())
solution_one([0,12,15,3, 1], 18)
solution_two([0,12,15,3, 1], 18)
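# Quick check of the example from the docstring above (added for illustration):
# [1, 2, 3, 4] with target 7 should give the indices [2, 3].
print(solution_one([1, 2, 3, 4], 7))  # expected: [2, 3]
print(solution_two([1, 2, 3, 4], 7))  # expected: [2, 3]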
|
import random


class Game:
    # width/height are made parameters so the class is self-contained
    def __init__(self, number_of_players, width=10, height=10):
        self.width = width
        self.height = height
        self.players = []
        for n in range(number_of_players):
            # Player is assumed to be defined elsewhere; it is not part of this snippet.
            self.players.append(Player(range(0, width), range(0, height)))

    @staticmethod
    def start():
        print("It's adventure time!")
        print("How many people are adventuring?")
        number_of_players = int(input())
        game = Game(number_of_players)
        game.turn()

    def turn(self):
        moves = [1, -1]
        for player in self.players:
            print("Player {} moved!".format(player.id))
            new_x = random.choice(moves)
            new_y = random.choice(moves)
            if 0 <= player.x + new_x < self.width:
                player.x += new_x
            if 0 <= player.y + new_y < self.height:
                player.y += new_y
Game.start() |
class Reading:
""" A class to represent an atmospheric pressure reading from a smartphone """
def __init__(self, observation_unit, value, model, latitude, longitude, altitude):
self.observation_unit = observation_unit
self.value = value
self.latitude = latitude
# ...
def to_atm(self):
if self.observation_unit == 'atm':
return self.value
        elif self.observation_unit == 'mbar':
            # 1 atm = 1013.25 mbar, so convert by dividing
            return self.value / 1013.25

# None stands in for the missing model argument (the constructor takes six values)
reading1 = Reading('mbar', 100, None, 43, 79, 100)
print(reading1.to_atm())
|
def is_palindrome(string):
if len(string) <= 1:
return 1
elif string[0] == string[-1]:
return(is_palindrome(string[1:-1]))
else:
return 0
#print is_palindrome("kajak")
#print is_palindrome("kajak")
|
#library classes to help
import math
import random
import sys
def nodeDistance(node1, node2):
hDist = abs(node1.getX() - node2.getX())
vDist = abs(node1.getY() - node2.getY())
return math.sqrt(hDist**2 + vDist**2)
def dnaDistance(DNAObj1, DNAObj2):
dna1 = DNAObj1.getDNA()
dna2 = DNAObj2.getDNA()
if (len(dna1) != len(dna2)):
return -1
else:
diff = 0
for i in xrange(len(dna1)):
if (dna1[i] != dna2[i]):
diff+=1
return diff
class Node:
#both of type float
#xcoord, ycoord
def __init__(self, xcoord, ycoord):
self.xcoord = xcoord
self.ycoord = ycoord
def getX(self):
return self.xcoord
def changeX(self, xcoord):
self.xcoord = xcoord
def getY(self):
return self.ycoord
def changeY(self, ycoord):
self.ycoord = ycoord
def distanceTo(self, node):
return nodeDistance(self, node)
class DNA:
#{A,C,G,T}
#len - any length of specific length
def __init__(self, dnaStr):
self.dnaStr = dnaStr
def getDNA(self):
return self.dnaStr
def setDNA(self, newDNAStr):
#check if valid DNA input
#returns -1 if error, 0 if success
        for i in xrange(len(newDNAStr)):
            # a base is invalid only if it is none of A, C, G, T
            if newDNAStr[i] not in ("A", "C", "G", "T"):
                self.dnaStr = None
                return False
        self.dnaStr = newDNAStr
        return True
    def validateDNA(self):
        return self.setDNA(self.dnaStr)
def distanceTo(self, dna):
return dnaDistance(self, dna)
#-----------------------------------------------------------------------------
# Random Body Generator
class BodyGenerator:
def __init__(self, nworld_minx, nworld_maxx, nworld_miny, nworld_maxy):
self.nworld_minx = nworld_minx
self.nworld_maxx = nworld_maxx
self.nworld_miny = nworld_miny
self.nworld_maxy = nworld_maxy
def generateRandomNodes(self, numNodes):
bodies = []
for i in xrange(numNodes):
#for integer coordinates
#x = random.randint(self.nworld_minx, self.nworld_maxx)
#y = random.randint(self.nworld_miny, self.nworld_maxy)
#for floating point coordinates
x = random.uniform(self.nworld_minx, self.nworld_maxx)
y = random.uniform(self.nworld_miny, self.nworld_maxy)
bodies.append(Node(x,y))
return bodies
def printNodes(self, nodeList):
# nodeList = self.generateRandomNodes(numNodes)
resList = []
for node in nodeList:
resList.append((node.getX(), node.getY()))
print resList
def generateRandomDNA(self, lengthDNA, numNodes):
#{A,C,G,T}
bases = ["A", "C", "G", "T"]
dnaNodes = []
for i in xrange(numNodes):
dna = ""
for j in xrange(lengthDNA):
dna += random.choice(bases)
dnaNodes.append(DNA(dna))
return dnaNodes
def printDNANodes(self, DNAlist):
# DNAlist = self.generateRandomDNA(lengthDNA, numNodes)
resList = []
for dna in DNAlist:
resList.append(dna.getDNA())
print resList
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
Create a Perceptron class with methods to train a Perceptron binary classifier.
Test your classifer on test data.
"""
# in terminal, >> spyder3
class Perceptron:
# class fields
# weights = []
# constructor
def __init__(self, dim):
# initialize weights to 1
self.weights = [1]*dim
print("weights initialized" + str(self.weights))
# learning rate tells us how much we want to steer our weight towards the actual label
# A higher learning rate could lead to oversteering
self.learning_rate = 0.1
# class helper function to return sign of a constant
def sign(self, val):
if val>=0:
return 1
else: #val<0
return -1
# class algorithm
# instance is the train instance as a feature vector
# self.weights has the weight of each feature
def predict(self, instance):
sum = 0
for i in range(0, len(self.weights)):
sum += instance[i] * self.weights[i]
output = self.sign(sum)
return output
# class function to modify class variable values
def train(self, input, actual):
prediction = self.predict(input)
error = actual - prediction # error is 0 if prediction was correct
        for i in range(len(self.weights)):
            # scale the update by the learning rate; no change when error is 0
            self.weights[i] += self.learning_rate * error * input[i]
def main():
p = Perceptron(5)
train_input = [1, 2, 3, -10, 3] # input vector
train_label = 1
print("before training")
print(p.predict(train_input))
p.train(train_input, train_label)
print("after training")
print(p.predict(train_input)) # run perceptron alg on inputs vector
main()
|
n1 = float(input('Qual a n1? '))
n2 = float(input('Qual a n2? '))
media = (n1+n2)/2
if media < 5:
print('Reprovado')
elif media >= 7:
print('Aprovado')
else:
print('Recuperação né Filhao') |
from random import randint
computador = randint(1,10)
print('Vamos começar o jogo da Advinhação.')
contador = 0
while True:
chute = 0
while chute < 1 or chute > 10:
chute = int(input('Digite um valor:'))
if chute < 1 or chute > 10:
print('Digite novamente, valor inserido não valido')
contador += 1
if chute == computador:
break
print('Não Foi esse valor, tente novamente')
print('Parabéns! Após {} tentativas, você acertou!'.format(contador)) |
saque = int(input('Qual o valor que você deseja sacar? '))
while True:
if saque // 50 > 0:
print('Foi sacado {} notas de 50'.format(saque//50))
saque = saque % 50
if saque // 20 > 0:
print('Foi sacado {} notas de 20'.format(saque//20))
saque = saque % 20
if saque // 10 > 0:
print('Foi sacado {} notas de 10'.format(saque//10))
saque = saque % 10
if saque // 1 > 0:
print('Foi sacado {} notas de 1'.format(saque//1))
break
print('Obrigado! tenha um bom dia') |
import math
ang = float(input("Digite o Angulo: "))
ang = (ang*math.pi)/180
senang = round(math.sin(ang),3)
cosang = round(math.cos(ang),3)
tanang = round(math.tan(ang),3)
print("Os valores são: {} , {} , {}".format(senang,cosang,tanang)) |
from random import randint
from time import sleep
def sorteia(lista):
for cont in range (0,5):
n = randint(1,10)
lista.append(n)
def somapar(lista):
s = 0
print('Os números sorteados são: ', end='')
sleep(0.3)
for valor in lista:
print(valor, end=' ')
sleep(0.3)
if valor % 2 == 0:
s += valor
    print(f'\nE a soma dos pares é: {s}')
numeros = list()
sorteia(numeros)
somapar(numeros)
|
def notas(*num):
'''
Função notas utiliza de uma lista de notas para processamento de dados
:param num: Lista de notas dos alunos de uma sala
:return: Total, maior nota, menor nota, média geral da sala e situacao de aproveitamento
'''
sala = dict()
sala['total'] = len(num)
sala['maior'] = max(num)
sala['menor'] = min(num)
sala['média'] = sum(num)/len(num)
if sala['média'] > 8:
sala['situacao'] = 'TOP'
elif sala['média'] > 6:
sala['situacao'] = 'OK'
else:
sala['situacao'] = 'RUIM'
print(sala)
help(notas)
notas(1,2,3,4,5) |
from random import randint
opcao = ['Pedra','Papel','Tesoura']
computador = randint(0,2)
print('=-='*10)
print('''Escolha sua opção:
[ 1 ] Pedra
[ 2 ] Papel
[ 3 ] Tesoura
''')
jogador = int(input('Digite o número da sua opcao: '))-1
if computador == 0: #PC joga pedra
if jogador == 0:
print("EMPATE, você jogou {} e o computador jogou {}".format(opcao[jogador],opcao[computador]))
elif jogador == 1:
print("GANHOU, você jogou {} e o computador jogou {}".format(opcao[jogador],opcao[computador]))
elif jogador == 2:
print("PERDEU, você jogou {} e o computador jogou {}".format(opcao[jogador],opcao[computador]))
elif computador == 1: #PC joga Papel
if jogador == 0:
print("PERDEU, você jogou {} e o computador jogou {}".format(opcao[jogador],opcao[computador]))
elif jogador == 1:
print("EMPATE, você jogou {} e o computador jogou {}".format(opcao[jogador],opcao[computador]))
elif jogador == 2:
print("GANHOU, você jogou {} e o computador jogou {}".format(opcao[jogador],opcao[computador]))
elif computador == 2:
if jogador == 0:
print("GANHOU, você jogou {} e o computador jogou {}".format(opcao[jogador],opcao[computador]))
elif jogador == 1:
print("PERDEU, você jogou {} e o computador jogou {}".format(opcao[jogador],opcao[computador]))
elif jogador == 2:
print("EMPATE, você jogou {} e o computador jogou {}".format(opcao[jogador],opcao[computador])) |
num = int(input('Digite o número para verificaçao: '))
for i in range (2, 1000):
if i == num:
continue
if num % i == 0:
print ("O número não é primo", i)
break
if i == 999:
print("O número é primo") |
expressao = input('Digite a expressão: ').strip()
validade = True
tipo_error = ''
quant_parenteses_abrir = expressao.count('(')
quant_parenteses_fechar = expressao.count(')')
parenteses_para_fechar = 0
if quant_parenteses_abrir == quant_parenteses_fechar:
for v in expressao:
if v == ')' and parenteses_para_fechar == 0:
validade = False
tipo_error = 'Você fechou parentese antes de abrir.'
break
if v == '(':
parenteses_para_fechar += 1
if v == ')':
parenteses_para_fechar -= 1
else:
validade = False
tipo_error = 'A quantidade de parenteses abrindo é diferente da quantidade de parentese fechando'
if parenteses_para_fechar > 0:
validade = False
tipo_error = 'Você esqueceu de fechar um parentese'
if validade == True:
print(f'A expressão {expressao} está válida!')
else:
print(f'A expressão {expressao} está errada!')
print(tipo_error) |
import math
catopo = int(input("Digite o Cateto Oposto: "))
catadj = int(input("Digite o Cateto Adjacente: "))
hipote = int(math.sqrt(pow(catopo,2)+pow(catadj,2)))
print("A hipotenusa desse triangulo é: {}".format(hipote))
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 11 21:11:28 2021
@author: Raza_Jutt
"""
def hexaToDec(hexval):
length = len(hexval)
base = 1
dec_val = 0
for i in range(length - 1, -1, -1):
if hexval[i] >= '0' and hexval[i] <= '9':
dec_val += (ord(hexval[i]) - 48) * base
base = base * 16
elif hexval[i] >= 'A' and hexval[i] <= 'F':
dec_val += (ord(hexval[i]) - 55) * base
base = base * 16
return dec_val
def decToHexa(n):
hexaDeciNum = ['0'] * 100
i = 0
s=""
while(n != 0):
temp = 0
temp = n % 16
if(temp < 10):
hexaDeciNum[i] = chr(temp + 48)
i = i + 1
else:
hexaDeciNum[i] = chr(temp + 55)
i = i + 1
n = int(n / 16)
j = i - 1
while(j >= 0):
s = s+(hexaDeciNum[j])
j = j - 1
return s
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 21 13:28:28 2013
modified 4th Fib 2012 to help use Git and gitHub
rock paper scissors
@author: graham naylor;
For use in linux console. Enter rock paper or scissors (or their initial!).. use: ./mit_rps.py
"""
import os # to allow to clear the console (see rps_player_one()). Works on Linux
PLAYER1_WIN = [[1,3],[2,1],[3,2]]# rock =1, paper = 2, scissor = 3
PLAYER2_WIN = [[1,2],[2,3],[3,1]]# should be tuples to avoid mutation, but,,,
def rps_player_one():
os.system('clear')# the 'clear' is linux orientated , i think its 'cls' for windows.
x = raw_input("\nPlayer 1! rock paper or scissors: ")
rps_entry_check(x)
x = x.lower()
x = x[0]
decode_list = [x,]#create list with first element being player ones entry
rps_player_two(decode_list)#send the list to player two, for their answer to become second element
def rps_player_two(decode_list):
x = raw_input("Player 2!; rock, paper, or scissors: ")
rps_entry_check(x)
x = x.lower()
x = x[0]
decode_list += x #finalise the list entries
rps_response_repository(decode_list)#send the final list to have elements converted into coresonding numbers
def rps_response_repository(decode_list):
data = [rps_response_decode(item) for item in decode_list]# create a list, 'data', of integers from the function argument
rps_decision(data)
def rps_decision(data):
reciprocal = reduce(rps_reciprocalof_listelements, data)#if reciprocal of 1 returned, list elements are equal..
if (reciprocal == 1): #reduce takes each of the elements in 'data' and sends them
rps_players_tie() #to rps_reciprocalof_listelements
else:
for item in PLAYER1_WIN[:]:# loop over a slice copy '[:]' of PLAYER1_WIN
if item == data:
rps_player1_wins()
for item in PLAYER2_WIN[:]:#loop over a slice copy
if item == data:
rps_player2_wins()
def rps_reciprocalof_listelements(x,y):
    return x * 1.0 / y  # float division: equals 1 only when the two entries are equal, i.e. there was a draw
def rps_response_decode(decode_list):
for item in decode_list:
if item == 'r':
return 1
elif item == 'p':
return 2
elif item == 's':
return 3
else:
return(0)# i think this should probably call rps_restart_question()
def rps_entry_check(x):
if any([i>'z' or i<'a' for i in x]): #check to see if entry is alphabetic
print "Please enter rock, paper or scissors..\n"
restart = raw_input("\nRestart (y/Y), else q to quit : ")
if (restart == 'y' or restart == 'Y'):
rps_player_one()
else:
exit()
def rps_restart_question():
restart = raw_input("\nEnter y or Y to restart, else q to quit : ")
if (restart == 'y' or restart == 'Y'):
rps_player_one()
else:
exit()
def rps_player1_wins():
print "Player 1 Wins"
rps_restart_question()
def rps_player2_wins():
print "Player 2 Wins"
rps_restart_question()
def rps_players_tie():
print "It's a tie"
rps_restart_question()
rps_player_one() |
from tkinter import *
from tkinter import filedialog
from PIL import Image, ImageTk
class MainScreen:
def __init__(self, master):
self.pic = ""
master.minsize(width=600, height=400)
#master.configure(background="firebrick3")
frame = Frame(master)
self.directoryLabel = Label(frame, text="Choose Photo")
self.picEntry = Entry(frame, width=50)
self.directoryLabel.grid(row=4, sticky=E)
self.picEntry.grid(row=4, column=1)
self.choosePic = Button(frame, text="Browse", command=self.askFile)
self.choosePic.grid(row=4, column = 2)
self.enterButton = Button(frame, text="Enter", command=self.submit)
self.enterButton.grid(row=5, columnspan=1)
self.quitButton = Button(frame, text="Quit", command=frame.quit)
self.quitButton.grid(row=5, columnspan=4)
frame.pack()
frame.mainloop()
def askFile(self):
self.pic = filedialog.askopenfilename()
self.picEntry.insert(0, self.pic)
image = Image.open(self.pic)
photo = ImageTk.PhotoImage(image)
def submit(self):
return
root = Tk()
button = MainScreen(root)
|
#!/usr/bin/python
#encoding:utf-8
#Filename:while.py
number = 23
running = True
while (running) :
    # without the encoding:utf-8 line, even Chinese comments raise an error
    # without int() here the comparison always falls into the guess > number branch:
    # raw_input returns a str, and in Python 2 a str compares greater than any int
guess = int(raw_input("Enter an integer :"))
if guess == number :
print "Congratulations, you guessed it "
running = False
elif guess > number :
print "No, it is a little lower than that ."
else:
print "No,it is a little highter than that ."
else:
print "The while loop is over"
#Do anything else you want to do here
print 'Done'
|
#!/usr/bin/python
#encoding:utf-8
#Filename:continue.py
while True :
s = raw_input("Enter something :")
if s == 'quit' :
break
elif len(s) < 3 :
print '<3'
continue
print "Input is of sufficient length"
#Do other kinds of processing here
|
#
#
#
# Python: 3.7.4
#
# Author: Keith Korter
#
#
#
# Purpose: creates a database and adds new data into that database.
#
#
#
import os
import sqlite3
conn = sqlite3.connect('data.db')
with conn:
cur = conn.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS tbl_filesName( \
ID INTEGER PRIMARY KEY AUTOINCREMENT, \
col_fname VARCHAR(25) \
)")
conn.commit()
conn.close()
fileList = ('information.docx','Hello.txt','myImage.png','myMovie.mpg','World.txt','data.pdf','myPhoto.jpg')
for f in fileList:
if f.endswith('.txt'):
conn = sqlite3.connect('data.db')
with conn:
cur = conn.cursor()
cur.execute("INSERT INTO tbl_filesName(col_fname) VALUES (?)",(f,))
conn.commit()
print(f)
conn.close()
|
import pandas
import matplotlib.pyplot as plt
def mean(L):
return(sum(L) / float(len(L)))
def calc_equity_premium(p, q, sigma, rho, theta, b):
first = sigma*sigma * theta
    # list comprehensions needed here: in Python 3, map() returns an iterator with no len()
    second = mean([pow(1-x, -1*theta) for x in b])
    third = mean([pow(1-x, 1-theta) for x in b])
fourth = mean(b)
return(first + p*(1-q)*(second - third - fourth))
df = pandas.read_excel('/Users/garidor/Desktop/first-year/spring/macro/part2/ps3/DataForPS3.xlsx')
col = "% fall in real per capita GDP"
b = df[col].values / 100.0
b.sort()
p = 0.017
n = len(b)
approx_num_observations = 3500
q = 0.4
gamma = 0.025
sigma = 0.02
rho = 0.03
theta = 4
gamma = 0.025
vals = []
print(calc_equity_premium(p, q, sigma, rho, theta, b[:]))
for i in range(1, 7):
vals.append(calc_equity_premium(p, q, sigma, rho, i, b[:]))
plt.plot([1, 2, 3, 4, 5, 6], vals)
plt.show()
p = (n - 6.0) / (approx_num_observations - 6.0)
print(calc_equity_premium(p, q, sigma, rho, theta, b[:(n-6)]))
p = (n - 30.0) / (approx_num_observations - 30.0)
print(calc_equity_premium(p, q, sigma, rho, theta, b[(n-30):])) |
from datetime import datetime
from pytz import timezone
class DateTimeManager:
# current date+time captured upon class's creation
def __init__(self):
self.date_time_in_pt = self.standardize_to_pt_time(datetime.now())
self.day_of_the_week = self.shift_week_start_num(self.date_time_in_pt.isoweekday())
self.hour = self.format_hour(self.date_time_in_pt.hour)
self.minute = self.format_minute(self.date_time_in_pt.minute)
# convert user's local time to pacific time
def standardize_to_pt_time(self, date_time):
return date_time.astimezone(timezone('US/Pacific'))
# returns the following format: '00:00'
def format_current_time_as_string(self):
return '\'' + str(self.hour) + ':' + str(self.minute) + '\''
# start the week on Sunday instead of on Monday + use ordinal counting
def shift_week_start_num(self, weekday_num):
weekday_num = str(weekday_num)
if weekday_num == '7': return '0'
else: return weekday_num
# ensures the hour is written in a two-digit format (ex: 1 --> 01)
def format_hour(self, hour):
hour = str(hour)
if len(hour) < 2: return '0' + hour
else: return hour
# ensures the minute is written in a two-digit format (ex: 1 --> 01)
def format_minute(self, minute):
minute = str(minute)
if len(minute) < 2: return '0' + minute
else: return minute
|
from creative_ai.utils.print_helpers import ppGramJson
class TrigramModel():
def __init__(self):
"""
Requires: nothing
Modifies: self (this instance of the NGramModel object)
Effects: This is the NGramModel constructor. It sets up an empty
dictionary as a member variable.
"""
self.nGramCounts = {}
def __str__(self):
"""
Requires: nothing
Modifies: nothing
Effects: Returns the string to print when you call print on an
NGramModel object. This string will be formatted in JSON
and display the currently trained dataset.
"""
return ppGramJson(self.nGramCounts)
###############################################################################
# >> CORE IMPLEMENTATION <<
###############################################################################
def trainModel(self, text):
"""
Requires: text is a list of lists of strings
Modifies: self.nGramCounts, a three-dimensional dictionary.
Effects: this function populates the self.nGramCounts dictionary,
which has strings as keys and dictionaries as values,
where those inner dictionaries have strings as keys
and dictionaries of {string: integer} pairs as values.
"""
for sentence in text:
for i in range(0, len(sentence) - 2):
if sentence[i] not in self.nGramCounts:
self.nGramCounts[sentence[i]] = {}
if sentence[i + 1] not in self.nGramCounts[sentence[i]]:
self.nGramCounts[sentence[i]][sentence[i + 1]] = {}
if sentence[i + 2] not in self.nGramCounts[sentence[i]][sentence[i+1]]:
self.nGramCounts[sentence[i]][sentence[i + 1]][sentence[i+2]] = 0
self.nGramCounts[sentence[i]][sentence[i+1]][sentence[i+2]] += 1
def trainingDataHasNGram(self, sentence):
"""
Requires: sentence is a list of strings
Modifies: nothing
Effects: returns True if this n-gram model can be used to choose
the next token for the sentence.
"""
return len(sentence) >= 2 and sentence[-2] in self.nGramCounts and sentence[-1] in self.nGramCounts[sentence[-2]]
def getCandidateDictionary(self, sentence):
"""
Requires: sentence is a list of strings, and trainingDataHasNGram
has returned True for this particular language model
Modifies: nothing
Effects: returns the dictionary of candidate next words to be added
to the current sentence.
"""
return self.nGramCounts[sentence[-2]][sentence[-1]]
###############################################################################
# End Core
###############################################################################
###############################################################################
# Main
###############################################################################
if __name__ == '__main__':
# An example trainModel test case
uni = TrigramModel()
text = [ ['the', 'brown', 'fox'], ['the', 'lazy', 'dog'] ]
uni.trainModel(text)
#{"'the'": {"'brown'": {"'fox'": 1}, "'lazy'": {"'dog'": 1 }}}
print(uni)
text0 = ['the', 'brown']
#should print true
print(uni.trainingDataHasNGram(text0))
text1 = ['the', 'brown', 'dog', 'the', 'lazy']
#should print true
print(uni.trainingDataHasNGram(text1))
text1 = ['the', 'brown', 'dog']
#should print false
print(uni.trainingDataHasNGram(text1))
sentence = ['i', 'the', 'brown']
print(uni.getCandidateDictionary(sentence)) #should return {'fox': 1} |
# Homework 1: implement a file-copier function. Given a path, copy every file under that path (directories do not need to be copied) into the current directory.
import os
def file_copy(file_path):
#file_path ='/Users/zhangcaiyan/Desktop/Lemon_python/lemon_python_08'
try:
res = os.listdir(file_path)
print(res)
os.chdir(file_path)
except Exception as e:
print(e)
else:
for i in res:
#file_path = os.path.join(path,i)
if os.path.isfile(i):
with open(i,'rb') as f:
#content = f.read()
#new_file = 'cp' + i
#new_file = os.path.join(path2,new_file)
with open(f'cp_{i}','wb') as f1:
content = f.read()
f1.write(content)
file_path ='/Users/zhangcaiyan/Desktop/Lemon_python/lemon_python_08'
file_copy(file_path)
#file_copy(r:"/Users/zhangcaiyan/Desktop/Lemon_python/lemon_python_08")
# Homework 2: improve the earlier rock-paper-scissors homework. Non-numeric user input raises an exception; handle it with exception catching.
print("************第二题*****************")
import random
li =["石头","剪刀","布"]
while True:
rand = random.randint(1,3)
print("电脑的输出为:{}".format(rand))
print('石头【1】 剪刀【2】 布【3】 游戏结束【4】')
try:
user = int(input("请输入上面的选项:"))
except ValueError as e:
print("输入错误,请重新输入")
continue
else:
if 1 <= user <=3:
            if rand - user == 1 or rand - user == -2:
print("用户输入为:{},电脑输入为:{},用户胜".format(li[user-1],li[rand-1]))
elif rand == user:
print("用户输入为:{},电脑输入为:{},平局".format(li[user-1],li[rand-1]))
else:
print("用户输入为:{},电脑输入为:{},电脑胜".format(li[user-1],li[rand-1]))
elif user == 4:
print("游戏结束")
break
else:
print("输入错误")
|
# game.py
print("'Rock', 'Paper', 'Scissors,' 'Shoot!'")
import random
import os
import dotenv
dotenv.load_dotenv()
PLAYER_NAME = os.getenv("PLAYER_NAME")
print(f"Welcome '{PLAYER_NAME}' to my Rock-Paper-Scissors game!")
#non .env way
#PLAYER_NAME = input ("Please select a player name:")
#print("WELCOME" f"{PLAYER_NAME} to my Rock-Paper-Scissors game...")
user_choice = input("Please choose either 'rock', 'paper', or 'scissors': ")
print("You chose: ", user_choice)
if (user_choice == "rock") or (user_choice == "paper") or (user_choice == "scissors"):
print("Thank you for choosing.")
else:
print("OOPS, invalid input. Please try again.")
exit()
valid_options = ["rock", "paper", "scissors"]
computer_choice = random.choice(valid_options)
print("The computer chose: ", computer_choice)
#adapted from Jan's code from slack
if user_choice == "rock":
if computer_choice == "rock":
print("IT'S A TIE")
elif computer_choice == "paper":
print("OH, THE COMPUTER WON...")
elif computer_choice == "scissors":
print("YOU WON! CONGRATS!")
elif user_choice == "paper":
if computer_choice == "rock":
print("YOU WON! CONGRATS!")
elif computer_choice == "paper":
print("IT'S A TIE")
elif computer_choice == "scissors":
print("OH, THE COMPUTER WON...")
elif user_choice == "scissors":
if computer_choice == "rock":
print("OH, THE COMPUTER WON...")
elif computer_choice == "paper":
print("YOU WON! CONGRATS!")
elif computer_choice == "scissors":
print("IT'S A TIE")
#my original code
#if (user_choice == "rock") and (computer_choice == "rock"):
# print("WE HAVE A TIE!")
# if (user_choice == "rock") and (computer_choice == "scissors"):
# print("CONGRATULATIONS! YOU WON!")
# if (user_choice == "rock") and (computer_choice == "paper"):
# print("UH OH! The computer won. Better luck next time!")
# if (user_choice == "paper") and (computer_choice == "paper"):
# print("WE HAVE A TIE!")
# if (user_choice == "paper") and (computer_choice == "scissors"):
# print("UH OH! The computer won. Better luck next time!")
# if (user_choice == "paper") and (computer_choice == "rock"):
# print("CONGRATULATIONS! YOU WON!")
# if (user_choice == "scissors") and (computer_choice == "scissors"):
# print("WE HAVE A TIE!")
# if (user_choice == "scissors") and (computer_choice == "rock"):
# print("UH OH! The computer won. Better luck next time!")
# if (user_choice == "scissors") and (computer_choice == "paper"):
# print("CONGRATULATIONS! YOU WON!")
print("THANK YOU FOR PLAYING! PLEASE PLAY AGAIN.") |
#!/usr/bin/env python
# coding: utf-8
import os
import csv
filepath = os.path.join("Resources", "budget_data.csv")
budget_info = {} #Made an empty dictionary to store information from the csv so I don't have to keep it open
#opening CSV file and storing info in the dictionary
with open(filepath) as csvfile:
csvreader = csv.reader(csvfile, delimiter=",")
csv_header = next(csvreader)
for row in csvreader:
budget_info[row[0]] = int(row[1])
#calculating the information needed for the output, and storing them into dictionaries for ease of access
dates = list(budget_info.keys())
total_months = len(dates) #stores the total number of months
total = 0 #stores the net total
for date in dates:
total += budget_info[date]
#made a dictionary to store the difference in profit/loss compared to each month before
differences_dict = {}
for i in range(1, len(dates)):
current_val = budget_info[dates[i]]
previous_val = budget_info[dates[i - 1]]
differences_dict[dates[i]] = (current_val - previous_val)
#store the biggest change and the smallest change then loop through the dictionary to find them
maxchange = max(differences_dict.values())
minchange = min(differences_dict.values())
for items in differences_dict.items():
if(items[1] == maxchange):
maxprofit = (items[0], items[1])
elif(items[1] == minchange):
minprofit = (items[0], items[1])
#store values from the differences and take the average of it to get average change manually
values = list(differences_dict.values())
average = round(sum(values) / len(values), 2)
#Storing strings so I can write to a text file and also print them out
str1 = "Financial Analysis"
str2 = "-------------------------"
str3 = f"Total Months: {total_months}"
str4 = f"Total: ${total}"
str5 = f"Average Change: ${average}"
str6 = f"Greatest Increase in Profits: {maxprofit[0]} (${maxprofit[1]})"
str7 = f"Greatest Decrease in Profits: {minprofit[0]} (${minprofit[1]})"
#Writing strings to terminal and also writing to text file
output_lines = [str1, str2, str3, str4, str5, str6, str7]
with open("Analysis/PyBankOutput.txt", "w") as file:
    for line in output_lines:
        file.write(line + "\n")
        print(line)
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
ps = PorterStemmer()
words = ['python', 'pythoned', 'pythoning', 'pythoned', 'pythonly']
# for w in words:
# print(ps.stem(w))
new_text = "It is important to be pythonly while you are pythoning with python. All pythoners have poorly pythoned atleast once!"
new_words = word_tokenize(new_text)
for w in new_words:
print(ps.stem(w))
|
cars = 100
space_in_a_car = 4.0
drivers = 30
passengers = 90
cars_not_driven = cars - drivers
cars_driven = drivers
car_pool_capacity = cars_driven * space_in_a_car
average_passenger_per_car = passengers / cars_driven
print("there are", cars, "cars available")
print("there are only", drivers, "drivers available")
print("there will be", cars_not_driven, "cars not driven today")
print("we can transport", car_pool_capacity, "people capacity today")
print("we have ", passengers, "to car pool today")
print("we need to put about ", average_passenger_per_car, "passengers per each car")
|
def myFileWordCounter():
myFileInput = input("Write a file name here! :")
file = open(myFileInput, 'r+')
#readLines = file.readlines()
splitWordsInFile = file.read().split()
# for i in readLines:
# print(i)
print(len(splitWordsInFile))
# takeAddedText = input("Write what you want to add here! :")
multipleLines = [ 'Hello! \n', 'See you later! \n', 'Okay. \n' ]
file.writelines(multipleLines)
file = open(myFileInput, 'r')
readLines = file.readlines()
for i in readLines:
print(i)
myFileWordCounter()
|
# first ask base case
# if the len of the list is less than or equal to one, then we have a sorted list
# otherwise, we will use splice to extract the left and right halves.
def mergeSort(n):
print("Splitting ", n)
if len(n) > 1:
mid = len(n) // 2
left = n[:mid]
right = n[mid:]
mergeSort(left)
mergeSort(right)
i = 0
j = 0
k = 0
while i < len(left) and j < len(right):
if left[i] < right[j]:
n[k] = left[i]
i += 1
else:
n[k] = right[j]
j += 1
k += 1
while i<len(left):
n[k] = left[i]
i += 1
k += 1
while j < len(right):
n[k] = right[j]
j += 1
k += 1
print("Merging ", n)
alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]
mergeSort(alist)
print(alist)
|
# here we use 2 lists to create a HashTable class that implements
# the Map abstract data type. One list, called slots, will hold the
# key items. A parallel list, called data, will hold the data
# values. When we look up a key, the corresponding position in the
# data list will hold the associated data value. We will treat the
# key list as a hash table. Note: the initial size for the hash table
# will be 11; this is arbitrary, but we want to use a prime number
# so the collision resolution algorithm can be efficient.
class HashTable:
def __init__(self):
self.size = 11
self.slots = [None] * self.size # initially all None
self.data = [None] * self.size
def put(self, key, data):
hashValue = self.hashFunction(key, len(self.slots))
if self.slots[hashValue] == None:
self.slots[hashValue] = key
self.data[hashValue] = data
else:
# if key is already in the map
if self.slots[hashValue] == key:
self.data[hashValue] = data # swap old value for new
else:
nextSlot = self.rehash(hashValue, len(self.slots))
while self.slots[nextSlot] != None and \
self.slots[nextSlot] != key:
nextSlot = self.rehash(nextSlot, len(self.slots))
if self.slots[nextSlot] == None:
self.slots[nextSlot] = key
self.data[nextSlot] = data
else:
self.data[nextSlot] = data #replace
# hashFunction implements simple remainder method.
def hashFunction(self, key, size):
return(key % size)
    # the collision resolution algo is linear probing
# with a "plus 1" rehash function
def rehash(self, oldHash, size):
return((oldHash + 1) % size)
# get begins by computing the initial hash value.
# if the value is not in the initial slot, rehash is used
# to locate the next possible position.
def get(self, key):
startSlot = self.hashFunction(key, len(self.slots))
data = None
stop = False
found = False
position = startSlot
while self.slots[position] != None and not found and not stop:
if self.slots[position] == key:
found = True
data = self.data[position]
else:
position = self.rehash(position, len(self.slots))
# guarantees that the search will terminate
                # by checking to make sure we have not returned
# the initial slot. If that happens, we have
# exhausted all possible slots, and the item
# must not be present.
if position == startSlot:
stop = True
return(data)
#def del(self, key):
# self.slots[key] == None
def len(self):
return(len(self.slots))
# def in(self, key):
# hashValue = self.hashFunction(key, len(self.slots))
# if self.slots[hashValue] == key:
# return True
# else:
# return False
def __getitem__(self, key):
return(self.get(key))
def __setitem__(self, key, data):
self.put(key, data)
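
# A minimal usage sketch (added illustration, not part of the original snippet): exercises put/get
# and the dict-style __setitem__/__getitem__ defined above, assuming small integer keys.
if __name__ == '__main__':
    h = HashTable()
    h[54] = 'cat'
    h[26] = 'dog'
    h[93] = 'lion'
    h[17] = 'tiger'
    h[77] = 'bird'
    print(h.slots)   # keys placed by hashFunction() (rehash() would resolve any collisions)
    print(h.data)    # the parallel data list
    print(h[26])     # -> 'dog'
    print(h[20])     # -> None, this key was never inserted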
|
# ordinal value hash function
def hash(astring, tableSize):
sum = 0
for pos in range(len(astring)):
sum += ord(astring[pos])
return sum%tableSize
print(hash('cat', 11))
|
# Uses Heath Nutrition and Population statistics,
# stored in the file HNP_Data.csv.gz,
# assumed to be located in the working directory.
# Prompts the user for an Indicator Name. If it exists and is associated with
# a numerical value for some countries or categories, for some of the years 1960-2015,
# then finds out the maximum value, and outputs:
# - that value;
# - the years when that value was reached, from oldest to more recents years;
# - for each such year, the countries or categories for which that value was reached,
# listed in lexicographic order.
#
# Written by *** and Eric Martin for COMP9021
import sys
import os
import csv
import gzip
from collections import defaultdict
def is_truncate(num):
l = str(num).split(("."))
left = l[0]
right = l[1]
if(right == "0"):
return int(left)
else:
return num
filename = 'HNP_Data.csv.gz'
if not os.path.exists(filename):
print(f'There is no file named {filename} in the working directory, giving up...')
sys.exit()
indicator_of_interest = input('Enter an Indicator Name: ')
first_year = 1960
number_of_years = 56
max_value = None
#countries_for_max_value_per_year = {}
countries_for_max_value_per_year = defaultdict(list)
maxvalue = float("-inf")
with gzip.open(filename) as csvfile:
file = csv.reader(line.decode('utf8').replace('\0', '') for line in csvfile)
rows = [row for row in file]
column = rows[0]
for i in rows:
if (len(i) >= 3):
if i[2].strip() == indicator_of_interest.strip():
for j in range(4, len(i)):
if (not i[j] == ""):
maxvalue = max(maxvalue, float(i[j]))
for i in rows:
if (len(i) >= 3):
if i[2].strip() == indicator_of_interest.strip():
for j in range(4, len(i)):
if (not i[j] == ""):
if (float(i[j]) == maxvalue):
countries_for_max_value_per_year[rows[0][j]].append(i[0])
'''
if(rows[0][j] not in countries_for_max_value_per_year):
countries_for_max_value_per_year[rows[0][j]] = [i[0]]
else:
countries_for_max_value_per_year[rows[0][j]].append([i[0]])
'''
if (maxvalue != float("-inf")):
max_value = is_truncate(maxvalue)
if max_value is None:
print('Sorry, either the indicator of interest does not exist or it has no data.')
else:
print('The maximum value is:', max_value)
print('It was reached in these years, for these countries or categories:')
print('\n'.join(f' {year}: {countries_for_max_value_per_year[year]}'
for year in sorted(countries_for_max_value_per_year)
)
)
|
#!/usr/bin/python3
# Using the template offered by Zac Partridge, thanks a lot
### Question answer:
# This AI program is implemented with the alpha-beta pruning algorithm.
# The main function is ab_pruning() (the alpha-beta implementation), a recursive function that returns an int value deciding which move to play.
# The function initially sets alpha = -9999 and beta = 9999 and uses get_available_move() on every recursion to get the child nodes (branches) of each node.
# After the recursion, the function computes the heuristic value of every child node and, at depth 0, returns the place where to play.
# When more than one child node at depth 0 has the largest value, evaluate_single() is used to choose the best move to play.
# The program also adjusts the default recursion depth of ab_pruning() to avoid timing out:
# in early moves the search only goes to depth 5, and as the number of moves grows (after 15/25/40/60) the depth limit is increased (see get_max_iteration()).
# At every leaf node the heuristic value is computed by evaluate(). The main idea is to
# check the number of X's and O's in every row, column and diagonal of every sub-board; an almost-won line (two X's or two O's) gets a bigger value, otherwise a small one.
# The values of all 9 sub-boards are added up to compute the heuristic value of the whole position.
# Inside the recursion, ab_pruning() also detects win states: if either player wins, a very big or very small value (10000 or -10000) is returned,
# depending on which player is playing.
# To see the details of every function, please see the comments above each function.
import socket
import sys
import numpy as np
import copy
# a board cell can hold:
# 0 - Empty
# 1 - I played here
# 2 - They played here
# the boards are of size 10 because index 0 isn't used
boards = np.zeros((10, 10), dtype="int8")
curr = 0 # this is the current board to play in
move_number = 0
# Support function for ab_pruning(): returns the moves (child nodes) that can be played at each step of the recursion
def get_available_move(current_move, current_board):
available_list = []
for i in range(1, 10):
if current_board[current_move][i] == 0:
available_list.append(i)
return available_list
# This is the heuristic function.
# When the alpha-beta search reaches the depth limit (a leaf node), this function computes the heuristic value.
# A row/column/diagonal with 2 X's and 1 empty cell contributes +3, while one with 2 O's and 1 empty cell contributes -3;
# otherwise, a line with a single X (or O) and 2 empty cells contributes +1 (or -1) to the sub-board's value.
def evaluate(current_board):
my_value = 0
their_value = 0
for i in range(1, len(current_board)):
# for row
x = 0
o = 0
for j in range(1, 4):
if current_board[i][j] == 1:
x += 1
elif current_board[i][j] == 2:
o += 1
if x == 1 and o == 0:
my_value += 1
elif x == 2 and o == 0:
my_value += 3
elif o == 1 and x == 0:
their_value += 1
elif o == 2 and x == 0:
their_value += 3
x = 0
o = 0
for j in range(4, 7):
if current_board[i][j] == 1:
x += 1
elif current_board[i][j] == 2:
o += 1
if x == 1 and o == 0:
my_value += 1
elif x == 2 and o == 0:
my_value += 3
elif o == 1 and x == 0:
their_value += 1
elif o == 2 and x == 0:
their_value += 3
x = 0
o = 0
for j in range(7, 10):
if current_board[i][j] == 1:
x += 1
elif current_board[i][j] == 2:
o += 1
if x == 1 and o == 0:
my_value += 1
elif x == 2 and o == 0:
my_value += 3
elif o == 1 and x == 0:
their_value += 1
elif o == 2 and x == 0:
their_value += 3
        # for columns
colum_list = [1, 4, 7]
x = 0
o = 0
for j in colum_list:
if current_board[i][j] == 1:
x += 1
elif current_board[i][j] == 2:
o += 1
if x == 1 and o == 0:
my_value += 1
elif x == 2 and o == 0:
my_value += 3
elif o == 1 and x == 0:
their_value += 1
elif o == 2 and x == 0:
their_value += 3
colum_list = [2, 5, 8]
x = 0
o = 0
for j in colum_list:
if current_board[i][j] == 1:
x += 1
elif current_board[i][j] == 2:
o += 1
if x == 1 and o == 0:
my_value += 1
elif x == 2 and o == 0:
my_value += 3
elif o == 1 and x == 0:
their_value += 1
elif o == 2 and x == 0:
their_value += 3
colum_list = [3, 6, 9]
x = 0
o = 0
for j in colum_list:
if current_board[i][j] == 1:
x += 1
elif current_board[i][j] == 2:
o += 1
if x == 1 and o == 0:
my_value += 1
elif x == 2 and o == 0:
my_value += 3
elif o == 1 and x == 0:
their_value += 1
elif o == 2 and x == 0:
their_value += 3
        # for diagonals
colum_list = [1, 5, 9]
x = 0
o = 0
for j in colum_list:
if current_board[i][j] == 1:
x += 1
elif current_board[i][j] == 2:
o += 1
if x == 1 and o == 0:
my_value += 1
elif x == 2 and o == 0:
my_value += 3
elif o == 1 and x == 0:
their_value += 1
elif o == 2 and x == 0:
their_value += 3
colum_list = [3, 5, 7]
x = 0
o = 0
for j in colum_list:
if current_board[i][j] == 1:
x += 1
elif current_board[i][j] == 2:
o += 1
if x == 1 and o == 0:
my_value += 1
elif x == 2 and o == 0:
my_value += 3
elif o == 1 and x == 0:
their_value += 1
elif o == 2 and x == 0:
their_value += 3
final_value = my_value - their_value
return final_value
# Support function for ab_pruning().
# When several branches at depth 0 return the same (alpha) value, this function is used to choose the best one.
# It is almost the same as evaluate(); for details, see the comments above evaluate().
def evaluate_single(current_board, i):
my_value = 0
their_value = 0
# for row
x = 0
o = 0
for j in range(1, 4):
if current_board[i][j] == 1:
x += 1
elif current_board[i][j] == 2:
o += 1
if x == 1 and o == 0:
my_value += 1
elif x == 2 and o == 0:
my_value += 3
elif o == 1 and x == 0:
their_value += 1
elif o == 2 and x == 0:
their_value += 3
x = 0
o = 0
for j in range(4, 7):
if current_board[i][j] == 1:
x += 1
elif current_board[i][j] == 2:
o += 1
if x == 1 and o == 0:
my_value += 1
elif x == 2 and o == 0:
my_value += 3
elif o == 1 and x == 0:
their_value += 1
elif o == 2 and x == 0:
their_value += 3
x = 0
o = 0
for j in range(7, 10):
if current_board[i][j] == 1:
x += 1
elif current_board[i][j] == 2:
o += 1
if x == 1 and o == 0:
my_value += 1
elif x == 2 and o == 0:
my_value += 3
elif o == 1 and x == 0:
their_value += 1
elif o == 2 and x == 0:
their_value += 3
    # for columns
colum_list = [1, 4, 7]
x = 0
o = 0
for j in colum_list:
if current_board[i][j] == 1:
x += 1
elif current_board[i][j] == 2:
o += 1
if x == 1 and o == 0:
my_value += 1
elif x == 2 and o == 0:
my_value += 3
elif o == 1 and x == 0:
their_value += 1
elif o == 2 and x == 0:
their_value += 3
colum_list = [2, 5, 8]
x = 0
o = 0
for j in colum_list:
if current_board[i][j] == 1:
x += 1
elif current_board[i][j] == 2:
o += 1
if x == 1 and o == 0:
my_value += 1
elif x == 2 and o == 0:
my_value += 3
elif o == 1 and x == 0:
their_value += 1
elif o == 2 and x == 0:
their_value += 3
colum_list = [3, 6, 9]
x = 0
o = 0
for j in colum_list:
if current_board[i][j] == 1:
x += 1
elif current_board[i][j] == 2:
o += 1
if x == 1 and o == 0:
my_value += 1
elif x == 2 and o == 0:
my_value += 3
elif o == 1 and x == 0:
their_value += 1
elif o == 2 and x == 0:
their_value += 3
    # for diagonals
colum_list = [1, 5, 9]
x = 0
o = 0
for j in colum_list:
if current_board[i][j] == 1:
x += 1
elif current_board[i][j] == 2:
o += 1
if x == 1 and o == 0:
my_value += 1
elif x == 2 and o == 0:
my_value += 3
elif o == 1 and x == 0:
their_value += 1
elif o == 2 and x == 0:
their_value += 3
colum_list = [3, 5, 7]
x = 0
o = 0
for j in colum_list:
if current_board[i][j] == 1:
x += 1
elif current_board[i][j] == 2:
o += 1
if x == 1 and o == 0:
my_value += 1
elif x == 2 and o == 0:
my_value += 3
elif o == 1 and x == 0:
their_value += 1
elif o == 2 and x == 0:
their_value += 3
return my_value - their_value
# This function implements the alpha-beta pruning algorithm, with some extra details to improve performance.
# Besides the basic algorithm, it also detects win states: when a win state occurs the recursion stops and
# returns a very large or very small value (depending on which player is playing).
# At depth 0, instead of returning alpha, this function returns the place (a number) where the next move should be played.
# At depth 0, if several branches return the same (alpha) value, the heuristic function evaluate_single()
# is used to choose the best one.
def ab_pruning(current_board, current_playing_board, player, alpha, beta, depth):
if win_state(current_board):
if player == 1:
return -10000
elif player == 2:
return 10000
if depth < get_max_iteration():
templist = get_available_move(current_playing_board, current_board)
mydic = {}
if player == 1:
for i in range(len(templist)):
copyboard = copy.deepcopy(current_board)
copyboard[current_playing_board][templist[i]] = player
this_value = ab_pruning(copyboard, templist[i], switch_player(player), alpha, beta, depth + 1)
alpha = max(alpha, this_value)
mydic[templist[i]] = this_value
# If there are any move that could lead to win, return this move
if (alpha == 10000 or alpha == 9999) and depth == 0:
newdic = {}
for k in mydic.keys():
if mydic[k] == alpha:
newdic[k] = evaluate_single(current_board, k)
newdic1 = sorted(newdic.items(), key=lambda x: x[1], reverse=True)
return newdic1[0][0]
if alpha >= beta:
return alpha
if depth == 0:
                print('The heuristic values of the child nodes are')
print(mydic)
for i in mydic.keys():
if mydic[i] == alpha:
return i
if alpha == -9999 and depth == 0:
return templist[0]
return alpha
elif player == 2:
for i in range(len(templist)):
copyboard = copy.deepcopy(current_board)
copyboard[current_playing_board][templist[i]] = player
this_value = ab_pruning(copyboard, templist[i], switch_player(player), alpha, beta, depth + 1)
beta = min(beta, this_value)
mydic[templist[i]] = this_value
if alpha >= beta:
return beta
return beta
else:
return evaluate(current_board)
# Support function for ab_pruning():
# when a win state is met in a sub-recursion, return immediately.
# To identify a win state, just check whether any row, column or diagonal contains 3 X's or 3 O's.
def win_state(current_board):
for i in range(1, len(current_board)):
# for rows
if current_board[i][1] != 0 and current_board[i][1] == current_board[i][2] == current_board[i][3]:
return True
if current_board[i][4] != 0 and current_board[i][4] == current_board[i][5] == current_board[i][6]:
return True
if current_board[i][7] != 0 and current_board[i][7] == current_board[i][8] == current_board[i][9]:
return True
        # for columns
if current_board[i][1] != 0 and current_board[i][1] == current_board[i][4] == current_board[i][7]:
return True
if current_board[i][2] != 0 and current_board[i][2] == current_board[i][5] == current_board[i][8]:
return True
if current_board[i][3] != 0 and current_board[i][3] == current_board[i][6] == current_board[i][9]:
return True
        # for diagonals
if current_board[i][1] != 0 and current_board[i][1] == current_board[i][5] == current_board[i][9]:
return True
if current_board[i][3] != 0 and current_board[i][3] == current_board[i][5] == current_board[i][7]:
return True
return False
# Support function for ab_pruning():
# switches the player on every recursive call.
def switch_player(player):
if player == 1:
return 2
elif player == 2:
return 1
# This function adjusts the recursion depth limit of the alpha-beta pruning search.
# In early rounds the search only goes to depth 5;
# in later rounds (after move 15) the depth can be increased without timing out.
def get_max_iteration():
max_iteration = 5
if move_number >= 15 and move_number < 25:
return 7
elif move_number >= 25 and move_number < 40:
return 9
elif move_number >= 40 and move_number < 60:
return 11
elif move_number >= 60:
return 13
return max_iteration
def play():
global move_number
copyboard = boards.copy()
# initially alpha = -9999 and beta = 9999, depth = 0
n = ab_pruning(copyboard, curr, 1, -9999, 9999, 0)
#print('we play: ', end='')
#print(n)
#print()
place(curr, n, 1)
move_number += 1
return n
def place_for_iter(copyboard, playing_board, num, player):
copyboard[playing_board][num] = player
# place a move in the global boards
def place(board, num, player):
global curr
curr = num
boards[board][num] = player
# read what the server sent us and
# only parses the strings that are necessary
def parse(string):
if "(" in string:
command, args = string.split("(")
args = args.split(")")[0]
args = args.split(",")
else:
command, args = string, []
if command == "second_move":
place(int(args[0]), int(args[1]), 2)
return play()
elif command == "third_move":
# place the move that was generated for us
place(int(args[0]), int(args[1]), 1)
# place their last move
place(curr, int(args[2]), 2)
return play()
elif command == "next_move":
place(curr, int(args[0]), 2)
return play()
elif command == "win":
print("Yay!! We win!! :)")
return -1
elif command == "loss":
print("We lost :(")
return -1
return 0
# connect to socket
def main():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = int(sys.argv[2]) # Usage: ./agent.py -p (port)
s.connect(('localhost', port))
while True:
text = s.recv(1024).decode()
if not text:
continue
for line in text.split("\n"):
response = parse(line)
if response == -1:
s.close()
return
elif response > 0:
s.sendall((str(response) + "\n").encode())
if __name__ == "__main__":
main()
|
"""
This module provides functionality for plotting in jupyter (http://jupyter.org/) notebooks
based on dygraphs (http://dygraphs.com/) and pandas (https://pandas.pydata.org/).
"""
import json
import uuid
import pandas
from IPython.display import HTML
def dygraphplot(*dataframeandoptions):
"""
Plots the given dataframe in a jupyter notebook cell.
    Arguments:
    *dataframeandoptions: the input data for the plot, one dict per plot. Each dict contains a
    pandas.DataFrame as the value for key 'df' and an optional dict as the value for key 'opt'.
    The first column of the data frame contains the x-axis data, while
    the remaining columns contain the series data. All columns except the first one need to
    be parseable to numeric.
    The 'opt' dict contains the dygraphs config options.
"""
html = """
<link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/dygraph/2.1.0/dygraph.min.css">
"""
for dfandoptions in dataframeandoptions:
df = dfandoptions['df']
options = dfandoptions.get('opt', {'legend': 'always'})
# Check all but the first columns. According to dygraphs spec, these columns must contain
# numeric values.
for col in df.columns.values[1:]:
            try:
                pandas.to_numeric(df[col])
            except (ValueError, TypeError) as exc:
                raise Exception("Dataframe contains non-numeric column: {}".format(col)) from exc
html = html+"""
<div id="{0}"></div>
<script type="text/javascript">
requirejs.config({{
paths: {{
"Dygraph": ["//cdnjs.cloudflare.com/ajax/libs/dygraph/2.1.0/dygraph.min"]
}}
}});
require(['Dygraph'], function(Dygraph){{
new Dygraph(document.getElementById("{0}"), "{1}", {2})
}})
</script>
""".format(
uuid.uuid4(),
df.to_csv(index=False).replace("\n", "\\n\"+\""),
json.dumps(options)
)
return HTML(html)
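
# A minimal usage sketch (assumed example data, not part of the original module): pass one dict per
# DataFrame, with the frame under 'df' and optional dygraphs options under 'opt'; the returned HTML
# object is rendered when this is evaluated in a notebook cell.
if __name__ == '__main__':
    df = pandas.DataFrame({'x': list(range(10)), 'y': [v * v for v in range(10)]})
    dygraphplot({'df': df, 'opt': {'legend': 'always', 'title': 'y = x^2'}})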
|
"""
Exercise 2
Read a positive integer from the input and print the first N odd natural numbers.
For the output, follow the format of the example below.
Example:
Digite o valor de n: 5
1
3
5
7
9
"""
def calculo_n_impares(n):
contador = 0
numero_natural = 0
while contador < n:
resultado = numero_natural % 2
if resultado == 0:
numero_natural += 1
else:
print(numero_natural)
numero_natural += 1
contador += 1
numero = int(input("Digite o valor de n: "))
calculo_n_impares(numero) |
"""
Write the function maior_primo, which receives an integer greater than or equal to 2 as a parameter and
returns the largest prime number less than or equal to the number passed to the function.
Example:
> maior_primo(100)
97
> maior_primo(7)
7
Hint: write a function isPrime(k) and loop through the numbers up to the given number, checking
whether each one is prime; if it is, store it in a variable. At the end of the loop, the value stored
in the variable is the largest prime found.
"""
def maior_primo(number):
primos = []
    for i in range(2, number + 1):  # include number itself so that maior_primo(7) == 7, as in the docstring
c = 0
for j in range(number):
if i % (j+1) == 0:
c += 1
if c == 2:
primos.append(i)
return max(primos)
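
# Quick check (added) against the examples given in the docstring above.
print(maior_primo(100))  # expected: 97
print(maior_primo(7))    # expected: 7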
|
'''
Write a Python program that reads four grades, computes and prints the arithmetic mean. See the example below:
Example:
Input:
Digite a primeira nota: 4
Digite a segunda nota: 5
Digite a terceira nota: 6
Digite a quarta nota: 7
Output:
A média aritmética é 5.5
Tip: using print
When you use the print command to print more than one thing, it automatically includes spaces between the printed
arguments. Be careful not to include too many spaces in your answer! The grader will notice and deduct points.
'''
def aritmetica_media():
n1 = int(input("Digite a primeira nota: "))
n2 = int(input("Digite a segunda nota: "))
n3 = int(input("Digite a terceira nota: "))
n4 = int(input("Digite a quarta nota: "))
media = (n1 + n2 + n3 + n4)/4
print("A média aritmética é {}".format(media))
aritmetica_media()
|
# -*- coding: utf-8 -*-
"""
Coursework 3: Clustering
References:
https://scikit-learn.org/stable/modules/clustering.html
https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html
https://docs.scipy.org/doc/scipy/reference/spatial.distance.html
"""
import numpy as np
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
# #############################################################################
# Load data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=1000, centers=centers, cluster_std=0.4,
random_state=0)
plt.plot(X[:,0],X[:,1],'ro', markersize=1)
plt.show()
#%%
"""
Exercise 1: KMeans
"""
# Test different number of clusters
n_clusters = range(2,16)
silhouette = [-1]
for n in n_clusters:
kmeans = KMeans(n_clusters=n, random_state=0).fit(X)
labels = kmeans.labels_
silhouette.append(metrics.silhouette_score(X, labels))
# Plot Silhouette coefficients
plt.plot(range(1,16), silhouette);
plt.xticks(n_clusters);
plt.xlabel('$k$',fontsize = 18)
plt.ylabel(r'$\bar{s}$', fontsize = 18)
# Compute optimum value for the number of clusters
n_clusters = np.argmax(silhouette) + 1  # silhouette[0] is the k=1 placeholder, so index i corresponds to k = i+1
# Train optimum model of KMeans
kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(X)
labels = kmeans.labels_
# Plot optimum model of KMeans
unique_labels = set(labels)
colors = [plt.cm.Spectral(each)
for each in np.linspace(0, 1, len(unique_labels))]
plt.figure(figsize=(8,4))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = [0, 0, 0, 1]
class_member_mask = (labels == k)
xy = X[class_member_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=5)
plt.title('Fixed number of KMeans clusters: %d' % n_clusters)
plt.show()
#%%
"""
Exercise 2: DBSCAN
"""
# Test different number of epsilon
epsilon = np.linspace(0.1,1,10)
silhouette_euc = []
silhouette_man = []
for eps in epsilon:
db = DBSCAN(eps=eps, min_samples=10, metric='euclidean').fit(X)
labels = db.labels_
if(len(set(labels)) == 1):
silhouette_euc.append(-1)
else:
silhouette_euc.append(metrics.silhouette_score(X, labels))
db = DBSCAN(eps=eps, min_samples=10, metric='manhattan').fit(X)
labels = db.labels_
if(len(set(labels)) == 1):
silhouette_man.append(-1)
else:
silhouette_man.append(metrics.silhouette_score(X, labels))
# Plot Silhouette coefficients
p1, = plt.plot(epsilon, silhouette_euc, label = 'Euclidean');
p2, = plt.plot(epsilon, silhouette_man, label = 'Manhattan');
plt.xlabel(r'$\epsilon$', fontsize = 18)
plt.ylabel(r'$\bar{s}$', fontsize = 18)
plt.legend(handles = [p1,p2])
plt.xticks(epsilon);
# Compute optimum value of epsilon
eps_euc = epsilon[np.argmax(silhouette_euc)]
eps_man = epsilon[np.argmax(silhouette_man)]
# Train optimum model of DBSCAN
db = DBSCAN(eps=eps_man, min_samples=10, metric='manhattan').fit(X)
labels = db.labels_
# Plot different characteristics of the model
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
print('Estimated number of clusters: %d' % n_clusters_)
print('Estimated number of noise points: %d' % n_noise_)
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
# Plot optimum model of DBSCAN
unique_labels = set(labels)
colors = [plt.cm.Spectral(each)
for each in np.linspace(0, 1, len(unique_labels))]
plt.figure(figsize=(8,4))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = [0, 0, 0, 1]
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=5)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=3)
plt.title('Estimated number of DBSCAN clusters: %d' % n_clusters_)
plt.show() |
idadeAnos = int(input("Informe sua idade (em anos): "))
idadeMeses = int(input("Informe sua idade (os meses): "))
idadeDias = int(input("Informe sua idade (os dias): "))
idade = idadeDias+(idadeMeses*30)+(idadeAnos*365)
print("Você já viveu ",idade, " dias!")
|
lado = float(input("Informe o lado do quadrado: "))
print("Área do quadrado: ",lado**2) |
salarioA = float(input('Informe seu salario: R$'))
percent = float(input('Informe percentual de reajuste: '))
novoSalario = salarioA+(salarioA*percent/100)
print('Seu salario Reajustado: ', novoSalario) |
maior = 0
nMaior = 0
for x in range(10):
n=x+1
valor = float(input('Informe o '+str(n)+'º valor: '))
while valor < 0:
print("O valor deve ser positivo!\n")
valor = float(input('Informe o '+str(n)+'º valor novamente: '))
if valor > maior:
maior = valor
nMaior = n
print("O maior valor é o "+str(nMaior)+"º: "+str(maior))
|
import os
repetir = True
while(repetir):
os.system('cls' if os.name == 'nt' else 'clear')
valor = float(input('Informe um valor: '))
if valor >=0:
print('\nÉ positivo!')
else:
print('\nÉ negativo!')
continuar = input("Deseja continuar: \n1 - SIM \nQualquer outra tecla - Não \n")
if continuar == '1':
repetir = True
else:
repetir= False
|
tempC = float(input("Informe a temperatura em °C: "))
tempF = (tempC*(9/5))+32
print('Temp °F: ', tempF) |
s = ["ACESSO PERMITIDO", "ACESSO NEGADO"]
senha = input('Informe a senha: ')
if senha == "1234":
print(s[0])
else:
print(s[1])
|
class Solution(object):
    def hammingDistance(self, x, y):
        res = 0
        while not (x == 0 and y == 0):
            if x % 2 != y % 2:
                res = res + 1
            x = x // 2  # integer division so the loop terminates in Python 3
            y = y // 2
        return res
|
'''
Mean_Average_Precision:
takes two arguments:
the first argument is a list of the correct values
the second argument is a list of tuples, the values in the tuple are the guesses
    in its current form this code is only suitable for use in the Best Buy competition, and it is guaranteed to be MAP@5 only if 5 guesses are provided for each known value
Max_Score:
takes two arguments:
the first argument is a list of the correct values
the second argument is a list of tuples, the values in the tuple are the guesses
    This function computes the best possible score given the guesses provided (it treats any correct guess as a first guess). This is useful for seeing whether we are predominantly losing points due to ordering or due to not getting a right answer at all.
Sample Use:
act = [1,1,4,4,5]
pre = [(1,2,3,4,5),(1,2,3,4,5),(1,2,3,4,5),(1,2,3,4,5),(1,2,3,4,5)]
    print(Mean_Average_Precision(act,pre))
    =0.54
    print(Max_Score(act,pre))
=1.0
'''
def Mean_Average_Precision(actual, predicted):
if len(actual) != len(predicted):
print "actual and predicted don't have same number of elements"
return
mean_average_precision = 0
for i,p in zip(actual, predicted):
n = float(1)
average_precision = 0
for guess in p:
if i == guess:
average_precision = 1/n
break
else:
n += 1
mean_average_precision += average_precision
mean_average_precision /= len(actual)
return mean_average_precision
def Max_Score(actual, predicted):
if len(actual) != len(predicted):
print "actual and predicted don't have same number of elements"
return
max_score = float(0)
for i,p in zip(actual, predicted):
if i in p:
max_score += 1
max_score /= len(actual)
return max_score
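
# Quick sanity check (added) reproducing the sample use from the docstring above.
if __name__ == '__main__':
    act = [1, 1, 4, 4, 5]
    pre = [(1, 2, 3, 4, 5)] * 5
    print(Mean_Average_Precision(act, pre))  # -> 0.54
    print(Max_Score(act, pre))               # -> 1.0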
|
import time
import random
characters = ["troll", "gorgon", "pirate", "wicked fairie"]
character = random.choice(characters)
def print_pause(message):
print(message)
time.sleep(2)
def introduction():
print_pause("You find yourself standing in an open field, "
"filled with grass and yellow wildflowers.")
print_pause(f"Rumor has it that a {character} is somewhere around here, "
"and has been terrifying the nearby village.")
print_pause("In front of you is a house.")
print_pause("To your right is a dark cave.")
print_pause("In your hand you hold your trusty "
"(but not very effective) dagger.")
def vaild_input(prompt, option1, option2):
while True:
choice5 = input(prompt)
if option1 in choice5:
break
elif option2 in choice5:
break
return choice5
def field():
print_pause("You run back to the field. Luckily, "
"you don't seem to have been followed.")
def second_choice(items):
second_choice = vaild_input("Would you like to (1) "
"fight or (2) run away?\n", "1", "2")
if second_choice == "1":
if "Sword" in items:
attack()
play_again()
else:
death()
play_again()
elif second_choice == "2":
field()
choices(items)
def house(items):
print_pause("You approach the door of the house.")
print_pause("You are about to knock when the door "
f"opens and out steps a {character}.")
print_pause(f"Eep! This is the {character}'s house!")
print_pause(f"The {character} attacks you!")
second_choice(items)
def cave(items):
print_pause("You peer cautiously into the cave.")
print_pause("It turns out to be only a very small cave.")
print_pause("Your eye catches a glint of metal behind a rock.")
print_pause("You have found the magical sword of Ogoroth!")
print_pause("You discard your silly old dagger "
"and take the sword with you.")
print_pause("You walk back out to the field.\n")
items.append("Sword")
def attack():
print_pause(f"As the {character} moves to attack, "
"you unsheath your new sword.")
print_pause("The Sword of Ogoroth shines brightly in your"
" hand as you brace yourself for the attack.")
print_pause(f"But the {character} takes one look "
"at your shiny new toy and runs away!")
print_pause(f"You have rid the town of the {character}.\n"
"You are victorious!")
def death():
print_pause("You do your best..")
print_pause(f"but your dagger is no match for the {character}")
print_pause("You have been defeated!")
def back_to_the_cave():
print_pause("You peer cautiously into the cave.")
print_pause("You've been here before, and gotten all the"
" good stuff. It's just an empty cave now.")
print_pause("You walk back out to the field.\n")
def choice1(items):
print_pause("Enter 1 to knock on the door of the house.")
print_pause("Enter 2 to peer into the cave.")
def choices(items):
choice1(items)
choice = vaild_input("What would you like to do?\n(Please enter 1 or 2)\n", "1", "2")
if choice == "1":
house(items)
elif choice == "2":
if "Sword" in items:
back_to_the_cave()
choices(items)
else:
cave(items)
choices(items)
def play_again():
play_again = vaild_input("Would you like to play again ? (y/n)", "y", "n")
if play_again == "y":
play()
elif play_again == "n":
print_pause("Thanks for playing! see you next time :)")
def play():
items = []
introduction()
choices(items)
play()
|
def faculty(num):
num = int(num)
fac = 1
while num != 0:
fac *= num
num -= 1
return fac
def is_curious(number):
number = str(number)
total = 0
answer = 0
for digit in number:
total += faculty(int(digit))
if int(number) == total:
print "found!", number
return int(number)
if __name__ == '__main__':
for i in range(3, 999999):
is_curious(i)
|
def main():
tri = []
pen = []
hexa = []
for a in range(284, 2000000):
        tri.append(a*(a+1)//2)
    for b in range(165, 2000000):
        pen.append(b*(3*b-1)//2)
    for c in range(143, 2000000):
        hexa.append(c*(2*c-1))
    for number in tri:
        if number in pen:
            print(number)
            if number in hexa:
                print("found! ", number)
if __name__ == '__main__':
    print(main())
|
import sys
def isprime(n):
n = abs(int(n))
if n < 2:
return False
if n == 2:
return True
if not n & 1:
return False
for i in range(3, int(n**0.5)+1, 2):
if n%i == 0:
return False
return True
def test(num):
num = str(num)
check = []
for digit in range(1, len(num)+1):
if str(digit) in num:
check.append(1)
else:
check.append(0)
if 0 in check:
return False
else:
#print "found!", num
return True
if __name__ == '__main__':
lijst = []
for num in range(0, 9999999):
if isprime(num):
if test(num):
m = num
                print(m)
|
# -*- coding: utf-8 -*-
"""
find a root of the polynomial x**2 - y (i.e. the square root of y) with Newton's method, in a simple case
"""
epsilon = 0.01
y = 24
guess = y / 2.0
while abs((guess ** 2) - y) > epsilon:
guess=guess-((guess**2)-y)/(2*guess)
print(guess) |
import re
nome = input("Digite seu nome: ")
telefone = input("Digite seu telefone ( xx xxxxx-xxxx): ")
email = input("Digite seu email: ")
pattern_nome = r'[A-Z]'
pattern_tel = r'([0-9]{2}\s[0-9]{4,5}-?[0-9]{4})'
pattern_email = r'^\w*(\.\w*)?@\w*\.[a-z]+(\.[a-z]+)?$'
resultado_nome = None
resultado_tel= None
resultado_email = None
while (resultado_nome==None):
    resultado_nome = re.match(pattern_nome,nome) # looks for a match at the start of the string
if (resultado_nome != None):
break
else:
nome = input("Digite seu nome com as letras iniciais maiúsculas: ")
while (resultado_tel==None):
    resultado_tel = re.match(pattern_tel,telefone) # looks for a match at the start of the string
if (resultado_tel != None):
break
else:
telefone = input("Telefone invalido digite seu telefone com ddd novamente: ")
while (resultado_email==None):
resultado_email = re.match(pattern_email,email)
if (resultado_email != None):
break
else:
email = input("Email invalido !! digite seu email novamente: ")
print("Nome: ",nome, "\nTelefone: ", resultado_tel.group(),"\nEmail:", resultado_email.group())
|
#Simple registration app in python with a GUI built from tkinter
#
# Created by Robert Zeelie 26\09\19
#
#Just Run program but you can change type of output by changing button1 command between either print1 or print2
from tkinter import *
from PIL import Image, ImageTk
#---------------------------------------------------------------------------------------------------------------------------
#create the window and add size and title to it
window = Tk()
window.geometry("700x500")
window.title(" RAZ Tech")
#---------------------------------------------------------------------------------------------------------------------------
#Adding a logo
#first get the picture then save it in pic
image = Image.open("C:/Python/Python Projects/TKINTER/Exam Registration/pic.jpg")
pic = ImageTk.PhotoImage(image)
#build pic and add it to window
label0 = Label(image = pic)
label0.pack()
#---------------------------------------------------------------------------------------------------------------------------
#functions for the buttons to perform
#exit function
def exit1():
exit()
#this function prints information to the console
def print1():
Name = fullName.get()
emailAdd = email.get()
DOB = (placeHolder1.get() + " " + placeHolder2.get() + " " + placeHolder3.get())
Country = placeHolder4.get()
    info = ("\n\tNew Student Information\n\nName : " + Name + "\nDate Of Birth : " + DOB + "\nCountry : " + Country + "\nEmail Address : " + emailAdd)
print(info)
exit()
#writes information to a text file
def print2():
#creating and opening a new write to txt file
NewFile = open("Registration.txt","w")
#get all the information and organise it
Name = fullName.get()
emailAdd = email.get()
DOB = (placeHolder1.get() + " " + placeHolder2.get() + " " + placeHolder3.get())
Country = placeHolder4.get()
    info = ("\n\t\t*** New Student Information ***\n\nStudent Name : " + Name + "\nStudent Date Of Birth : " + DOB + "\nStudent Country Of Birth : " + Country + "\nStudent Email Address : " + emailAdd + "\n\n\t\tSuccessfully Registered For Exams")
#write information to document
NewFile.write(info)
#close file
NewFile.close()
exit()
#---------------------------------------------------------------------------------------------------------------------------
#add a label to the (window, text displayed in label, foreground(text color), background, relief is the border, font settings(type, size, customize)) and finally placing it
label1 = Label(window, text = " Exam Registration ", fg = "white", bg = "black", relief = "solid", font = ("arial", 40, "bold"))
#you can use .place(x, y) or .pack(fill = BOTH, padx = 2, pady = 2) at the end for example below
#label1.place(x = 40, y = 10)
#you can also use .grid(row = 50, colum = 50) but we'll use pack for now
label1.pack(fill = BOTH, padx = 15, pady = 1)
label2 = Label(window, text = " Please Enter Required Information ", width = 30, font = ("arial", 16, "bold"))
label2.pack(fill = BOTH, padx = 0, pady = 15)
#---------------------------------------------------------------------------------------------------------------------------
label3 = Label(window, text = " Full Name : ", font = ("arial", 12))
label3.place(x = 10, y = 270)
#create string variable fullName then create textbox and config before placing
fullName = StringVar()
textBox1 = Entry(window, textvar = fullName)
textBox1.config(width = 30)
textBox1.place(x = 130, y = 270)
#---------------------------------------------------------------------------------------------------------------------------
label4 = Label(window, text = " Email : ", font = ("arial", 12))
label4.place(x = 10, y = 320)
email = StringVar()
textBox2 = Entry(window, textvar = email)
textBox2.config(width = 30)
textBox2.place(x = 130, y = 320)
#---------------------------------------------------------------------------------------------------------------------------
label5 = Label(window, text = " D.O.B : ", font = ("arial", 12))
label5.place(x = 340, y = 270)
#drop box menu
placeHolder1 = StringVar()
days = ['1','2','3','4','5','6','7','8','9','10','11']
placeHolder1.set("Day")
dropBox = OptionMenu(window, placeHolder1, *days)
dropBox.place(x = 450, y = 270)
placeHolder2 = StringVar()
months = ['January', 'Febuary', 'March', 'April', 'May']
placeHolder2.set("Month")
dropBox = OptionMenu(window, placeHolder2, *months)
dropBox.place(x = 515, y = 270)
placeHolder3 = StringVar()
years = ['1970', '1971', '1980', '1990', '1997', '1999', '2000', '2010', '2019']
placeHolder3.set("Year")
dropBox = OptionMenu(window, placeHolder3, *years)
dropBox.place(x = 595, y = 270)
#---------------------------------------------------------------------------------------------------------------------------
label6 = Label(window, text = " Country : ", font = ("arial", 12))
label6.place(x = 340, y = 320)
#drop box menu creation
placeHolder4 = StringVar()
countries = ['South Africa', 'Zimbabwe', 'Botswana', 'Not In Africa']
placeHolder4.set("Select Country")
dropBox = OptionMenu(window, placeHolder4, *countries)
dropBox.config(width = 30)
dropBox.place(x = 450, y = 320)
#---------------------------------------------------------------------------------------------------------------------------
#add a button using the same styling options #relief options = ridge , groove , sunken , raised , solid #command makes the button do something
button1 = Button(window, text = " Submit ", fg = "black", bg = "grey", relief = "raised", font = ("arial", 12, "bold"), command = print2)
button1.place(x = 400, y = 450)
button2 = Button(window, text = " Cancel ", fg = "black", bg = "grey", relief = "raised", font = ("arial", 12, "bold"), command = exit1)
button2.place(x = 200, y = 450)
#---------------------------------------------------------------------------------------------------------------------------
#display window
window.mainloop() |
def merge(list1, list2):
if list1 is None:
list1 = []
if list2 is None:
list2 = []
m = len(list1)+len(list2)
list = [0 for i in range(m)]
for k in range(m):
if not list1:
list[k] = list2[0]
list2 = list2[1:]
elif not list2:
list[k] = list1[0]
list1 = list1[1:]
elif list1[0] <= list2[0]:
list[k] = list1[0]
list1 = list1[1:]
else:
list[k] = list2[0]
list2 = list2[1:]
return list
def split(list):
x = int(round(len(list)/2,0))
list1 = list[:x]
list2 = list[x:]
return list1, list2
def sort(list):
if len(list) == 1:
return list
else:
list1, list2 = split(list)
return merge(sort(list1), sort(list2))
import random
list = random.sample(range(0,100), 10)
print(list)
print(sort(list))  # sort() returns a new sorted list, so print its result
|
from turtle import Screen
from Paddle import Paddle
from Ball import Ball
from Score import ScoreBoard
import time
screen = Screen()
screen.setup(width=1000, height=600)
screen.bgcolor('black')
screen.title('PONG')
screen.tracer(0)
score_board = ScoreBoard()
left_paddle = Paddle((-400, 0))
right_paddle = Paddle((400, 0))
right_paddle.shapesize(stretch_wid=10, stretch_len=2)
game = True
ball = Ball()
screen.listen()
screen.onkey(fun=left_paddle.user_move_up, key='Up')
screen.onkey(fun=left_paddle.user_move_down, key='Down')
while game:
screen.update()
time.sleep(ball.move_speed)
ball.move()
# Cpu paddle movement
right_paddle.sety(right_paddle.ycor() + right_paddle.move_y)
if right_paddle.ycor() >= 240 or right_paddle.ycor() <= -240:
right_paddle.move_y *= -1
# Ball collision
if ball.ycor() > 280 or ball.ycor() < -280:
ball.move_y *= -1
# Keep score/Collision
if ball.xcor() > 500:
ball.reset_position()
score_board.keep_user_score()
if ball.xcor() < -500:
ball.reset_position()
score_board.keep_cpu_score()
# Ball and Paddle collision
if ball.distance(right_paddle) < 50 and ball.xcor() > 360 or ball.distance(left_paddle) < 50 and ball.xcor() < -360:
ball.bounce_x()
if score_board.user_score == 5:
game = False
score_board.end_game()
if score_board.cpu_score == 5:
game = False
score_board.end_game()
screen.exitonclick()
# TODO 1. Create the screen
# TODO 2. Create a Bar/Paddle
# TODO 3. Create another Bar
# TODO 4. Create the Ball and make it move
# TODO 5. Detect collision with wall and bounce
# TODO 6. End game after 10 points
# TODO 7. Ask to play again if yes restart if no end
|
# 9. Palindrome Number
# https://leetcode.com/problems/palindrome-number/
class Solution:
    # The task is to decide whether the given number is a palindrome.
def isPalindrome(self, x: int) -> bool:
return str(x) == str(x)[::-1]
if __name__ == '__main__':
    # I solved this a year ago, and the new solution turned out to be exactly the same code.
    # The surprising part: the same code now runs about 7x faster - LeetCode's infrastructure seems to have improved a lot.
    # Like problem easy/0007, this probably should have been solved with integer arithmetic instead of string conversion.
sol = Solution()
assert sol.isPalindrome(123) == False
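
    # A sketch of the integer-only approach hinted at above (an added illustration, not the
    # submitted solution): reverse the digits arithmetically and compare with the original.
    def is_palindrome_no_str(x: int) -> bool:
        if x < 0:
            return False
        original, reversed_num = x, 0
        while x > 0:
            reversed_num = reversed_num * 10 + x % 10
            x //= 10
        return original == reversed_num

    assert is_palindrome_no_str(121)
    assert not is_palindrome_no_str(123)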
|
'''
program to check if a number is prime or not
a prime number is always greater than 1;
if (num % i) == 0 for any i in 2..num-1, the number is not prime
'''
def prime_number(num):
if num > 1:
        for i in range(2, num):
            if (num % i) == 0:
                print(num, ": Number is not a prime")
                break
        else:
            # for-else: runs only when the loop finds no divisor
            print(num, ": Number is prime")
else:
print(num, ": Number is not a prime")
for i in range(1):
print(f"running for the {i + 1}th time")
n = int(input("Enter any number : "))
prime_number(n)
|
'''
Convert a given temperature from Celsius to Fahrenheit
and from Fahrenheit to Celsius
(a is the Celsius reading and b the Fahrenheit reading read from input below)
'''
def celsiusTofahrenheit(C):
fahrenheit = (C * 9/5) + 32
return fahrenheit
def fahrenheitTocelsius(F):
celsisus = (F - 32) * 5/9
return celsisus
for i in range(1):
print(f"running for the {i + 1}th time")
a = float(input("Enter temperature in celsius : "))
b = float(input("Enter temperature in fahrenheit : "))
fahrenheit = celsiusTofahrenheit(a)
print(f"Temperature from celsius to fahrenheit:{fahrenheit}")
celsisus = fahrenheitTocelsius(b)
print(f"Temperature from fahrenheit to celsius:{celsisus}")
print("\n")
|
class User:
users_list = []
def __init__(self, first_name, last_name, password):
self.first_name = first_name
self.last_name = last_name
self.password = password
@classmethod
def save_user(cls, user):
cls.users_list.append(user)
@classmethod
    def find_user(cls, username):
        for user in cls.users_list:
            if user.first_name == username:
                return user
        # only give up after every saved user has been checked
        return None
@classmethod
def validate_user(cls, user, password):
"""
Takes in a user and a password and verifies that the password provided is equal to the users password
:param user:
:param password:
:return: boolean
"""
if user.password == password:
return True
else:
return False
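
# A minimal usage sketch (assumed example data, not part of the original class): register a user,
# look them up by first name, and validate a password attempt.
if __name__ == '__main__':
    alice = User('Alice', 'Smith', 's3cret')
    User.save_user(alice)
    found = User.find_user('Alice')
    print(found is alice)                       # True
    print(User.validate_user(found, 's3cret'))  # True
    print(User.validate_user(found, 'wrong'))   # False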
|
class Woman:
def __init__(self, name):
self.name = name
self.__age = 18
def __secret(self):
        # inside the object's own methods, private attributes can still be accessed
print("%s的年龄是 %d" % (self.name, self.__age))
xiaofang = Woman("小芳")
# a private attribute cannot be accessed directly from outside (only via its name-mangled form)
print(xiaofang._Woman__age)
# a private method, likewise, cannot be called directly from outside (only via its name-mangled form)
xiaofang._Woman__secret()
|