index (int64, 0 to 10k) | blob_id (string, length 40) | step-1 (string, length 13 to 984k) | step-2 (string, length 6 to 1.23M, may be ⌀/null) | step-3 (string, length 15 to 1.34M, may be ⌀/null) | step-4 (string, length 30 to 1.34M, may be ⌀/null) | step-5 (string, length 64 to 1.2M, may be ⌀/null) | step-ids (sequence, 1 to 5 items) |
---|---|---|---|---|---|---|---|
800 | 026e06e777d64f8724ec5e89a7829b3a42a25d6b | <mask token>
class Student(db.Model):
__tablename__ = 'students'
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.Text)
last_name = db.Column(db.Text)
excuses = db.relationship('Excuse', backref='student', lazy='dynamic')
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
class Excuse(db.Model):
__tablename__ = 'excuses'
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.Text)
is_believable = db.Column(db.Text)
student_id = db.Column(db.Integer, db.ForeignKey('students.id'))
@app.route('/')
def root():
return redirect(url_for('index'))
@app.route('/students', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
new_student = Student(request.form['first_name'], request.form[
'last_name'])
db.session.add(new_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/index.html', students=Student.query.all())
@app.route('/students/new')
def new():
return render_template('students/new.html')
@app.route('/students/<int:id>/edit')
def edit(id):
return render_template('students/edit.html', student=Student.query.get(id))
@app.route('/students/<int:id>', methods=['GET', 'PATCH'])
def show(id):
found_student = Student.query.get(id)
if request.method == b'PATCH':
found_student.first_name = request.form['first_name']
found_student.last_name = request.form['last_name']
db.session.add(found_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/show.html', student=found_student)
@app.route('/students/<int:id>/excuses', methods=['GET', 'POST'])
def excuses_index(id):
found_student = Student.query.get(id)
if request.method == 'POST':
new_excuse = Excuse(content=request.form.get('content'),
is_believable=request.form.get('is_believable'), student_id=id)
db.session.add(new_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=id))
excuses_list = found_student.excuses.all()
return render_template('excuses/index.html', excuses=excuses_list,
student=found_student)
@app.route('/students/<int:id>/excuses/new')
def new_excuse(id):
return render_template('/excuses/new.html', id=id)
@app.route('/students/<int:id>/excuses/<int:excuse_id>/edit', methods=[
'GET', 'PATCH', 'DELETE'])
def edit_excuse(id, excuse_id):
print(id)
found_student = Student.query.get(id)
found_excuse = Excuse.query.get(excuse_id)
excuses_list = found_student.excuses.all()
if request.method == b'DELETE':
db.session.delete(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=found_student.id))
elif request.method == b'PATCH':
found_excuse.content = request.form.get('content')
found_excuse.is_believable = request.form.get('is_believable')
db.session.add(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=found_student.id))
return render_template('excuses/edit.html', excuse=found_excuse,
student=found_student)
| <mask token>
Migrate(app, db)
class Student(db.Model):
__tablename__ = 'students'
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.Text)
last_name = db.Column(db.Text)
excuses = db.relationship('Excuse', backref='student', lazy='dynamic')
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
class Excuse(db.Model):
__tablename__ = 'excuses'
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.Text)
is_believable = db.Column(db.Text)
student_id = db.Column(db.Integer, db.ForeignKey('students.id'))
@app.route('/')
def root():
return redirect(url_for('index'))
@app.route('/students', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
new_student = Student(request.form['first_name'], request.form[
'last_name'])
db.session.add(new_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/index.html', students=Student.query.all())
@app.route('/students/new')
def new():
return render_template('students/new.html')
@app.route('/students/<int:id>/edit')
def edit(id):
return render_template('students/edit.html', student=Student.query.get(id))
@app.route('/students/<int:id>', methods=['GET', 'PATCH'])
def show(id):
found_student = Student.query.get(id)
if request.method == b'PATCH':
found_student.first_name = request.form['first_name']
found_student.last_name = request.form['last_name']
db.session.add(found_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/show.html', student=found_student)
@app.route('/students/<int:id>/excuses', methods=['GET', 'POST'])
def excuses_index(id):
found_student = Student.query.get(id)
if request.method == 'POST':
new_excuse = Excuse(content=request.form.get('content'),
is_believable=request.form.get('is_believable'), student_id=id)
db.session.add(new_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=id))
excuses_list = found_student.excuses.all()
return render_template('excuses/index.html', excuses=excuses_list,
student=found_student)
@app.route('/students/<int:id>/excuses/new')
def new_excuse(id):
return render_template('/excuses/new.html', id=id)
@app.route('/students/<int:id>/excuses/<int:excuse_id>/edit', methods=[
'GET', 'PATCH', 'DELETE'])
def edit_excuse(id, excuse_id):
print(id)
found_student = Student.query.get(id)
found_excuse = Excuse.query.get(excuse_id)
excuses_list = found_student.excuses.all()
if request.method == b'DELETE':
db.session.delete(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=found_student.id))
elif request.method == b'PATCH':
found_excuse.content = request.form.get('content')
found_excuse.is_believable = request.form.get('is_believable')
db.session.add(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=found_student.id))
return render_template('excuses/edit.html', excuse=found_excuse,
student=found_student)
| <mask token>
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'
] = 'postgres://localhost/flask_one_to_many'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = True
modus = Modus(app)
db = SQLAlchemy(app)
Migrate(app, db)
class Student(db.Model):
__tablename__ = 'students'
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.Text)
last_name = db.Column(db.Text)
excuses = db.relationship('Excuse', backref='student', lazy='dynamic')
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
class Excuse(db.Model):
__tablename__ = 'excuses'
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.Text)
is_believable = db.Column(db.Text)
student_id = db.Column(db.Integer, db.ForeignKey('students.id'))
@app.route('/')
def root():
return redirect(url_for('index'))
@app.route('/students', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
new_student = Student(request.form['first_name'], request.form[
'last_name'])
db.session.add(new_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/index.html', students=Student.query.all())
@app.route('/students/new')
def new():
return render_template('students/new.html')
@app.route('/students/<int:id>/edit')
def edit(id):
return render_template('students/edit.html', student=Student.query.get(id))
@app.route('/students/<int:id>', methods=['GET', 'PATCH'])
def show(id):
found_student = Student.query.get(id)
if request.method == b'PATCH':
found_student.first_name = request.form['first_name']
found_student.last_name = request.form['last_name']
db.session.add(found_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/show.html', student=found_student)
@app.route('/students/<int:id>/excuses', methods=['GET', 'POST'])
def excuses_index(id):
found_student = Student.query.get(id)
if request.method == 'POST':
new_excuse = Excuse(content=request.form.get('content'),
is_believable=request.form.get('is_believable'), student_id=id)
db.session.add(new_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=id))
excuses_list = found_student.excuses.all()
return render_template('excuses/index.html', excuses=excuses_list,
student=found_student)
@app.route('/students/<int:id>/excuses/new')
def new_excuse(id):
return render_template('/excuses/new.html', id=id)
@app.route('/students/<int:id>/excuses/<int:excuse_id>/edit', methods=[
'GET', 'PATCH', 'DELETE'])
def edit_excuse(id, excuse_id):
print(id)
found_student = Student.query.get(id)
found_excuse = Excuse.query.get(excuse_id)
excuses_list = found_student.excuses.all()
if request.method == b'DELETE':
db.session.delete(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=found_student.id))
elif request.method == b'PATCH':
found_excuse.content = request.form.get('content')
found_excuse.is_believable = request.form.get('is_believable')
db.session.add(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=found_student.id))
return render_template('excuses/edit.html', excuse=found_excuse,
student=found_student)
| from flask import Flask, request, redirect, url_for, render_template
from flask_modus import Modus
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'
] = 'postgres://localhost/flask_one_to_many'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = True
modus = Modus(app)
db = SQLAlchemy(app)
Migrate(app, db)
class Student(db.Model):
__tablename__ = 'students'
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.Text)
last_name = db.Column(db.Text)
excuses = db.relationship('Excuse', backref='student', lazy='dynamic')
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
class Excuse(db.Model):
__tablename__ = 'excuses'
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.Text)
is_believable = db.Column(db.Text)
student_id = db.Column(db.Integer, db.ForeignKey('students.id'))
@app.route('/')
def root():
return redirect(url_for('index'))
@app.route('/students', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
new_student = Student(request.form['first_name'], request.form[
'last_name'])
db.session.add(new_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/index.html', students=Student.query.all())
@app.route('/students/new')
def new():
return render_template('students/new.html')
@app.route('/students/<int:id>/edit')
def edit(id):
return render_template('students/edit.html', student=Student.query.get(id))
@app.route('/students/<int:id>', methods=['GET', 'PATCH'])
def show(id):
found_student = Student.query.get(id)
if request.method == b'PATCH':
found_student.first_name = request.form['first_name']
found_student.last_name = request.form['last_name']
db.session.add(found_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/show.html', student=found_student)
@app.route('/students/<int:id>/excuses', methods=['GET', 'POST'])
def excuses_index(id):
found_student = Student.query.get(id)
if request.method == 'POST':
new_excuse = Excuse(content=request.form.get('content'),
is_believable=request.form.get('is_believable'), student_id=id)
db.session.add(new_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=id))
excuses_list = found_student.excuses.all()
return render_template('excuses/index.html', excuses=excuses_list,
student=found_student)
@app.route('/students/<int:id>/excuses/new')
def new_excuse(id):
return render_template('/excuses/new.html', id=id)
@app.route('/students/<int:id>/excuses/<int:excuse_id>/edit', methods=[
'GET', 'PATCH', 'DELETE'])
def edit_excuse(id, excuse_id):
print(id)
found_student = Student.query.get(id)
found_excuse = Excuse.query.get(excuse_id)
excuses_list = found_student.excuses.all()
if request.method == b'DELETE':
db.session.delete(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=found_student.id))
elif request.method == b'PATCH':
found_excuse.content = request.form.get('content')
found_excuse.is_believable = request.form.get('is_believable')
db.session.add(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id=found_student.id))
return render_template('excuses/edit.html', excuse=found_excuse,
student=found_student)
| from flask import Flask, request, redirect, url_for, render_template
from flask_modus import Modus
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
app.config[
'SQLALCHEMY_DATABASE_URI'] = "postgres://localhost/flask_one_to_many"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = True
modus = Modus(app)
db = SQLAlchemy(app)
Migrate(app, db)
class Student(db.Model):
__tablename__ = "students"
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.Text)
last_name = db.Column(db.Text)
excuses = db.relationship('Excuse', backref='student',
lazy='dynamic')
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
class Excuse(db.Model):
__tablename__ = "excuses"
id = db.Column(db.Integer, primary_key = True)
content = db.Column(db.Text)
is_believable = db.Column(db.Text)
student_id = db.Column(db.Integer, db.ForeignKey("students.id"))
@app.route('/')
def root():
return redirect(url_for('index'))
@app.route('/students', methods=["GET", "POST"])
def index():
if request.method == 'POST':
new_student = Student(request.form['first_name'],
request.form['last_name'])
db.session.add(new_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/index.html', students=Student.query.all())
@app.route('/students/new')
def new():
return render_template('students/new.html')
@app.route('/students/<int:id>/edit')
def edit(id):
return render_template('students/edit.html', student=Student.query.get(id))
@app.route('/students/<int:id>', methods=["GET", "PATCH"])
def show(id):
found_student = Student.query.get(id)
if request.method == b'PATCH':
found_student.first_name = request.form['first_name']
found_student.last_name = request.form['last_name']
db.session.add(found_student)
db.session.commit()
return redirect(url_for('index'))
return render_template('students/show.html', student=found_student)
@app.route("/students/<int:id>/excuses", methods = ["GET", "POST"])
def excuses_index(id):
found_student = Student.query.get(id)
if request.method == "POST":
new_excuse = Excuse(content = request.form.get("content"), is_believable = request.form.get("is_believable"), student_id = id)
db.session.add(new_excuse)
db.session.commit()
return redirect(url_for("excuses_index", id = id))
excuses_list = found_student.excuses.all()
return render_template("excuses/index.html", excuses=excuses_list, student= found_student)
@app.route("/students/<int:id>/excuses/new")
def new_excuse(id):
return render_template("/excuses/new.html", id = id)
@app.route("/students/<int:id>/excuses/<int:excuse_id>/edit", methods = ["GET", "PATCH","DELETE"])
def edit_excuse(id,excuse_id):
print(id)
found_student = Student.query.get(id)
found_excuse = Excuse.query.get(excuse_id)
excuses_list = found_student.excuses.all()
if request.method == b'DELETE':
db.session.delete(found_excuse)
db.session.commit()
return redirect(url_for('excuses_index', id = found_student.id))
elif request.method == b"PATCH":
found_excuse.content = request.form.get("content")
found_excuse.is_believable = request.form.get("is_believable")
db.session.add(found_excuse)
db.session.commit()
return redirect(url_for("excuses_index", id = found_student.id))
# return render_template("excuses/index.html",excuses = excuses_list, student = found_student)
return render_template("excuses/edit.html",excuse = found_excuse, student = found_student) | [
13,
14,
15,
16,
17
] |
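The row above builds a one-to-many Student/Excuse relationship via `db.relationship('Excuse', backref='student', lazy='dynamic')` and a `students.id` foreign key. Below is a minimal sketch of exercising those models from a Flask shell; the module name `app`, the sample data, and the `db.create_all()` call are assumptions added for illustration, and only the model and constructor signatures come from the code above.

# hypothetical interactive session, e.g. inside `flask shell`
from app import db, Student, Excuse   # assumes the code above lives in app.py

db.create_all()                                      # create the students/excuses tables
jane = Student('Jane', 'Doe')                        # Student.__init__(first_name, last_name)
db.session.add(jane)
db.session.commit()

late = Excuse(content='Traffic was terrible',
              is_believable='yes',
              student_id=jane.id)                    # links through the students.id foreign key
db.session.add(late)
db.session.commit()

print(jane.excuses.all())        # lazy='dynamic' makes this a query, so .all() is needed
print(late.student.last_name)    # backref='student' gives the reverse lookup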
801 | 848934680253ff2950db7723b1fe82b2ae799900 | <mask token>
class LimitedRetriesPolicy(BaseRetryPolicy):
<mask token>
def __init__(self, consumer, retry_delays, retry_queue_suffix='retry',
**kwargs):
"""
:param Consumer consumer: message consumer instance
:param Iterable[int] retry_delays: Immutable list of retry backoff delays in
seconds. Message is sent to dlx when this list is exhausted.
e.g ``(1, 5, 10, 60, 5 * 60)``
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
assert len(retry_delays) > 0
super(LimitedRetriesPolicy, self).__init__(consumer,
retry_queue_suffix, **kwargs)
self.retry_delays = retry_delays
def retry(self, envelope):
"""Send message to retry queue to retry handling it later.
Death count is calculated by examining 'x-death' header. Based on the death
count, the message is sent to a retry queue where it waits there till it
expires and gets sent back to the original queue for handling retry.
The death count is used as an index for `retry_delays` list. Where each
item in the list represents a retry delay in seconds.
The message will be rejected if the death count exceeded the length of
`retry_delays` list.
:param Envelope envelope: Message envelope
"""
death_count = self.get_death_count(envelope)
if death_count < len(self.retry_delays):
delay = self.retry_delays[death_count]
retry_queue_name = self.declare_retry_queue(delay)
if envelope.get_header('x-original-delivery-info') is None:
self.set_original_delivery_info_header(envelope)
self.consumer.channel.basic_publish(exchange='', routing_key=
retry_queue_name, properties=envelope.properties, body=
envelope.payload)
self.consumer.channel.basic_ack(envelope.delivery_tag)
logger.warning(
'Retry handling message [{}] after {}s; death count: {}'.
format(envelope.message_id, delay, death_count + 1))
else:
logger.warning('Message [{}] exceeded retry limit; death count: {}'
.format(envelope.message_id, death_count + 1))
self.consumer.channel.basic_reject(envelope.delivery_tag,
requeue=False)
logger.error('Message [{}] is rejected'.format(envelope.message_id)
)
class FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):
"""Fixed delay unlimited retries policy.
This is an implementation of :class:`.RetryPolicy` which applies a fixed backoff delay
with unlimited retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
super(FixedDelayUnlimitedRetriesPolicy, self).__init__(consumer=
consumer, initial_delay=delay, max_delay=delay,
delay_incremented_by=0, retry_queue_suffix=retry_queue_suffix,
**kwargs)
class FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):
"""Fixed delay limited retries policy.
This is an implementation of :class:`.RetryPolicy` which applies a fixed backoff delay
with a limited number of retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds.
:attr:`retries_limit`: retries limit count.
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, delay, retries_limit, retry_queue_suffix=
'retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param int retries_limit: retries limit count
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
assert retries_limit > 0
retry_delays = tuple([delay] * retries_limit)
super(FixedDelayLimitedRetriesPolicy, self).__init__(consumer=
consumer, retry_delays=retry_delays, retry_queue_suffix=
retry_queue_suffix, **kwargs)
| <mask token>
class UnlimitedRetriesPolicy(BaseRetryPolicy):
<mask token>
def __init__(self, consumer, initial_delay, max_delay,
delay_incremented_by, retry_queue_suffix='retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int initial_delay: `initial_delay` is the initial/first backoff delay
in seconds.
:param int max_delay: `max_delay` is the final/maximum backoff delay in seconds
that should not be exceeded. When exceeded, this max is used.
:param int delay_incremented_by: `delay_incremented_by` is number of seconds
the backoff should be incremented by after each death.
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
super(UnlimitedRetriesPolicy, self).__init__(consumer,
retry_queue_suffix, **kwargs)
assert initial_delay >= 0
assert delay_incremented_by >= 0
assert max_delay >= initial_delay
self.initial_delay = initial_delay
self.max_delay = max_delay
self.delay_incremented_by = delay_incremented_by
def retry(self, envelope):
"""Send message to retry queue to retry handling it later.
Death count is calculated by examining 'x-death' header. Based on the death
count, the message is sent to a retry queue where it waits there till it
expires and gets sent back to the original queue for handling retry.
:param Envelope envelope: Message envelope
"""
death_count = self.get_death_count(envelope)
delay = self.initial_delay + death_count * self.delay_incremented_by
if delay > self.max_delay:
delay = self.max_delay
retry_queue_name = self.declare_retry_queue(delay)
if envelope.get_header('x-original-delivery-info') is None:
self.set_original_delivery_info_header(envelope)
self.consumer.channel.basic_publish(exchange='', routing_key=
retry_queue_name, properties=envelope.properties, body=envelope
.payload)
self.consumer.channel.basic_ack(envelope.delivery_tag)
logger.warning('Retry handling message [{}] after {}s; death count: {}'
.format(envelope.message_id, delay, death_count + 1))
class LimitedRetriesPolicy(BaseRetryPolicy):
"""Limited Retries Policy.
This is an implementation of :class:`.RetryPolicy` which does incremental backoff,
limited number of retries.
:attr:`consumer`: message consumer instance
:attr:`retry_delays`: immutable list of retry backoff delays in seconds. Message
is sent to dlx when this list is exhausted. e.g ``(1, 5, 10, 60, 5 * 60)``
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, retry_delays, retry_queue_suffix='retry',
**kwargs):
"""
:param Consumer consumer: message consumer instance
:param Iterable[int] retry_delays: Immutable list of retry backoff delays in
seconds. Message is sent to dlx when this list is exhausted.
e.g ``(1, 5, 10, 60, 5 * 60)``
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
assert len(retry_delays) > 0
super(LimitedRetriesPolicy, self).__init__(consumer,
retry_queue_suffix, **kwargs)
self.retry_delays = retry_delays
def retry(self, envelope):
"""Send message to retry queue to retry handling it later.
Death count is calculated by examining 'x-death' header. Based on the death
count, the message is sent to a retry queue where it waits there till it
expires and gets sent back to the original queue for handling retry.
The death count is used as an index for `retry_delays` list. Where each
item in the list represents a retry delay in seconds.
The message will be rejected if the death count exceeded the length of
`retry_delays` list.
:param Envelope envelope: Message envelope
"""
death_count = self.get_death_count(envelope)
if death_count < len(self.retry_delays):
delay = self.retry_delays[death_count]
retry_queue_name = self.declare_retry_queue(delay)
if envelope.get_header('x-original-delivery-info') is None:
self.set_original_delivery_info_header(envelope)
self.consumer.channel.basic_publish(exchange='', routing_key=
retry_queue_name, properties=envelope.properties, body=
envelope.payload)
self.consumer.channel.basic_ack(envelope.delivery_tag)
logger.warning(
'Retry handling message [{}] after {}s; death count: {}'.
format(envelope.message_id, delay, death_count + 1))
else:
logger.warning('Message [{}] exceeded retry limit; death count: {}'
.format(envelope.message_id, death_count + 1))
self.consumer.channel.basic_reject(envelope.delivery_tag,
requeue=False)
logger.error('Message [{}] is rejected'.format(envelope.message_id)
)
class FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):
"""Fixed delay unlimited retries policy.
This is an implementation of :class:`.RetryPolicy` which applies a fixed backoff delay
with unlimited retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
super(FixedDelayUnlimitedRetriesPolicy, self).__init__(consumer=
consumer, initial_delay=delay, max_delay=delay,
delay_incremented_by=0, retry_queue_suffix=retry_queue_suffix,
**kwargs)
class FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):
"""Fixed delay limited retries policy.
This is an implementation of :class:`.RetryPolicy` which applies a fixed backoff delay
with a limited number of retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds.
:attr:`retries_limit`: retries limit count.
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, delay, retries_limit, retry_queue_suffix=
'retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param int retries_limit: retries limit count
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
assert retries_limit > 0
retry_delays = tuple([delay] * retries_limit)
super(FixedDelayLimitedRetriesPolicy, self).__init__(consumer=
consumer, retry_delays=retry_delays, retry_queue_suffix=
retry_queue_suffix, **kwargs)
| <mask token>
class BaseRetryPolicy(RetryPolicy):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class UnlimitedRetriesPolicy(BaseRetryPolicy):
"""Unlimited Retries Policy.
This is an implementation of :class:`.RetryPolicy` which does incremental backoff,
unlimited retries.
:attr:`initial_delay`: is the initial/first backoff delay in seconds
:attr:`delay_incremented_by`: is number of seconds the backoff should be incremented
by after each death
:attr:`max_delay`: is the final/maximum backoff delay in seconds that should not be
exceeded
"""
def __init__(self, consumer, initial_delay, max_delay,
delay_incremented_by, retry_queue_suffix='retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int initial_delay: `initial_delay` is the initial/first backoff delay
in seconds.
:param int max_delay: `max_delay` is the final/maximum backoff delay in seconds
that should not be exceeded. When exceeded, this max is used.
:param int delay_incremented_by: `delay_incremented_by` is number of seconds
the backoff should be incremented by after each death.
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
super(UnlimitedRetriesPolicy, self).__init__(consumer,
retry_queue_suffix, **kwargs)
assert initial_delay >= 0
assert delay_incremented_by >= 0
assert max_delay >= initial_delay
self.initial_delay = initial_delay
self.max_delay = max_delay
self.delay_incremented_by = delay_incremented_by
def retry(self, envelope):
"""Send message to retry queue to retry handling it later.
Death count is calculated by examining 'x-death' header. Based on the death
count, the message is sent to a retry queue where it waits there till it
expires and gets sent back to the original queue for handling retry.
:param Envelope envelope: Message envelope
"""
death_count = self.get_death_count(envelope)
delay = self.initial_delay + death_count * self.delay_incremented_by
if delay > self.max_delay:
delay = self.max_delay
retry_queue_name = self.declare_retry_queue(delay)
if envelope.get_header('x-original-delivery-info') is None:
self.set_original_delivery_info_header(envelope)
self.consumer.channel.basic_publish(exchange='', routing_key=
retry_queue_name, properties=envelope.properties, body=envelope
.payload)
self.consumer.channel.basic_ack(envelope.delivery_tag)
logger.warning('Retry handling message [{}] after {}s; death count: {}'
.format(envelope.message_id, delay, death_count + 1))
class LimitedRetriesPolicy(BaseRetryPolicy):
"""Limited Retries Policy.
This is an implementation of :class:`.RetryPolicy` which does incremental backoff,
limited number of retries.
:attr:`consumer`: message consumer instance
:attr:`retry_delays`: immutable list of retry backoff delays in seconds. Message
is sent to dlx when this list is exhausted. e.g ``(1, 5, 10, 60, 5 * 60)``
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, retry_delays, retry_queue_suffix='retry',
**kwargs):
"""
:param Consumer consumer: message consumer instance
:param Iterable[int] retry_delays: Immutable list of retry backoff delays in
seconds. Message is sent to dlx when this list is exhausted.
e.g ``(1, 5, 10, 60, 5 * 60)``
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
assert len(retry_delays) > 0
super(LimitedRetriesPolicy, self).__init__(consumer,
retry_queue_suffix, **kwargs)
self.retry_delays = retry_delays
def retry(self, envelope):
"""Send message to retry queue to retry handling it later.
Death count is calculated by examining 'x-death' header. Based on the death
count, the message is sent to a retry queue where it waits there till it
expires and gets sent back to the original queue for handling retry.
The death count is used as an index for `retry_delays` list. Where each
item in the list represents a retry delay in seconds.
The message will be rejected if the death count exceeded the length of
`retry_delays` list.
:param Envelope envelope: Message envelope
"""
death_count = self.get_death_count(envelope)
if death_count < len(self.retry_delays):
delay = self.retry_delays[death_count]
retry_queue_name = self.declare_retry_queue(delay)
if envelope.get_header('x-original-delivery-info') is None:
self.set_original_delivery_info_header(envelope)
self.consumer.channel.basic_publish(exchange='', routing_key=
retry_queue_name, properties=envelope.properties, body=
envelope.payload)
self.consumer.channel.basic_ack(envelope.delivery_tag)
logger.warning(
'Retry handling message [{}] after {}s; death count: {}'.
format(envelope.message_id, delay, death_count + 1))
else:
logger.warning('Message [{}] exceeded retry limit; death count: {}'
.format(envelope.message_id, death_count + 1))
self.consumer.channel.basic_reject(envelope.delivery_tag,
requeue=False)
logger.error('Message [{}] is rejected'.format(envelope.message_id)
)
class FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):
"""Fixed delay unlimited retries policy.
This is an implementation of :class:`.RetryPolicy` which applies a fixed backoff delay
with unlimited retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
super(FixedDelayUnlimitedRetriesPolicy, self).__init__(consumer=
consumer, initial_delay=delay, max_delay=delay,
delay_incremented_by=0, retry_queue_suffix=retry_queue_suffix,
**kwargs)
class FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):
"""Fixed delay limited retries policy.
This is an implementation of :class:`.RetryPolicy` which applies a fixed backoff delay
with a limited number of retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds.
:attr:`retries_limit`: retries limit count.
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, delay, retries_limit, retry_queue_suffix=
'retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param int retries_limit: retries limit count
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
assert retries_limit > 0
retry_delays = tuple([delay] * retries_limit)
super(FixedDelayLimitedRetriesPolicy, self).__init__(consumer=
consumer, retry_delays=retry_delays, retry_queue_suffix=
retry_queue_suffix, **kwargs)
| <mask token>
class RetryPolicy(object):
<mask token>
def __init__(self, **kwargs):
super(RetryPolicy, self).__init__()
def retry(self, envelope):
"""This method is implemented by the subclass."""
raise NotImplementedError()
class BaseRetryPolicy(RetryPolicy):
"""Base retry policy class for :class:`.UnlimitedRetriesPolicy` and
:class:`.LimitedRetriesPolicy`.
It has implementations for getting the message death count and for retry queue creation.
"""
def __init__(self, consumer, retry_queue_suffix='retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param str retry_queue_suffix: Suffix used when creating retry queues. Retry
queue names are constructed in this form "queue_name.<suffix>.<delay>".
Optional, default to ``retry``
"""
super(BaseRetryPolicy, self).__init__(**kwargs)
retry_queue_suffix = retry_queue_suffix.strip()
self.consumer = consumer
assert len(retry_queue_suffix) > 0
self.retry_queue_suffix = retry_queue_suffix
self.min_retry_queue_ttl = 20 * 1000
def set_original_delivery_info_header(self, envelope):
"""Save original message delivery infomation in a header."""
if not envelope.get_header('x-original-delivery-info'):
original_delivery_info = {'consumer_tag': envelope.
delivery_info.consumer_tag, 'delivery_tag': envelope.
delivery_info.delivery_tag, 'redelivered': envelope.
delivery_info.redelivered, 'exchange': envelope.
delivery_info.exchange, 'routing_key': envelope.
delivery_info.routing_key}
envelope.set_header('x-original-delivery-info',
original_delivery_info)
def get_death_count(self, envelope):
"""Return the death count of a message by examining "x-death" header.
:param Envelope envelope: Message envelope
:return int: death count
"""
death_header = envelope.get_header('x-death')
if death_header is None:
return 0
count = 0
for death in death_header:
if not death['queue'].startswith(self.consumer.queue_name):
continue
count += death.get('count', 1)
return count
def declare_retry_queue(self, delay):
"""Declare a retry queue for the provided delay.
Each different delay has a different queue where all retry messages with the
same delay will be sent to till they expire and get sent back to the original
queue for handling retry. The queue is declared with a TTL and automatically
gets deleted. The queue TTL is equal to the provided delay. The retry
queue's dead letter exchange is (default) direct exchange and the dead letter
routing key is the original queue name where the messages originally
came from. The messages will be sent back to the original queue when they
reach their TTL, for handling retry.
The retry queue is redeclared before every new message is sent to it.
Redeclaration resets the queue's TTL, preventing it from being destroyed.
:param int delay: Retry delay in seconds
:return: retry queue name
:rtype: str
"""
delay_in_ms = int(delay * 1000)
retry_queue_name = '{}.{}.{}'.format(self.consumer.queue_name, self
.retry_queue_suffix, delay_in_ms)
queue_ttl = delay_in_ms * 2
if queue_ttl < self.min_retry_queue_ttl:
queue_ttl = self.min_retry_queue_ttl
self.consumer.channel.queue_declare(callback=None, queue=
retry_queue_name, durable=self.consumer.durable, nowait=True,
arguments={'x-dead-letter-exchange': '',
'x-dead-letter-routing-key': self.consumer.queue_name,
'x-message-ttl': delay_in_ms, 'x-expires': queue_ttl})
logger.warning('Retry queue "{}" is created/redeclared'.format(
retry_queue_name))
return retry_queue_name
class UnlimitedRetriesPolicy(BaseRetryPolicy):
"""Unlimited Retries Policy.
This is an implementation of :class:`.RetryPolicy` which does incremental backoff,
unlimited retries.
:attr:`initial_delay`: is the initial/first backoff delay in seconds
:attr:`delay_incremented_by`: is number of seconds the backoff should be incremented
by after each death
:attr:`max_delay`: is the final/maximum backoff delay in seconds that should not be
exceeded
"""
def __init__(self, consumer, initial_delay, max_delay,
delay_incremented_by, retry_queue_suffix='retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int initial_delay: `initial_delay` is the initial/first backoff delay
in seconds.
:param int max_delay: `max_delay` is the final/maximum backoff delay in seconds
that should not be exceeded. When exceeded, this max is used.
:param int delay_incremented_by: `delay_incremented_by` is number of seconds
the backoff should be incremented by after each death.
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
super(UnlimitedRetriesPolicy, self).__init__(consumer,
retry_queue_suffix, **kwargs)
assert initial_delay >= 0
assert delay_incremented_by >= 0
assert max_delay >= initial_delay
self.initial_delay = initial_delay
self.max_delay = max_delay
self.delay_incremented_by = delay_incremented_by
def retry(self, envelope):
"""Send message to retry queue to retry handling it later.
Death count is calculated by examining 'x-death' header. Based on the death
count, the message is sent to a retry queue where it waits there till it
expires and gets sent back to the original queue for handling retry.
:param Envelope envelope: Message envelope
"""
death_count = self.get_death_count(envelope)
delay = self.initial_delay + death_count * self.delay_incremented_by
if delay > self.max_delay:
delay = self.max_delay
retry_queue_name = self.declare_retry_queue(delay)
if envelope.get_header('x-original-delivery-info') is None:
self.set_original_delivery_info_header(envelope)
self.consumer.channel.basic_publish(exchange='', routing_key=
retry_queue_name, properties=envelope.properties, body=envelope
.payload)
self.consumer.channel.basic_ack(envelope.delivery_tag)
logger.warning('Retry handling message [{}] after {}s; death count: {}'
.format(envelope.message_id, delay, death_count + 1))
class LimitedRetriesPolicy(BaseRetryPolicy):
"""Limited Retries Policy.
This is an implementation of :class:`.RetryPolicy` which does incremental backoff,
limited number of retries.
:attr:`consumer`: message consumer instance
:attr:`retry_delays`: immutable list of retry backoff delays in seconds. Message
is sent to dlx when this list is exhausted. e.g ``(1, 5, 10, 60, 5 * 60)``
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, retry_delays, retry_queue_suffix='retry',
**kwargs):
"""
:param Consumer consumer: message consumer instance
:param Iterable[int] retry_delays: Immutable list of retry backoff delays in
seconds. Message is sent to dlx when this list is exhausted.
e.g ``(1, 5, 10, 60, 5 * 60)``
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
assert len(retry_delays) > 0
super(LimitedRetriesPolicy, self).__init__(consumer,
retry_queue_suffix, **kwargs)
self.retry_delays = retry_delays
def retry(self, envelope):
"""Send message to retry queue to retry handling it later.
Death count is calculated by examining 'x-death' header. Based on the death
count, the message is sent to a retry queue where it waits there till it
expires and gets sent back to the original queue for handling retry.
The death count is used as an index for `retry_delays` list. Where each
item in the list represents a retry delay in seconds.
The message will be rejected if the death count exceeded the length of
`retry_delays` list.
:param Envelope envelope: Message envelope
"""
death_count = self.get_death_count(envelope)
if death_count < len(self.retry_delays):
delay = self.retry_delays[death_count]
retry_queue_name = self.declare_retry_queue(delay)
if envelope.get_header('x-original-delivery-info') is None:
self.set_original_delivery_info_header(envelope)
self.consumer.channel.basic_publish(exchange='', routing_key=
retry_queue_name, properties=envelope.properties, body=
envelope.payload)
self.consumer.channel.basic_ack(envelope.delivery_tag)
logger.warning(
'Retry handling message [{}] after {}s; death count: {}'.
format(envelope.message_id, delay, death_count + 1))
else:
logger.warning('Message [{}] exceeded retry limit; death count: {}'
.format(envelope.message_id, death_count + 1))
self.consumer.channel.basic_reject(envelope.delivery_tag,
requeue=False)
logger.error('Message [{}] is rejected'.format(envelope.message_id)
)
class FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):
"""Fixed delay unlimited retries policy.
This is an implementation of :class:`.RetryPolicy` which applies a fixed backoff delay
with unlimited retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
super(FixedDelayUnlimitedRetriesPolicy, self).__init__(consumer=
consumer, initial_delay=delay, max_delay=delay,
delay_incremented_by=0, retry_queue_suffix=retry_queue_suffix,
**kwargs)
class FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):
"""Fixed delay limited retries policy.
This is an implementation of :class:`.RetryPolicy` which applies a fixed backoff delay
with a limited number of retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds.
:attr:`retries_limit`: retries limit count.
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, delay, retries_limit, retry_queue_suffix=
'retry', **kwargs):
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param int retries_limit: retries limit count
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
assert retries_limit > 0
retry_delays = tuple([delay] * retries_limit)
super(FixedDelayLimitedRetriesPolicy, self).__init__(consumer=
consumer, retry_delays=retry_delays, retry_queue_suffix=
retry_queue_suffix, **kwargs)
| # -*- coding: utf-8 -*-
"""
Noting is perfect, errors and timeouts may happen, and when such failures happen, the
consumer has to decide what to do with that. By default, the consumer would reject the
envelope (RabbitMQ message) when a failure happens. However, errors and timeouts
issues, unless there is a software bug, usually solved with retries. Just like the
routing, the consumer doesn't make the retry decision itself, the consumer delegates
it to a retry policy. Retry policy defines how the retry is performed. Retries
usually happens with back-offs to avoid worsening the situation by hammering other
services with more requests, especially if it was a timeout issue. The consumer can be
configured to use a retry policy by calling :meth:`.Consumer.set_retry_policy`, passing
an instance of :class:`.RetryPolicy`. When a retry policy is set, the consumer won't
reject messages, but rather, it send them to the retry policy to deal with the
situation by invoking :meth:`.RetryPolicy.retry` method. Based on it's implementation,
The retry policy decides how to do retries.
There are 4 different retry policies available:
1. :class:`.UnlimitedRetriesPolicy`, Unlimited retries policy
2. :class:`.LimitedRetriesPolicy`, Limited retries policy
3. :class:`.FixedDelayUnlimitedRetriesPolicy`, Fixed delay unlimited retries policy
4. :class:`.FixedDelayLimitedRetriesPolicy`, Fixed delay limited retries policy
Custom retry policies can be created by implementing the base class
:class:`.RetryPolicy`
"""
import logging
logger = logging.getLogger(__name__)
class RetryPolicy(object):
"""Base class for retry policies.
Subclasses MUST implement :meth:`retry` method.
"""
def __init__(self, **kwargs):
# type: (RetryPolicy) -> None
super(RetryPolicy, self).__init__()
def retry(self, envelope):
# type: (RetryPolicy, Envelope) -> None
"""This method is implemented by the subclass."""
raise NotImplementedError()
class BaseRetryPolicy(RetryPolicy):
"""Base retry policy class for :class:`.UnlimitedRetriesPolicy` and
:class:`.LimitedRetriesPolicy`.
It has implementations for getting the message death count and for retry queue creation.
"""
def __init__(self, consumer, retry_queue_suffix='retry', **kwargs):
# type: (BaseRetryPolicy, Consumer, str) -> None
"""
:param Consumer consumer: message consumer instance
:param str retry_queue_suffix: Suffix used when creating retry queues. Retry
queue names are constructed in this form "queue_name.<suffix>.<delay>".
Optional, default to ``retry``
"""
super(BaseRetryPolicy, self).__init__(**kwargs)
retry_queue_suffix = retry_queue_suffix.strip()
self.consumer = consumer
assert len(retry_queue_suffix) > 0
self.retry_queue_suffix = retry_queue_suffix
# To avoid frequent retry queue create and destroy for low retry delays
self.min_retry_queue_ttl = 20 * 1000 # 20 seconds
def set_original_delivery_info_header(self, envelope):
# type: (BaseRetryPolicy, Envelope) -> None
"""Save original message delivery infomation in a header."""
if not envelope.get_header('x-original-delivery-info'):
original_delivery_info = {
'consumer_tag': envelope.delivery_info.consumer_tag,
'delivery_tag': envelope.delivery_info.delivery_tag,
'redelivered': envelope.delivery_info.redelivered,
'exchange': envelope.delivery_info.exchange,
'routing_key': envelope.delivery_info.routing_key
}
envelope.set_header('x-original-delivery-info',
original_delivery_info)
def get_death_count(self, envelope):
# type: (BaseRetryPolicy, Envelope) -> int
"""Return the death count of a message by examining "x-death" header.
:param Envelope envelope: Message envelope
:return int: death count
"""
death_header = envelope.get_header('x-death')
if death_header is None:
return 0
count = 0
for death in death_header:
if not death['queue'].startswith(self.consumer.queue_name):
continue
count += death.get('count', 1)
return count
def declare_retry_queue(self, delay):
# type: (BaseRetryPolicy, int) -> str
"""Declare a retry queue for the provided delay.
Each different delay has a different queue where all retry messages with the
same delay will be sent to till they expire and get sent back to the original
queue for handling retry. The queue is declared with a TTL and automatically
gets deleted. The queue TTL is equal to the provided delay. The retry
queue's dead letter exchange is (default) direct exchange and the dead letter
routing key is the original queue name where the messages originally
came from. The messages will be sent back to the original queue when they
reach their TTL, for handling retry.
The retry queue is redeclared before every new message is sent to it.
Redeclaration resets the queue's TTL, preventing it from being destroyed.
:param int delay: Retry delay in seconds
:return: retry queue name
:rtype: str
"""
delay_in_ms = int(delay * 1000)
retry_queue_name = '{}.{}.{}'.format(
self.consumer.queue_name, self.retry_queue_suffix, delay_in_ms)
# To avoid frequent queue create and destroy for low retry delays
queue_ttl = delay_in_ms * 2
if queue_ttl < self.min_retry_queue_ttl:
queue_ttl = self.min_retry_queue_ttl
self.consumer.channel.queue_declare(
callback=None,
queue=retry_queue_name,
durable=self.consumer.durable,
nowait=True,
arguments={
'x-dead-letter-exchange': '',
'x-dead-letter-routing-key': self.consumer.queue_name,
'x-message-ttl': delay_in_ms,
'x-expires': queue_ttl
})
logger.warning(
'Retry queue "{}" is created/redeclared'.format(retry_queue_name))
return retry_queue_name
class UnlimitedRetriesPolicy(BaseRetryPolicy):
"""Unlimited Retries Policy.
This is an implementation of :class:`.RetryPolicy` which does incremental backoff,
unlimited retries.
:attr:`initial_delay`: is the initial/first backoff delay in seconds
:attr:`delay_incremented_by`: is number of seconds the backoff should be incremented
by after each death
:attr:`max_delay`: is the final/maximum backoff delay in seconds that should not be
exceeded
"""
def __init__(self,
consumer,
initial_delay,
max_delay,
delay_incremented_by,
retry_queue_suffix='retry',
**kwargs):
# type: (UnlimitedRetriesPolicy, Consumer, int, int, int, str) -> None
"""
:param Consumer consumer: message consumer instance
:param int initial_delay: `initial_delay` is the initial/first backoff delay
in seconds.
:param int max_delay: `max_delay` is the final/maximum backoff delay in seconds
that should not be exceeded. When exceeded, this max is used.
:param int delay_incremented_by: `delay_incremented_by` is number of seconds
the backoff should be incremented by after each death.
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
super(UnlimitedRetriesPolicy,
self).__init__(consumer, retry_queue_suffix, **kwargs)
assert initial_delay >= 0
assert delay_incremented_by >= 0
assert max_delay >= initial_delay
self.initial_delay = initial_delay
self.max_delay = max_delay
self.delay_incremented_by = delay_incremented_by
def retry(self, envelope):
# type: (UnlimitedRetriesPolicy, Envelope) -> None
"""Send message to retry queue to retry handling it later.
Death count is calculated by examining 'x-death' header. Based on the death
count, the message is sent to a retry queue where it waits there till it
expires and gets sent back to the original queue for handling retry.
:param Envelope envelope: Message envelope
"""
death_count = self.get_death_count(envelope)
delay = self.initial_delay + (death_count * self.delay_incremented_by)
if delay > self.max_delay:
delay = self.max_delay
retry_queue_name = self.declare_retry_queue(delay)
# Save original delivery information
if envelope.get_header('x-original-delivery-info') is None:
self.set_original_delivery_info_header(envelope)
self.consumer.channel.basic_publish(
exchange='',
routing_key=retry_queue_name,
properties=envelope.properties,
body=envelope.payload)
self.consumer.channel.basic_ack(envelope.delivery_tag)
logger.warning(
'Retry handling message [{}] after {}s; death count: {}'.format(
envelope.message_id, delay, death_count + 1))
class LimitedRetriesPolicy(BaseRetryPolicy):
"""Limited Retries Policy.
This is an implementation of :class:`.RetryPolicy` which does incremental backoff,
limited number of retries.
:attr:`consumer`: message consumer instance
:attr:`retry_delays`: immutable list of retry backoff delays in seconds. Message
is sent to dlx when this list is exhausted. e.g ``(1, 5, 10, 60, 5 * 60)``
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self,
consumer,
retry_delays,
retry_queue_suffix='retry',
**kwargs):
# type: (LimitedRetriesPolicy, Consumer, Iterable[int], str) -> None
"""
:param Consumer consumer: message consumer instance
:param Iterable[int] retry_delays: Immutable list of retry backoff delays in
seconds. Message is sent to dlx when this list is exhausted.
e.g ``(1, 5, 10, 60, 5 * 60)``
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
assert len(retry_delays) > 0
super(LimitedRetriesPolicy, self).__init__(consumer, retry_queue_suffix,
**kwargs)
self.retry_delays = retry_delays
def retry(self, envelope):
# type: (LimitedRetriesPolicy, Envelope) -> None
"""Send message to retry queue to retry handling it later.
Death count is calculated by examining 'x-death' header. Based on the death
count, the message is sent to a retry queue where it waits there till it
expires and gets sent back to the original queue for handling retry.
The death count is used as an index for `retry_delays` list. Where each
item in the list represents a retry delay in seconds.
The message will be rejected if the death count exceeded the length of
`retry_delays` list.
:param Envelope envelope: Message envelope
"""
death_count = self.get_death_count(envelope)
if death_count < len(self.retry_delays):
delay = self.retry_delays[death_count]
retry_queue_name = self.declare_retry_queue(delay)
# Save original delivery information
if envelope.get_header('x-original-delivery-info') is None:
self.set_original_delivery_info_header(envelope)
self.consumer.channel.basic_publish(
exchange='',
routing_key=retry_queue_name,
properties=envelope.properties,
body=envelope.payload)
self.consumer.channel.basic_ack(envelope.delivery_tag)
logger.warning(
'Retry handling message [{}] after {}s; death count: {}'.format(
envelope.message_id, delay, death_count + 1))
else:
logger.warning(
'Message [{}] exceeded retry limit; death count: {}'.format(
envelope.message_id, death_count + 1))
self.consumer.channel.basic_reject(
envelope.delivery_tag, requeue=False)
logger.error('Message [{}] is rejected'.format(envelope.message_id))
class FixedDelayUnlimitedRetriesPolicy(UnlimitedRetriesPolicy):
"""Fixed delay unlimited retries policy.
This is an implementation of :class:`.RetryPolicy` which applies a fixed backoff delay
with unlimited retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self, consumer, delay, retry_queue_suffix='retry', **kwargs):
# type: (FixedDelayUnlimitedRetriesPolicy, Consumer, int, str) -> None
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
super(FixedDelayUnlimitedRetriesPolicy, self).__init__(
consumer=consumer,
initial_delay=delay,
max_delay=delay,
delay_incremented_by=0,
retry_queue_suffix=retry_queue_suffix,
**kwargs)
class FixedDelayLimitedRetriesPolicy(LimitedRetriesPolicy):
"""Fixed delay limited retries policy.
This is an implementation of :class:`.RetryPolicy` which applies a fixed backoff delay
with a limited number of retries.
:attr:`consumer`: consumer instance
:attr:`delay`: retry delay in seconds.
:attr:`retries_limit`: retries limit count.
:attr:`retry_queue_suffix`: suffix str used when naming retry queues.
"""
def __init__(self,
consumer,
delay,
retries_limit,
retry_queue_suffix='retry',
**kwargs):
# type: (FixedDelayLimitedRetriesPolicy, Consumer, int, int, str) -> None
"""
:param Consumer consumer: message consumer instance
:param int delay: retry delay in seconds
:param int retries_limit: retries limit count
:param: str retry_queue_suffix: suffix used when naming retry queues.
"""
assert retries_limit > 0
retry_delays = tuple([delay] * retries_limit)
super(FixedDelayLimitedRetriesPolicy, self).__init__(
consumer=consumer,
retry_delays=retry_delays,
retry_queue_suffix=retry_queue_suffix,
**kwargs)
| [
9,
13,
15,
23,
27
] |
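The module docstring in the row above notes that a consumer is configured by passing a :class:`RetryPolicy` instance to `Consumer.set_retry_policy`. A minimal wiring sketch follows; the `Consumer` class, its constructor arguments, and the queue name are assumptions taken from that docstring rather than code in this row, and only the policy constructors match the definitions above.

# hypothetical setup; Consumer comes from the surrounding library, not this module
consumer = Consumer(queue_name='payments', durable=True)

# Retry after 1s, 5s, 10s, 60s and 5 minutes, then dead-letter the message.
consumer.set_retry_policy(
    LimitedRetriesPolicy(consumer, retry_delays=(1, 5, 10, 60, 5 * 60)))

# Alternatively: retry every 30 seconds forever (internally an UnlimitedRetriesPolicy
# with initial_delay=30, max_delay=30, delay_incremented_by=0).
consumer.set_retry_policy(FixedDelayUnlimitedRetriesPolicy(consumer, delay=30))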
802 | 892eb8d1802b01c035993232cc80c710211ab102 | <mask token>
def car(env):
while True:
print('The car will start parking at: ', env.now)
parking_timeout = 5
yield env.timeout(parking_timeout)
print('The car will start driving at: ', env.now)
driving_timeout = 2
yield env.timeout(driving_timeout)
<mask token>
| <mask token>
def car(env):
while True:
print('The car will start parking at: ', env.now)
parking_timeout = 5
yield env.timeout(parking_timeout)
print('The car will start driving at: ', env.now)
driving_timeout = 2
yield env.timeout(driving_timeout)
<mask token>
env.process(car(env))
env.run(until=20)
| <mask token>
def car(env):
while True:
print('The car will start parking at: ', env.now)
parking_timeout = 5
yield env.timeout(parking_timeout)
print('The car will start driving at: ', env.now)
driving_timeout = 2
yield env.timeout(driving_timeout)
env = simpy.Environment()
env.process(car(env))
env.run(until=20)
| import simpy
def car(env):
while True:
print('The car will start parking at: ', env.now)
parking_timeout = 5
yield env.timeout(parking_timeout)
print('The car will start driving at: ', env.now)
driving_timeout = 2
yield env.timeout(driving_timeout)
env = simpy.Environment()
env.process(car(env))
env.run(until=20)
| #processes are described by generator functions
#during the lifetime of a process, the process function(generator function)
#creates events and yields them
#when a process yields an event, it gets suspended
#Simpy resumes the process when the event is triggered
#multiple processes waiting on the same event are resumed in the same order
#in which they yielded the event
import simpy
def car(env):
# i = 0
# while i<=10:
while True:
print("The car will start parking at: ",env.now)
parking_timeout = 5
yield env.timeout(parking_timeout)
print("The car will start driving at: ",env.now)
driving_timeout = 2
yield env.timeout(driving_timeout)
# if i == 10:
# print("the car is done moving")
# yield env.timeout(1)
# i += 1
env = simpy.Environment()
env.process(car(env)) #the generator function creates the process called car
#env.run()
env.run(until=20)
| [1, 2, 3, 4, 5] |
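The comments in the last version above describe how SimPy suspends a process at each yielded timeout and resumes it when the event fires; a minimal sketch (process names and timings are illustrative, not from the original file) of two such processes interleaving on one Environment:
import simpy

def car(env, name, parking_timeout, driving_timeout):
    # Each yield suspends this process until its timeout event is triggered.
    while True:
        print(name, 'starts parking at', env.now)
        yield env.timeout(parking_timeout)
        print(name, 'starts driving at', env.now)
        yield env.timeout(driving_timeout)

env = simpy.Environment()
env.process(car(env, 'car-1', 5, 2))
env.process(car(env, 'car-2', 3, 4))
env.run(until=20)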
803 | 1f69cf5f6d15048e6ead37b5da836c9e2f783f74 | <mask token>
| <mask token>
print('loading data...')
with open('movienumbers.pickle', 'rb') as input_file:
movienumbers = pickle.load(input_file)
with open('ratings.pickle', 'rb') as input_file:
ratings = pickle.load(input_file)
with open('userratings.pickle', 'rb') as input_file:
userratings = pickle.load(input_file)
with open('metaratings.pickle', 'rb') as input_file:
metaratings = pickle.load(input_file)
print('Pickled data successfully loaded.')
<mask token>
for movieid, reviews in userratings.items():
score = 0
for eachreviews in reviews:
score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[
'compound']
average = score / len(reviews)
userscore[movieid] = average
print(userscore)
<mask token>
for movieid, reviews in metaratings.items():
score_1 = 0
for eachreviews in reviews:
score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[
'compound']
average = score_1 / len(reviews)
criticsscore[movieid] = average
print(criticsscore)
<mask token>
for movieid, score in userscore.items():
if movieid in criticsscore and criticsscore[movieid] > score:
counter += 1
else:
counter += 0
print('Critics overpraise these movies ' + str(counter) +
' times more than normal viewers out of ' + str(len(criticsscore)) +
' movies in total.')
if counter < len(criticsscore) - counter:
print(
'Because the critics overpraise less than half of the movies sampled here, the critics are more refrained than the users on IMDb.'
)
else:
print(
'Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained than the users on IMDb.'
)
<mask token>
for movieid, score in criticsscore.items():
if abs(userscore[movieid] - ratings[movieid] / 10) > abs(score -
ratings[movieid] / 10):
useriscloser += 1
else:
criticiscloser += 1
print('Critics are closer to the ratings for ' + str(criticiscloser) +
' times, while normal viewers are closer ' + str(useriscloser) +
' times out of ' + str(len(criticsscore)) + ' movies in total.')
if useriscloser > criticiscloser:
print(
'Because the more movies have users resembling closer to the rating, the critics are less accurate than the users on IMDb.'
)
else:
print(
'Because the more movies have critics resembling closer to the rating, the users are less accurate than the users on IMDb.'
)
| <mask token>
print('loading data...')
with open('movienumbers.pickle', 'rb') as input_file:
movienumbers = pickle.load(input_file)
with open('ratings.pickle', 'rb') as input_file:
ratings = pickle.load(input_file)
with open('userratings.pickle', 'rb') as input_file:
userratings = pickle.load(input_file)
with open('metaratings.pickle', 'rb') as input_file:
metaratings = pickle.load(input_file)
print('Pickled data successfully loaded.')
<mask token>
userscore = {}
for movieid, reviews in userratings.items():
score = 0
for eachreviews in reviews:
score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[
'compound']
average = score / len(reviews)
userscore[movieid] = average
print(userscore)
criticsscore = {}
for movieid, reviews in metaratings.items():
score_1 = 0
for eachreviews in reviews:
score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[
'compound']
average = score_1 / len(reviews)
criticsscore[movieid] = average
print(criticsscore)
counter = 0
for movieid, score in userscore.items():
if movieid in criticsscore and criticsscore[movieid] > score:
counter += 1
else:
counter += 0
print('Critics overpraise these movies ' + str(counter) +
' times more than normal viewers out of ' + str(len(criticsscore)) +
' movies in total.')
if counter < len(criticsscore) - counter:
print(
'Because the critics overpraise less than half of the movies sampled here, the critics are more refrained than the users on IMDb.'
)
else:
print(
'Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained than the users on IMDb.'
)
useriscloser = 0
criticiscloser = 0
for movieid, score in criticsscore.items():
if abs(userscore[movieid] - ratings[movieid] / 10) > abs(score -
ratings[movieid] / 10):
useriscloser += 1
else:
criticiscloser += 1
print('Critics are closer to the ratings for ' + str(criticiscloser) +
' times, while normal viewers are closer ' + str(useriscloser) +
' times out of ' + str(len(criticsscore)) + ' movies in total.')
if useriscloser > criticiscloser:
print(
'Because the more movies have users resembling closer to the rating, the critics are less accurate than the users on IMDb.'
)
else:
print(
'Because the more movies have critics resembling closer to the rating, the users are less accurate than the users on IMDb.'
)
| import pickle
print('loading data...')
with open('movienumbers.pickle', 'rb') as input_file:
movienumbers = pickle.load(input_file)
with open('ratings.pickle', 'rb') as input_file:
ratings = pickle.load(input_file)
with open('userratings.pickle', 'rb') as input_file:
userratings = pickle.load(input_file)
with open('metaratings.pickle', 'rb') as input_file:
metaratings = pickle.load(input_file)
print('Pickled data successfully loaded.')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
userscore = {}
for movieid, reviews in userratings.items():
score = 0
for eachreviews in reviews:
score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[
'compound']
average = score / len(reviews)
userscore[movieid] = average
print(userscore)
criticsscore = {}
for movieid, reviews in metaratings.items():
score_1 = 0
for eachreviews in reviews:
score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[
'compound']
average = score_1 / len(reviews)
criticsscore[movieid] = average
print(criticsscore)
counter = 0
for movieid, score in userscore.items():
if movieid in criticsscore and criticsscore[movieid] > score:
counter += 1
else:
counter += 0
print('Critics overpraise these movies ' + str(counter) +
' times more than normal viewers out of ' + str(len(criticsscore)) +
' movies in total.')
if counter < len(criticsscore) - counter:
print(
'Because the critics overpraise less than half of the movies sampled here, the critics are more refrained than the users on IMDb.'
)
else:
print(
'Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained than the users on IMDb.'
)
useriscloser = 0
criticiscloser = 0
for movieid, score in criticsscore.items():
if abs(userscore[movieid] - ratings[movieid] / 10) > abs(score -
ratings[movieid] / 10):
useriscloser += 1
else:
criticiscloser += 1
print('Critics are closer to the ratings for ' + str(criticiscloser) +
' times, while normal viewers are closer ' + str(useriscloser) +
' times out of ' + str(len(criticsscore)) + ' movies in total.')
if useriscloser > criticiscloser:
print(
'Because the more movies have users resembling closer to the rating, the critics are less accurate than the users on IMDb.'
)
else:
print(
'Because the more movies have critics resembling closer to the rating, the users are less accurate than the users on IMDb.'
)
| # The actual code begins here
# This file is intended to load everything downloaded from loaddata.py, preventing user getting banned from IMDB
# The code is written to see what are some key words of the reviews from critics and normal viewers
# And to see what are some of the differences
# The second task is to asses the people's emotion vs. actual score given
# First, we need to load back everything we dumped to folder via pickle.
import pickle
print('loading data...')
with open('movienumbers.pickle','rb') as input_file:
movienumbers = pickle.load(input_file)
with open('ratings.pickle','rb') as input_file:
ratings = pickle.load(input_file)
with open('userratings.pickle','rb') as input_file:
userratings = pickle.load(input_file)
with open('metaratings.pickle','rb') as input_file:
metaratings = pickle.load(input_file)
print('Pickled data successfully loaded.')
# then, it's time to use nltp to see the score of the critics vs. viewers on movies
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# print(movienumbers)
# print(ratings)
# print(userratings)
# print(metaratings)
# Userratings is a dictionary in ways like this "ttxxxxxx : [reviews1, reviews2,...]"
# print(userratings['tt0111161'])
#
# print(metaratings['tt0111161'])
# print(ratings['tt0111161'])
userscore = {}
for movieid, reviews in userratings.items():
score = 0
for eachreviews in reviews:
score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)['compound']
average = score / len(reviews)
userscore[movieid] = average
print(userscore)
# Meta ratings is a dictionary in ways like this "ttxxxxxx : [reviews1, reviews2,...]"
criticsscore = {}
for movieid, reviews in metaratings.items():
score_1 = 0
for eachreviews in reviews:
score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)['compound']
average = score_1 / len(reviews)
criticsscore[movieid] = average
print(criticsscore)
# Question 1: Are critics always more positive than the audience?
counter = 0
for movieid, score in userscore.items():
if movieid in criticsscore and criticsscore[movieid] > score:
counter += 1
else:
counter += 0
# Displaying results to question 1
print("Critics overpraise these movies " + str(counter) + " times more than normal viewers out of "
+ str(len(criticsscore)) + " movies in total.")
if counter < (len(criticsscore) - counter):
print("Because the critics overpraise less than half of the movies sampled here, the critics are more refrained "
"than the users on IMDb.")
else:
print("Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained "
"than the users on IMDb.")
# Question 2: Is the IMDB score closer to the users' sentiment? Or the critics.
useriscloser = 0
criticiscloser = 0
for movieid, score in criticsscore.items():
if abs(userscore[movieid] - (ratings[movieid])/10) > abs(score - (ratings[movieid]/10)):
useriscloser += 1
else:
criticiscloser += 1
# Displaying results to question 2
print("Critics are closer to the ratings for " + str(criticiscloser) +
" times, while normal viewers are closer " + str(useriscloser) + " times out of " +
str(len(criticsscore)) + " movies in total.")
if useriscloser > criticiscloser:
print("Because the more movies have users resembling closer to the rating, the critics are less accurate "
"than the users on IMDb.")
else:
    print("Because the more movies have critics resembling closer to the rating, the users are less accurate "
          "than the users on IMDb.") | [0, 1, 2, 3, 4] |
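The loops above rebuild a SentimentIntensityAnalyzer for every single review; a minimal sketch of the same compound-score averaging with one reusable analyzer (the review strings are invented for illustration, and the vader_lexicon resource must be downloaded first):
from nltk.sentiment.vader import SentimentIntensityAnalyzer

# One-time setup elsewhere: import nltk; nltk.download('vader_lexicon')
sia = SentimentIntensityAnalyzer()
reviews = ['Great pacing and a moving story.', 'Dull, predictable, and far too long.']
# 'compound' is VADER's normalized score in [-1, 1]; averaging it mirrors the loops above.
average = sum(sia.polarity_scores(r)['compound'] for r in reviews) / len(reviews)
print(average)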
804 | e31267871453d87aee409f1c751c36908f7f151a | <mask token>
| <mask token>
__all__ = ['resolver']
| <mask token>
from acres.resolution import resolver
__all__ = ['resolver']
| """
Package with a facade to the several expansion strategies.
"""
from acres.resolution import resolver
__all__ = ['resolver']
| null | [0, 1, 2, 3] |
805 | be58862b66708c9de8cf7642c9de52ec744b079e | <mask token>
def application(environ, start_response):
"""AJAX scripts for email templates."""
request = DRequest(environ)
resp = None
try:
Db.start_transaction()
form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
args = form['args'].value
req = json.loads(args)
support = SupportSession(key=request.support_key())
handler = handlers[req['command']]
resp = Response(json.dumps(handler(request, req)))
Db.finish_transaction()
except SupportSessionExpired:
Db.cancel_transaction()
resp = Response(json.dumps({'Error': 'Session Expired'}))
except DbError as e:
Db.cancel_transaction()
resp = Response(json.dumps({'Error': e.args[0]}))
except Exception as e:
Db.cancel_transaction()
import traceback
traceback.print_exc()
resp = Response(json.dumps({'Error': 'Internal Error'}))
request.cookie_freshen(resp)
resp.headers['content-type'] = 'application/json'
resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'
return resp(environ, start_response)
def get(request, req):
return db.Support.get_all()
<mask token>
def delete(request, req):
return db.Support.delete(req['support_id'])
def add(request, req):
return db.Support.new()
<mask token>
| <mask token>
def application(environ, start_response):
"""AJAX scripts for email templates."""
request = DRequest(environ)
resp = None
try:
Db.start_transaction()
form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
args = form['args'].value
req = json.loads(args)
support = SupportSession(key=request.support_key())
handler = handlers[req['command']]
resp = Response(json.dumps(handler(request, req)))
Db.finish_transaction()
except SupportSessionExpired:
Db.cancel_transaction()
resp = Response(json.dumps({'Error': 'Session Expired'}))
except DbError as e:
Db.cancel_transaction()
resp = Response(json.dumps({'Error': e.args[0]}))
except Exception as e:
Db.cancel_transaction()
import traceback
traceback.print_exc()
resp = Response(json.dumps({'Error': 'Internal Error'}))
request.cookie_freshen(resp)
resp.headers['content-type'] = 'application/json'
resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'
return resp(environ, start_response)
def get(request, req):
return db.Support.get_all()
def edit(request, req):
return db.Support.edit(req)
def delete(request, req):
return db.Support.delete(req['support_id'])
def add(request, req):
return db.Support.new()
<mask token>
| <mask token>
def application(environ, start_response):
"""AJAX scripts for email templates."""
request = DRequest(environ)
resp = None
try:
Db.start_transaction()
form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
args = form['args'].value
req = json.loads(args)
support = SupportSession(key=request.support_key())
handler = handlers[req['command']]
resp = Response(json.dumps(handler(request, req)))
Db.finish_transaction()
except SupportSessionExpired:
Db.cancel_transaction()
resp = Response(json.dumps({'Error': 'Session Expired'}))
except DbError as e:
Db.cancel_transaction()
resp = Response(json.dumps({'Error': e.args[0]}))
except Exception as e:
Db.cancel_transaction()
import traceback
traceback.print_exc()
resp = Response(json.dumps({'Error': 'Internal Error'}))
request.cookie_freshen(resp)
resp.headers['content-type'] = 'application/json'
resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'
return resp(environ, start_response)
def get(request, req):
return db.Support.get_all()
def edit(request, req):
return db.Support.edit(req)
def delete(request, req):
return db.Support.delete(req['support_id'])
def add(request, req):
return db.Support.new()
handlers = {'get': get, 'edit': edit, 'delete': delete, 'add': add}
| from werkzeug.wrappers import Response
from p.DRequest import DRequest
from db.Support import SupportSession
from db.Exceptions import DbError, SupportSessionExpired
import db.Db as Db
import db.Support
import cgi
import simplejson as json
def application(environ, start_response):
"""AJAX scripts for email templates."""
request = DRequest(environ)
resp = None
try:
Db.start_transaction()
form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
args = form['args'].value
req = json.loads(args)
support = SupportSession(key=request.support_key())
handler = handlers[req['command']]
resp = Response(json.dumps(handler(request, req)))
Db.finish_transaction()
except SupportSessionExpired:
Db.cancel_transaction()
resp = Response(json.dumps({'Error': 'Session Expired'}))
except DbError as e:
Db.cancel_transaction()
resp = Response(json.dumps({'Error': e.args[0]}))
except Exception as e:
Db.cancel_transaction()
import traceback
traceback.print_exc()
resp = Response(json.dumps({'Error': 'Internal Error'}))
request.cookie_freshen(resp)
resp.headers['content-type'] = 'application/json'
resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'
return resp(environ, start_response)
def get(request, req):
return db.Support.get_all()
def edit(request, req):
return db.Support.edit(req)
def delete(request, req):
return db.Support.delete(req['support_id'])
def add(request, req):
return db.Support.new()
handlers = {'get': get, 'edit': edit, 'delete': delete, 'add': add}
| # $Header: //depot/cs/s/ajax_support.wsgi#10 $
from werkzeug.wrappers import Response
from p.DRequest import DRequest
from db.Support import SupportSession
from db.Exceptions import DbError, SupportSessionExpired
import db.Db as Db
import db.Support
import cgi
import simplejson as json
def application(environ, start_response):
"""AJAX scripts for email templates."""
request = DRequest(environ)
resp = None
try :
Db.start_transaction()
form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
args = form['args'].value
req = json.loads(args)
support = SupportSession(key=request.support_key())
handler = handlers[req['command']]
resp = Response(json.dumps(handler(request, req)))
Db.finish_transaction()
except SupportSessionExpired:
Db.cancel_transaction()
resp = Response(json.dumps({ 'Error': 'Session Expired' }))
except DbError as e:
Db.cancel_transaction()
resp = Response(json.dumps({ 'Error': e.args[0]}))
except Exception as e:
Db.cancel_transaction()
import traceback
traceback.print_exc()
resp = Response(json.dumps({ 'Error': "Internal Error"}))
request.cookie_freshen(resp)
resp.headers['content-type'] = 'application/json'
resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'
return resp(environ, start_response)
def get(request, req):
return db.Support.get_all()
def edit(request, req):
return db.Support.edit(req);
def delete(request, req):
return db.Support.delete(req['support_id'])
def add(request, req):
return db.Support.new()
handlers = { 'get': get, 'edit': edit, 'delete': delete, 'add': add }
| [4, 5, 6, 7, 8] |
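The WSGI script above routes every request through a handlers dict keyed by req['command'] inside one try/except; a minimal sketch of that dispatch pattern in isolation (command names and payloads are hypothetical, no database or werkzeug involved):
import json

def get(request, req):
    return {'rows': []}

def delete(request, req):
    return {'deleted': req['support_id']}

handlers = {'get': get, 'delete': delete}

def dispatch(raw_args):
    req = json.loads(raw_args)
    try:
        handler = handlers[req['command']]
        return json.dumps(handler(None, req))
    except Exception:
        # Unknown commands or handler failures collapse into a generic error,
        # like the broad except branch above.
        return json.dumps({'Error': 'Internal Error'})

print(dispatch('{"command": "get"}'))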
806 | 328a03acab2a0550bea0795d22110a152db6c503 | <mask token>
| <mask token>
def run_training(arguments_parser):
data = DatasetLoader(arguments_parser)
data.setup()
arguments_parser.num_training_steps = len(data.train_dataloader()
) * arguments_parser.max_epochs
dict_args = vars(arguments_parser)
model = Model(**dict_args)
arguments_parser.early_stop_callback = EarlyStopping('val_loss')
trainer = pl.Trainer.from_argparse_args(arguments_parser)
trainer.fit(model, data)
<mask token>
| <mask token>
print(os.getcwd())
<mask token>
def run_training(arguments_parser):
data = DatasetLoader(arguments_parser)
data.setup()
arguments_parser.num_training_steps = len(data.train_dataloader()
) * arguments_parser.max_epochs
dict_args = vars(arguments_parser)
model = Model(**dict_args)
arguments_parser.early_stop_callback = EarlyStopping('val_loss')
trainer = pl.Trainer.from_argparse_args(arguments_parser)
trainer.fit(model, data)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--pretrained', type=str, default='bert-base-uncased')
parser.add_argument('--nr_frozen_epochs', type=int, default=5)
parser.add_argument('--training_portion', type=float, default=0.9)
parser.add_argument('--batch_size', type=float, default=32)
parser.add_argument('--learning_rate', type=float, default=2e-05)
parser.add_argument('--frac', type=float, default=1)
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
run_training(args)
| import os
print(os.getcwd())
from TransformerModel.Model import Model
from dataset.DatasetLoader import DatasetLoader
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping
import argparse
from argparse import ArgumentParser, ArgumentTypeError
def run_training(arguments_parser):
data = DatasetLoader(arguments_parser)
data.setup()
arguments_parser.num_training_steps = len(data.train_dataloader()
) * arguments_parser.max_epochs
dict_args = vars(arguments_parser)
model = Model(**dict_args)
arguments_parser.early_stop_callback = EarlyStopping('val_loss')
trainer = pl.Trainer.from_argparse_args(arguments_parser)
trainer.fit(model, data)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--pretrained', type=str, default='bert-base-uncased')
parser.add_argument('--nr_frozen_epochs', type=int, default=5)
parser.add_argument('--training_portion', type=float, default=0.9)
parser.add_argument('--batch_size', type=float, default=32)
parser.add_argument('--learning_rate', type=float, default=2e-05)
parser.add_argument('--frac', type=float, default=1)
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
run_training(args)
| # %%
import os
print(os.getcwd())
# %%
from TransformerModel.Model import Model
from dataset.DatasetLoader import DatasetLoader
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping
import argparse
from argparse import ArgumentParser, ArgumentTypeError
# %%
def run_training(arguments_parser):
data = DatasetLoader(arguments_parser)
data.setup()
arguments_parser.num_training_steps = (
len(data.train_dataloader()) * arguments_parser.max_epochs
)
dict_args = vars(arguments_parser)
model = Model(**dict_args)
arguments_parser.early_stop_callback = EarlyStopping("val_loss")
trainer = pl.Trainer.from_argparse_args(arguments_parser)
trainer.fit(model, data)
# %%
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--pretrained", type=str, default="bert-base-uncased")
parser.add_argument("--nr_frozen_epochs", type=int, default=5)
parser.add_argument("--training_portion", type=float, default=0.9)
parser.add_argument("--batch_size", type=float, default=32)
parser.add_argument("--learning_rate", type=float, default=2e-5)
parser.add_argument("--frac", type=float, default=1)
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
run_training(args)
# %%
| [0, 1, 2, 3, 4] |
807 | c712875273f988a3aa6dab61f79e99a077823060 | <mask token>
def matches(needle, haystack):
for straw in haystack:
if needle == straw:
return True
return False
def appendSection(section):
if len(section) < 2:
return
if not section[0].endswith('-'):
print('warning: section name does not end with -: ' + section[0])
return
match = re.match('\\S+', section[0])
if match:
if section[0] == '-':
section[0] = module
else:
section[0] = match.group(0)
else:
print('warning: section name had no non-whitespace match: ' +
section[0])
return
if matches(section[0], sectionMatches):
return
if any(section[0].endswith(x) for x in sectionEnds):
return
sections.append(section)
<mask token>
| <mask token>
if len(sys.argv) < 2:
print('USAGE: lua_syntax.py MODULENAME INFILE')
exit(0)
<mask token>
for arg in sys.argv[3:]:
    sectionMatches.append(arg)
def matches(needle, haystack):
for straw in haystack:
if needle == straw:
return True
return False
def appendSection(section):
if len(section) < 2:
return
if not section[0].endswith('-'):
print('warning: section name does not end with -: ' + section[0])
return
match = re.match('\\S+', section[0])
if match:
if section[0] == '-':
section[0] = module
else:
section[0] = match.group(0)
else:
print('warning: section name had no non-whitespace match: ' +
section[0])
return
if matches(section[0], sectionMatches):
return
if any(section[0].endswith(x) for x in sectionEnds):
return
sections.append(section)
<mask token>
for line in file:
line = line.strip()
if line.startswith('LANGUAGE'):
continue
if line.startswith('='):
appendSection(section)
section = []
else:
if len(line) == 0:
continue
if any(line.startswith(x) for x in lineStarts):
continue
if any(line.endswith(x) for x in lineEnds):
continue
if matches(line, lineMatches):
continue
section.append(line)
appendSection(section)
file.close()
<mask token>
for section in sections:
prefix = ' '
name = section[0]
if name == module:
prefix = module + '.'
file.write(module + '\n')
elif name.endswith('.SwigStatic'):
name = name.split('.')[0]
prefix = module + '.' + name + '.'
else:
file.write(module + '.' + name + '\n')
lines = section[1:]
lines.sort()
for line in lines:
if not line.endswith('.SwigStatic'):
file.write(prefix + line + '\n')
num = num + 1
if num < len(sections):
file.write('\n')
file.close()
| <mask token>
if len(sys.argv) < 2:
print('USAGE: lua_syntax.py MODULENAME INFILE')
exit(0)
module = sys.argv[1]
infile = sys.argv[2]
sections = []
sectionMatches = ['string', 'string.SwigStatic']
sectionEnds = ['Vector']
lineMatches = ['string', 'lua:cdata']
lineStarts = ['~', '__', 'of', 'ofx']
lineEnds = ['Vector']
for arg in sys.argv[3:]:
    sectionMatches.append(arg)
def matches(needle, haystack):
for straw in haystack:
if needle == straw:
return True
return False
def appendSection(section):
if len(section) < 2:
return
if not section[0].endswith('-'):
print('warning: section name does not end with -: ' + section[0])
return
match = re.match('\\S+', section[0])
if match:
if section[0] == '-':
section[0] = module
else:
section[0] = match.group(0)
else:
print('warning: section name had no non-whitespace match: ' +
section[0])
return
if matches(section[0], sectionMatches):
return
if any(section[0].endswith(x) for x in sectionEnds):
return
sections.append(section)
file = open(infile)
section = []
for line in file:
line = line.strip()
if line.startswith('LANGUAGE'):
continue
if line.startswith('='):
appendSection(section)
section = []
else:
if len(line) == 0:
continue
if any(line.startswith(x) for x in lineStarts):
continue
if any(line.endswith(x) for x in lineEnds):
continue
if matches(line, lineMatches):
continue
section.append(line)
appendSection(section)
file.close()
section = []
file = open(module + '_syntax.txt', 'w')
num = 0
for section in sections:
prefix = ' '
name = section[0]
if name == module:
prefix = module + '.'
file.write(module + '\n')
elif name.endswith('.SwigStatic'):
name = name.split('.')[0]
prefix = module + '.' + name + '.'
else:
file.write(module + '.' + name + '\n')
lines = section[1:]
lines.sort()
for line in lines:
if not line.endswith('.SwigStatic'):
file.write(prefix + line + '\n')
num = num + 1
if num < len(sections):
file.write('\n')
file.close()
| import sys
import re
if len(sys.argv) < 2:
print('USAGE: lua_syntax.py MODULENAME INFILE')
exit(0)
module = sys.argv[1]
infile = sys.argv[2]
sections = []
sectionMatches = ['string', 'string.SwigStatic']
sectionEnds = ['Vector']
lineMatches = ['string', 'lua:cdata']
lineStarts = ['~', '__', 'of', 'ofx']
lineEnds = ['Vector']
for arg in sys.argv[3:]:
    sectionMatches.append(arg)
def matches(needle, haystack):
for straw in haystack:
if needle == straw:
return True
return False
def appendSection(section):
if len(section) < 2:
return
if not section[0].endswith('-'):
print('warning: section name does not end with -: ' + section[0])
return
match = re.match('\\S+', section[0])
if match:
if section[0] == '-':
section[0] = module
else:
section[0] = match.group(0)
else:
print('warning: section name had no non-whitespace match: ' +
section[0])
return
if matches(section[0], sectionMatches):
return
if any(section[0].endswith(x) for x in sectionEnds):
return
sections.append(section)
file = open(infile)
section = []
for line in file:
line = line.strip()
if line.startswith('LANGUAGE'):
continue
if line.startswith('='):
appendSection(section)
section = []
else:
if len(line) == 0:
continue
if any(line.startswith(x) for x in lineStarts):
continue
if any(line.endswith(x) for x in lineEnds):
continue
if matches(line, lineMatches):
continue
section.append(line)
appendSection(section)
file.close()
section = []
file = open(module + '_syntax.txt', 'w')
num = 0
for section in sections:
prefix = ' '
name = section[0]
if name == module:
prefix = module + '.'
file.write(module + '\n')
elif name.endswith('.SwigStatic'):
name = name.split('.')[0]
prefix = module + '.' + name + '.'
else:
file.write(module + '.' + name + '\n')
lines = section[1:]
lines.sort()
for line in lines:
if not line.endswith('.SwigStatic'):
file.write(prefix + line + '\n')
num = num + 1
if num < len(sections):
file.write('\n')
file.close()
| #! /usr/bin/python
#
# convert the swig -debug-lsymbols output text file format into
# a simple list of lua module names and classes
#
# Dan Wilcox <[email protected]> 2017
#
import sys
import re
if len(sys.argv) < 2:
print("USAGE: lua_syntax.py MODULENAME INFILE")
exit(0)
module = sys.argv[1]
infile = sys.argv[2]
sections = []
sectionMatches = [
"string", # swig std::string wrappers
"string.SwigStatic" # swig std::string wrappers
]
sectionEnds = [
"Vector" # swig std::vector wrappers
]
lineMatches = [
"string", # swig std::string wrappers
"lua:cdata", # c pointers
]
lineStarts = [
"~", # destructors
"__", # lua metatable __add, __sub, etc
"of", # of core type prefixes
"ofx" # ofx addon type prefixes
]
lineEnds = [
"Vector" # swig std::vector wrappers
]
# any other user-supplied section ignores
for arg in sys.argv[3:]:
    sectionMatches.append(arg)
# check if a string matches one in an array
def matches(needle, haystack):
for straw in haystack:
if needle == straw:
return True
return False
# append a section to the sections array if the name passes muster
def appendSection(section):
# drop static classes which don't have any symbols
if len(section) < 2:
return
# section names are followed by a " -", so double check
if not section[0].endswith("-"):
print("warning: section name does not end with -: "+section[0])
return
# grab first non-whitespace name ie. "Color" from "Color -"
match = re.match("\S+", section[0])
if match:
if section[0] == "-": # main module is just a "-"
section[0] = module
else: # class name
section[0] = match.group(0)
else:
print("warning: section name had no non-whitespace match: "+section[0])
return
# drop sections which match certain strings
if matches(section[0], sectionMatches):
return
# drop sections which contain certain strings
if any(section[0].endswith(x) for x in sectionEnds):
return
# if got this far, the section must be good...
sections.append(section)
# parse swig output into sections
file = open(infile)
section = []
for line in file:
# strip whitespace
line = line.strip()
# ignore beginning and end lines
if line.startswith("LANGUAGE"):
continue
# section headers are a series of = chars, ie. ==========
if line.startswith("="):
appendSection(section)
section = []
# append line within a section
else:
# empty line
if len(line) == 0:
continue
# drop lines with certain prefixes
if any(line.startswith(x) for x in lineStarts):
continue
# drop lines with certain suffixes
if any(line.endswith(x) for x in lineEnds):
continue
# drop lines which match certain strings
if matches(line, lineMatches):
continue
# line must be good
section.append(line)
appendSection(section) # catch any left overs
file.close()
section = []
# for section in sections:
# print(section)
# exit(0)
# output module & section names to each section line
file = open(module+"_syntax.txt", "w")
num = 0
for section in sections:
# grab name from first line and output
prefix = " "
name = section[0]
if name == module: # main module
prefix = module+"."
file.write(module+"\n")
elif name.endswith(".SwigStatic"): # static members
name = name.split(".")[0] # drop SwigStatic suffix
prefix = module+"."+name+"."
else: # class instance members
file.write(module+"."+name+"\n")
# sort remaining lines
lines = section[1:]
lines.sort()
# output with module.class prefix
for line in lines:
if not line.endswith(".SwigStatic"): # drop statics from main module
file.write(prefix+line+"\n")
num = num + 1
# linebreak between sections
if num < len(sections):
file.write("\n")
file.close()
| [2, 3, 4, 5, 6] |
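To make the section-name handling above concrete, a small sketch (module name and headers are illustrative) of how a swig section header such as 'Color -' reduces to a class name while the bare '-' falls back to the module:
import re

module = 'of'
for header in ('Color -', '-'):
    if header == '-':
        name = module
    else:
        name = re.match(r'\S+', header).group(0)
    print(repr(header), '->', name)
# 'Color -' -> Color, '-' -> of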
808 | 3d3b9956a98f11a170d66280abe7f193cef9ccfb | <mask token>
| <mask token>
for i_angle in np.arange(0, 360, 45):
ri_filenames.append('r%di%d.csv' % (i_angle, i_angle))
ri_filenames.append('r%di%d.csv' % (i_angle + 45, i_angle))
ri_filenames.append('r360i360.csv')
<mask token>
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. yaw', 'Calculated distance vs. yaw'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp1'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp1_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. pitch', 'Calculated distance vs. pitch'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp2'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp2_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. roll', 'Calculated distance vs. roll'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp3'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp3_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. position', 'Calculated distance vs. position'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp4'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in angle_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 360, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 360, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 360, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp4_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=2, subplot_titles=['Line of sight',
'Blocked'], shared_xaxes=True)
rssi_los_hist2d = []
dist_los_hist2d = []
experiment = 'distance_los'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in distance_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_los_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_los_hist2d.append(column)
rssi_los_hist2d = np.array(rssi_los_hist2d).T
dist_los_hist2d = np.array(dist_los_hist2d).T
rssi_blocked_hist2d = []
dist_blocked_hist2d = []
experiment = 'distance_blocked'
for filename in distance_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_blocked_hist2d.append(column)
column = np.zeros(1000)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_blocked_hist2d.append(column)
rssi_blocked_hist2d = np.array(rssi_blocked_hist2d).T
dist_blocked_hist2d = np.array(dist_blocked_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
db_lim[0], db_lim[1], -1), z=rssi_los_hist2d[int(-db_lim[0]):int(-
db_lim[1]), :], zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
dist_lim[0], dist_lim[1], 0.1), z=dist_los_hist2d[int(dist_lim[0] /
0.1):int(dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
db_lim[0], db_lim[1], -1), z=rssi_blocked_hist2d[int(-db_lim[0]):
int(-db_lim[1]), :], zmin=0, zmax=maxz), row=1, col=2)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
dist_lim[0], dist_lim[1], 0.1), z=dist_blocked_hist2d[int(dist_lim[
0] / 0.1):int(dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=2)
fig.add_trace(go.Scatter(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
0.75, 3.25, 0.25), mode='lines', line=ref_line), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
0.75, 3.25, 0.25), mode='lines', line=ref_line), row=2, col=2)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Separation (m)', row=2, col=1)
fig.update_xaxes(title='Separation (m)', row=2, col=2)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory, 'distance_%s.png' % antenna)
)
| <mask token>
output_directory = 'C:/Users/jgamm/Desktop/rssi_measurement/2020-06-10/figures'
antennas = ['original_whip']
folder = 'C:/Users/jgamm/Desktop/rssi_measurement/2020-06-10/data'
ri_filenames = []
for i_angle in np.arange(0, 360, 45):
ri_filenames.append('r%di%d.csv' % (i_angle, i_angle))
ri_filenames.append('r%di%d.csv' % (i_angle + 45, i_angle))
ri_filenames.append('r360i360.csv')
angle_filenames = [('%d.csv' % n) for n in np.arange(0, 405, 45)]
distance_filenames = [('%1.2f.csv' % n) for n in np.arange(0.75, 3.25, 0.25)]
ref_line = dict(color='white', width=1)
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. yaw', 'Calculated distance vs. yaw'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp1'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp1_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. pitch', 'Calculated distance vs. pitch'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp2'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp2_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. roll', 'Calculated distance vs. roll'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp3'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp3_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. position', 'Calculated distance vs. position'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp4'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in angle_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 360, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 360, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 360, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp4_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=2, subplot_titles=['Line of sight',
'Blocked'], shared_xaxes=True)
rssi_los_hist2d = []
dist_los_hist2d = []
experiment = 'distance_los'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in distance_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_los_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_los_hist2d.append(column)
rssi_los_hist2d = np.array(rssi_los_hist2d).T
dist_los_hist2d = np.array(dist_los_hist2d).T
rssi_blocked_hist2d = []
dist_blocked_hist2d = []
experiment = 'distance_blocked'
for filename in distance_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_blocked_hist2d.append(column)
column = np.zeros(1000)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_blocked_hist2d.append(column)
rssi_blocked_hist2d = np.array(rssi_blocked_hist2d).T
dist_blocked_hist2d = np.array(dist_blocked_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
db_lim[0], db_lim[1], -1), z=rssi_los_hist2d[int(-db_lim[0]):int(-
db_lim[1]), :], zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
dist_lim[0], dist_lim[1], 0.1), z=dist_los_hist2d[int(dist_lim[0] /
0.1):int(dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
db_lim[0], db_lim[1], -1), z=rssi_blocked_hist2d[int(-db_lim[0]):
int(-db_lim[1]), :], zmin=0, zmax=maxz), row=1, col=2)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
dist_lim[0], dist_lim[1], 0.1), z=dist_blocked_hist2d[int(dist_lim[
0] / 0.1):int(dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=2)
fig.add_trace(go.Scatter(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
0.75, 3.25, 0.25), mode='lines', line=ref_line), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
0.75, 3.25, 0.25), mode='lines', line=ref_line), row=2, col=2)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Separation (m)', row=2, col=1)
fig.update_xaxes(title='Separation (m)', row=2, col=2)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory, 'distance_%s.png' % antenna)
)
| import numpy as np
import plotly
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import pandas as pd
import os
output_directory = 'C:/Users/jgamm/Desktop/rssi_measurement/2020-06-10/figures'
antennas = ['original_whip']
folder = 'C:/Users/jgamm/Desktop/rssi_measurement/2020-06-10/data'
ri_filenames = []
for i_angle in np.arange(0, 360, 45):
ri_filenames.append('r%di%d.csv' % (i_angle, i_angle))
ri_filenames.append('r%di%d.csv' % (i_angle + 45, i_angle))
ri_filenames.append('r360i360.csv')
angle_filenames = [('%d.csv' % n) for n in np.arange(0, 405, 45)]
distance_filenames = [('%1.2f.csv' % n) for n in np.arange(0.75, 3.25, 0.25)]
ref_line = dict(color='white', width=1)
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. yaw', 'Calculated distance vs. yaw'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp1'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp1_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. pitch', 'Calculated distance vs. pitch'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp2'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp2_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. roll', 'Calculated distance vs. roll'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp3'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp3_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. position', 'Calculated distance vs. position'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp4'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in angle_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 360, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 360, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 360, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp4_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=2, subplot_titles=['Line of sight',
'Blocked'], shared_xaxes=True)
rssi_los_hist2d = []
dist_los_hist2d = []
experiment = 'distance_los'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in distance_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_los_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_los_hist2d.append(column)
rssi_los_hist2d = np.array(rssi_los_hist2d).T
dist_los_hist2d = np.array(dist_los_hist2d).T
rssi_blocked_hist2d = []
dist_blocked_hist2d = []
experiment = 'distance_blocked'
for filename in distance_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_blocked_hist2d.append(column)
column = np.zeros(1000)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_blocked_hist2d.append(column)
rssi_blocked_hist2d = np.array(rssi_blocked_hist2d).T
dist_blocked_hist2d = np.array(dist_blocked_hist2d).T
    maxz = np.max([np.max(rssi_los_hist2d), np.max(dist_los_hist2d), np.max(rssi_blocked_hist2d), np.max(dist_blocked_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
db_lim[0], db_lim[1], -1), z=rssi_los_hist2d[int(-db_lim[0]):int(-
db_lim[1]), :], zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
dist_lim[0], dist_lim[1], 0.1), z=dist_los_hist2d[int(dist_lim[0] /
0.1):int(dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
db_lim[0], db_lim[1], -1), z=rssi_blocked_hist2d[int(-db_lim[0]):
int(-db_lim[1]), :], zmin=0, zmax=maxz), row=1, col=2)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
dist_lim[0], dist_lim[1], 0.1), z=dist_blocked_hist2d[int(dist_lim[
0] / 0.1):int(dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=2)
fig.add_trace(go.Scatter(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
0.75, 3.25, 0.25), mode='lines', line=ref_line), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
0.75, 3.25, 0.25), mode='lines', line=ref_line), row=2, col=2)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Separation (m)', row=2, col=1)
fig.update_xaxes(title='Separation (m)', row=2, col=2)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory, 'distance_%s.png' % antenna)
)
| #%%
# -*- coding: utf-8 -*-
import numpy as np
import plotly
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import pandas as pd
import os
output_directory = r'C:/Users/jgamm/Desktop/rssi_measurement/2020-06-10/figures'
antennas = ['original_whip']
folder = r'C:/Users/jgamm/Desktop/rssi_measurement/2020-06-10/data'
ri_filenames = []
for i_angle in np.arange(0, 360, 45):
ri_filenames.append('r%di%d.csv'%(i_angle, i_angle))
ri_filenames.append('r%di%d.csv'%(i_angle+45, i_angle))
ri_filenames.append('r360i360.csv')
angle_filenames = ['%d.csv'%(n) for n in np.arange(0, 405, 45)]
distance_filenames = ['%1.2f.csv'%(n) for n in np.arange(.75, 3.25, .25)]
ref_line = dict(color='white', width=1)
# Plot yaw data
for antenna in antennas:
fig = make_subplots(rows=2, cols=1,
subplot_titles=['Initiator RSSI vs. yaw',
'Calculated distance vs. yaw'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp1'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi-5 < db_lim[1]:
db_lim[1] = rssi-5
if rssi+5 > db_lim[0]:
db_lim[0] = rssi+5
for dist in Dist:
if dist-.5 < dist_lim[0]:
dist_lim[0] = dist-.5
if dist+.5 > dist_lim[1]:
dist_lim[1] = dist+.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1]/len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0]/.1))
column[row_idx] = row[1]/len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(
x=np.arange(0, 765, 45),
y=np.arange(db_lim[0], db_lim[1], -1),
z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(
x=np.arange(0, 765, 45),
y=np.arange(dist_lim[0], dist_lim[1], .1),
z=dist_hist2d[int(dist_lim[0]/.1):int(dist_lim[1]/.1), :],
zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1]*16), mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna'%(antenna),
'xanchor': 'center', 'yanchor': 'top', 'y': .95, 'x': .5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory, 'orientation_exp1_%s.png'%(antenna)))
# Plot pitch data
for antenna in antennas:
fig = make_subplots(rows=2, cols=1,
subplot_titles=['Initiator RSSI vs. pitch',
'Calculated distance vs. pitch'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp2'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi-5 < db_lim[1]:
db_lim[1] = rssi-5
if rssi+5 > db_lim[0]:
db_lim[0] = rssi+5
for dist in Dist:
if dist-.5 < dist_lim[0]:
dist_lim[0] = dist-.5
if dist+.5 > dist_lim[1]:
dist_lim[1] = dist+.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1]/len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0]/.1))
column[row_idx] = row[1]/len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(
x=np.arange(0, 765, 45),
y=np.arange(db_lim[0], db_lim[1], -1),
z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(
x=np.arange(0, 765, 45),
y=np.arange(dist_lim[0], dist_lim[1], .1),
z=dist_hist2d[int(dist_lim[0]/.1):int(dist_lim[1]/.1), :],
zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1]*16), mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna'%(antenna),
'xanchor': 'center', 'yanchor': 'top', 'y': .95, 'x': .5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory, 'orientation_exp2_%s.png'%(antenna)))
# Plot roll data
for antenna in antennas:
fig = make_subplots(rows=2, cols=1,
subplot_titles=['Initiator RSSI vs. roll',
'Calculated distance vs. roll'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp3'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi-5 < db_lim[1]:
db_lim[1] = rssi-5
if rssi+5 > db_lim[0]:
db_lim[0] = rssi+5
for dist in Dist:
if dist-.5 < dist_lim[0]:
dist_lim[0] = dist-.5
if dist+.5 > dist_lim[1]:
dist_lim[1] = dist+.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1]/len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0]/.1))
column[row_idx] = row[1]/len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(
x=np.arange(0, 765, 45),
y=np.arange(db_lim[0], db_lim[1], -1),
z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(
x=np.arange(0, 765, 45),
y=np.arange(dist_lim[0], dist_lim[1], .1),
z=dist_hist2d[int(dist_lim[0]/.1):int(dist_lim[1]/.1), :],
zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1]*16), mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna'%(antenna),
'xanchor': 'center', 'yanchor': 'top', 'y': .95, 'x': .5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory, 'orientation_exp3_%s.png'%(antenna)))
# Plot position data
for antenna in antennas:
fig = make_subplots(rows=2, cols=1,
subplot_titles=['Initiator RSSI vs. position',
'Calculated distance vs. position'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp4'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in angle_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi-5 < db_lim[1]:
db_lim[1] = rssi-5
if rssi+5 > db_lim[0]:
db_lim[0] = rssi+5
for dist in Dist:
if dist-.5 < dist_lim[0]:
dist_lim[0] = dist-.5
if dist+.5 > dist_lim[1]:
dist_lim[1] = dist+.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1]/len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0]/.1))
column[row_idx] = row[1]/len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(
x=np.arange(0, 360, 45),
y=np.arange(db_lim[0], db_lim[1], -1),
z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(
x=np.arange(0, 360, 45),
y=np.arange(dist_lim[0], dist_lim[1], .1),
z=dist_hist2d[int(dist_lim[0]/.1):int(dist_lim[1]/.1), :],
zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 360, 45), y=np.array([1]*16), mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna'%(antenna),
'xanchor': 'center', 'yanchor': 'top', 'y': .95, 'x': .5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory, 'orientation_exp4_%s.png'%(antenna)))
# Plot separation data
for antenna in antennas:
fig = make_subplots(rows=2, cols=2,
subplot_titles=['Line of sight', 'Blocked'],
shared_xaxes=True)
rssi_los_hist2d = []
dist_los_hist2d = []
experiment = 'distance_los'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in distance_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi-5 < db_lim[1]:
db_lim[1] = rssi-5
if rssi+5 > db_lim[0]:
db_lim[0] = rssi+5
for dist in Dist:
if dist-.5 < dist_lim[0]:
dist_lim[0] = dist-.5
if dist+.5 > dist_lim[1]:
dist_lim[1] = dist+.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1]/len(data['i_rssi'])
rssi_los_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0]/.1))
column[row_idx] = row[1]/len(Dist)
dist_los_hist2d.append(column)
rssi_los_hist2d = np.array(rssi_los_hist2d).T
dist_los_hist2d = np.array(dist_los_hist2d).T
rssi_blocked_hist2d = []
dist_blocked_hist2d = []
experiment = 'distance_blocked'
for filename in distance_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi-5 < db_lim[1]:
db_lim[1] = rssi-5
if rssi+5 > db_lim[0]:
db_lim[0] = rssi+5
for dist in Dist:
if dist-.5 < dist_lim[0]:
dist_lim[0] = dist-.5
if dist+.5 > dist_lim[1]:
dist_lim[1] = dist+.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1]/len(data['i_rssi'])
rssi_blocked_hist2d.append(column)
column = np.zeros(1000)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0]/.1))
column[row_idx] = row[1]/len(Dist)
dist_blocked_hist2d.append(column)
rssi_blocked_hist2d = np.array(rssi_blocked_hist2d).T
dist_blocked_hist2d = np.array(dist_blocked_hist2d).T
    maxz = np.max([np.max(rssi_los_hist2d), np.max(dist_los_hist2d), np.max(rssi_blocked_hist2d), np.max(dist_blocked_hist2d)])
fig.add_trace(go.Heatmap(
x=np.arange(.75, 3.25, .25),
y=np.arange(db_lim[0], db_lim[1], -1),
z=rssi_los_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(
x=np.arange(.75, 3.25, .25),
y=np.arange(dist_lim[0], dist_lim[1], .1),
z=dist_los_hist2d[int(dist_lim[0]/.1):int(dist_lim[1]/.1), :],
zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Heatmap(
x=np.arange(.75, 3.25, .25),
y=np.arange(db_lim[0], db_lim[1], -1),
z=rssi_blocked_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=2)
fig.add_trace(go.Heatmap(
x=np.arange(.75, 3.25, .25),
y=np.arange(dist_lim[0], dist_lim[1], .1),
z=dist_blocked_hist2d[int(dist_lim[0]/.1):int(dist_lim[1]/.1), :],
zmin=0, zmax=maxz), row=2, col=2)
fig.add_trace(go.Scatter(x=np.arange(.75, 3.25, .25), y=np.arange(.75, 3.25, .25), mode='lines', line=ref_line), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(.75, 3.25, .25), y=np.arange(.75, 3.25, .25), mode='lines', line=ref_line), row=2, col=2)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna'%(antenna),
'xanchor': 'center', 'yanchor': 'top', 'y': .95, 'x': .5})
fig.update_xaxes(title='Separation (m)', row=2, col=1)
fig.update_xaxes(title='Separation (m)', row=2, col=2)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory, 'distance_%s.png'%(antenna))) | [
0,
1,
2,
3,
4
] |
809 | 4cb601d7fc4023e145c6d510d27507214ddbd2d3 | <mask token>
def register(request):
if request.method == 'GET':
return render(request, 'home/home.html')
else:
name = request.POST['name']
username = request.POST['uname']
email = request.POST['email']
password = request.POST['password']
if name and username and email and password:
if not User.objects.filter(username=username).exists():
user = User.objects.create_user(first_name=name, username=
username, email=email, password=password)
u = authenticate(username=username, password=password)
if u is not None:
print('authenticated')
login(request, u)
request.session['id'] = user.id
return redirect('user')
else:
                return redirect('/')
def login_view(request):
if request.method == 'GET':
if 'id' in request.session:
return redirect('user')
return render(request, 'home/login.html')
else:
username = request.POST['uname']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
print('user active')
login(request, user)
request.session['id'] = User.objects.filter(username=username
).values('id')[0]['id']
return redirect('user')
else:
return render(request, 'home/home.html')
else:
return redirect('/')
def user(request):
if request.method == 'GET':
try:
uid = request.GET['id']
except:
uid = request.session['id']
user = User.objects.get(pk=int(uid))
genre = Genres.objects.all()
fbook = UserBook.objects.filter(user=user)
genre_list = []
for i in fbook:
if i.book.genre.id in genre_list:
pass
else:
genre_list.append(i.book.genre.id)
if len(genre_list) != 0:
number = 5 // len(genre_list)
isselected = 1
recbook = set()
for i in genre_list:
book = Books.objects.filter(genre=int(i)).order_by('-rating')
while len(recbook) < 5:
if len(book) >= number:
for k in range(0, number):
recbook.add(book[k])
else:
for k in range(0, len(book)):
recbook.add(book[k])
break
else:
isselected = 0
recbook = ''
return render(request, 'home/user.html', {'user': user, 'genre':
genre, 'fbook': fbook, 'recbook': recbook, 'isset': isselected})
else:
user = User.objects.get(pk=int(request.session['id']))
book = request.POST['book']
userbook = UserBook(user=user, book=Books.objects.get(pk=int(book)))
userbook.save()
return redirect('user')
def genre(request):
if request.method == 'GET':
id = request.GET['id']
books = Books.objects.filter(genre=id)
return render(request, 'home/genre.html', {'books': books})
def book(request):
if request.method == 'GET':
id = request.GET['id']
book = Books.objects.get(pk=int(id))
if UserBook.objects.filter(user=User.objects.get(pk=int(request.
session['id'])), book=book).exists():
follow = 1
else:
follow = 0
comment = UserCommentBook.objects.filter(book=book)
return render(request, 'home/book.html', {'book': book, 'comment':
comment, 'follow': follow})
else:
comment = request.POST['comment']
book = request.POST['id']
comment = UserCommentBook(user=User.objects.get(pk=int(request.
session['id'])), book=Books.objects.get(pk=int(book)), comment=
comment)
comment.save()
return redirect('book/?id=' + str(book))
<mask token>
| <mask token>
def register(request):
if request.method == 'GET':
return render(request, 'home/home.html')
else:
name = request.POST['name']
username = request.POST['uname']
email = request.POST['email']
password = request.POST['password']
if name and username and email and password:
if not User.objects.filter(username=username).exists():
user = User.objects.create_user(first_name=name, username=
username, email=email, password=password)
u = authenticate(username=username, password=password)
if u is not None:
print('authenticated')
login(request, u)
request.session['id'] = user.id
return redirect('user')
else:
                return redirect('/')
def login_view(request):
if request.method == 'GET':
if 'id' in request.session:
return redirect('user')
return render(request, 'home/login.html')
else:
username = request.POST['uname']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
print('user active')
login(request, user)
request.session['id'] = User.objects.filter(username=username
).values('id')[0]['id']
return redirect('user')
else:
return render(request, 'home/home.html')
else:
return redirect('/')
def user(request):
if request.method == 'GET':
try:
uid = request.GET['id']
except:
uid = request.session['id']
user = User.objects.get(pk=int(uid))
genre = Genres.objects.all()
fbook = UserBook.objects.filter(user=user)
genre_list = []
for i in fbook:
if i.book.genre.id in genre_list:
pass
else:
genre_list.append(i.book.genre.id)
if len(genre_list) != 0:
number = 5 // len(genre_list)
isselected = 1
recbook = set()
for i in genre_list:
book = Books.objects.filter(genre=int(i)).order_by('-rating')
while len(recbook) < 5:
if len(book) >= number:
for k in range(0, number):
recbook.add(book[k])
else:
for k in range(0, len(book)):
recbook.add(book[k])
break
else:
isselected = 0
recbook = ''
return render(request, 'home/user.html', {'user': user, 'genre':
genre, 'fbook': fbook, 'recbook': recbook, 'isset': isselected})
else:
user = User.objects.get(pk=int(request.session['id']))
book = request.POST['book']
userbook = UserBook(user=user, book=Books.objects.get(pk=int(book)))
userbook.save()
return redirect('user')
def genre(request):
if request.method == 'GET':
id = request.GET['id']
books = Books.objects.filter(genre=id)
return render(request, 'home/genre.html', {'books': books})
def book(request):
if request.method == 'GET':
id = request.GET['id']
book = Books.objects.get(pk=int(id))
if UserBook.objects.filter(user=User.objects.get(pk=int(request.
session['id'])), book=book).exists():
follow = 1
else:
follow = 0
comment = UserCommentBook.objects.filter(book=book)
return render(request, 'home/book.html', {'book': book, 'comment':
comment, 'follow': follow})
else:
comment = request.POST['comment']
book = request.POST['id']
comment = UserCommentBook(user=User.objects.get(pk=int(request.
session['id'])), book=Books.objects.get(pk=int(book)), comment=
comment)
comment.save()
return redirect('book/?id=' + str(book))
def logout_view(request):
logout(request)
return redirect('/')
| <mask token>
def home(request):
if request.method == 'GET':
daily_users = User.objects.filter(date_joined__contains=date.today()
).count()
return render(request, 'home/home.html', {'users': daily_users})
def register(request):
if request.method == 'GET':
return render(request, 'home/home.html')
else:
name = request.POST['name']
username = request.POST['uname']
email = request.POST['email']
password = request.POST['password']
if name and username and email and password:
if not User.objects.filter(username=username).exists():
user = User.objects.create_user(first_name=name, username=
username, email=email, password=password)
u = authenticate(username=username, password=password)
if u is not None:
print('authenticated')
login(request, u)
request.session['id'] = user.id
return redirect('user')
else:
                return redirect('/')
def login_view(request):
if request.method == 'GET':
if 'id' in request.session:
return redirect('user')
return render(request, 'home/login.html')
else:
username = request.POST['uname']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
print('user active')
login(request, user)
request.session['id'] = User.objects.filter(username=username
).values('id')[0]['id']
return redirect('user')
else:
return render(request, 'home/home.html')
else:
return redirect('/')
def user(request):
if request.method == 'GET':
try:
uid = request.GET['id']
except:
uid = request.session['id']
user = User.objects.get(pk=int(uid))
genre = Genres.objects.all()
fbook = UserBook.objects.filter(user=user)
genre_list = []
for i in fbook:
if i.book.genre.id in genre_list:
pass
else:
genre_list.append(i.book.genre.id)
if len(genre_list) != 0:
number = 5 // len(genre_list)
isselected = 1
recbook = set()
for i in genre_list:
book = Books.objects.filter(genre=int(i)).order_by('-rating')
while len(recbook) < 5:
if len(book) >= number:
for k in range(0, number):
recbook.add(book[k])
else:
for k in range(0, len(book)):
recbook.add(book[k])
break
else:
isselected = 0
recbook = ''
return render(request, 'home/user.html', {'user': user, 'genre':
genre, 'fbook': fbook, 'recbook': recbook, 'isset': isselected})
else:
user = User.objects.get(pk=int(request.session['id']))
book = request.POST['book']
userbook = UserBook(user=user, book=Books.objects.get(pk=int(book)))
userbook.save()
return redirect('user')
def genre(request):
if request.method == 'GET':
id = request.GET['id']
books = Books.objects.filter(genre=id)
return render(request, 'home/genre.html', {'books': books})
def book(request):
if request.method == 'GET':
id = request.GET['id']
book = Books.objects.get(pk=int(id))
if UserBook.objects.filter(user=User.objects.get(pk=int(request.
session['id'])), book=book).exists():
follow = 1
else:
follow = 0
comment = UserCommentBook.objects.filter(book=book)
return render(request, 'home/book.html', {'book': book, 'comment':
comment, 'follow': follow})
else:
comment = request.POST['comment']
book = request.POST['id']
comment = UserCommentBook(user=User.objects.get(pk=int(request.
session['id'])), book=Books.objects.get(pk=int(book)), comment=
comment)
comment.save()
return redirect('book/?id=' + str(book))
def logout_view(request):
logout(request)
return redirect('/')
| from django.shortcuts import render, redirect
from .models import *
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from datetime import date
def home(request):
if request.method == 'GET':
daily_users = User.objects.filter(date_joined__contains=date.today()
).count()
return render(request, 'home/home.html', {'users': daily_users})
def register(request):
if request.method == 'GET':
return render(request, 'home/home.html')
else:
name = request.POST['name']
username = request.POST['uname']
email = request.POST['email']
password = request.POST['password']
if name and username and email and password:
if not User.objects.filter(username=username).exists():
user = User.objects.create_user(first_name=name, username=
username, email=email, password=password)
u = authenticate(username=username, password=password)
if u is not None:
print('authenticated')
login(request, u)
request.session['id'] = user.id
return redirect('user')
else:
                return redirect('/')
def login_view(request):
if request.method == 'GET':
if 'id' in request.session:
return redirect('user')
return render(request, 'home/login.html')
else:
username = request.POST['uname']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
print('user active')
login(request, user)
request.session['id'] = User.objects.filter(username=username
).values('id')[0]['id']
return redirect('user')
else:
return render(request, 'home/home.html')
else:
return redirect('/')
def user(request):
if request.method == 'GET':
try:
uid = request.GET['id']
except:
uid = request.session['id']
user = User.objects.get(pk=int(uid))
genre = Genres.objects.all()
fbook = UserBook.objects.filter(user=user)
genre_list = []
for i in fbook:
if i.book.genre.id in genre_list:
pass
else:
genre_list.append(i.book.genre.id)
if len(genre_list) != 0:
number = 5 // len(genre_list)
isselected = 1
recbook = set()
for i in genre_list:
book = Books.objects.filter(genre=int(i)).order_by('-rating')
while len(recbook) < 5:
if len(book) >= number:
for k in range(0, number):
recbook.add(book[k])
else:
for k in range(0, len(book)):
recbook.add(book[k])
break
else:
isselected = 0
recbook = ''
return render(request, 'home/user.html', {'user': user, 'genre':
genre, 'fbook': fbook, 'recbook': recbook, 'isset': isselected})
else:
user = User.objects.get(pk=int(request.session['id']))
book = request.POST['book']
userbook = UserBook(user=user, book=Books.objects.get(pk=int(book)))
userbook.save()
return redirect('user')
def genre(request):
if request.method == 'GET':
id = request.GET['id']
books = Books.objects.filter(genre=id)
return render(request, 'home/genre.html', {'books': books})
def book(request):
if request.method == 'GET':
id = request.GET['id']
book = Books.objects.get(pk=int(id))
if UserBook.objects.filter(user=User.objects.get(pk=int(request.
session['id'])), book=book).exists():
follow = 1
else:
follow = 0
comment = UserCommentBook.objects.filter(book=book)
return render(request, 'home/book.html', {'book': book, 'comment':
comment, 'follow': follow})
else:
comment = request.POST['comment']
book = request.POST['id']
comment = UserCommentBook(user=User.objects.get(pk=int(request.
session['id'])), book=Books.objects.get(pk=int(book)), comment=
comment)
comment.save()
return redirect('book/?id=' + str(book))
def logout_view(request):
logout(request)
return redirect('/')
| from django.shortcuts import render, redirect
from .models import *
from django.contrib.auth import authenticate ,login,logout
from django.contrib.auth.models import User
from datetime import date
# Create your views here.
def home(request):
if request.method=='GET':
daily_users = User.objects.filter(date_joined__contains=date.today()).count()
return render(request,'home/home.html',{'users':daily_users})
def register(request):
if request.method=='GET':
return render(request,'home/home.html')
else:
name = request.POST['name']
username = request.POST['uname']
email = request.POST['email']
password = request.POST['password']
if name and username and email and password:
if not User.objects.filter(username=username).exists():
user = User.objects.create_user(first_name=name,
username=username,
email=email,
password=password)
u = authenticate(username=username, password=password)
if u is not None:
print("authenticated")
login(request, u)
request.session['id'] = user.id
return redirect('user')
else:
                return redirect('/')
def login_view(request):
if request.method=='GET':
if 'id' in request.session:
return redirect('user')
return render(request,'home/login.html')
else:
username = request.POST['uname']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
print("user active")
login(request, user)
request.session['id'] = User.objects.filter(username=username).values('id')[0]['id']
return redirect('user')
else:
return render(request, 'home/home.html')
else:
return redirect('/')
def user(request):
if request.method=='GET':
try:
uid = request.GET['id']
except:
uid = request.session['id']
#print(uid)
user = User.objects.get(pk=int(uid))
#print(user.username)
genre = Genres.objects.all()
fbook = UserBook.objects.filter(user=user)
genre_list = []
for i in fbook:
if i.book.genre.id in genre_list:
pass
else:
genre_list.append(i.book.genre.id)
if len(genre_list)!=0:
number = 5//len(genre_list)
isselected = 1
recbook = set()
for i in genre_list:
book = Books.objects.filter(genre=int(i)).order_by('-rating')
while len(recbook)<5:
if len(book)>=number:
for k in range(0,number):
recbook.add(book[k])
else:
for k in range(0,len(book)):
recbook.add(book[k])
break
else:
isselected = 0
recbook =""
return render(request,'home/user.html',{'user':user,'genre':genre,"fbook":fbook,'recbook':recbook,'isset':isselected})
else:
user = User.objects.get(pk=int(request.session['id']))
book = request.POST['book']
userbook = UserBook(
user = user,
book=Books.objects.get(pk=int(book))
)
userbook.save()
return redirect('user')
def genre(request):
if request.method=='GET':
id = request.GET['id']
books = Books.objects.filter(genre=id)
return render(request,'home/genre.html',{'books':books,})
def book(request):
if request.method=='GET':
id = request.GET['id']
book = Books.objects.get(pk=(int(id)))
if UserBook.objects.filter(user=User.objects.get(pk=int(request.session['id'])),book=book).exists():
follow = 1
else:
follow = 0
comment = UserCommentBook.objects.filter(book=book)
return render(request, 'home/book.html',{'book':book,'comment':comment,'follow':follow})
else:
comment = request.POST['comment']
book = request.POST['id']
comment = UserCommentBook(
user = User.objects.get(pk=int(request.session['id'])),
book = Books.objects.get(pk=int(book)),
comment=comment,
)
comment.save()
return redirect('book/?id='+str(book))
def logout_view(request):
logout(request)
return redirect('/') | [
5,
6,
7,
8,
9
] |
810 | 0ed99037d7ff708b7931fbc3553b1aeb19a20f53 | <mask token>
| <mask token>
class IntQueue(Queue):
<mask token>
def __init__(self, maxSize):
"""
maxSize is the maximum number of items
that can be in the queue at any given time
"""
self.front = 0
self.end = 0
self.qSize = 0
self.data = arr('i', (0 for i in range(maxSize)))
def isEmpty(self):
"""
Return true/false on whether the queue is empty
"""
return self.qSize == 0
def size(self):
"""
Return the number of elements inside the queue
"""
return self.qSize
def peek(self):
if self.isEmpty():
raise Exception('Queue is empty')
self.front = self.front % len(self.data)
return self.data[self.front]
def isFull(self):
return self.qSize == len(self.data)
def offer(self, value):
"""
Add an element to the queue
"""
if self.isFull():
raise Exception('Queue too small!')
self.data[self.end] = value
self.end += 1
self.qSize += 1
self.end = self.end % len(self.data)
def poll(self):
"""
        Make sure you check that the queue is not empty before calling poll!
"""
if self.isEmpty():
raise Exception('Queue is empty')
self.qSize -= 1
self.front = self.front % len(self.data)
d = self.data[self.front]
self.front += 1
return d
<mask token>
| <mask token>
class IntQueue(Queue):
"""
An integer only implementation of a queue
"""
def __init__(self, maxSize):
"""
maxSize is the maximum number of items
that can be in the queue at any given time
"""
self.front = 0
self.end = 0
self.qSize = 0
self.data = arr('i', (0 for i in range(maxSize)))
def isEmpty(self):
"""
Return true/false on whether the queue is empty
"""
return self.qSize == 0
def size(self):
"""
Return the number of elements inside the queue
"""
return self.qSize
def peek(self):
if self.isEmpty():
raise Exception('Queue is empty')
self.front = self.front % len(self.data)
return self.data[self.front]
def isFull(self):
return self.qSize == len(self.data)
def offer(self, value):
"""
Add an element to the queue
"""
if self.isFull():
raise Exception('Queue too small!')
self.data[self.end] = value
self.end += 1
self.qSize += 1
self.end = self.end % len(self.data)
def poll(self):
"""
        Make sure you check that the queue is not empty before calling poll!
"""
if self.isEmpty():
raise Exception('Queue is empty')
self.qSize -= 1
self.front = self.front % len(self.data)
d = self.data[self.front]
self.front += 1
return d
def benchMarkTest():
"""
BenchMark IntQueue vs ArrayDeque.
"""
n = 10000000
intQ = IntQueue(n)
start = time.process_time()
for i in range(0, n):
intQ.offer(i)
for i in range(0, n):
intQ.poll()
end = time.process_time()
print('IntQueue Time: ', end - start)
arrayDeque = deque()
start = time.process_time()
for i in range(0, n):
arrayDeque.append(i)
for i in range(0, n):
arrayDeque.popleft()
end = time.process_time()
print('ArrayDeque Time: ', end - start)
if __name__ == '__main__':
"""
Example usage
"""
q = IntQueue(5)
q.offer(1)
q.offer(2)
q.offer(3)
q.offer(4)
q.offer(5)
print(q.poll())
print(q.poll())
print(q.poll())
print(q.poll())
print(q.isEmpty())
q.offer(1)
q.offer(2)
q.offer(3)
print(q.poll())
print(q.poll())
print(q.poll())
print(q.poll())
print(q.isEmpty())
benchMarkTest()
| <mask token>
import time
from array import array as arr
from collections import deque
from Queue import Queue
class IntQueue(Queue):
"""
An integer only implementation of a queue
"""
def __init__(self, maxSize):
"""
maxSize is the maximum number of items
that can be in the queue at any given time
"""
self.front = 0
self.end = 0
self.qSize = 0
self.data = arr('i', (0 for i in range(maxSize)))
def isEmpty(self):
"""
Return true/false on whether the queue is empty
"""
return self.qSize == 0
def size(self):
"""
Return the number of elements inside the queue
"""
return self.qSize
def peek(self):
if self.isEmpty():
raise Exception('Queue is empty')
self.front = self.front % len(self.data)
return self.data[self.front]
def isFull(self):
return self.qSize == len(self.data)
def offer(self, value):
"""
Add an element to the queue
"""
if self.isFull():
raise Exception('Queue too small!')
self.data[self.end] = value
self.end += 1
self.qSize += 1
self.end = self.end % len(self.data)
def poll(self):
"""
        Make sure you check that the queue is not empty before calling poll!
"""
if self.isEmpty():
raise Exception('Queue is empty')
self.qSize -= 1
self.front = self.front % len(self.data)
d = self.data[self.front]
self.front += 1
return d
def benchMarkTest():
"""
BenchMark IntQueue vs ArrayDeque.
"""
n = 10000000
intQ = IntQueue(n)
start = time.process_time()
for i in range(0, n):
intQ.offer(i)
for i in range(0, n):
intQ.poll()
end = time.process_time()
print('IntQueue Time: ', end - start)
arrayDeque = deque()
start = time.process_time()
for i in range(0, n):
arrayDeque.append(i)
for i in range(0, n):
arrayDeque.popleft()
end = time.process_time()
print('ArrayDeque Time: ', end - start)
if __name__ == '__main__':
"""
Example usage
"""
q = IntQueue(5)
q.offer(1)
q.offer(2)
q.offer(3)
q.offer(4)
q.offer(5)
print(q.poll())
print(q.poll())
print(q.poll())
print(q.poll())
print(q.isEmpty())
q.offer(1)
q.offer(2)
q.offer(3)
print(q.poll())
print(q.poll())
print(q.poll())
print(q.poll())
print(q.isEmpty())
benchMarkTest()
| '''
* @file IntQueue.py
* @author (original JAVA) William Fiset, [email protected]
* liujingkun, [email protected]
* (conversion to Python) Armin Zare Zadeh, [email protected]
* @date 23 Jun 2020
* @version 0.1
* @brief This file contains an implementation of an integer only queue.
*
'''
import time
from array import array as arr
from collections import deque
from Queue import Queue
class IntQueue(Queue):
'''
An integer only implementation of a queue
'''
def __init__(self, maxSize):
"""
maxSize is the maximum number of items
that can be in the queue at any given time
"""
self.front = 0
self.end = 0
self.qSize = 0
self.data = arr('i', (0 for i in range(maxSize)))
def isEmpty(self):
"""
Return true/false on whether the queue is empty
"""
return self.qSize == 0
def size(self):
"""
Return the number of elements inside the queue
"""
return self.qSize
def peek(self):
if self.isEmpty():
raise Exception('Queue is empty')
self.front = self.front % len(self.data)
return self.data[self.front]
def isFull(self):
return self.qSize == len(self.data)
def offer(self, value):
"""
Add an element to the queue
"""
if self.isFull():
raise Exception("Queue too small!")
self.data[self.end] = value
self.end += 1
self.qSize += 1
self.end = self.end % len(self.data)
def poll(self):
"""
    Make sure you check that the queue is not empty before calling poll!
"""
if self.isEmpty():
raise Exception('Queue is empty')
self.qSize -= 1
self.front = self.front % len(self.data)
d = self.data[self.front]
self.front += 1
return d
def benchMarkTest():
"""
BenchMark IntQueue vs ArrayDeque.
"""
n = 10000000
intQ = IntQueue(n)
# IntQueue times at around 12.109375 seconds
start = time.process_time()
for i in range(0, n):
intQ.offer(i)
for i in range(0, n):
intQ.poll()
end = time.process_time()
print("IntQueue Time: ", (end - start))
# ArrayDeque times at around 1.1875 seconds
arrayDeque = deque()
start = time.process_time()
for i in range(0, n):
arrayDeque.append(i)
for i in range(0, n):
arrayDeque.popleft()
end = time.process_time()
print("ArrayDeque Time: ", (end - start))
if __name__ == '__main__':
"""
Example usage
"""
q = IntQueue(5)
q.offer(1)
q.offer(2)
q.offer(3)
q.offer(4)
q.offer(5)
print(q.poll()) # 1
print(q.poll()) # 2
print(q.poll()) # 3
print(q.poll()) # 4
print(q.isEmpty()) # false
q.offer(1);
q.offer(2);
q.offer(3);
print(q.poll()) # 5
print(q.poll()) # 1
print(q.poll()) # 2
print(q.poll()) # 3
print(q.isEmpty()) # true
benchMarkTest()
| [
0,
8,
11,
12,
13
] |
811 | acff8618754658104ac36214901d346447a0134f | <mask token>
| <mask token>
firebase_admin.initialize_app(cred, {'databaseURL':
'https://mikro-b4844.firebaseio.com/'})
<mask token>
print(ref.get())
<mask token>
while True:
print(ref.get())
if ref.get() == 'Off' and i == 0:
i = 1
client = mqtt.Client()
client.connect('127.0.0.1', 1883, 60)
client.publish('building/lampu', 'Off')
if ref.get() == 'On' and i == 1:
i = 0
client = mqtt.Client()
client.connect('127.0.0.1', 1883, 60)
client.publish('building/lampu', 'On')
| <mask token>
cred = credentials.Certificate('iot_mikro.json')
firebase_admin.initialize_app(cred, {'databaseURL':
'https://mikro-b4844.firebaseio.com/'})
ref = db.reference('lampu')
print(ref.get())
i = 0
while True:
print(ref.get())
if ref.get() == 'Off' and i == 0:
i = 1
client = mqtt.Client()
client.connect('127.0.0.1', 1883, 60)
client.publish('building/lampu', 'Off')
if ref.get() == 'On' and i == 1:
i = 0
client = mqtt.Client()
client.connect('127.0.0.1', 1883, 60)
client.publish('building/lampu', 'On')
| import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
import paho.mqtt.client as mqtt
cred = credentials.Certificate('iot_mikro.json')
firebase_admin.initialize_app(cred, {'databaseURL':
'https://mikro-b4844.firebaseio.com/'})
ref = db.reference('lampu')
print(ref.get())
i = 0
while True:
print(ref.get())
if ref.get() == 'Off' and i == 0:
i = 1
client = mqtt.Client()
client.connect('127.0.0.1', 1883, 60)
client.publish('building/lampu', 'Off')
if ref.get() == 'On' and i == 1:
i = 0
client = mqtt.Client()
client.connect('127.0.0.1', 1883, 60)
client.publish('building/lampu', 'On')
| import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
import paho.mqtt.client as mqtt
# Fetch the service account key JSON file contents
cred = credentials.Certificate('iot_mikro.json')
# Initialize the app with a service account, granting admin privileges
firebase_admin.initialize_app(cred, {
'databaseURL': 'https://mikro-b4844.firebaseio.com/'
})
ref = db.reference('lampu')
print(ref.get())
i=0
while True:
print(ref.get())
if ref.get()=="Off" and i==0 :
i=1
client = mqtt.Client()
client.connect("127.0.0.1",1883,60)
client.publish("building/lampu", "Off")
if ref.get()=="On" and i==1 :
i=0
client = mqtt.Client()
client.connect("127.0.0.1",1883,60)
client.publish("building/lampu", "On")
# client.disconnect();
| [
0,
1,
2,
3,
4
] |
812 | 077b6d3d7417bbc26e9f23af6f437ff05e3d5771 | <mask token>
| <mask token>
print('输入鲈鱼的先验概率例如:70,对应70%')
<mask token>
for i in range(0, int(a) * 50):
rowa_data = sh.row_values(i)
L.append(rowa_data)
<mask token>
for j in range(5000, 5000 + (100 - int(a)) * 50):
rowa_data = sh.row_values(j)
G.append(rowa_data)
<mask token>
plt.figure(figsize=(8, 6))
plt.title('生成的鲈鱼和鲑鱼数据的散点图', fontproperties=font_set)
plt.xlabel('长度', fontproperties=font_set)
plt.ylabel('宽度', fontproperties=font_set)
plt.scatter(L[:, 0], L[:, 1], marker='o', label='鲈鱼')
plt.scatter(G[:, 0], G[:, 1], marker='s', label='鲑鱼')
<mask token>
plt.plot(x, y, color='red')
plt.legend()
plt.show()
<mask token>
for i in L:
if i[0] + i[1] <= 9:
count = count + 1
<mask token>
print('鲈鱼准确率:%s' % (count / (int(a) * 50)))
<mask token>
for i in G:
if i[0] + i[1] >= 9:
countG = countG + 1
<mask token>
print('鲑鱼准确率:%s' % (countG / ((100 - int(a)) * 50)))
<mask token>
print(pb)
<mask token>
print(pab)
print(pab / pb)
| __author__ = '那位先生Beer'
<mask token>
print('输入鲈鱼的先验概率例如:70,对应70%')
a = input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')
font_set = FontProperties(fname='c:\\windows\\fonts\\simsun.ttc', size=15)
data = xlrd.open_workbook('xqtest.xls')
shxrange = range(data.nsheets)
sh = data.sheet_by_name('1')
L = []
for i in range(0, int(a) * 50):
rowa_data = sh.row_values(i)
L.append(rowa_data)
L = np.array(L)
L = L[:, 0:2]
G = []
for j in range(5000, 5000 + (100 - int(a)) * 50):
rowa_data = sh.row_values(j)
G.append(rowa_data)
G = np.array(G)
G = G[:, 0:2]
plt.figure(figsize=(8, 6))
plt.title('生成的鲈鱼和鲑鱼数据的散点图', fontproperties=font_set)
plt.xlabel('长度', fontproperties=font_set)
plt.ylabel('宽度', fontproperties=font_set)
plt.scatter(L[:, 0], L[:, 1], marker='o', label='鲈鱼')
plt.scatter(G[:, 0], G[:, 1], marker='s', label='鲑鱼')
x = np.linspace(0, 8)
y = -x + 9
plt.plot(x, y, color='red')
plt.legend()
plt.show()
count = 0
for i in L:
if i[0] + i[1] <= 9:
count = count + 1
q = count / (int(a) * 50)
print('鲈鱼准确率:%s' % (count / (int(a) * 50)))
countG = 0
for i in G:
if i[0] + i[1] >= 9:
countG = countG + 1
p = countG / ((100 - int(a)) * 50)
print('鲑鱼准确率:%s' % (countG / ((100 - int(a)) * 50)))
pb = int(a) / 100 * q + (1 - int(a) / 100) * p
print(pb)
pab = int(a) / 100 * q
print(pab)
print(pab / pb)
| __author__ = '那位先生Beer'
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import xlrd
import numpy as np
print('输入鲈鱼的先验概率例如:70,对应70%')
a = input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')
font_set = FontProperties(fname='c:\\windows\\fonts\\simsun.ttc', size=15)
data = xlrd.open_workbook('xqtest.xls')
shxrange = range(data.nsheets)
sh = data.sheet_by_name('1')
L = []
for i in range(0, int(a) * 50):
rowa_data = sh.row_values(i)
L.append(rowa_data)
L = np.array(L)
L = L[:, 0:2]
G = []
for j in range(5000, 5000 + (100 - int(a)) * 50):
rowa_data = sh.row_values(j)
G.append(rowa_data)
G = np.array(G)
G = G[:, 0:2]
plt.figure(figsize=(8, 6))
plt.title('生成的鲈鱼和鲑鱼数据的散点图', fontproperties=font_set)
plt.xlabel('长度', fontproperties=font_set)
plt.ylabel('宽度', fontproperties=font_set)
plt.scatter(L[:, 0], L[:, 1], marker='o', label='鲈鱼')
plt.scatter(G[:, 0], G[:, 1], marker='s', label='鲑鱼')
x = np.linspace(0, 8)
y = -x + 9
plt.plot(x, y, color='red')
plt.legend()
plt.show()
count = 0
for i in L:
if i[0] + i[1] <= 9:
count = count + 1
q = count / (int(a) * 50)
print('鲈鱼准确率:%s' % (count / (int(a) * 50)))
countG = 0
for i in G:
if i[0] + i[1] >= 9:
countG = countG + 1
p = countG / ((100 - int(a)) * 50)
print('鲑鱼准确率:%s' % (countG / ((100 - int(a)) * 50)))
pb = int(a) / 100 * q + (1 - int(a) / 100) * p
print(pb)
pab = int(a) / 100 * q
print(pab)
print(pab / pb)
| __author__ = "那位先生Beer"
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import xlrd
import numpy as np
print('输入鲈鱼的先验概率例如:70,对应70%')
a=input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')
font_set = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=15)
#Plot the generated data (x-axis: length, y-axis: lightness)
data=xlrd.open_workbook('xqtest.xls')
shxrange=range(data.nsheets)
sh=data.sheet_by_name("1")
L=[]
for i in range(0,(int(a))*50):
rowa_data=sh.row_values(i)
L.append(rowa_data)
L=np.array(L)
L=L[:,0:2]
G=[]
for j in range(5000,5000+(100-int(a))*50):
rowa_data = sh.row_values(j)
G.append(rowa_data)
G=np.array(G)
G=G[:,0:2]
plt.figure(figsize=(8,6))
plt.title("生成的鲈鱼和鲑鱼数据的散点图",fontproperties=font_set)
plt.xlabel("长度",fontproperties=font_set)
plt.ylabel("宽度",fontproperties=font_set)
plt.scatter(L[:,0],L[:,1],marker="o",label="鲈鱼")
plt.scatter(G[:,0],G[:,1],marker="s",label="鲑鱼")
# Classification model
x = np.linspace(0,8)
y = -x+9
plt.plot(x,y, color="red")
plt.legend()
plt.show()
#The simulated sea bass are relatively small, so they fall below the line, i.e. y+x<=9:
#Compute the accuracy
count=0
for i in L:
if i[0]+i[1]<=9:
count=count+1
q=(count/((int(a))*50))
print('鲈鱼准确率:%s'%(count/((int(a))*50)))
countG=0
for i in G:
if i[0]+i[1]>=9:
countG=countG+1
p=(countG/((100-int(a))*50))
print('鲑鱼准确率:%s'%(countG/((100-int(a))*50)))
#p(b)=p(b|a)*p(a) + p(b|-a)p(-a)
pb=(int(a)/100)*q + (1-(int(a)/100))*p
print(pb)
#p(ab)=p(b|a)*p(a)
pab=(int(a)/100)*q
print(pab)
print(pab/pb)
| [
0,
1,
2,
3,
4
] |
813 | 0ea67ac97ec8e7f287a2430c67f8f7d841d8b646 | <mask token>
class TestSummary(base.BaseTestCase):
<mask token>
<mask token>
| <mask token>
class TestSummary(base.BaseTestCase):
def setUp(self):
super(TestSummary, self).setUp()
<mask token>
| <mask token>
class TestSummary(base.BaseTestCase):
def setUp(self):
super(TestSummary, self).setUp()
def test_nulls(self):
s = report.SummaryModel(begin=None, end=None, tenant_id=None,
res_type=None, rate=None)
self.assertIsNone(s.begin)
self.assertIsNone(s.end)
self.assertEqual(s.tenant_id, 'ALL')
self.assertEqual(s.res_type, 'ALL')
self.assertEqual(s.rate, '0')
| <mask token>
from oslotest import base
from cloudkitty.api.v1.datamodels import report
class TestSummary(base.BaseTestCase):
def setUp(self):
super(TestSummary, self).setUp()
def test_nulls(self):
s = report.SummaryModel(begin=None, end=None, tenant_id=None,
res_type=None, rate=None)
self.assertIsNone(s.begin)
self.assertIsNone(s.end)
self.assertEqual(s.tenant_id, 'ALL')
self.assertEqual(s.res_type, 'ALL')
self.assertEqual(s.rate, '0')
| # -*- coding: utf-8 -*-
# Copyright 2017 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Test SummaryModel objects."""
from oslotest import base
from cloudkitty.api.v1.datamodels import report
class TestSummary(base.BaseTestCase):
def setUp(self):
super(TestSummary, self).setUp()
def test_nulls(self):
s = report.SummaryModel(begin=None,
end=None,
tenant_id=None,
res_type=None,
rate=None)
self.assertIsNone(s.begin)
self.assertIsNone(s.end)
self.assertEqual(s.tenant_id, "ALL")
self.assertEqual(s.res_type, "ALL")
self.assertEqual(s.rate, "0")
| [
1,
2,
3,
4,
5
] |
814 | 883b4de18dddede97f850e3a184a0e1072bda99e | <mask token>
| <mask token>
def solve(dpArr, list, box, i):
global boxes
global ans
if box == boxes:
s = 0
for j in list:
s += len(j)
if s == len(dpArr):
mx = 0
for j in list:
if sum(j) > mx:
mx = sum(j)
if mx < ans or ans == -1:
ans = mx
return
for j in range(1, len(dpArr) + 1):
if i + j > len(dpArr):
break
solve(dpArr, list + [dpArr[i:i + j]], box + 1, i + j)
<mask token>
| <mask token>
def solve(dpArr, list, box, i):
global boxes
global ans
if box == boxes:
s = 0
for j in list:
s += len(j)
if s == len(dpArr):
mx = 0
for j in list:
if sum(j) > mx:
mx = sum(j)
if mx < ans or ans == -1:
ans = mx
return
for j in range(1, len(dpArr) + 1):
if i + j > len(dpArr):
break
solve(dpArr, list + [dpArr[i:i + j]], box + 1, i + j)
<mask token>
solve(dpArr=inp, list=[], box=0, i=0)
print('Minimum weight for', boxes, 'box(es) =', ans)
| boxes = 0
ans = -1
def solve(dpArr, list, box, i):
global boxes
global ans
if box == boxes:
s = 0
for j in list:
s += len(j)
if s == len(dpArr):
mx = 0
for j in list:
if sum(j) > mx:
mx = sum(j)
if mx < ans or ans == -1:
ans = mx
return
for j in range(1, len(dpArr) + 1):
if i + j > len(dpArr):
break
solve(dpArr, list + [dpArr[i:i + j]], box + 1, i + j)
inp = input('Enter Input : ')
inp, boxes = list(map(int, inp.split('/')[0].split())), int(inp.split('/')[1])
solve(dpArr=inp, list=[], box=0, i=0)
print('Minimum weight for', boxes, 'box(es) =', ans)
| # #1
# def bi_search(l, r, arr, x):
# # Code Here
# if(l == r):
# return arr[r] == x
# mid = (l + r)//2 + 1
# if(arr[mid] > x):
# return bi_search(l,mid-1,arr,x)
# else:
# return bi_search(mid,r,arr,x)
# inp = input('Enter Input : ').split('/')
# arr, k = list(map(int, inp[0].split())), int(inp[1])
# print(bi_search(0, len(arr) - 1, sorted(arr), k))
# #2
# def bi_search(l, r, arr, x):
# if(l == r):
# if arr[l] > x :
# return arr[l]
# else:
# return None
# mid = (l + r)//2 + 1
# res = None
# if(arr[mid] > x):
# res = bi_search(l,mid-1,arr,x)
# else:
# res = bi_search(mid,r,arr,x)
# return res if res else (arr[mid] if arr[mid] > x else None)
# inp = input('Enter Input : ').split('/')
# arr, arr2 = sorted(list(map(int, inp[0].split()))), list(map(int, inp[1].split()))
# for k in arr2:
# res = bi_search(0, len(arr) - 1, arr, k)
# print(res if res else "No First Greater Value")
#3
# class Data:
# def __init__(self, key, value):
# self.key = key
# self.value = value
# def __str__(self):
# return "({0}, {1})".format(self.key, self.value)
# class hash:
# def __init__(self,max,chain):
# self.data = [None for i in range(max)]
# self.limit= max
# self.chain= chain
# self.length = 0
# def code(self,a):
# return sum([ord(i) for i in a])
# def isFull(self):
# return self.length == self.limit
# def insert(self,value):
# key,val = value.split(" ")
# s = self.code(key)
# co = 0
# now = 0
# while(co <= self.chain):
# if(co != 0):
# print ("collision number",co,"at",now)
# if(co == self.chain):
# break
# now = (s + (0 if not co else co*co) ) % self.limit
# if(self.data[now] == None):
# self.data[now] = Data(key,val)
# self.length += 1
# break
# co += 1
# if(co >= self.chain):
# print("Max of collisionChain")
# def __str__(self):
# return "\n".join(list(map(str,[ "#{0} {1}".format(str(i+1),self.data[i]) for i in range( len(self.data) ) ] ) ) ) + "\n---------------------------"
# print(" ***** Fun with hashing *****")
# val,arr = input("Enter Input : ").split("/")
# h = hash(int(val.split(" ")[0]),int(val.split(" ")[1]))
# arr = arr.split(",")
# for i in arr:
# h.insert(i)
# print(h)
# if(h.isFull()):
# print("This table is full !!!!!!")
# break
#4
# import math
# class Data:
# def __init__(self, value):
# self.value = value
# def __str__(self):
# return str(self.value)
# class hash:
# def __init__(self,max,chain,t):
# self.data = [None for i in range(max)]
# self.limit = max
# self.chain = chain
# self.length = 0
# self.threshold = t
# self.bu = list()
# def code(self,a):
# # return sum([ord(i) for i in a])
# return int(a)
# def isFull(self):
# return self.length == self.limit
# def findNearPrime(self):
# i = self.limit * 2
# while(True):
# c = True
# for j in range(2, int(math.sqrt(i)) + 1):
# if(not i % j):
# i += 1
# c = False
# break
# if c :
# break
# return i
# def handlerIllegal(self,co,value):
# if(self.length * 100 // self.limit >= self.threshold):
# print("****** Data over threshold - Rehash !!! ******")
# self.resize()
# self.Rehash()
# elif (co >= self.chain):
# print("****** Max collision - Rehash !!! ******")
# self.resize()
# self.Rehash()
# def resize(self):
# self.data += [None for i in range(self.findNearPrime() - self.limit)]
# self.limit = len(self.data)
# def Rehash(self):
# for i in range(self.limit):
# self.data[i] = None
# for i in self.bu:
# self.insert(i,False)
# def insert(self,value,Rehash = True):
# s = self.code(value)
# co = 0
# now = 0
# while(co <= self.chain):
# if(co != 0):
# print ("collision number",co,"at",now)
# if(co == self.chain):
# break
# now = (s + (0 if not co else co*co) ) % self.limit
# if(self.data[now] == None):
# self.data[now] = Data(value)
# if(Rehash):
# self.length += 1
# break
# co += 1
# if(Rehash):
# self.handlerIllegal(co,value)
# def addBuff(self,value):
# self.bu.append(value)
# def __str__(self):
# return "\n".join(list(map(str,[ "#{0} {1}".format(str(i+1),self.data[i]) for i in range( len(self.data) ) ] ) ) ) + "\n----------------------------------------"
# print(" ***** Rehashing *****")
# val,arr = input("Enter Input : ").split("/")
# h = hash(int(val.split(" ")[0]),int(val.split(" ")[1]),int(val.split(" ")[2]))
# arr = arr.split()
# print("Initial Table :",h,sep="\n")
# for i in arr:
# print("Add :",i)
# h.addBuff(i)
# h.insert(i)
# print(h)
# if(h.isFull()):
# print("This table is full !!!!!!")
# break
# 5
boxes = 0
ans = -1
def solve(dpArr,list,box,i):
global boxes
global ans
if(box == boxes):
s = 0
for j in list:
s += len(j)
if(s == len(dpArr)):
mx = 0
for j in list:
if(sum(j) > mx):
mx = sum(j)
if(mx < ans or ans == -1):
ans = mx
return
for j in range(1,len(dpArr) + 1):
if ( i + j > len(dpArr) ):
break
solve(dpArr,list + [dpArr[i:i + j]],box + 1 ,i + j)
inp = input("Enter Input : ")
inp,boxes = list(map(int,inp.split("/")[0].split() )) , int( inp.split("/")[1])
# for i in range(1,len(inp)):
# inp[i] += inp[i-1]
solve(dpArr = inp,list = [],box = 0,i = 0)
print("Minimum weigth for",boxes,"box(es) =",ans) | [
0,
1,
2,
3,
4
] |
815 | 95b75395cafc6ba9f75ecf48157421e37ced2518 | <mask token>
def rows(**ro):
print(ro)
<mask token>
| <mask token>
print(id(a))
<mask token>
print('hello.....')
print(type(a))
print(id(a))
<mask token>
print(id(b))
b.append(10)
print(id(b))
<mask token>
print(name)
print(len(name))
print(name[2])
print(name[0:3])
print(name[-2:])
<mask token>
print(message)
<mask token>
print(message)
<mask token>
print(lastname)
print(name + ' ' + lastname)
<mask token>
print('Another way of writing... \n' + full)
print(full.upper())
print(full.find('ip'))
print('Dipesh' in full)
print('Patel' in full)
print(full.replace('Rafaliya', 'Patel'))
print(bin(a))
print(hex(a))
<mask token>
print(x)
print(bin(x))
<mask token>
print(complex)
<mask token>
print(q)
<mask token>
print(w)
<mask token>
print(e)
<mask token>
print(r)
<mask token>
print(t)
<mask token>
print(g)
<mask token>
print(m)
<mask token>
print(abs(PI))
print(round(PI))
<mask token>
print(math.floor(no))
print(math.ceil(no))
<mask token>
if age >= 21:
print('Adult')
elif age >= 13:
print('Teenager')
else:
print('Child')
print('Adult' if age >= 21 else 'Teenager')
for p in 'Dipesh':
print(p)
for l in range(0, 10, 2):
print(l)
<mask token>
while answer != guess:
guess = int(input('Enter your Guess:: '))
else:
pass
def evenodd(numb):
if numb % 2 == 0:
return 'even'
else:
return 'odd'
print('The Number is ' + evenodd(20))
def rows(**ro):
print(ro)
rows(name='Dipesh', id=1)
| <mask token>
a = 5.0
print(id(a))
a = 10
print('hello.....')
print(type(a))
print(id(a))
b = [5, 6, 7]
print(id(b))
b.append(10)
print(id(b))
name = input('Enter Your Name:: ')
print(name)
print(len(name))
print(name[2])
print(name[0:3])
print(name[-2:])
message = 'Python "Programming"'
print(message)
message = """Python
New Line..
Programming"""
print(message)
lastname = input('Enter Your Last Name:: ')
print(lastname)
print(name + ' ' + lastname)
full = f'{name} {lastname}'
print('Another way of writing... \n' + full)
print(full.upper())
print(full.find('ip'))
print('Dipesh' in full)
print('Patel' in full)
print(full.replace('Rafaliya', 'Patel'))
print(bin(a))
print(hex(a))
x = 5
print(x)
print(bin(x))
complex = a + 5.0j
print(complex)
y = 3
q = a + y
print(q)
w = a - y
print(w)
e = a * y
print(e)
r = a / y
print(r)
t = a // y
print(t)
g = a ** y
print(g)
m = a % y
print(m)
PI = 3.14
print(abs(PI))
print(round(PI))
no = -8.56
print(math.floor(no))
print(math.ceil(no))
age = 10
if age >= 21:
print('Adult')
elif age >= 13:
print('Teenager')
else:
print('Child')
print('Adult' if age >= 21 else 'Teenager')
for p in 'Dipesh':
print(p)
for l in range(0, 10, 2):
print(l)
answer = 10
guess = 1
while answer != guess:
guess = int(input('Enter your Guess:: '))
else:
pass
def evenodd(numb):
if numb % 2 == 0:
return 'even'
else:
return 'odd'
print('The Number is ' + evenodd(20))
def rows(**ro):
print(ro)
rows(name='Dipesh', id=1)
| import math
a = 5.0
print(id(a))
a = 10
print('hello.....')
print(type(a))
print(id(a))
b = [5, 6, 7]
print(id(b))
b.append(10)
print(id(b))
name = input('Enter Your Name:: ')
print(name)
print(len(name))
print(name[2])
print(name[0:3])
print(name[-2:])
message = 'Python "Programming"'
print(message)
message = """Python
New Line..
Programming"""
print(message)
lastname = input('Enter Your Last Name:: ')
print(lastname)
print(name + ' ' + lastname)
full = f'{name} {lastname}'
print('Another way of writing... \n' + full)
print(full.upper())
print(full.find('ip'))
print('Dipesh' in full)
print('Patel' in full)
print(full.replace('Rafaliya', 'Patel'))
print(bin(a))
print(hex(a))
x = 5
print(x)
print(bin(x))
complex = a + 5.0j
print(complex)
y = 3
q = a + y
print(q)
w = a - y
print(w)
e = a * y
print(e)
r = a / y
print(r)
t = a // y
print(t)
g = a ** y
print(g)
m = a % y
print(m)
PI = 3.14
print(abs(PI))
print(round(PI))
no = -8.56
print(math.floor(no))
print(math.ceil(no))
age = 10
if age >= 21:
print('Adult')
elif age >= 13:
print('Teenager')
else:
print('Child')
print('Adult' if age >= 21 else 'Teenager')
for p in 'Dipesh':
print(p)
for l in range(0, 10, 2):
print(l)
answer = 10
guess = 1
while answer != guess:
guess = int(input('Enter your Guess:: '))
else:
pass
def evenodd(numb):
if numb % 2 == 0:
return 'even'
else:
return 'odd'
print('The Number is ' + evenodd(20))
def rows(**ro):
print(ro)
rows(name='Dipesh', id=1)
| import math
# type defining of the variable and playing with variables.
a = 5.0
print(id(a))
a = 10
print("hello.....")
print(type(a))
print(id(a))
# locating addresses...
b = [5, 6, 7]
print(id(b))
b.append(10)
print(id(b))
# Strings...
name = input("Enter Your Name:: ") # iNPUTTING AS NAME
print(name)
print(len(name))
print(name[2])
print(name[0:3])
print(name[-2:])
# Escape Sequence
# \'
# \"
# \\
# \n
message = 'Python "Programming"'
print(message)
message = """Python
New Line..
Programming"""
print(message)
# string Concatenation
lastname = input("Enter Your Last Name:: ") # iNPUTTING AS NAME
print(lastname)
print(name + " " + lastname)
full = f"{name} {lastname}"
print("Another way of writing... \n" + full)
print(full.upper()) # converts into upper case.
print(full.find("ip")) # finding location of specific char. Returns index number.
print("Dipesh" in full) # returns Boolean value either true or false..
print("Patel" in full)
print(full.replace("Rafaliya", "Patel"))
# Binary representation of any number...
print(bin(a)) # binary of a = 10
print(hex(a)) # Hexadecimal of a..
x = 0b0101
print((x)) # binary num a
print(bin(x)) # binary printing of a
# complex Number...
complex = a + 5j
print(complex) # printing complex number
y = 3
# operations
q = a + y # addition
print(q)
w = a - y # substraction
print(w)
e = a * y # multiplication
print(e)
r = a / y # division
print(r)
t = a // y # division but only print integer value
print(t)
g = a ** y # to the power of
print(g)
m = a % y # remainder
print(m)
# constants variables..
PI = 3.14 # this is a var with a constant value
print(abs(PI)) # absolute value of PI
print(round(PI)) # round up value of PI
no = -8.56
print(math.floor(no)) # floor value of no
print(math.ceil(no)) # ceiling value of no
# if-elif-else loop
age = 10
if age >= 21:
print("Adult")
elif age >= 13:
print("Teenager")
else:
print("Child")
# ternary operator
print("Adult" if age >= 21 else "Teenager")
# for loops
for p in "Dipesh":
print(p)
for l in range(0, 10, 2): # range is a kind of list...
print(l)
answer = 10
guess = 1
while answer != guess: # while loop for guessing
guess = int(input("Enter your Guess:: "))
else:
pass # this is used to break the loop...
# defining a function ... Number is even or odd..
def evenodd(numb):
if numb % 2 == 0:
return "even"
else:
return "odd"
print("The Number is " + evenodd(20))
# printing the row at a time...
def rows(**ro):
print(ro)
rows(name="Dipesh", id=1)
| [
1,
3,
4,
5,
6
] |
816 | 21bdf315c98a4cf69482cc7db41bc30d44781596 | <mask token>
def upgrade():
op.add_column('Gifs', sa.Column('personal_collections', sa.Integer(),
nullable=True))
op.create_foreign_key(None, 'Gifs', 'PersonalGifCollections', [
'personal_collections'], ['id'])
<mask token>
| <mask token>
def upgrade():
op.add_column('Gifs', sa.Column('personal_collections', sa.Integer(),
nullable=True))
op.create_foreign_key(None, 'Gifs', 'PersonalGifCollections', [
'personal_collections'], ['id'])
def downgrade():
op.drop_constraint(None, 'Gifs', type_='foreignkey')
op.drop_column('Gifs', 'personal_collections')
| <mask token>
revision = '43eabda1d630'
down_revision = '9cad4dfb5125'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('Gifs', sa.Column('personal_collections', sa.Integer(),
nullable=True))
op.create_foreign_key(None, 'Gifs', 'PersonalGifCollections', [
'personal_collections'], ['id'])
def downgrade():
op.drop_constraint(None, 'Gifs', type_='foreignkey')
op.drop_column('Gifs', 'personal_collections')
| <mask token>
from alembic import op
import sqlalchemy as sa
revision = '43eabda1d630'
down_revision = '9cad4dfb5125'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('Gifs', sa.Column('personal_collections', sa.Integer(),
nullable=True))
op.create_foreign_key(None, 'Gifs', 'PersonalGifCollections', [
'personal_collections'], ['id'])
def downgrade():
op.drop_constraint(None, 'Gifs', type_='foreignkey')
op.drop_column('Gifs', 'personal_collections')
| """added personal collection
Revision ID: 43eabda1d630
Revises: 9cad4dfb5125
Create Date: 2018-03-28 13:55:03.557872
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '43eabda1d630'
down_revision = '9cad4dfb5125'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('Gifs', sa.Column('personal_collections', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'Gifs', 'PersonalGifCollections', ['personal_collections'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'Gifs', type_='foreignkey')
op.drop_column('Gifs', 'personal_collections')
# ### end Alembic commands ###
| [
1,
2,
3,
4,
5
] |
817 | 7503a0c8f83ff0ce370ed7bce733b09d9a2c69c4 | <mask token>
| <mask token>
class Input(Base):
def clear(self):
element = self.driver.find_element_by_xpath(self.params['xpath'])
if self.params.get('clear', None):
element.clear()
return True
element.click()
space_num = self.params['space'] if self.params.get('space', None
) else 4
while space_num:
space_num -= 1
element.send_keys(Keys.BACK_SPACE)
<mask token>
| <mask token>
class Input(Base):
def clear(self):
element = self.driver.find_element_by_xpath(self.params['xpath'])
if self.params.get('clear', None):
element.clear()
return True
element.click()
space_num = self.params['space'] if self.params.get('space', None
) else 4
while space_num:
space_num -= 1
element.send_keys(Keys.BACK_SPACE)
def text(self):
print(self.params)
element = self.driver.find_element_by_xpath(self.params['xpath'])
element.send_keys(self.params['text'])
| from selenium.webdriver.common.keys import Keys
from titan.components import Base
class Input(Base):
def clear(self):
element = self.driver.find_element_by_xpath(self.params['xpath'])
if self.params.get('clear', None):
element.clear()
return True
element.click()
space_num = self.params['space'] if self.params.get('space', None
) else 4
while space_num:
space_num -= 1
element.send_keys(Keys.BACK_SPACE)
def text(self):
print(self.params)
element = self.driver.find_element_by_xpath(self.params['xpath'])
element.send_keys(self.params['text'])
| # -*- coding: utf-8 -*-
from selenium.webdriver.common.keys import Keys
from titan.components import Base
class Input(Base):
def clear(self):
element = self.driver.find_element_by_xpath(self.params['xpath'])
if self.params.get('clear', None):
element.clear()
return True
element.click()
space_num = self.params['space']if self.params.get('space', None) else 4
while space_num:
space_num -= 1
element.send_keys(Keys.BACK_SPACE)
def text(self):
print(self.params)
element = self.driver.find_element_by_xpath(self.params['xpath'])
element.send_keys(self.params['text'])
| [
0,
2,
3,
4,
5
] |
818 | b7738c27e11e9566d90157717633312031cdffd6 | <mask token>
class announcement:
def __init__(eps_df, revenue_df):
conn = sqlite3.connect('earnings.db', timeout=120)
cur = conn.cursor()
symbol_href = self.driver.find_element_by_class_name('lfkTWp')
symbol = symbol_href.text
eps_history_df = pd.read_sql(
'select * from estimize_eps where Symbol == "%s"' % symbol, conn)
revenue_history_df = pd.read_sql('select * from estimize_revenue', conn
)
price_history_df = pd.read_sql('select * from price_history', conn)
def get_combined_df(eps_df, revenue_df):
del eps_df['Historical Beat Rate']
del revenue_df['Historical Beat Rate']
date_reported_df = eps_df['Date Reported'].str.split(' ', n=1,
expand=True)
date_reported_df = date_reported_df.rename(columns={(0):
'Date Reported', (1): 'Time Reported'})
date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df
['Date Reported'])
eps_df['Date Reported'] = date_reported_df['Date Reported']
eps_df['Time Reported'] = date_reported_df['Time Reported']
date_reported_df = revenue_df['Date Reported'].str.split(' ', n=1,
expand=True)
date_reported_df = date_reported_df.rename(columns={(0):
'Date Reported', (1): 'Time Reported'})
date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df
['Date Reported'])
revenue_df['Date Reported'] = date_reported_df['Date Reported']
revenue_df['Time Reported'] = date_reported_df['Time Reported']
eps_df = eps_df.sort_values(by='Date Reported')
revenue_df = revenue_df.sort_values(by='Date Reported')
eps_df = eps_df.set_index(['Date Reported', 'Time Reported',
'Symbol'], append=True, drop=True)
revenue_df = revenue_df.set_index(['Date Reported', 'Time Reported',
'Symbol'], append=True, drop=True)
eps_df.columns = 'EPS ' + eps_df.columns
revenue_df.columns = 'Revenue ' + revenue_df.columns
df = eps_df.join(revenue_df)
return df
def get_historical_beat():
df['Historical EPS Beat Ratio'] = None
df['Historical EPS Beat Percent'] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
this_df = df[df.index.get_level_values('Symbol') == symbol]
beat_rate = this_df[this_df.index.get_level_values(
'Date Reported') <= date_reported].tail(8)
if len(beat_rate) >= 4:
beat_rate_ratio = len(beat_rate[beat_rate['EPS Surprise'] > 0]
) / float(len(beat_rate))
beat_rate_percent = beat_rate['EPS Surprise'] / beat_rate[
'EPS Actual']
beat_rate_percent = beat_rate_percent.replace([np.inf, -np.
inf], np.nan)
beat_rate_percent = beat_rate_percent.mean()
df.loc[index_num, ['Historical EPS Beat Ratio']
] = beat_rate_ratio
df.loc[index_num, ['Historical EPS Beat Percent']
] = beat_rate_percent
def get_average_change():
df['Average Change 5 Days'] = None
df['Average Abnormal Change 5 Days'] = None
df['Average Change 10 Days'] = None
df['Average Abnormal Change 10 Days'] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
returns_df = df[df.index.get_level_values('Date Reported') <
date_reported].tail(8)
if len(returns_df) >= 4:
df.loc[index_num, ['Average Change 5 Days']] = returns_df[
'5 Day Change'].mean()
df.loc[index_num, ['Average Change 10 Days']] = returns_df[
'10 Day Change'].mean()
df.loc[index_num, ['Average Abnormal Change 5 Days']
] = returns_df['5 Day Change Abnormal'].mean()
df.loc[index_num, ['Average Abnormal Change 10 Days']
] = returns_df['10 Day Change Abnormal'].mean()
def get_YoY_growth():
df['YoY Growth'] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
time_reported = time_reported.replace("'", '')
quarter_numer, year = time_reported.split(' ')
this_df = df['EPS Actual']
try:
this_quarter = this_df[this_df.index.get_level_values(
'Time Reported') == quarter_numer + " '" + year].values[0]
last_quarter = this_df[this_df.index.get_level_values(
'Time Reported') == quarter_numer + " '" + str(int(year
) - 1)].values[0]
df.loc[index_num, ['YoY Growth']] = (this_quarter -
last_quarter) / last_quarter
except Exception as e:
pass
<mask token>
<mask token>
| <mask token>
class announcement:
def __init__(eps_df, revenue_df):
conn = sqlite3.connect('earnings.db', timeout=120)
cur = conn.cursor()
symbol_href = self.driver.find_element_by_class_name('lfkTWp')
symbol = symbol_href.text
eps_history_df = pd.read_sql(
'select * from estimize_eps where Symbol == "%s"' % symbol, conn)
revenue_history_df = pd.read_sql('select * from estimize_revenue', conn
)
price_history_df = pd.read_sql('select * from price_history', conn)
def get_combined_df(eps_df, revenue_df):
del eps_df['Historical Beat Rate']
del revenue_df['Historical Beat Rate']
date_reported_df = eps_df['Date Reported'].str.split(' ', n=1,
expand=True)
date_reported_df = date_reported_df.rename(columns={(0):
'Date Reported', (1): 'Time Reported'})
date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df
['Date Reported'])
eps_df['Date Reported'] = date_reported_df['Date Reported']
eps_df['Time Reported'] = date_reported_df['Time Reported']
date_reported_df = revenue_df['Date Reported'].str.split(' ', n=1,
expand=True)
date_reported_df = date_reported_df.rename(columns={(0):
'Date Reported', (1): 'Time Reported'})
date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df
['Date Reported'])
revenue_df['Date Reported'] = date_reported_df['Date Reported']
revenue_df['Time Reported'] = date_reported_df['Time Reported']
eps_df = eps_df.sort_values(by='Date Reported')
revenue_df = revenue_df.sort_values(by='Date Reported')
eps_df = eps_df.set_index(['Date Reported', 'Time Reported',
'Symbol'], append=True, drop=True)
revenue_df = revenue_df.set_index(['Date Reported', 'Time Reported',
'Symbol'], append=True, drop=True)
eps_df.columns = 'EPS ' + eps_df.columns
revenue_df.columns = 'Revenue ' + revenue_df.columns
df = eps_df.join(revenue_df)
return df
def get_historical_beat():
df['Historical EPS Beat Ratio'] = None
df['Historical EPS Beat Percent'] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
this_df = df[df.index.get_level_values('Symbol') == symbol]
beat_rate = this_df[this_df.index.get_level_values(
'Date Reported') <= date_reported].tail(8)
if len(beat_rate) >= 4:
beat_rate_ratio = len(beat_rate[beat_rate['EPS Surprise'] > 0]
) / float(len(beat_rate))
beat_rate_percent = beat_rate['EPS Surprise'] / beat_rate[
'EPS Actual']
beat_rate_percent = beat_rate_percent.replace([np.inf, -np.
inf], np.nan)
beat_rate_percent = beat_rate_percent.mean()
df.loc[index_num, ['Historical EPS Beat Ratio']
] = beat_rate_ratio
df.loc[index_num, ['Historical EPS Beat Percent']
] = beat_rate_percent
def get_average_change():
df['Average Change 5 Days'] = None
df['Average Abnormal Change 5 Days'] = None
df['Average Change 10 Days'] = None
df['Average Abnormal Change 10 Days'] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
returns_df = df[df.index.get_level_values('Date Reported') <
date_reported].tail(8)
if len(returns_df) >= 4:
df.loc[index_num, ['Average Change 5 Days']] = returns_df[
'5 Day Change'].mean()
df.loc[index_num, ['Average Change 10 Days']] = returns_df[
'10 Day Change'].mean()
df.loc[index_num, ['Average Abnormal Change 5 Days']
] = returns_df['5 Day Change Abnormal'].mean()
df.loc[index_num, ['Average Abnormal Change 10 Days']
] = returns_df['10 Day Change Abnormal'].mean()
def get_YoY_growth():
df['YoY Growth'] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
time_reported = time_reported.replace("'", '')
quarter_numer, year = time_reported.split(' ')
this_df = df['EPS Actual']
try:
this_quarter = this_df[this_df.index.get_level_values(
'Time Reported') == quarter_numer + " '" + year].values[0]
last_quarter = this_df[this_df.index.get_level_values(
'Time Reported') == quarter_numer + " '" + str(int(year
) - 1)].values[0]
df.loc[index_num, ['YoY Growth']] = (this_quarter -
last_quarter) / last_quarter
except Exception as e:
pass
def get_market_cap():
finviz_page = r.get('https://finviz.com/quote.ashx?t=%s' % symbol)
soup = BeautifulSoup(finviz_page.text, features='lxml')
table_row = soup.findAll('tr', attrs={'class': 'table-dark-row'})[1]
market_cap = table_row.text.replace('Market Cap', '').split('\n')[1]
if 'K' in market_cap:
market_cap = float(market_cap[:-1]) * 1000
elif 'M' in market_cap:
market_cap = float(market_cap[:-1]) * 1000000
elif 'B' in market_cap:
market_cap = float(market_cap[:-1]) * 1000000000
market_cap = int(market_cap)
if market_cap > 10000000000:
market_cap_text = 'Large'
elif market_cap > 2000000000:
market_cap_text = 'Medium'
elif market_cap > 300000000:
market_cap_text = 'Small'
elif market_cap > 50000000:
market_cap_text = 'Micro'
else:
market_cap_text = 'Nano'
df['Market Cap Text'] = market_cap_text
<mask token>
| <mask token>
class announcement:
def __init__(eps_df, revenue_df):
conn = sqlite3.connect('earnings.db', timeout=120)
cur = conn.cursor()
symbol_href = self.driver.find_element_by_class_name('lfkTWp')
symbol = symbol_href.text
eps_history_df = pd.read_sql(
'select * from estimize_eps where Symbol == "%s"' % symbol, conn)
revenue_history_df = pd.read_sql('select * from estimize_revenue', conn
)
price_history_df = pd.read_sql('select * from price_history', conn)
def get_combined_df(eps_df, revenue_df):
del eps_df['Historical Beat Rate']
del revenue_df['Historical Beat Rate']
date_reported_df = eps_df['Date Reported'].str.split(' ', n=1,
expand=True)
date_reported_df = date_reported_df.rename(columns={(0):
'Date Reported', (1): 'Time Reported'})
date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df
['Date Reported'])
eps_df['Date Reported'] = date_reported_df['Date Reported']
eps_df['Time Reported'] = date_reported_df['Time Reported']
date_reported_df = revenue_df['Date Reported'].str.split(' ', n=1,
expand=True)
date_reported_df = date_reported_df.rename(columns={(0):
'Date Reported', (1): 'Time Reported'})
date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df
['Date Reported'])
revenue_df['Date Reported'] = date_reported_df['Date Reported']
revenue_df['Time Reported'] = date_reported_df['Time Reported']
eps_df = eps_df.sort_values(by='Date Reported')
revenue_df = revenue_df.sort_values(by='Date Reported')
eps_df = eps_df.set_index(['Date Reported', 'Time Reported',
'Symbol'], append=True, drop=True)
revenue_df = revenue_df.set_index(['Date Reported', 'Time Reported',
'Symbol'], append=True, drop=True)
eps_df.columns = 'EPS ' + eps_df.columns
revenue_df.columns = 'Revenue ' + revenue_df.columns
df = eps_df.join(revenue_df)
return df
def get_historical_beat():
df['Historical EPS Beat Ratio'] = None
df['Historical EPS Beat Percent'] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
this_df = df[df.index.get_level_values('Symbol') == symbol]
beat_rate = this_df[this_df.index.get_level_values(
'Date Reported') <= date_reported].tail(8)
if len(beat_rate) >= 4:
beat_rate_ratio = len(beat_rate[beat_rate['EPS Surprise'] > 0]
) / float(len(beat_rate))
beat_rate_percent = beat_rate['EPS Surprise'] / beat_rate[
'EPS Actual']
beat_rate_percent = beat_rate_percent.replace([np.inf, -np.
inf], np.nan)
beat_rate_percent = beat_rate_percent.mean()
df.loc[index_num, ['Historical EPS Beat Ratio']
] = beat_rate_ratio
df.loc[index_num, ['Historical EPS Beat Percent']
] = beat_rate_percent
def get_average_change():
df['Average Change 5 Days'] = None
df['Average Abnormal Change 5 Days'] = None
df['Average Change 10 Days'] = None
df['Average Abnormal Change 10 Days'] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
returns_df = df[df.index.get_level_values('Date Reported') <
date_reported].tail(8)
if len(returns_df) >= 4:
df.loc[index_num, ['Average Change 5 Days']] = returns_df[
'5 Day Change'].mean()
df.loc[index_num, ['Average Change 10 Days']] = returns_df[
'10 Day Change'].mean()
df.loc[index_num, ['Average Abnormal Change 5 Days']
] = returns_df['5 Day Change Abnormal'].mean()
df.loc[index_num, ['Average Abnormal Change 10 Days']
] = returns_df['10 Day Change Abnormal'].mean()
def get_YoY_growth():
df['YoY Growth'] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
time_reported = time_reported.replace("'", '')
quarter_numer, year = time_reported.split(' ')
this_df = df['EPS Actual']
try:
this_quarter = this_df[this_df.index.get_level_values(
'Time Reported') == quarter_numer + " '" + year].values[0]
last_quarter = this_df[this_df.index.get_level_values(
'Time Reported') == quarter_numer + " '" + str(int(year
) - 1)].values[0]
df.loc[index_num, ['YoY Growth']] = (this_quarter -
last_quarter) / last_quarter
except Exception as e:
pass
def get_market_cap():
finviz_page = r.get('https://finviz.com/quote.ashx?t=%s' % symbol)
soup = BeautifulSoup(finviz_page.text, features='lxml')
table_row = soup.findAll('tr', attrs={'class': 'table-dark-row'})[1]
market_cap = table_row.text.replace('Market Cap', '').split('\n')[1]
if 'K' in market_cap:
market_cap = float(market_cap[:-1]) * 1000
elif 'M' in market_cap:
market_cap = float(market_cap[:-1]) * 1000000
elif 'B' in market_cap:
market_cap = float(market_cap[:-1]) * 1000000000
market_cap = int(market_cap)
if market_cap > 10000000000:
market_cap_text = 'Large'
elif market_cap > 2000000000:
market_cap_text = 'Medium'
elif market_cap > 300000000:
market_cap_text = 'Small'
elif market_cap > 50000000:
market_cap_text = 'Micro'
else:
market_cap_text = 'Nano'
df['Market Cap Text'] = market_cap_text
def get_estimize_data(self):
url = 'https://www.estimize.com/calendar?tab=equity&date=' + datetime.now(
).strftime('%Y-%m-%d')
self.driver.get(url)
myElem = WebDriverWait(self.driver, self.delay).until(EC.
presence_of_element_located((By.CLASS_NAME, 'dAViVi')))
companies_reporting_div = self.driver.find_element_by_class_name('dAViVi')
if '0 Events' == companies_reporting_div.text.split('\n')[1]:
return
tickers = self.get_tickers()
eps_df = pd.read_html(self.driver.page_source)[0]
eps_df['Symbol'] = tickers
eps_df = eps_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]
eps_df.columns = ['Date Reported', 'Num of Estimates', 'Delta',
'Surprise', 'Historical Beat Rate', 'Wall St', 'Estimize', 'Actual',
'Symbol']
url = (
'https://www.estimize.com/calendar?tab=equity&metric=revenue&date=' +
self.read_date.strftime('%Y-%m-%d'))
self.driver.get(url)
myElem = WebDriverWait(self.driver, self.delay).until(EC.
presence_of_element_located((By.TAG_NAME, 'table')))
revenue_df = pd.read_html(self.driver.page_source)[0]
tickers = self.get_tickers()
revenue_df['Symbol'] = tickers
revenue_df = revenue_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]
revenue_df.columns = ['Date Reported', 'Num of Estimates', 'Delta',
'Surprise', 'Historical Beat Rate', 'Wall St', 'Estimize', 'Actual',
'Symbol']
return eps_df, revenue_df
<mask token>
| <mask token>
class announcement:
def __init__(eps_df, revenue_df):
conn = sqlite3.connect('earnings.db', timeout=120)
cur = conn.cursor()
symbol_href = self.driver.find_element_by_class_name('lfkTWp')
symbol = symbol_href.text
eps_history_df = pd.read_sql(
'select * from estimize_eps where Symbol == "%s"' % symbol, conn)
revenue_history_df = pd.read_sql('select * from estimize_revenue', conn
)
price_history_df = pd.read_sql('select * from price_history', conn)
def get_combined_df(eps_df, revenue_df):
del eps_df['Historical Beat Rate']
del revenue_df['Historical Beat Rate']
date_reported_df = eps_df['Date Reported'].str.split(' ', n=1,
expand=True)
date_reported_df = date_reported_df.rename(columns={(0):
'Date Reported', (1): 'Time Reported'})
date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df
['Date Reported'])
eps_df['Date Reported'] = date_reported_df['Date Reported']
eps_df['Time Reported'] = date_reported_df['Time Reported']
date_reported_df = revenue_df['Date Reported'].str.split(' ', n=1,
expand=True)
date_reported_df = date_reported_df.rename(columns={(0):
'Date Reported', (1): 'Time Reported'})
date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df
['Date Reported'])
revenue_df['Date Reported'] = date_reported_df['Date Reported']
revenue_df['Time Reported'] = date_reported_df['Time Reported']
eps_df = eps_df.sort_values(by='Date Reported')
revenue_df = revenue_df.sort_values(by='Date Reported')
eps_df = eps_df.set_index(['Date Reported', 'Time Reported',
'Symbol'], append=True, drop=True)
revenue_df = revenue_df.set_index(['Date Reported', 'Time Reported',
'Symbol'], append=True, drop=True)
eps_df.columns = 'EPS ' + eps_df.columns
revenue_df.columns = 'Revenue ' + revenue_df.columns
df = eps_df.join(revenue_df)
return df
def get_historical_beat():
df['Historical EPS Beat Ratio'] = None
df['Historical EPS Beat Percent'] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
this_df = df[df.index.get_level_values('Symbol') == symbol]
beat_rate = this_df[this_df.index.get_level_values(
'Date Reported') <= date_reported].tail(8)
if len(beat_rate) >= 4:
beat_rate_ratio = len(beat_rate[beat_rate['EPS Surprise'] > 0]
) / float(len(beat_rate))
beat_rate_percent = beat_rate['EPS Surprise'] / beat_rate[
'EPS Actual']
beat_rate_percent = beat_rate_percent.replace([np.inf, -np.
inf], np.nan)
beat_rate_percent = beat_rate_percent.mean()
df.loc[index_num, ['Historical EPS Beat Ratio']
] = beat_rate_ratio
df.loc[index_num, ['Historical EPS Beat Percent']
] = beat_rate_percent
def get_average_change():
df['Average Change 5 Days'] = None
df['Average Abnormal Change 5 Days'] = None
df['Average Change 10 Days'] = None
df['Average Abnormal Change 10 Days'] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
returns_df = df[df.index.get_level_values('Date Reported') <
date_reported].tail(8)
if len(returns_df) >= 4:
df.loc[index_num, ['Average Change 5 Days']] = returns_df[
'5 Day Change'].mean()
df.loc[index_num, ['Average Change 10 Days']] = returns_df[
'10 Day Change'].mean()
df.loc[index_num, ['Average Abnormal Change 5 Days']
] = returns_df['5 Day Change Abnormal'].mean()
df.loc[index_num, ['Average Abnormal Change 10 Days']
] = returns_df['10 Day Change Abnormal'].mean()
def get_YoY_growth():
df['YoY Growth'] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
time_reported = time_reported.replace("'", '')
quarter_numer, year = time_reported.split(' ')
this_df = df['EPS Actual']
try:
this_quarter = this_df[this_df.index.get_level_values(
'Time Reported') == quarter_numer + " '" + year].values[0]
last_quarter = this_df[this_df.index.get_level_values(
'Time Reported') == quarter_numer + " '" + str(int(year
) - 1)].values[0]
df.loc[index_num, ['YoY Growth']] = (this_quarter -
last_quarter) / last_quarter
except Exception as e:
pass
def get_market_cap():
finviz_page = r.get('https://finviz.com/quote.ashx?t=%s' % symbol)
soup = BeautifulSoup(finviz_page.text, features='lxml')
table_row = soup.findAll('tr', attrs={'class': 'table-dark-row'})[1]
market_cap = table_row.text.replace('Market Cap', '').split('\n')[1]
if 'K' in market_cap:
market_cap = float(market_cap[:-1]) * 1000
elif 'M' in market_cap:
market_cap = float(market_cap[:-1]) * 1000000
elif 'B' in market_cap:
market_cap = float(market_cap[:-1]) * 1000000000
market_cap = int(market_cap)
if market_cap > 10000000000:
market_cap_text = 'Large'
elif market_cap > 2000000000:
market_cap_text = 'Medium'
elif market_cap > 300000000:
market_cap_text = 'Small'
elif market_cap > 50000000:
market_cap_text = 'Micro'
else:
market_cap_text = 'Nano'
df['Market Cap Text'] = market_cap_text
def get_estimize_data(self):
url = 'https://www.estimize.com/calendar?tab=equity&date=' + datetime.now(
).strftime('%Y-%m-%d')
self.driver.get(url)
myElem = WebDriverWait(self.driver, self.delay).until(EC.
presence_of_element_located((By.CLASS_NAME, 'dAViVi')))
companies_reporting_div = self.driver.find_element_by_class_name('dAViVi')
if '0 Events' == companies_reporting_div.text.split('\n')[1]:
return
tickers = self.get_tickers()
eps_df = pd.read_html(self.driver.page_source)[0]
eps_df['Symbol'] = tickers
eps_df = eps_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]
eps_df.columns = ['Date Reported', 'Num of Estimates', 'Delta',
'Surprise', 'Historical Beat Rate', 'Wall St', 'Estimize', 'Actual',
'Symbol']
url = (
'https://www.estimize.com/calendar?tab=equity&metric=revenue&date=' +
self.read_date.strftime('%Y-%m-%d'))
self.driver.get(url)
myElem = WebDriverWait(self.driver, self.delay).until(EC.
presence_of_element_located((By.TAG_NAME, 'table')))
revenue_df = pd.read_html(self.driver.page_source)[0]
tickers = self.get_tickers()
revenue_df['Symbol'] = tickers
revenue_df = revenue_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]
revenue_df.columns = ['Date Reported', 'Num of Estimates', 'Delta',
'Surprise', 'Historical Beat Rate', 'Wall St', 'Estimize', 'Actual',
'Symbol']
return eps_df, revenue_df
def get_tickers(self):
soup = BeautifulSoup(self.driver.page_source, features='lxml')
ticker_links = soup.findAll('a', attrs={'class': 'lfkTWp'})
tickers = []
for ticker in ticker_links:
tickers.append(ticker.contents[0])
return tickers
| import sqlite3
class announcement:
def __init__(eps_df, revenue_df):
conn = sqlite3.connect("earnings.db", timeout=120)
cur = conn.cursor()
symbol_href = self.driver.find_element_by_class_name("lfkTWp")
symbol = symbol_href.text
eps_history_df = pd.read_sql(
'select * from estimize_eps where Symbol == "%s"' % symbol, conn
)
revenue_history_df = pd.read_sql("select * from estimize_revenue", conn)
price_history_df = pd.read_sql("select * from price_history", conn)
def get_combined_df(eps_df, revenue_df):
del eps_df["Historical Beat Rate"]
del revenue_df["Historical Beat Rate"]
date_reported_df = eps_df["Date Reported"].str.split(" ", n=1, expand=True)
date_reported_df = date_reported_df.rename(
columns={0: "Date Reported", 1: "Time Reported"}
)
date_reported_df["Date Reported"] = pd.to_datetime(
date_reported_df["Date Reported"]
)
eps_df["Date Reported"] = date_reported_df["Date Reported"]
eps_df["Time Reported"] = date_reported_df["Time Reported"]
date_reported_df = revenue_df["Date Reported"].str.split(" ", n=1, expand=True)
date_reported_df = date_reported_df.rename(
columns={0: "Date Reported", 1: "Time Reported"}
)
date_reported_df["Date Reported"] = pd.to_datetime(
date_reported_df["Date Reported"]
)
revenue_df["Date Reported"] = date_reported_df["Date Reported"]
revenue_df["Time Reported"] = date_reported_df["Time Reported"]
eps_df = eps_df.sort_values(by="Date Reported")
revenue_df = revenue_df.sort_values(by="Date Reported")
eps_df = eps_df.set_index(
["Date Reported", "Time Reported", "Symbol"], append=True, drop=True
)
revenue_df = revenue_df.set_index(
["Date Reported", "Time Reported", "Symbol"], append=True, drop=True
)
eps_df.columns = "EPS " + eps_df.columns
revenue_df.columns = "Revenue " + revenue_df.columns
df = eps_df.join(revenue_df)
return df
def get_historical_beat():
df["Historical EPS Beat Ratio"] = None
df["Historical EPS Beat Percent"] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
this_df = df[df.index.get_level_values("Symbol") == symbol]
beat_rate = this_df[
this_df.index.get_level_values("Date Reported") <= date_reported
].tail(8)
if len(beat_rate) >= 4:
beat_rate_ratio = len(beat_rate[beat_rate["EPS Surprise"] > 0]) / float(
len(beat_rate)
)
beat_rate_percent = beat_rate["EPS Surprise"] / beat_rate["EPS Actual"]
beat_rate_percent = beat_rate_percent.replace([np.inf, -np.inf], np.nan)
beat_rate_percent = beat_rate_percent.mean()
# TODO: Do the same for revenue
df.loc[index_num, ["Historical EPS Beat Ratio"]] = beat_rate_ratio
df.loc[index_num, ["Historical EPS Beat Percent"]] = beat_rate_percent
def get_average_change():
df["Average Change 5 Days"] = None
df["Average Abnormal Change 5 Days"] = None
df["Average Change 10 Days"] = None
df["Average Abnormal Change 10 Days"] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
returns_df = df[
df.index.get_level_values("Date Reported") < date_reported
].tail(8)
if len(returns_df) >= 4:
df.loc[index_num, ["Average Change 5 Days"]] = returns_df[
"5 Day Change"
].mean()
df.loc[index_num, ["Average Change 10 Days"]] = returns_df[
"10 Day Change"
].mean()
df.loc[index_num, ["Average Abnormal Change 5 Days"]] = returns_df[
"5 Day Change Abnormal"
].mean()
df.loc[index_num, ["Average Abnormal Change 10 Days"]] = returns_df[
"10 Day Change Abnormal"
].mean()
def get_YoY_growth():
df["YoY Growth"] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
time_reported = time_reported.replace("'", "")
quarter_numer, year = time_reported.split(" ")
this_df = df["EPS Actual"]
try:
this_quarter = this_df[
this_df.index.get_level_values("Time Reported")
== quarter_numer + " '" + year
].values[0]
last_quarter = this_df[
this_df.index.get_level_values("Time Reported")
== quarter_numer + " '" + str(int(year) - 1)
].values[0]
df.loc[index_num, ["YoY Growth"]] = (
this_quarter - last_quarter
) / last_quarter
except Exception as e:
pass
def get_market_cap():
finviz_page = r.get("https://finviz.com/quote.ashx?t=%s" % symbol)
soup = BeautifulSoup(finviz_page.text, features="lxml")
table_row = soup.findAll("tr", attrs={"class": "table-dark-row"})[1]
market_cap = table_row.text.replace("Market Cap", "").split("\n")[1]
if "K" in market_cap:
market_cap = float(market_cap[:-1]) * 1000
elif "M" in market_cap:
market_cap = float(market_cap[:-1]) * 1000000
elif "B" in market_cap:
market_cap = float(market_cap[:-1]) * 1000000000
market_cap = int(market_cap)
if market_cap > 10000000000:
market_cap_text = "Large"
elif market_cap > 2000000000:
market_cap_text = "Medium"
elif market_cap > 300000000:
market_cap_text = "Small"
elif market_cap > 50000000:
market_cap_text = "Micro"
else:
market_cap_text = "Nano"
df["Market Cap Text"] = market_cap_text
def get_estimize_data(self):
# request the estimize website for data
url = "https://www.estimize.com/calendar?tab=equity&date=" + datetime.now().strftime(
"%Y-%m-%d"
)
self.driver.get(url)
# check if there are no companies reporting earnings
myElem = WebDriverWait(self.driver, self.delay).until(
EC.presence_of_element_located((By.CLASS_NAME, "dAViVi"))
)
companies_reporting_div = self.driver.find_element_by_class_name("dAViVi")
if "0 Events" == companies_reporting_div.text.split("\n")[1]:
return
        # method to extract the ticker symbols from the webpage
tickers = self.get_tickers()
# method to get the historical data from yahoo
# self.get_yahoo_historical(tickers)
# TODO: update price history table with missing yahoo price data entries
# read the table and make a dataframe out of it
eps_df = pd.read_html(self.driver.page_source)[0]
eps_df["Symbol"] = tickers
eps_df = eps_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]
eps_df.columns = [
"Date Reported",
"Num of Estimates",
"Delta",
"Surprise",
"Historical Beat Rate",
"Wall St",
"Estimize",
"Actual",
"Symbol",
]
# same as above, but for revenues table instead of EPS table
url = (
"https://www.estimize.com/calendar?tab=equity&metric=revenue&date="
+ self.read_date.strftime("%Y-%m-%d")
)
self.driver.get(url)
myElem = WebDriverWait(self.driver, self.delay).until(
EC.presence_of_element_located((By.TAG_NAME, "table"))
)
revenue_df = pd.read_html(self.driver.page_source)[0]
tickers = self.get_tickers()
revenue_df["Symbol"] = tickers
revenue_df = revenue_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]
revenue_df.columns = [
"Date Reported",
"Num of Estimates",
"Delta",
"Surprise",
"Historical Beat Rate",
"Wall St",
"Estimize",
"Actual",
"Symbol",
]
return eps_df, revenue_df
def get_tickers(self):
# extract ticker symbopls from the html source
soup = BeautifulSoup(self.driver.page_source, features="lxml")
ticker_links = soup.findAll("a", attrs={"class": "lfkTWp"})
# create list of symbols that were extracted
tickers = []
for ticker in ticker_links:
tickers.append(ticker.contents[0])
return tickers
| [
6,
7,
8,
9,
11
] |
819 | 8ccec24e1a7060269ffbb376ba0c480da9eabe0a | <mask token>
class Model:
def __init__(self, training=True):
self.classes = settings.classes_name
self.num_classes = len(settings.classes_name)
self.image_size = settings.image_size
self.cell_size = settings.cell_size
self.boxes_per_cell = settings.box_per_cell
self.output_size = self.cell_size * self.cell_size * (self.
num_classes + self.boxes_per_cell * 5)
self.scale = 1.0 * self.image_size / self.cell_size
self.boundary1 = self.cell_size * self.cell_size * self.num_classes
self.boundary2 = (self.boundary1 + self.cell_size * self.cell_size *
self.boxes_per_cell)
self.object_scale = settings.object_scale
self.no_object_scale = settings.no_object_scale
self.class_scale = settings.class_scale
self.coord_scale = settings.coordinate_scale
self.offset = np.transpose(np.reshape(np.array([np.arange(self.
cell_size)] * self.cell_size * self.boxes_per_cell), (self.
boxes_per_cell, self.cell_size, self.cell_size)), (1, 2, 0))
self.images = tf.placeholder(tf.float32, [None, settings.image_size,
settings.image_size, 3])
if settings.model_type == 'normal':
self.logits = self.build_network(self.images, num_outputs=self.
output_size, alpha=settings.alpha_relu, training=training)
if settings.model_type == 'fast':
self.logits = self.build_fast_network(self.images, num_outputs=
self.output_size, alpha=settings.alpha_relu, training=training)
if training:
self.batch = tf.Variable(0)
self.labels = tf.placeholder(tf.float32, [None, self.cell_size,
self.cell_size, 5 + self.num_classes])
self.loss_layer(self.logits, self.labels)
self.total_loss = tf.contrib.losses.get_total_loss()
self.learning_rate = tf.train.exponential_decay(settings.
learning_rate, self.batch * settings.batch_size, settings.
decay_step, settings.decay_rate, True)
self.optimizer = tf.train.GradientDescentOptimizer(self.
learning_rate).minimize(self.total_loss, global_step=self.batch
)
def build_network(self, images, num_outputs, alpha, keep_prob=settings.
dropout, training=True, scope='yolo'):
with tf.variable_scope(scope):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=leaky_relu(alpha), weights_initializer=tf.
truncated_normal_initializer(0.0, 0.01),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = tf.pad(images, np.array([[0, 0], [3, 3], [3, 3], [0,
0]]), name='pad_1')
net = slim.conv2d(net, 64, 7, 2, padding='VALID', scope=
'conv_2')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_3')
net = slim.conv2d(net, 192, 3, scope='conv_4')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_5')
net = slim.conv2d(net, 128, 1, scope='conv_6')
net = slim.conv2d(net, 256, 3, scope='conv_7')
net = slim.conv2d(net, 256, 1, scope='conv_8')
net = slim.conv2d(net, 512, 3, scope='conv_9')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_10')
net = slim.conv2d(net, 256, 1, scope='conv_11')
net = slim.conv2d(net, 512, 3, scope='conv_12')
net = slim.conv2d(net, 256, 1, scope='conv_13')
net = slim.conv2d(net, 512, 3, scope='conv_14')
net = slim.conv2d(net, 256, 1, scope='conv_15')
net = slim.conv2d(net, 512, 3, scope='conv_16')
net = slim.conv2d(net, 256, 1, scope='conv_17')
net = slim.conv2d(net, 512, 3, scope='conv_18')
net = slim.conv2d(net, 512, 1, scope='conv_19')
net = slim.conv2d(net, 1024, 3, scope='conv_20')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_21')
net = slim.conv2d(net, 512, 1, scope='conv_22')
net = slim.conv2d(net, 1024, 3, scope='conv_23')
net = slim.conv2d(net, 512, 1, scope='conv_24')
net = slim.conv2d(net, 1024, 3, scope='conv_25')
net = slim.conv2d(net, 1024, 3, scope='conv_26')
net = tf.pad(net, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]
), name='pad_27')
net = slim.conv2d(net, 1024, 3, 2, padding='VALID', scope=
'conv_28')
net = slim.conv2d(net, 1024, 3, scope='conv_29')
net = slim.conv2d(net, 1024, 3, scope='conv_30')
net = tf.transpose(net, [0, 3, 1, 2], name='trans_31')
net = slim.flatten(net, scope='flat_32')
net = slim.fully_connected(net, 512, scope='fc_33')
net = slim.fully_connected(net, 4096, scope='fc_34')
net = slim.dropout(net, keep_prob=keep_prob, is_training=
training, scope='dropout_35')
net = slim.fully_connected(net, num_outputs, activation_fn=
None, scope='fc_36')
return net
def build_fast_network(self, images, num_outputs, alpha, keep_prob=
settings.dropout, training=True, scope='yolo'):
with tf.variable_scope(scope):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=leaky_relu(alpha), weights_initializer=tf.
truncated_normal_initializer(0.0, 0.01),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = tf.pad(images, np.array([[0, 0], [3, 3], [3, 3], [0,
0]]), name='pad_1')
net = slim.conv2d(net, 64, 7, 2, padding='VALID', scope=
'conv_2')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_3')
net = slim.conv2d(net, 192, 3, scope='conv_4')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_5')
net = slim.conv2d(net, 128, 1, scope='conv_6')
net = slim.conv2d(net, 256, 3, scope='conv_7')
net = slim.conv2d(net, 512, 3, scope='conv_9')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_10')
net = slim.conv2d(net, 256, 1, scope='conv_11')
net = slim.conv2d(net, 512, 3, scope='conv_12')
net = slim.conv2d(net, 1024, 3, scope='conv_20')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_21')
net = slim.conv2d(net, 512, 1, scope='conv_22')
net = slim.conv2d(net, 1024, 3, scope='conv_23')
net = slim.conv2d(net, 1024, 3, scope='conv_26')
net = tf.pad(net, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]
), name='pad_27')
net = slim.conv2d(net, 1024, 3, 2, padding='VALID', scope=
'conv_28')
net = tf.transpose(net, [0, 3, 1, 2], name='trans_31')
net = slim.flatten(net, scope='flat_32')
net = slim.fully_connected(net, 512, scope='fc_33')
net = slim.fully_connected(net, 4096, scope='fc_34')
net = slim.dropout(net, keep_prob=keep_prob, is_training=
training, scope='dropout_35')
net = slim.fully_connected(net, num_outputs, activation_fn=
None, scope='fc_36')
return net
def calc_iou(self, boxes1, boxes2, scope='iou'):
with tf.variable_scope(scope):
boxes1 = tf.stack([boxes1[:, :, :, :, 0] - boxes1[:, :, :, :, 2
] / 2.0, boxes1[:, :, :, :, 1] - boxes1[:, :, :, :, 3] /
2.0, boxes1[:, :, :, :, 0] + boxes1[:, :, :, :, 2] / 2.0,
boxes1[:, :, :, :, 1] + boxes1[:, :, :, :, 3] / 2.0])
boxes1 = tf.transpose(boxes1, [1, 2, 3, 4, 0])
boxes2 = tf.stack([boxes2[:, :, :, :, 0] - boxes2[:, :, :, :, 2
] / 2.0, boxes2[:, :, :, :, 1] - boxes2[:, :, :, :, 3] /
2.0, boxes2[:, :, :, :, 0] + boxes2[:, :, :, :, 2] / 2.0,
boxes2[:, :, :, :, 1] + boxes2[:, :, :, :, 3] / 2.0])
boxes2 = tf.transpose(boxes2, [1, 2, 3, 4, 0])
lu = tf.maximum(boxes1[:, :, :, :, :2], boxes2[:, :, :, :, :2])
rd = tf.minimum(boxes1[:, :, :, :, 2:], boxes2[:, :, :, :, 2:])
intersection = tf.maximum(0.0, rd - lu)
inter_square = intersection[:, :, :, :, 0] * intersection[:, :,
:, :, 1]
square1 = (boxes1[:, :, :, :, 2] - boxes1[:, :, :, :, 0]) * (boxes1
[:, :, :, :, 3] - boxes1[:, :, :, :, 1])
square2 = (boxes2[:, :, :, :, 2] - boxes2[:, :, :, :, 0]) * (boxes2
[:, :, :, :, 3] - boxes2[:, :, :, :, 1])
union_square = tf.maximum(square1 + square2 - inter_square, 1e-10)
return tf.clip_by_value(inter_square / union_square, 0.0, 1.0)
def loss_layer(self, predicts, labels, scope='loss_layer'):
with tf.variable_scope(scope):
predict_classes = tf.reshape(predicts[:, :self.boundary1], [
settings.batch_size, self.cell_size, self.cell_size, self.
num_classes])
predict_scales = tf.reshape(predicts[:, self.boundary1:self.
boundary2], [settings.batch_size, self.cell_size, self.
cell_size, self.boxes_per_cell])
predict_boxes = tf.reshape(predicts[:, self.boundary2:], [
settings.batch_size, self.cell_size, self.cell_size, self.
boxes_per_cell, 4])
response = tf.reshape(labels[:, :, :, 0], [settings.batch_size,
self.cell_size, self.cell_size, 1])
boxes = tf.reshape(labels[:, :, :, 1:5], [settings.batch_size,
self.cell_size, self.cell_size, 1, 4])
boxes = tf.tile(boxes, [1, 1, 1, self.boxes_per_cell, 1]
) / self.image_size
classes = labels[:, :, :, 5:]
offset = tf.constant(self.offset, dtype=tf.float32)
offset = tf.reshape(offset, [1, self.cell_size, self.cell_size,
self.boxes_per_cell])
offset = tf.tile(offset, [settings.batch_size, 1, 1, 1])
predict_boxes_tran = tf.stack([(predict_boxes[:, :, :, :, 0] +
offset) / self.cell_size, (predict_boxes[:, :, :, :, 1] +
tf.transpose(offset, (0, 2, 1, 3))) / self.cell_size, tf.
square(predict_boxes[:, :, :, :, 2]), tf.square(
predict_boxes[:, :, :, :, 3])])
predict_boxes_tran = tf.transpose(predict_boxes_tran, [1, 2, 3,
4, 0])
iou_predict_truth = self.calc_iou(predict_boxes_tran, boxes)
object_mask = tf.reduce_max(iou_predict_truth, 3, keep_dims=True)
object_mask = tf.cast(iou_predict_truth >= object_mask, tf.float32
) * response
noobject_mask = tf.ones_like(object_mask, dtype=tf.float32
) - object_mask
boxes_tran = tf.stack([boxes[:, :, :, :, 0] * self.cell_size -
offset, boxes[:, :, :, :, 1] * self.cell_size - tf.
transpose(offset, (0, 2, 1, 3)), tf.sqrt(boxes[:, :, :, :,
2]), tf.sqrt(boxes[:, :, :, :, 3])])
boxes_tran = tf.transpose(boxes_tran, [1, 2, 3, 4, 0])
class_delta = response * (predict_classes - classes)
class_loss = tf.reduce_mean(tf.reduce_sum(tf.square(class_delta
), axis=[1, 2, 3]), name='class_loss') * self.class_scale
object_delta = object_mask * (predict_scales - iou_predict_truth)
object_loss = tf.reduce_mean(tf.reduce_sum(tf.square(
object_delta), axis=[1, 2, 3]), name='object_loss'
) * self.object_scale
noobject_delta = noobject_mask * predict_scales
noobject_loss = tf.reduce_mean(tf.reduce_sum(tf.square(
noobject_delta), axis=[1, 2, 3]), name='noobject_loss'
) * self.no_object_scale
coord_mask = tf.expand_dims(object_mask, 4)
boxes_delta = coord_mask * (predict_boxes - boxes_tran)
coord_loss = tf.reduce_mean(tf.reduce_sum(tf.square(boxes_delta
), axis=[1, 2, 3, 4]), name='coord_loss') * self.coord_scale
tf.contrib.losses.add_loss(class_loss)
tf.contrib.losses.add_loss(object_loss)
tf.contrib.losses.add_loss(noobject_loss)
tf.contrib.losses.add_loss(coord_loss)
<mask token>
| <mask token>
class Model:
def __init__(self, training=True):
self.classes = settings.classes_name
self.num_classes = len(settings.classes_name)
self.image_size = settings.image_size
self.cell_size = settings.cell_size
self.boxes_per_cell = settings.box_per_cell
self.output_size = self.cell_size * self.cell_size * (self.
num_classes + self.boxes_per_cell * 5)
self.scale = 1.0 * self.image_size / self.cell_size
self.boundary1 = self.cell_size * self.cell_size * self.num_classes
self.boundary2 = (self.boundary1 + self.cell_size * self.cell_size *
self.boxes_per_cell)
self.object_scale = settings.object_scale
self.no_object_scale = settings.no_object_scale
self.class_scale = settings.class_scale
self.coord_scale = settings.coordinate_scale
self.offset = np.transpose(np.reshape(np.array([np.arange(self.
cell_size)] * self.cell_size * self.boxes_per_cell), (self.
boxes_per_cell, self.cell_size, self.cell_size)), (1, 2, 0))
self.images = tf.placeholder(tf.float32, [None, settings.image_size,
settings.image_size, 3])
if settings.model_type == 'normal':
self.logits = self.build_network(self.images, num_outputs=self.
output_size, alpha=settings.alpha_relu, training=training)
if settings.model_type == 'fast':
self.logits = self.build_fast_network(self.images, num_outputs=
self.output_size, alpha=settings.alpha_relu, training=training)
if training:
self.batch = tf.Variable(0)
self.labels = tf.placeholder(tf.float32, [None, self.cell_size,
self.cell_size, 5 + self.num_classes])
self.loss_layer(self.logits, self.labels)
self.total_loss = tf.contrib.losses.get_total_loss()
self.learning_rate = tf.train.exponential_decay(settings.
learning_rate, self.batch * settings.batch_size, settings.
decay_step, settings.decay_rate, True)
self.optimizer = tf.train.GradientDescentOptimizer(self.
learning_rate).minimize(self.total_loss, global_step=self.batch
)
def build_network(self, images, num_outputs, alpha, keep_prob=settings.
dropout, training=True, scope='yolo'):
with tf.variable_scope(scope):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=leaky_relu(alpha), weights_initializer=tf.
truncated_normal_initializer(0.0, 0.01),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = tf.pad(images, np.array([[0, 0], [3, 3], [3, 3], [0,
0]]), name='pad_1')
net = slim.conv2d(net, 64, 7, 2, padding='VALID', scope=
'conv_2')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_3')
net = slim.conv2d(net, 192, 3, scope='conv_4')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_5')
net = slim.conv2d(net, 128, 1, scope='conv_6')
net = slim.conv2d(net, 256, 3, scope='conv_7')
net = slim.conv2d(net, 256, 1, scope='conv_8')
net = slim.conv2d(net, 512, 3, scope='conv_9')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_10')
net = slim.conv2d(net, 256, 1, scope='conv_11')
net = slim.conv2d(net, 512, 3, scope='conv_12')
net = slim.conv2d(net, 256, 1, scope='conv_13')
net = slim.conv2d(net, 512, 3, scope='conv_14')
net = slim.conv2d(net, 256, 1, scope='conv_15')
net = slim.conv2d(net, 512, 3, scope='conv_16')
net = slim.conv2d(net, 256, 1, scope='conv_17')
net = slim.conv2d(net, 512, 3, scope='conv_18')
net = slim.conv2d(net, 512, 1, scope='conv_19')
net = slim.conv2d(net, 1024, 3, scope='conv_20')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_21')
net = slim.conv2d(net, 512, 1, scope='conv_22')
net = slim.conv2d(net, 1024, 3, scope='conv_23')
net = slim.conv2d(net, 512, 1, scope='conv_24')
net = slim.conv2d(net, 1024, 3, scope='conv_25')
net = slim.conv2d(net, 1024, 3, scope='conv_26')
net = tf.pad(net, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]
), name='pad_27')
net = slim.conv2d(net, 1024, 3, 2, padding='VALID', scope=
'conv_28')
net = slim.conv2d(net, 1024, 3, scope='conv_29')
net = slim.conv2d(net, 1024, 3, scope='conv_30')
net = tf.transpose(net, [0, 3, 1, 2], name='trans_31')
net = slim.flatten(net, scope='flat_32')
net = slim.fully_connected(net, 512, scope='fc_33')
net = slim.fully_connected(net, 4096, scope='fc_34')
net = slim.dropout(net, keep_prob=keep_prob, is_training=
training, scope='dropout_35')
net = slim.fully_connected(net, num_outputs, activation_fn=
None, scope='fc_36')
return net
def build_fast_network(self, images, num_outputs, alpha, keep_prob=
settings.dropout, training=True, scope='yolo'):
with tf.variable_scope(scope):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=leaky_relu(alpha), weights_initializer=tf.
truncated_normal_initializer(0.0, 0.01),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = tf.pad(images, np.array([[0, 0], [3, 3], [3, 3], [0,
0]]), name='pad_1')
net = slim.conv2d(net, 64, 7, 2, padding='VALID', scope=
'conv_2')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_3')
net = slim.conv2d(net, 192, 3, scope='conv_4')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_5')
net = slim.conv2d(net, 128, 1, scope='conv_6')
net = slim.conv2d(net, 256, 3, scope='conv_7')
net = slim.conv2d(net, 512, 3, scope='conv_9')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_10')
net = slim.conv2d(net, 256, 1, scope='conv_11')
net = slim.conv2d(net, 512, 3, scope='conv_12')
net = slim.conv2d(net, 1024, 3, scope='conv_20')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_21')
net = slim.conv2d(net, 512, 1, scope='conv_22')
net = slim.conv2d(net, 1024, 3, scope='conv_23')
net = slim.conv2d(net, 1024, 3, scope='conv_26')
net = tf.pad(net, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]
), name='pad_27')
net = slim.conv2d(net, 1024, 3, 2, padding='VALID', scope=
'conv_28')
net = tf.transpose(net, [0, 3, 1, 2], name='trans_31')
net = slim.flatten(net, scope='flat_32')
net = slim.fully_connected(net, 512, scope='fc_33')
net = slim.fully_connected(net, 4096, scope='fc_34')
net = slim.dropout(net, keep_prob=keep_prob, is_training=
training, scope='dropout_35')
net = slim.fully_connected(net, num_outputs, activation_fn=
None, scope='fc_36')
return net
def calc_iou(self, boxes1, boxes2, scope='iou'):
with tf.variable_scope(scope):
boxes1 = tf.stack([boxes1[:, :, :, :, 0] - boxes1[:, :, :, :, 2
] / 2.0, boxes1[:, :, :, :, 1] - boxes1[:, :, :, :, 3] /
2.0, boxes1[:, :, :, :, 0] + boxes1[:, :, :, :, 2] / 2.0,
boxes1[:, :, :, :, 1] + boxes1[:, :, :, :, 3] / 2.0])
boxes1 = tf.transpose(boxes1, [1, 2, 3, 4, 0])
boxes2 = tf.stack([boxes2[:, :, :, :, 0] - boxes2[:, :, :, :, 2
] / 2.0, boxes2[:, :, :, :, 1] - boxes2[:, :, :, :, 3] /
2.0, boxes2[:, :, :, :, 0] + boxes2[:, :, :, :, 2] / 2.0,
boxes2[:, :, :, :, 1] + boxes2[:, :, :, :, 3] / 2.0])
boxes2 = tf.transpose(boxes2, [1, 2, 3, 4, 0])
lu = tf.maximum(boxes1[:, :, :, :, :2], boxes2[:, :, :, :, :2])
rd = tf.minimum(boxes1[:, :, :, :, 2:], boxes2[:, :, :, :, 2:])
intersection = tf.maximum(0.0, rd - lu)
inter_square = intersection[:, :, :, :, 0] * intersection[:, :,
:, :, 1]
square1 = (boxes1[:, :, :, :, 2] - boxes1[:, :, :, :, 0]) * (boxes1
[:, :, :, :, 3] - boxes1[:, :, :, :, 1])
square2 = (boxes2[:, :, :, :, 2] - boxes2[:, :, :, :, 0]) * (boxes2
[:, :, :, :, 3] - boxes2[:, :, :, :, 1])
union_square = tf.maximum(square1 + square2 - inter_square, 1e-10)
return tf.clip_by_value(inter_square / union_square, 0.0, 1.0)
def loss_layer(self, predicts, labels, scope='loss_layer'):
with tf.variable_scope(scope):
predict_classes = tf.reshape(predicts[:, :self.boundary1], [
settings.batch_size, self.cell_size, self.cell_size, self.
num_classes])
predict_scales = tf.reshape(predicts[:, self.boundary1:self.
boundary2], [settings.batch_size, self.cell_size, self.
cell_size, self.boxes_per_cell])
predict_boxes = tf.reshape(predicts[:, self.boundary2:], [
settings.batch_size, self.cell_size, self.cell_size, self.
boxes_per_cell, 4])
response = tf.reshape(labels[:, :, :, 0], [settings.batch_size,
self.cell_size, self.cell_size, 1])
boxes = tf.reshape(labels[:, :, :, 1:5], [settings.batch_size,
self.cell_size, self.cell_size, 1, 4])
boxes = tf.tile(boxes, [1, 1, 1, self.boxes_per_cell, 1]
) / self.image_size
classes = labels[:, :, :, 5:]
offset = tf.constant(self.offset, dtype=tf.float32)
offset = tf.reshape(offset, [1, self.cell_size, self.cell_size,
self.boxes_per_cell])
offset = tf.tile(offset, [settings.batch_size, 1, 1, 1])
predict_boxes_tran = tf.stack([(predict_boxes[:, :, :, :, 0] +
offset) / self.cell_size, (predict_boxes[:, :, :, :, 1] +
tf.transpose(offset, (0, 2, 1, 3))) / self.cell_size, tf.
square(predict_boxes[:, :, :, :, 2]), tf.square(
predict_boxes[:, :, :, :, 3])])
predict_boxes_tran = tf.transpose(predict_boxes_tran, [1, 2, 3,
4, 0])
iou_predict_truth = self.calc_iou(predict_boxes_tran, boxes)
object_mask = tf.reduce_max(iou_predict_truth, 3, keep_dims=True)
object_mask = tf.cast(iou_predict_truth >= object_mask, tf.float32
) * response
noobject_mask = tf.ones_like(object_mask, dtype=tf.float32
) - object_mask
boxes_tran = tf.stack([boxes[:, :, :, :, 0] * self.cell_size -
offset, boxes[:, :, :, :, 1] * self.cell_size - tf.
transpose(offset, (0, 2, 1, 3)), tf.sqrt(boxes[:, :, :, :,
2]), tf.sqrt(boxes[:, :, :, :, 3])])
boxes_tran = tf.transpose(boxes_tran, [1, 2, 3, 4, 0])
class_delta = response * (predict_classes - classes)
class_loss = tf.reduce_mean(tf.reduce_sum(tf.square(class_delta
), axis=[1, 2, 3]), name='class_loss') * self.class_scale
object_delta = object_mask * (predict_scales - iou_predict_truth)
object_loss = tf.reduce_mean(tf.reduce_sum(tf.square(
object_delta), axis=[1, 2, 3]), name='object_loss'
) * self.object_scale
noobject_delta = noobject_mask * predict_scales
noobject_loss = tf.reduce_mean(tf.reduce_sum(tf.square(
noobject_delta), axis=[1, 2, 3]), name='noobject_loss'
) * self.no_object_scale
coord_mask = tf.expand_dims(object_mask, 4)
boxes_delta = coord_mask * (predict_boxes - boxes_tran)
coord_loss = tf.reduce_mean(tf.reduce_sum(tf.square(boxes_delta
), axis=[1, 2, 3, 4]), name='coord_loss') * self.coord_scale
tf.contrib.losses.add_loss(class_loss)
tf.contrib.losses.add_loss(object_loss)
tf.contrib.losses.add_loss(noobject_loss)
tf.contrib.losses.add_loss(coord_loss)
def leaky_relu(alpha):
def op(inputs):
return tf.maximum(alpha * inputs, inputs)
return op
| <mask token>
slim = tf.contrib.slim
class Model:
def __init__(self, training=True):
self.classes = settings.classes_name
self.num_classes = len(settings.classes_name)
self.image_size = settings.image_size
self.cell_size = settings.cell_size
self.boxes_per_cell = settings.box_per_cell
self.output_size = self.cell_size * self.cell_size * (self.
num_classes + self.boxes_per_cell * 5)
self.scale = 1.0 * self.image_size / self.cell_size
self.boundary1 = self.cell_size * self.cell_size * self.num_classes
self.boundary2 = (self.boundary1 + self.cell_size * self.cell_size *
self.boxes_per_cell)
self.object_scale = settings.object_scale
self.no_object_scale = settings.no_object_scale
self.class_scale = settings.class_scale
self.coord_scale = settings.coordinate_scale
self.offset = np.transpose(np.reshape(np.array([np.arange(self.
cell_size)] * self.cell_size * self.boxes_per_cell), (self.
boxes_per_cell, self.cell_size, self.cell_size)), (1, 2, 0))
self.images = tf.placeholder(tf.float32, [None, settings.image_size,
settings.image_size, 3])
if settings.model_type == 'normal':
self.logits = self.build_network(self.images, num_outputs=self.
output_size, alpha=settings.alpha_relu, training=training)
if settings.model_type == 'fast':
self.logits = self.build_fast_network(self.images, num_outputs=
self.output_size, alpha=settings.alpha_relu, training=training)
if training:
self.batch = tf.Variable(0)
self.labels = tf.placeholder(tf.float32, [None, self.cell_size,
self.cell_size, 5 + self.num_classes])
self.loss_layer(self.logits, self.labels)
self.total_loss = tf.contrib.losses.get_total_loss()
self.learning_rate = tf.train.exponential_decay(settings.
learning_rate, self.batch * settings.batch_size, settings.
decay_step, settings.decay_rate, True)
self.optimizer = tf.train.GradientDescentOptimizer(self.
learning_rate).minimize(self.total_loss, global_step=self.batch
)
def build_network(self, images, num_outputs, alpha, keep_prob=settings.
dropout, training=True, scope='yolo'):
with tf.variable_scope(scope):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=leaky_relu(alpha), weights_initializer=tf.
truncated_normal_initializer(0.0, 0.01),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = tf.pad(images, np.array([[0, 0], [3, 3], [3, 3], [0,
0]]), name='pad_1')
net = slim.conv2d(net, 64, 7, 2, padding='VALID', scope=
'conv_2')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_3')
net = slim.conv2d(net, 192, 3, scope='conv_4')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_5')
net = slim.conv2d(net, 128, 1, scope='conv_6')
net = slim.conv2d(net, 256, 3, scope='conv_7')
net = slim.conv2d(net, 256, 1, scope='conv_8')
net = slim.conv2d(net, 512, 3, scope='conv_9')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_10')
net = slim.conv2d(net, 256, 1, scope='conv_11')
net = slim.conv2d(net, 512, 3, scope='conv_12')
net = slim.conv2d(net, 256, 1, scope='conv_13')
net = slim.conv2d(net, 512, 3, scope='conv_14')
net = slim.conv2d(net, 256, 1, scope='conv_15')
net = slim.conv2d(net, 512, 3, scope='conv_16')
net = slim.conv2d(net, 256, 1, scope='conv_17')
net = slim.conv2d(net, 512, 3, scope='conv_18')
net = slim.conv2d(net, 512, 1, scope='conv_19')
net = slim.conv2d(net, 1024, 3, scope='conv_20')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_21')
net = slim.conv2d(net, 512, 1, scope='conv_22')
net = slim.conv2d(net, 1024, 3, scope='conv_23')
net = slim.conv2d(net, 512, 1, scope='conv_24')
net = slim.conv2d(net, 1024, 3, scope='conv_25')
net = slim.conv2d(net, 1024, 3, scope='conv_26')
net = tf.pad(net, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]
), name='pad_27')
net = slim.conv2d(net, 1024, 3, 2, padding='VALID', scope=
'conv_28')
net = slim.conv2d(net, 1024, 3, scope='conv_29')
net = slim.conv2d(net, 1024, 3, scope='conv_30')
net = tf.transpose(net, [0, 3, 1, 2], name='trans_31')
net = slim.flatten(net, scope='flat_32')
net = slim.fully_connected(net, 512, scope='fc_33')
net = slim.fully_connected(net, 4096, scope='fc_34')
net = slim.dropout(net, keep_prob=keep_prob, is_training=
training, scope='dropout_35')
net = slim.fully_connected(net, num_outputs, activation_fn=
None, scope='fc_36')
return net
def build_fast_network(self, images, num_outputs, alpha, keep_prob=
settings.dropout, training=True, scope='yolo'):
with tf.variable_scope(scope):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=leaky_relu(alpha), weights_initializer=tf.
truncated_normal_initializer(0.0, 0.01),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = tf.pad(images, np.array([[0, 0], [3, 3], [3, 3], [0,
0]]), name='pad_1')
net = slim.conv2d(net, 64, 7, 2, padding='VALID', scope=
'conv_2')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_3')
net = slim.conv2d(net, 192, 3, scope='conv_4')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_5')
net = slim.conv2d(net, 128, 1, scope='conv_6')
net = slim.conv2d(net, 256, 3, scope='conv_7')
net = slim.conv2d(net, 512, 3, scope='conv_9')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_10')
net = slim.conv2d(net, 256, 1, scope='conv_11')
net = slim.conv2d(net, 512, 3, scope='conv_12')
net = slim.conv2d(net, 1024, 3, scope='conv_20')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_21')
net = slim.conv2d(net, 512, 1, scope='conv_22')
net = slim.conv2d(net, 1024, 3, scope='conv_23')
net = slim.conv2d(net, 1024, 3, scope='conv_26')
net = tf.pad(net, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]
), name='pad_27')
net = slim.conv2d(net, 1024, 3, 2, padding='VALID', scope=
'conv_28')
net = tf.transpose(net, [0, 3, 1, 2], name='trans_31')
net = slim.flatten(net, scope='flat_32')
net = slim.fully_connected(net, 512, scope='fc_33')
net = slim.fully_connected(net, 4096, scope='fc_34')
net = slim.dropout(net, keep_prob=keep_prob, is_training=
training, scope='dropout_35')
net = slim.fully_connected(net, num_outputs, activation_fn=
None, scope='fc_36')
return net
def calc_iou(self, boxes1, boxes2, scope='iou'):
with tf.variable_scope(scope):
boxes1 = tf.stack([boxes1[:, :, :, :, 0] - boxes1[:, :, :, :, 2
] / 2.0, boxes1[:, :, :, :, 1] - boxes1[:, :, :, :, 3] /
2.0, boxes1[:, :, :, :, 0] + boxes1[:, :, :, :, 2] / 2.0,
boxes1[:, :, :, :, 1] + boxes1[:, :, :, :, 3] / 2.0])
boxes1 = tf.transpose(boxes1, [1, 2, 3, 4, 0])
boxes2 = tf.stack([boxes2[:, :, :, :, 0] - boxes2[:, :, :, :, 2
] / 2.0, boxes2[:, :, :, :, 1] - boxes2[:, :, :, :, 3] /
2.0, boxes2[:, :, :, :, 0] + boxes2[:, :, :, :, 2] / 2.0,
boxes2[:, :, :, :, 1] + boxes2[:, :, :, :, 3] / 2.0])
boxes2 = tf.transpose(boxes2, [1, 2, 3, 4, 0])
lu = tf.maximum(boxes1[:, :, :, :, :2], boxes2[:, :, :, :, :2])
rd = tf.minimum(boxes1[:, :, :, :, 2:], boxes2[:, :, :, :, 2:])
intersection = tf.maximum(0.0, rd - lu)
inter_square = intersection[:, :, :, :, 0] * intersection[:, :,
:, :, 1]
square1 = (boxes1[:, :, :, :, 2] - boxes1[:, :, :, :, 0]) * (boxes1
[:, :, :, :, 3] - boxes1[:, :, :, :, 1])
square2 = (boxes2[:, :, :, :, 2] - boxes2[:, :, :, :, 0]) * (boxes2
[:, :, :, :, 3] - boxes2[:, :, :, :, 1])
union_square = tf.maximum(square1 + square2 - inter_square, 1e-10)
return tf.clip_by_value(inter_square / union_square, 0.0, 1.0)
def loss_layer(self, predicts, labels, scope='loss_layer'):
with tf.variable_scope(scope):
predict_classes = tf.reshape(predicts[:, :self.boundary1], [
settings.batch_size, self.cell_size, self.cell_size, self.
num_classes])
predict_scales = tf.reshape(predicts[:, self.boundary1:self.
boundary2], [settings.batch_size, self.cell_size, self.
cell_size, self.boxes_per_cell])
predict_boxes = tf.reshape(predicts[:, self.boundary2:], [
settings.batch_size, self.cell_size, self.cell_size, self.
boxes_per_cell, 4])
response = tf.reshape(labels[:, :, :, 0], [settings.batch_size,
self.cell_size, self.cell_size, 1])
boxes = tf.reshape(labels[:, :, :, 1:5], [settings.batch_size,
self.cell_size, self.cell_size, 1, 4])
boxes = tf.tile(boxes, [1, 1, 1, self.boxes_per_cell, 1]
) / self.image_size
classes = labels[:, :, :, 5:]
offset = tf.constant(self.offset, dtype=tf.float32)
offset = tf.reshape(offset, [1, self.cell_size, self.cell_size,
self.boxes_per_cell])
offset = tf.tile(offset, [settings.batch_size, 1, 1, 1])
predict_boxes_tran = tf.stack([(predict_boxes[:, :, :, :, 0] +
offset) / self.cell_size, (predict_boxes[:, :, :, :, 1] +
tf.transpose(offset, (0, 2, 1, 3))) / self.cell_size, tf.
square(predict_boxes[:, :, :, :, 2]), tf.square(
predict_boxes[:, :, :, :, 3])])
predict_boxes_tran = tf.transpose(predict_boxes_tran, [1, 2, 3,
4, 0])
iou_predict_truth = self.calc_iou(predict_boxes_tran, boxes)
object_mask = tf.reduce_max(iou_predict_truth, 3, keep_dims=True)
object_mask = tf.cast(iou_predict_truth >= object_mask, tf.float32
) * response
noobject_mask = tf.ones_like(object_mask, dtype=tf.float32
) - object_mask
boxes_tran = tf.stack([boxes[:, :, :, :, 0] * self.cell_size -
offset, boxes[:, :, :, :, 1] * self.cell_size - tf.
transpose(offset, (0, 2, 1, 3)), tf.sqrt(boxes[:, :, :, :,
2]), tf.sqrt(boxes[:, :, :, :, 3])])
boxes_tran = tf.transpose(boxes_tran, [1, 2, 3, 4, 0])
class_delta = response * (predict_classes - classes)
class_loss = tf.reduce_mean(tf.reduce_sum(tf.square(class_delta
), axis=[1, 2, 3]), name='class_loss') * self.class_scale
object_delta = object_mask * (predict_scales - iou_predict_truth)
object_loss = tf.reduce_mean(tf.reduce_sum(tf.square(
object_delta), axis=[1, 2, 3]), name='object_loss'
) * self.object_scale
noobject_delta = noobject_mask * predict_scales
noobject_loss = tf.reduce_mean(tf.reduce_sum(tf.square(
noobject_delta), axis=[1, 2, 3]), name='noobject_loss'
) * self.no_object_scale
coord_mask = tf.expand_dims(object_mask, 4)
boxes_delta = coord_mask * (predict_boxes - boxes_tran)
coord_loss = tf.reduce_mean(tf.reduce_sum(tf.square(boxes_delta
), axis=[1, 2, 3, 4]), name='coord_loss') * self.coord_scale
tf.contrib.losses.add_loss(class_loss)
tf.contrib.losses.add_loss(object_loss)
tf.contrib.losses.add_loss(noobject_loss)
tf.contrib.losses.add_loss(coord_loss)
def leaky_relu(alpha):
def op(inputs):
return tf.maximum(alpha * inputs, inputs)
return op
| import tensorflow as tf
import settings
import numpy as np
slim = tf.contrib.slim
class Model:
def __init__(self, training=True):
self.classes = settings.classes_name
self.num_classes = len(settings.classes_name)
self.image_size = settings.image_size
self.cell_size = settings.cell_size
self.boxes_per_cell = settings.box_per_cell
self.output_size = self.cell_size * self.cell_size * (self.
num_classes + self.boxes_per_cell * 5)
self.scale = 1.0 * self.image_size / self.cell_size
self.boundary1 = self.cell_size * self.cell_size * self.num_classes
self.boundary2 = (self.boundary1 + self.cell_size * self.cell_size *
self.boxes_per_cell)
self.object_scale = settings.object_scale
self.no_object_scale = settings.no_object_scale
self.class_scale = settings.class_scale
self.coord_scale = settings.coordinate_scale
self.offset = np.transpose(np.reshape(np.array([np.arange(self.
cell_size)] * self.cell_size * self.boxes_per_cell), (self.
boxes_per_cell, self.cell_size, self.cell_size)), (1, 2, 0))
self.images = tf.placeholder(tf.float32, [None, settings.image_size,
settings.image_size, 3])
if settings.model_type == 'normal':
self.logits = self.build_network(self.images, num_outputs=self.
output_size, alpha=settings.alpha_relu, training=training)
if settings.model_type == 'fast':
self.logits = self.build_fast_network(self.images, num_outputs=
self.output_size, alpha=settings.alpha_relu, training=training)
if training:
self.batch = tf.Variable(0)
self.labels = tf.placeholder(tf.float32, [None, self.cell_size,
self.cell_size, 5 + self.num_classes])
self.loss_layer(self.logits, self.labels)
self.total_loss = tf.contrib.losses.get_total_loss()
self.learning_rate = tf.train.exponential_decay(settings.
learning_rate, self.batch * settings.batch_size, settings.
decay_step, settings.decay_rate, True)
self.optimizer = tf.train.GradientDescentOptimizer(self.
learning_rate).minimize(self.total_loss, global_step=self.batch
)
def build_network(self, images, num_outputs, alpha, keep_prob=settings.
dropout, training=True, scope='yolo'):
with tf.variable_scope(scope):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=leaky_relu(alpha), weights_initializer=tf.
truncated_normal_initializer(0.0, 0.01),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = tf.pad(images, np.array([[0, 0], [3, 3], [3, 3], [0,
0]]), name='pad_1')
net = slim.conv2d(net, 64, 7, 2, padding='VALID', scope=
'conv_2')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_3')
net = slim.conv2d(net, 192, 3, scope='conv_4')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_5')
net = slim.conv2d(net, 128, 1, scope='conv_6')
net = slim.conv2d(net, 256, 3, scope='conv_7')
net = slim.conv2d(net, 256, 1, scope='conv_8')
net = slim.conv2d(net, 512, 3, scope='conv_9')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_10')
net = slim.conv2d(net, 256, 1, scope='conv_11')
net = slim.conv2d(net, 512, 3, scope='conv_12')
net = slim.conv2d(net, 256, 1, scope='conv_13')
net = slim.conv2d(net, 512, 3, scope='conv_14')
net = slim.conv2d(net, 256, 1, scope='conv_15')
net = slim.conv2d(net, 512, 3, scope='conv_16')
net = slim.conv2d(net, 256, 1, scope='conv_17')
net = slim.conv2d(net, 512, 3, scope='conv_18')
net = slim.conv2d(net, 512, 1, scope='conv_19')
net = slim.conv2d(net, 1024, 3, scope='conv_20')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_21')
net = slim.conv2d(net, 512, 1, scope='conv_22')
net = slim.conv2d(net, 1024, 3, scope='conv_23')
net = slim.conv2d(net, 512, 1, scope='conv_24')
net = slim.conv2d(net, 1024, 3, scope='conv_25')
net = slim.conv2d(net, 1024, 3, scope='conv_26')
net = tf.pad(net, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]
), name='pad_27')
net = slim.conv2d(net, 1024, 3, 2, padding='VALID', scope=
'conv_28')
net = slim.conv2d(net, 1024, 3, scope='conv_29')
net = slim.conv2d(net, 1024, 3, scope='conv_30')
net = tf.transpose(net, [0, 3, 1, 2], name='trans_31')
net = slim.flatten(net, scope='flat_32')
net = slim.fully_connected(net, 512, scope='fc_33')
net = slim.fully_connected(net, 4096, scope='fc_34')
net = slim.dropout(net, keep_prob=keep_prob, is_training=
training, scope='dropout_35')
net = slim.fully_connected(net, num_outputs, activation_fn=
None, scope='fc_36')
return net
def build_fast_network(self, images, num_outputs, alpha, keep_prob=
settings.dropout, training=True, scope='yolo'):
with tf.variable_scope(scope):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=leaky_relu(alpha), weights_initializer=tf.
truncated_normal_initializer(0.0, 0.01),
weights_regularizer=slim.l2_regularizer(0.0005)):
net = tf.pad(images, np.array([[0, 0], [3, 3], [3, 3], [0,
0]]), name='pad_1')
net = slim.conv2d(net, 64, 7, 2, padding='VALID', scope=
'conv_2')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_3')
net = slim.conv2d(net, 192, 3, scope='conv_4')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_5')
net = slim.conv2d(net, 128, 1, scope='conv_6')
net = slim.conv2d(net, 256, 3, scope='conv_7')
net = slim.conv2d(net, 512, 3, scope='conv_9')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_10')
net = slim.conv2d(net, 256, 1, scope='conv_11')
net = slim.conv2d(net, 512, 3, scope='conv_12')
net = slim.conv2d(net, 1024, 3, scope='conv_20')
net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_21')
net = slim.conv2d(net, 512, 1, scope='conv_22')
net = slim.conv2d(net, 1024, 3, scope='conv_23')
net = slim.conv2d(net, 1024, 3, scope='conv_26')
net = tf.pad(net, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]
), name='pad_27')
net = slim.conv2d(net, 1024, 3, 2, padding='VALID', scope=
'conv_28')
net = tf.transpose(net, [0, 3, 1, 2], name='trans_31')
net = slim.flatten(net, scope='flat_32')
net = slim.fully_connected(net, 512, scope='fc_33')
net = slim.fully_connected(net, 4096, scope='fc_34')
net = slim.dropout(net, keep_prob=keep_prob, is_training=
training, scope='dropout_35')
net = slim.fully_connected(net, num_outputs, activation_fn=
None, scope='fc_36')
return net
def calc_iou(self, boxes1, boxes2, scope='iou'):
with tf.variable_scope(scope):
boxes1 = tf.stack([boxes1[:, :, :, :, 0] - boxes1[:, :, :, :, 2
] / 2.0, boxes1[:, :, :, :, 1] - boxes1[:, :, :, :, 3] /
2.0, boxes1[:, :, :, :, 0] + boxes1[:, :, :, :, 2] / 2.0,
boxes1[:, :, :, :, 1] + boxes1[:, :, :, :, 3] / 2.0])
boxes1 = tf.transpose(boxes1, [1, 2, 3, 4, 0])
boxes2 = tf.stack([boxes2[:, :, :, :, 0] - boxes2[:, :, :, :, 2
] / 2.0, boxes2[:, :, :, :, 1] - boxes2[:, :, :, :, 3] /
2.0, boxes2[:, :, :, :, 0] + boxes2[:, :, :, :, 2] / 2.0,
boxes2[:, :, :, :, 1] + boxes2[:, :, :, :, 3] / 2.0])
boxes2 = tf.transpose(boxes2, [1, 2, 3, 4, 0])
lu = tf.maximum(boxes1[:, :, :, :, :2], boxes2[:, :, :, :, :2])
rd = tf.minimum(boxes1[:, :, :, :, 2:], boxes2[:, :, :, :, 2:])
intersection = tf.maximum(0.0, rd - lu)
inter_square = intersection[:, :, :, :, 0] * intersection[:, :,
:, :, 1]
square1 = (boxes1[:, :, :, :, 2] - boxes1[:, :, :, :, 0]) * (boxes1
[:, :, :, :, 3] - boxes1[:, :, :, :, 1])
square2 = (boxes2[:, :, :, :, 2] - boxes2[:, :, :, :, 0]) * (boxes2
[:, :, :, :, 3] - boxes2[:, :, :, :, 1])
union_square = tf.maximum(square1 + square2 - inter_square, 1e-10)
return tf.clip_by_value(inter_square / union_square, 0.0, 1.0)
def loss_layer(self, predicts, labels, scope='loss_layer'):
with tf.variable_scope(scope):
predict_classes = tf.reshape(predicts[:, :self.boundary1], [
settings.batch_size, self.cell_size, self.cell_size, self.
num_classes])
predict_scales = tf.reshape(predicts[:, self.boundary1:self.
boundary2], [settings.batch_size, self.cell_size, self.
cell_size, self.boxes_per_cell])
predict_boxes = tf.reshape(predicts[:, self.boundary2:], [
settings.batch_size, self.cell_size, self.cell_size, self.
boxes_per_cell, 4])
response = tf.reshape(labels[:, :, :, 0], [settings.batch_size,
self.cell_size, self.cell_size, 1])
boxes = tf.reshape(labels[:, :, :, 1:5], [settings.batch_size,
self.cell_size, self.cell_size, 1, 4])
boxes = tf.tile(boxes, [1, 1, 1, self.boxes_per_cell, 1]
) / self.image_size
classes = labels[:, :, :, 5:]
offset = tf.constant(self.offset, dtype=tf.float32)
offset = tf.reshape(offset, [1, self.cell_size, self.cell_size,
self.boxes_per_cell])
offset = tf.tile(offset, [settings.batch_size, 1, 1, 1])
predict_boxes_tran = tf.stack([(predict_boxes[:, :, :, :, 0] +
offset) / self.cell_size, (predict_boxes[:, :, :, :, 1] +
tf.transpose(offset, (0, 2, 1, 3))) / self.cell_size, tf.
square(predict_boxes[:, :, :, :, 2]), tf.square(
predict_boxes[:, :, :, :, 3])])
predict_boxes_tran = tf.transpose(predict_boxes_tran, [1, 2, 3,
4, 0])
iou_predict_truth = self.calc_iou(predict_boxes_tran, boxes)
object_mask = tf.reduce_max(iou_predict_truth, 3, keep_dims=True)
object_mask = tf.cast(iou_predict_truth >= object_mask, tf.float32
) * response
noobject_mask = tf.ones_like(object_mask, dtype=tf.float32
) - object_mask
boxes_tran = tf.stack([boxes[:, :, :, :, 0] * self.cell_size -
offset, boxes[:, :, :, :, 1] * self.cell_size - tf.
transpose(offset, (0, 2, 1, 3)), tf.sqrt(boxes[:, :, :, :,
2]), tf.sqrt(boxes[:, :, :, :, 3])])
boxes_tran = tf.transpose(boxes_tran, [1, 2, 3, 4, 0])
class_delta = response * (predict_classes - classes)
class_loss = tf.reduce_mean(tf.reduce_sum(tf.square(class_delta
), axis=[1, 2, 3]), name='class_loss') * self.class_scale
object_delta = object_mask * (predict_scales - iou_predict_truth)
object_loss = tf.reduce_mean(tf.reduce_sum(tf.square(
object_delta), axis=[1, 2, 3]), name='object_loss'
) * self.object_scale
noobject_delta = noobject_mask * predict_scales
noobject_loss = tf.reduce_mean(tf.reduce_sum(tf.square(
noobject_delta), axis=[1, 2, 3]), name='noobject_loss'
) * self.no_object_scale
coord_mask = tf.expand_dims(object_mask, 4)
boxes_delta = coord_mask * (predict_boxes - boxes_tran)
coord_loss = tf.reduce_mean(tf.reduce_sum(tf.square(boxes_delta
), axis=[1, 2, 3, 4]), name='coord_loss') * self.coord_scale
tf.contrib.losses.add_loss(class_loss)
tf.contrib.losses.add_loss(object_loss)
tf.contrib.losses.add_loss(noobject_loss)
tf.contrib.losses.add_loss(coord_loss)
def leaky_relu(alpha):
def op(inputs):
return tf.maximum(alpha * inputs, inputs)
return op
| import tensorflow as tf
import settings
import numpy as np
slim = tf.contrib.slim
class Model:
def __init__(self, training = True):
self.classes = settings.classes_name
self.num_classes = len(settings.classes_name)
self.image_size = settings.image_size
self.cell_size = settings.cell_size
self.boxes_per_cell = settings.box_per_cell
self.output_size = (self.cell_size * self.cell_size) * (self.num_classes + self.boxes_per_cell * 5)
self.scale = 1.0 * self.image_size / self.cell_size
self.boundary1 = self.cell_size * self.cell_size * self.num_classes
self.boundary2 = self.boundary1 + self.cell_size * self.cell_size * self.boxes_per_cell
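        # Worked example (illustrative only; the real numbers come from settings): with a
        # typical YOLO v1 setup of cell_size=7, box_per_cell=2 and 20 classes,
        # output_size = 7*7*(20 + 2*5) = 1470, boundary1 = 7*7*20 = 980 and
        # boundary2 = 980 + 7*7*2 = 1078, so the flat prediction vector splits into
        # class scores, then per-box confidences, then box coordinates.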
self.object_scale = settings.object_scale
self.no_object_scale = settings.no_object_scale
self.class_scale = settings.class_scale
self.coord_scale = settings.coordinate_scale
self.offset = np.transpose(np.reshape(np.array([np.arange(self.cell_size)] * self.cell_size * self.boxes_per_cell), (self.boxes_per_cell, self.cell_size, self.cell_size)), (1, 2, 0))
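        # Note: self.offset has shape (cell_size, cell_size, boxes_per_cell) with
        # offset[row, col, box] == col, i.e. the x index of each grid cell; the loss layer
        # adds it (and its transpose for y) to turn cell-relative box centres into grid
        # coordinates.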
self.images = tf.placeholder(tf.float32, [None, settings.image_size, settings.image_size, 3])
if settings.model_type == 'normal':
self.logits = self.build_network(self.images, num_outputs = self.output_size, alpha = settings.alpha_relu, training = training)
if settings.model_type == 'fast':
self.logits = self.build_fast_network(self.images, num_outputs = self.output_size, alpha = settings.alpha_relu, training = training)
if training:
self.batch = tf.Variable(0)
self.labels = tf.placeholder(tf.float32, [None, self.cell_size, self.cell_size, 5 + self.num_classes])
self.loss_layer(self.logits, self.labels)
self.total_loss = tf.contrib.losses.get_total_loss()
self.learning_rate = tf.train.exponential_decay(settings.learning_rate, self.batch * settings.batch_size, settings.decay_step, settings.decay_rate, True)
self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.total_loss, global_step = self.batch)
def build_network(self, images, num_outputs, alpha, keep_prob = settings.dropout, training = True, scope = 'yolo'):
with tf.variable_scope(scope):
with slim.arg_scope([slim.conv2d, slim.fully_connected], activation_fn = leaky_relu(alpha), weights_initializer = tf.truncated_normal_initializer(0.0, 0.01), weights_regularizer = slim.l2_regularizer(0.0005)):
net = tf.pad(images, np.array([[0, 0], [3, 3], [3, 3], [0, 0]]), name = 'pad_1')
net = slim.conv2d(net, 64, 7, 2, padding = 'VALID', scope = 'conv_2')
net = slim.max_pool2d(net, 2, padding = 'SAME', scope = 'pool_3')
net = slim.conv2d(net, 192, 3, scope = 'conv_4')
net = slim.max_pool2d(net, 2, padding = 'SAME', scope = 'pool_5')
net = slim.conv2d(net, 128, 1, scope = 'conv_6')
net = slim.conv2d(net, 256, 3, scope = 'conv_7')
net = slim.conv2d(net, 256, 1, scope = 'conv_8')
net = slim.conv2d(net, 512, 3, scope = 'conv_9')
net = slim.max_pool2d(net, 2, padding = 'SAME', scope = 'pool_10')
net = slim.conv2d(net, 256, 1, scope = 'conv_11')
net = slim.conv2d(net, 512, 3, scope = 'conv_12')
net = slim.conv2d(net, 256, 1, scope = 'conv_13')
net = slim.conv2d(net, 512, 3, scope = 'conv_14')
net = slim.conv2d(net, 256, 1, scope = 'conv_15')
net = slim.conv2d(net, 512, 3, scope = 'conv_16')
net = slim.conv2d(net, 256, 1, scope = 'conv_17')
net = slim.conv2d(net, 512, 3, scope = 'conv_18')
net = slim.conv2d(net, 512, 1, scope = 'conv_19')
net = slim.conv2d(net, 1024, 3, scope = 'conv_20')
net = slim.max_pool2d(net, 2, padding='SAME', scope = 'pool_21')
net = slim.conv2d(net, 512, 1, scope = 'conv_22')
net = slim.conv2d(net, 1024, 3, scope = 'conv_23')
net = slim.conv2d(net, 512, 1, scope = 'conv_24')
net = slim.conv2d(net, 1024, 3, scope = 'conv_25')
net = slim.conv2d(net, 1024, 3, scope = 'conv_26')
net = tf.pad(net, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]), name = 'pad_27')
net = slim.conv2d(net, 1024, 3, 2, padding='VALID', scope = 'conv_28')
net = slim.conv2d(net, 1024, 3, scope = 'conv_29')
net = slim.conv2d(net, 1024, 3, scope = 'conv_30')
net = tf.transpose(net, [0, 3, 1, 2], name='trans_31')
net = slim.flatten(net, scope = 'flat_32')
net = slim.fully_connected(net, 512, scope = 'fc_33')
net = slim.fully_connected(net, 4096, scope = 'fc_34')
net = slim.dropout(net, keep_prob = keep_prob, is_training = training, scope = 'dropout_35')
net = slim.fully_connected(net, num_outputs, activation_fn = None, scope = 'fc_36')
return net
def build_fast_network(self, images, num_outputs, alpha, keep_prob = settings.dropout, training = True, scope = 'yolo'):
with tf.variable_scope(scope):
with slim.arg_scope([slim.conv2d, slim.fully_connected], activation_fn = leaky_relu(alpha), weights_initializer = tf.truncated_normal_initializer(0.0, 0.01), weights_regularizer = slim.l2_regularizer(0.0005)):
net = tf.pad(images, np.array([[0, 0], [3, 3], [3, 3], [0, 0]]), name = 'pad_1')
net = slim.conv2d(net, 64, 7, 2, padding = 'VALID', scope = 'conv_2')
net = slim.max_pool2d(net, 2, padding = 'SAME', scope = 'pool_3')
net = slim.conv2d(net, 192, 3, scope = 'conv_4')
net = slim.max_pool2d(net, 2, padding = 'SAME', scope = 'pool_5')
net = slim.conv2d(net, 128, 1, scope = 'conv_6')
net = slim.conv2d(net, 256, 3, scope = 'conv_7')
net = slim.conv2d(net, 512, 3, scope = 'conv_9')
net = slim.max_pool2d(net, 2, padding = 'SAME', scope = 'pool_10')
net = slim.conv2d(net, 256, 1, scope = 'conv_11')
net = slim.conv2d(net, 512, 3, scope = 'conv_12')
net = slim.conv2d(net, 1024, 3, scope = 'conv_20')
net = slim.max_pool2d(net, 2, padding='SAME', scope = 'pool_21')
net = slim.conv2d(net, 512, 1, scope = 'conv_22')
net = slim.conv2d(net, 1024, 3, scope = 'conv_23')
net = slim.conv2d(net, 1024, 3, scope = 'conv_26')
net = tf.pad(net, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]), name = 'pad_27')
net = slim.conv2d(net, 1024, 3, 2, padding='VALID', scope = 'conv_28')
net = tf.transpose(net, [0, 3, 1, 2], name='trans_31')
net = slim.flatten(net, scope = 'flat_32')
net = slim.fully_connected(net, 512, scope = 'fc_33')
net = slim.fully_connected(net, 4096, scope = 'fc_34')
net = slim.dropout(net, keep_prob = keep_prob, is_training = training, scope = 'dropout_35')
net = slim.fully_connected(net, num_outputs, activation_fn = None, scope = 'fc_36')
return net
def calc_iou(self, boxes1, boxes2, scope = 'iou'):
with tf.variable_scope(scope):
boxes1 = tf.stack([boxes1[:, :, :, :, 0] - boxes1[:, :, :, :, 2] / 2.0,
boxes1[:, :, :, :, 1] - boxes1[:, :, :, :, 3] / 2.0,
boxes1[:, :, :, :, 0] + boxes1[:, :, :, :, 2] / 2.0,
boxes1[:, :, :, :, 1] + boxes1[:, :, :, :, 3] / 2.0])
boxes1 = tf.transpose(boxes1, [1, 2, 3, 4, 0])
boxes2 = tf.stack([boxes2[:, :, :, :, 0] - boxes2[:, :, :, :, 2] / 2.0,
boxes2[:, :, :, :, 1] - boxes2[:, :, :, :, 3] / 2.0,
boxes2[:, :, :, :, 0] + boxes2[:, :, :, :, 2] / 2.0,
boxes2[:, :, :, :, 1] + boxes2[:, :, :, :, 3] / 2.0])
boxes2 = tf.transpose(boxes2, [1, 2, 3, 4, 0])
lu = tf.maximum(boxes1[:, :, :, :, :2], boxes2[:, :, :, :, :2])
rd = tf.minimum(boxes1[:, :, :, :, 2:], boxes2[:, :, :, :, 2:])
intersection = tf.maximum(0.0, rd - lu)
inter_square = intersection[:, :, :, :, 0] * intersection[:, :, :, :, 1]
square1 = (boxes1[:, :, :, :, 2] - boxes1[:, :, :, :, 0]) * (boxes1[:, :, :, :, 3] - boxes1[:, :, :, :, 1])
square2 = (boxes2[:, :, :, :, 2] - boxes2[:, :, :, :, 0]) * (boxes2[:, :, :, :, 3] - boxes2[:, :, :, :, 1])
union_square = tf.maximum(square1 + square2 - inter_square, 1e-10)
return tf.clip_by_value(inter_square / union_square, 0.0, 1.0)
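    # Rough numeric check of calc_iou above (boxes are (x_centre, y_centre, w, h)):
    # box1 = (0.5, 0.5, 0.4, 0.4) -> corners (0.3, 0.3)-(0.7, 0.7), area 0.16
    # box2 = (0.5, 0.5, 0.2, 0.2) -> corners (0.4, 0.4)-(0.6, 0.6), area 0.04
    # intersection = 0.04, union = 0.16 + 0.04 - 0.04 = 0.16, so IoU = 0.25.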
def loss_layer(self, predicts, labels, scope = 'loss_layer'):
with tf.variable_scope(scope):
predict_classes = tf.reshape(predicts[:, :self.boundary1], [settings.batch_size, self.cell_size, self.cell_size, self.num_classes])
predict_scales = tf.reshape(predicts[:, self.boundary1:self.boundary2], [settings.batch_size, self.cell_size, self.cell_size, self.boxes_per_cell])
predict_boxes = tf.reshape(predicts[:, self.boundary2:], [settings.batch_size, self.cell_size, self.cell_size, self.boxes_per_cell, 4])
response = tf.reshape(labels[:, :, :, 0], [settings.batch_size, self.cell_size, self.cell_size, 1])
boxes = tf.reshape(labels[:, :, :, 1:5], [settings.batch_size, self.cell_size, self.cell_size, 1, 4])
boxes = tf.tile(boxes, [1, 1, 1, self.boxes_per_cell, 1]) / self.image_size
classes = labels[:, :, :, 5:]
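            # As consumed here, each grid cell of the label tensor packs: channel 0 = the
            # objectness response, channels 1:5 = one ground-truth box (x, y, w, h,
            # divided by image_size above, so presumably given in pixels), and
            # channels 5: = one-hot class targets.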
offset = tf.constant(self.offset, dtype = tf.float32)
offset = tf.reshape(offset, [1, self.cell_size, self.cell_size, self.boxes_per_cell])
offset = tf.tile(offset, [settings.batch_size, 1, 1, 1])
predict_boxes_tran = tf.stack([(predict_boxes[:, :, :, :, 0] + offset) / self.cell_size,
(predict_boxes[:, :, :, :, 1] + tf.transpose(offset, (0, 2, 1, 3))) / self.cell_size,
tf.square(predict_boxes[:, :, :, :, 2]),
tf.square(predict_boxes[:, :, :, :, 3])])
predict_boxes_tran = tf.transpose(predict_boxes_tran, [1, 2, 3, 4, 0])
iou_predict_truth = self.calc_iou(predict_boxes_tran, boxes)
object_mask = tf.reduce_max(iou_predict_truth, 3, keep_dims=True)
object_mask = tf.cast((iou_predict_truth >= object_mask), tf.float32) * response
noobject_mask = tf.ones_like(object_mask, dtype=tf.float32) - object_mask
boxes_tran = tf.stack([boxes[:, :, :, :, 0] * self.cell_size - offset,
boxes[:, :, :, :, 1] * self.cell_size - tf.transpose(offset, (0, 2, 1, 3)),
tf.sqrt(boxes[:, :, :, :, 2]),
tf.sqrt(boxes[:, :, :, :, 3])])
boxes_tran = tf.transpose(boxes_tran, [1, 2, 3, 4, 0])
class_delta = response * (predict_classes - classes)
class_loss = tf.reduce_mean(tf.reduce_sum(tf.square(class_delta), axis=[1, 2, 3]), name = 'class_loss') * self.class_scale
object_delta = object_mask * (predict_scales - iou_predict_truth)
object_loss = tf.reduce_mean(tf.reduce_sum(tf.square(object_delta), axis=[1, 2, 3]), name = 'object_loss') * self.object_scale
noobject_delta = noobject_mask * predict_scales
noobject_loss = tf.reduce_mean(tf.reduce_sum(tf.square(noobject_delta), axis=[1, 2, 3]), name = 'noobject_loss') * self.no_object_scale
coord_mask = tf.expand_dims(object_mask, 4)
boxes_delta = coord_mask * (predict_boxes - boxes_tran)
coord_loss = tf.reduce_mean(tf.reduce_sum(tf.square(boxes_delta), axis=[1, 2, 3, 4]), name = 'coord_loss') * self.coord_scale
tf.contrib.losses.add_loss(class_loss)
tf.contrib.losses.add_loss(object_loss)
tf.contrib.losses.add_loss(noobject_loss)
tf.contrib.losses.add_loss(coord_loss)
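            # The four scaled terms registered above (class, object-confidence, no-object
            # and coordinate) are what tf.contrib.losses.get_total_loss() combines in
            # __init__ when building self.total_loss.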
def leaky_relu(alpha):
def op(inputs):
return tf.maximum(alpha * inputs, inputs)
return op
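# leaky_relu returns a closure usable as a slim activation_fn; for example, with an
# illustrative alpha = 0.1, op(2.0) == 2.0 and op(-2.0) == max(0.1 * -2.0, -2.0) == -0.2.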
| [
6,
7,
8,
9,
10
] |
820 | 920cd41b18f5cfb45f46c44ed707cebe682d4dd9 | # Software License Agreement (BSD License)
#
# Copyright (c) 2009-2011, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: [email protected]
'''
@author: clarkmatthew
extension of the boto instance class, with added convenience methods + objects
Add common instance test routines to this class
Examples:
from eucaops import Eucaops
from nephoria.windows_instance import WinInstance
tester = Eucaops(credpath='eucarc-10.111.5.80-eucalyptus-sys_admin')
wins = WinInstance.make_euinstance_from_instance(tester.get_instances(idstring='i-89E13DA8')[0], tester=tester, keypair='test')
vol = tester.get_volume(status='available', zone=wins.placement)
wins.attach_volume(vol)
'''
import socket
import os
import re
import time
import copy
import types
import operator
from prettytable import PrettyTable, ALL
from boto.ec2.instance import Instance
from nephoria.aws.ec2.euvolume import EuVolume
from cloud_utils.log_utils import eulogger, get_line, markup
from nephoria.euca.taggedresource import TaggedResource
from boto.ec2.instance import InstanceState
from datetime import datetime
from cloud_utils.net_utils import winrm_connection
termline = get_line()
class WinInstanceDiskType():
gigabyte = 1073741824
megabyte = 1048576
def __init__(self, win_instance, wmic_dict):
self.check_dict_requires(wmic_dict)
self.__dict__ = self.convert_numbers_in_dict(copy.copy(wmic_dict))
self.win_instance = win_instance
self.size_in_gb = self.get_size_in_gb()
self.size_in_mb = self.get_size_in_mb()
self.size = long(self.size or 0)
self.last_updated = time.time()
self.setup()
def setup(self):
raise Exception('Not Implemented')
def check_dict_requires(self, wmic_dict):
raise Exception('Not Implemented')
def convert_numbers_in_dict(self, dict):
#convert strings representing numbers to ints
for key in dict:
value = str(dict[key])
if (re.search("\S", str(dict[key])) and not re.search("\D", str(dict[key]))):
dict[key] = long(dict[key])
return dict
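        # Illustrative behaviour: a value such as '1048576' (digits only) is converted to
        # the long 1048576, while values like 'Disk #0, Partition #1' or '' are left
        # unchanged because they contain non-digit characters or no characters at all.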
def get_partition_ids(self):
retlist = []
for part in self.disk_partitions:
retlist.append(part.deviceid)
return retlist
def get_logicaldisk_ids(self):
retlist = []
for part in self.disk_partitions:
retlist.extend(part.get_logicaldisk_ids())
return retlist
def get_size_in_gb(self):
'''
        Attempts to convert self.size from bytes to gigabytes, rounding up when the remainder
        is more than .99 of a gigabyte to account for differences in how the size is represented
'''
self.size = int(self.size or 0)
gigs = self.size / self.gigabyte
if (self.size % self.gigabyte) /float(self.gigabyte) > .99:
gigs += 1
return gigs
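        # Example of the rounding rule above: 1073741824 bytes is exactly 1 GB, while
        # 1073700000 bytes gives 0 GB by integer division but a remainder of roughly
        # 0.99996 of a gigabyte, so it is also reported as 1 GB.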
def get_size_in_mb(self):
'''
        Attempts to convert self.size from bytes to megabytes, rounding up when the remainder
        is more than .99 of a megabyte to account for differences in how the size is represented
'''
self.size = int(self.size or 0)
mb = self.size / self.megabyte
if (self.size % self.megabyte) /float(self.megabyte) > .99:
mb += 1
return mb
def print_self(self):
self.get_summary(printmethod=self.win_instance.debug)
def get_summary(self, printheader=True, printmethod=None):
raise Exception('Method not implemented')
def print_self_full(self, printmethod=None):
'''
        formats and prints self.__dict__
'''
self.win_instance.print_dict(dict=self.__dict__, printmethod=printmethod)
class WinInstanceDiskDrive(WinInstanceDiskType):
def setup(self):
if not hasattr(self, 'serialnumber'):
self.serialnumber = ''
if not hasattr(self, 'caption'):
self.caption = ''
if hasattr(self, 'model'):
self.caption = self.model
else:
self.model = self.caption
self.cygwin_scsi_drive = self.win_instance.get_cygwin_scsi_dev_for_windows_drive(windisk=self)
self.update_ebs_info()
self.disk_partitions = []
def check_dict_requires(self, wmic_dict):
if not ('deviceid' in wmic_dict and
'size' in wmic_dict and
                ('caption' in wmic_dict or 'model' in wmic_dict) and
'index' in wmic_dict):
raise Exception('wmic_dict passed does not contain needed attributes; deviceid, size, caption, and index')
def get_partition_ids(self):
retlist = []
for part in self.disk_partitions:
retlist.append(part.deviceid)
return retlist
def get_logicaldisk_ids(self):
retlist = []
for part in self.disk_partitions:
retlist.extend(part.get_logicaldisk_ids())
return retlist
def update_md5_info_from_ebs(self):
self.md5 = None
self.md5len = None
for vol in self.win_instance.attached_vols:
if vol.guestdev == self.deviceid:
if not vol.md5:
vol.md5len = 1024
vol.md5 = self.win_instance.get_dev_md5(self.cygwin_scsi_drive, vol.md5len)
self.md5 = vol.md5
self.md5len = vol.md5len
break
def update_ebs_info_from_serial_number(self):
'''
Attempts to parse the serial number field from an EBS volume and find the correlating ebs volume
example format: vol-81C13EA4-dev-sdg
'''
if re.match("^vol-", self.serialnumber):
split = self.serialnumber.split('-')
self.ebs_volume = str(split[0]) + "-" + str(split[1])
self.ebs_cloud_dev = "/" + str(split[2]) + "/" + str(split[3])
else:
self.ebs_volume = ''
self.ebs_cloud_dev = ''
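        # Example with the format shown in the docstring: 'vol-81C13EA4-dev-sdg' splits
        # into ['vol', '81C13EA4', 'dev', 'sdg'], giving ebs_volume = 'vol-81C13EA4' and
        # ebs_cloud_dev = '/dev/sdg'; serial numbers not starting with 'vol-' yield
        # empty strings for both fields.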
def update_ebs_info(self):
self.update_ebs_info_from_serial_number()
if not self.ebs_volume:
if self.index == 0 and self.win_instance.root_device_type == 'ebs':
bdm = self.win_instance.block_device_mapping[self.win_instance.root_device_name]
self.ebs_volume = bdm.volume_id
else:
for vol in self.win_instance.attached_vols:
if vol.guestdev == self.deviceid:
self.ebs_volume = vol.id
break
if not self.ebs_cloud_dev and self.ebs_volume:
volume = self.win_instance.tester.get_volume(volume_id=self.ebs_volume)
if hasattr(volume,'attach_data') and volume.attach_data:
self.ebs_cloud_dev = volume.attach_data.device
self.update_md5_info_from_ebs()
def get_summary(self, printheader=True, printmethod=None):
buf = ""
deviceid = 20
size = 16
sizegb = 7
ebsvol = 12
serialnumber = 24
caption = 36
part_count = 6
logical_ids = 8
cygdrive = 10
md5 = 32
header = "DISKDRIVE DEV ID".center(deviceid) + "|" + \
"SIZE B".center(size) + "|" + \
"SIZE GB".center(sizegb) + "|" + \
"EBS VOL".center(ebsvol) + "|" + \
"CAPTION".center(caption) + "|" + \
"PARTS".center(part_count) + "|" + \
"LOGICAL".center(logical_ids) + "|" + \
"CYGDRIVE".center(cygdrive) + "|" + \
"SERIAL NUMBER".center(serialnumber) + "|" + \
"MD5 CHECK SUM".center(md5) + "|"
summary = str(self.deviceid).center(deviceid) + "|" + \
str(self.size).center(size) + "|" + \
str(self.size_in_gb).center(sizegb) + "|" + \
str(self.ebs_volume).center(ebsvol) + "|" + \
str(self.caption).center(caption) + "|" + \
str(self.partitions).center(part_count) + "|" + \
str(",".join(str(x) for x in self.get_logicaldisk_ids())).center(logical_ids) + "|" + \
str(self.cygwin_scsi_drive).center(cygdrive) + "|" + \
str(self.serialnumber).center(serialnumber) + "|" + \
str(self.md5).center(md5) + "|"
length = len(header)
if len(summary) > length:
length = len(summary)
line = get_line(length)
if printheader:
buf += line + header + line
buf += summary + line
if printmethod:
printmethod(buf)
return buf
class WinInstanceDiskPartition(WinInstanceDiskType):
def setup(self):
#self.cygwin_scsi_drive = self.win_instance.get_cygwin_scsi_dev_for_windows_drive(drive_id=self.deviceid)
self.logicaldisks = []
#Set values in case 'brief' was used when fetching partitions
if not hasattr(self,'deviceid'):
self.deviceid = self.name
if not hasattr(self,'bootable'):
self.bootable = self.bootpartition
if not hasattr(self,'diskindex'):
self.diskindex = self.get_disk_index_from_name()
def check_dict_requires(self, wmic_dict):
if not ('name' in wmic_dict and
'size' in wmic_dict and
'bootpartition' in wmic_dict and
'index' in wmic_dict):
            raise Exception('wmic_dict passed does not contain needed attributes; name, size, bootpartition and index')
def get_disk_index_from_name(self):
diskindex = None
diskindexstring = self.name.split(',')[0]
if re.search('disk', diskindexstring, re.IGNORECASE):
diskindex = int(diskindexstring.split('#')[1])
return diskindex
def get_logicaldisk_ids(self):
retlist = []
for disk in self.logicaldisks:
retlist.append(disk.deviceid)
return retlist
def get_summary(self, printheader=True, printmethod=None):
buf = ""
deviceid = 24
size = 16
sizegb = 12
sizemb = 12
bootable = 10
header = "PARTITION DEV ID".center(deviceid) + "|" + \
"SIZE B".center(size) + "|" + \
"SIZE GB".center(sizegb) + "|" + \
"SIZE MB".center(sizemb) + "|" + \
"BOOTABLE".center(bootable) + "|"
summary = str(self.deviceid).center(deviceid) + "|" + \
str(self.size).center(size) + "|" + \
str(self.size_in_gb).center(sizegb) + "|" + \
str(self.size_in_mb).center(sizemb) + "|" + \
str(self.bootable).center(bootable) + "|"
length = len(header)
if len(summary) > length:
length = len(summary)
line = get_line(length)
if printheader:
buf += line + header + line
buf += summary + line
if printmethod:
printmethod(buf)
return buf
class WinInstanceLogicalDisk(WinInstanceDiskType):
def setup(self):
self.cygwin_scsi_drive = self.win_instance.get_cygwin_scsi_dev_for_windows_drive(windisk=self)
self.partition = None
def check_dict_requires(self, wmic_dict):
if not ('deviceid' in wmic_dict and
'size' in wmic_dict and
'description' in wmic_dict and
'freespace' in wmic_dict and
'filesystem' in wmic_dict):
            raise Exception('wmic_dict passed does not contain needed attributes; deviceid, size, description, freespace and filesystem')
def get_summary(self, printheader=True, printmethod=None):
buf = ""
deviceid = 24
size = 16
freespace = 16
filesystem = 24
description = 30
cygdrive = 10
header = "LOGICAL DEV ID".center(deviceid) + "|" + \
"SIZE".center(size) + "|" + \
"FREE SPACE".center(freespace) + "|" + \
"FILE SYSTEM".center(filesystem) + "|" + \
"DESCRIPTION".center(description) + "|" + \
"CYGDRIVE".center(cygdrive) + "|"
summary = str(self.deviceid).center(deviceid) + "|" + \
str(self.size).center(size) + "|" + \
str(self.freespace).center(freespace) + "|" + \
str(self.filesystem).center(filesystem) + "|" + \
str(self.description).center(description) + "|" + \
str(self.cygwin_scsi_drive).center(cygdrive) + "|"
length = len(header)
if len(summary) > length:
length = len(summary)
line = get_line(length)
if printheader:
buf += line + header + line
buf += summary + line
if printmethod:
printmethod(buf)
return buf
class WinInstance(Instance, TaggedResource):
gigabyte = 1073741824
megabyte = 1048576
@classmethod
def make_euinstance_from_instance(cls,
instance,
tester,
debugmethod = None,
keypair=None,
keypath=None,
password=None,
username="Administrator",
auto_connect = True,
verbose=True,
timeout=120,
private_addressing = False,
reservation = None,
cmdstart=None,
try_non_root_exec=True,
winrm_port='5985',
winrm_protocol='http',
rdp_port='3389',
rootfs_device = "sda",
block_device_prefix = "sd",
bdm_root_vol = None,
virtio_blk = True,
cygwin_path = None,
disk_update_interval=10,
retry=2,
brief=False
):
'''
Primary constructor for this class. Note: to avoid an ssh session within this method, provide keys, username/pass later.
Arguments:
instance - mandatory- a Boto instance object used to build this euinstance object
keypair - optional- a boto keypair object used for creating ssh connection to the instance
username - optional- string used to create ssh connection as an alternative to keypair
password - optional- string used to create ssh connection to this instance as an alternative to keypair
exec_password -optional -string used for su or sudo where prompted for password, will default to 'password'
auto_connect -optional -boolean, if True will attempt to automatically create an ssh session for this instance
try_non_root_exec -optional -boolean, if True will attempt to use sudo if available else su -c to execute privileged commands
timeout - optional- integer used for ssh connection timeout
debugmethod - optional - method, used for debug output
verbose - optional - boolean to determine if debug is to be printed using debug()
retry - optional - integer, ssh connection attempts for non-authentication failures
'''
newins = WinInstance(instance.connection)
newins.__dict__ = instance.__dict__
newins.tester = tester
newins.winrm_port = winrm_port
newins.rdp_port = rdp_port
newins.bdm_root_vol = None
newins.winrm_protocol = winrm_protocol
newins.debugmethod = debugmethod
if newins.debugmethod is None:
newins.log = eulogger.Eulogger(identifier= str(instance.id))
newins.debugmethod= newins.log.debug
if (keypair is not None):
if isinstance(keypair,types.StringTypes):
keyname = keypair
keypair = tester.get_keypair(keyname)
else:
keyname = keypair.name
newins.keypath = keypath or os.getcwd() + "/" + keyname + ".pem"
newins.keypair = keypair
newins.password = password
newins.username = username
newins.verbose = verbose
newins.attached_vols=[]
newins.timeout = timeout
newins.virtio_blk = virtio_blk
newins.disk_update_interval = disk_update_interval
newins.retry = retry
newins.brief = brief
newins.rootfs_device = rootfs_device
newins.block_device_prefix = block_device_prefix
newins.private_addressing = private_addressing
newins.reservation = reservation or newins.get_reservation()
if newins.reservation:
newins.security_groups = newins.tester.get_instance_security_groups(newins)
else:
newins.security_groups = None
newins.laststate = newins.state
newins.cmdstart = cmdstart
newins.auto_connect = auto_connect
newins.set_last_status()
newins.update_vm_type_info()
newins.cygwin_path = cygwin_path
newins.system_info = None
newins.diskdrives = []
newins.disk_partitions = []
newins.logicaldisks = []
newins.cygwin_dev_map = {}
#newins.set_block_device_prefix()
if newins.root_device_type == 'ebs':
try:
volume = newins.tester.get_volume(volume_id = newins.block_device_mapping.get(newins.root_device_name).volume_id)
newins.bdm_root_vol = EuVolume.make_euvol_from_vol(volume, tester=newins.tester,cmdstart=newins.cmdstart)
except:pass
newins.winrm = None
if newins.auto_connect and newins.state == 'running':
newins.connect_to_instance(timeout=timeout)
return newins
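    # A minimal usage sketch (hedged; the tester object, image and keypair
    # names below are hypothetical examples, not part of this module):
    #
    #   reservation = tester.run_image(image=windows_emi, keypair='testkey')
    #   win_ins = WinInstance.make_euinstance_from_instance(
    #                 reservation.instances[0], tester,
    #                 keypair=tester.get_keypair('testkey'),
    #                 auto_connect=True)
    #
    # With auto_connect=True and a 'running' state, the factory also polls the
    # RDP/WinRM ports and populates system and disk info before returning.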
@property
def age(self):
launchtime = self.tester.get_datetime_from_resource_string(self.launch_time)
# return the elapsed time in seconds
return (time.mktime(datetime.utcnow().utctimetuple()) -
time.mktime(launchtime.utctimetuple()))
def update(self, validate=False, dry_run=False,
err_state='terminated', err_code=-1):
ret = None
tb = ""
retries = 2
for x in xrange(0, retries):
try:
#send with validation True, fail later...
ret = super(WinInstance, self).update(validate=True,
dry_run=dry_run)
break
except ValueError:
if validate:
raise
tb = self.tester.get_traceback()
self.debug('Failed to update instance. Attempt:{0}/{1}'
.format(x, retries))
if not ret:
failmsg = 'Failed to update instance. Instance may no longer ' \
'be present on system"{0}"'.format(self.id)
self.debug('{0}\n{1}'.format(tb, failmsg))
self.debug('{0} setting fake state to:"{1}"'.format(self.id,
err_state))
state = InstanceState(name=err_state, code=err_code)
self._state = state
ret = self.state
self.set_last_status()
return ret
def update_vm_type_info(self):
self.vmtype_info = self.tester.get_vm_type_from_zone(self.placement,self.instance_type)
return self.vmtype_info
def set_last_status(self,status=None):
self.laststate = self.state
self.laststatetime = time.time()
self.age_at_state = self.tester.get_instance_time_launched(self)
#Also record age from user's perspective, ie when they issued the run instance request (if this is available)
if self.cmdstart:
self.age_from_run_cmd = "{0:.2f}".format(time.time() - self.cmdstart)
else:
self.age_from_run_cmd = None
def print_dict(self, dict=None, printmethod=None):
'''
formats and prints
'''
printmethod = printmethod or self.debug
buf = "\n"
dict = dict or self.__dict__
longest_key = 0
for key in dict:
if len(key) > longest_key:
longest_key = len(key)
for key in dict:
buf += str(key).ljust(longest_key) + " -----> :" + str(dict[key]) + "\n"
printmethod(buf)
def printself(self, title=True, footer=True, printmethod=None, printme=True):
def state_markup(state):
# Markup instance state...
if state == 'running':
return markup(state, markups=[1, 92])
if state == 'terminated':
return markup(state, markups=[1, 97])
if state == 'shutting-down':
return markup(state, markups=[1, 95])
if state == 'pending':
return markup(state, markups=[1, 93])
if state == 'stopped':
return markup(state, markups=[1, 91])
else:
return markup(state, markups=[1, 91])
def multi_line(lines):
# Utility method for creating multi line table entries...
buf = ""
maxlen = 0
for line in lines:
if len(line) + 2 > maxlen:
maxlen = len(line) + 2
for line in lines:
buf += str(line).ljust(maxlen) + "\n"
buf = buf.rstrip()
return (buf, maxlen)
bdmvol = self.root_device_type
if self.bdm_root_vol:
bdmvol += ":" + self.bdm_root_vol.id
reservation_id = None
if self.reservation:
reservation_id = self.reservation.id
owner_id = self.reservation.owner_id
else:
owner_id = "???"
# Create a multi line field for instance's run info
idlist = [markup("{0} {1}".format('ID:', self.id), markups=[1, 4, 94]),
"{0} {1}".format(markup('TYPE:'), self.instance_type),
"{0} {1}".format(markup('RES:'), reservation_id),
"{0}".format(markup("ACCOUNT ID:")), owner_id]
id_string, idlen = multi_line(idlist)
try:
emi = self.tester.get_emi(self.image_id)
emi_name = str(emi.name[0:18]) + ".."
except:
emi_name = ""
# Create a multi line field for the instance's image info
virt_type = 'PV'
if self.virtualization_type == 'hvm':
virt_type = 'HVM'
emi_string, emilen = multi_line(
[markup("{0} {1}".format('EMI:', self.image_id)),
"{0} {1}".format(markup('OS:'), self.platform or 'linux'),
"{0} {1}".format(markup('VIRT:'), virt_type),
"{0}".format(markup('IMAGE NAME:')),
emi_name])
# Create a multi line field for the instance's state info
if self.age:
age = int(self.age)
state_string, state_len = multi_line(["STATE: " + state_markup(self.laststate),
"{0} {1}".format(markup('AGE:'), age),
"{0} {1}".format(markup("ZONE:"), self.placement),
markup('ROOTDEV:'), bdmvol])
# Create the primary table called pt...
netinfo = 'INSTANCE NETWORK INFO:'
idheader = 'INSTANCE ID'
imageheader = 'INSTANCE IMAGE'
stateheader = 'INSTANCE STATE'
pt = PrettyTable([idheader, imageheader, stateheader, netinfo])
pt.align[netinfo] = 'l'
pt.valign[netinfo] = 'm'
pt.align[idheader] = 'l'
pt.align[imageheader] = 'l'
pt.align[stateheader] = 'l'
pt.max_width[idheader] = idlen
pt.max_width[imageheader] = emilen
pt.max_width[stateheader] = state_len
pt.padding_width = 0
pt.hrules = ALL
        # PrettyTable headers do not work with ascii markups, so make a pseudo header
new_header = []
for field in pt._field_names:
new_header.append(markup(field, markups=[1, 4]))
pt.add_row(new_header)
pt.header = False
# Create a subtable 'netpt' to summarize and format the networking portion...
# Set the maxwidth of each column so the tables line up when showing multiple instances
vpc_col = ('VPC', 4)
subnet_col = ('SUBNET', 6)
if self.vpc_id:
vpc_col = ('VPC', 12)
subnet_col = ('SUBNET', 15)
secgrp_col = ('SEC GRPS', 11)
privaddr_col = ('P', 1)
privip_col = ('PRIV IP', 15)
pubip_col = ('PUB IP', 15)
net_cols = [vpc_col, subnet_col, secgrp_col, privaddr_col, privip_col, pubip_col]
# Get the Max width of the main tables network summary column...
# Start with 2 to account for beginning and end column borders
netinfo_width = 2
netinfo_header = []
for col in net_cols:
netinfo_width += col[1] + 1
netinfo_header.append(col[0])
pt.max_width[netinfo] = netinfo_width
netpt = PrettyTable([vpc_col[0], subnet_col[0], secgrp_col[0], privaddr_col[0],
privip_col[0], pubip_col[0]])
netpt.padding_width = 0
netpt.vrules = ALL
for col in net_cols:
netpt.max_width[col[0]] = col[1]
sec_grps = []
for grp in self.groups:
sec_grps.append(str(grp.id))
sec_grps = ",".join(sec_grps)
private_addressing = "N"
if self.private_addressing:
private_addressing = "Y"
netpt.add_row([str(self.vpc_id).center(vpc_col[1]),
str(self.subnet_id).center(subnet_col[1]),
str(sec_grps).center(secgrp_col[1]),
str(private_addressing).center(privaddr_col[1]),
str(self.private_ip_address).center(privip_col[1]),
str(self.ip_address).center(pubip_col[1])])
# To squeeze a potentially long keyname under the network summary table, get the length
# and format this column to allow for wrapping a keyname under the table...
# netbuf = netpt.get_string()
netbuf = "{0}:{1} {2}:{3}\n".format(markup("NODE"),
self.tags.get('euca:node', "???").ljust(16),
markup("KEYPAIR"), self.key_name)
netbuf += "\n".join(netpt.get_string().splitlines()[0:-1])
# Create the row in the main table...
pt.add_row([id_string, emi_string, state_string, netbuf])
if printme:
printmethod = printmethod or self.log.debug
printmethod("\n" + str(pt) + "\n")
return pt
def get_password(self,
private_key_path=None,
key=None,
dir=None,
exten=".pem",
encoded=True,
force_update=False):
'''
:param private_key_path: private key file used to decrypt password
:param key: name of private key
:param dir: Path to private key
:param exten: extension of private key
:param encoded: boolean of whether string returned from server is
Base64 encoded
:return: decrypted password
'''
if self.password is None or force_update:
self.password = self.tester.get_windows_instance_password(
self,
private_key_path=private_key_path,
key=key,
dir=dir,
exten=exten,
encoded=encoded)
return self.password
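    # Sketch of how password retrieval is typically driven (assumes the
    # keypair .pem used to launch the instance is available locally; the path
    # below is an example only):
    #
    #   admin_pass = win_ins.get_password(private_key_path='./testkey.pem')
    #
    # The decrypted value is cached on self.password; pass force_update=True
    # to re-fetch and re-decrypt it from the cloud.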
def reset_ssh_connection(self, timeout=None):
# todo: Remove ssh reference from this method, use something like
# reset_instance_connection, etc..
self.debug('Note ssh not implemented at this time, using winrm for '
'shell access instead...')
return self.reset_winrm_connection(timeout=timeout)
def reset_winrm_connection(self, timeout=None, force=False):
# todo:
timeout = timeout or self.timeout
self.debug('reset_winrm_connection for:'+str(self.id))
self.get_password(force_update=True)
if self.username is None or self.password is None:
#Allow but warn here as this may be a valid negative test
            self.debug('Warning username and/or password were None in '
                       'winrm connection?')
# Create a new winrm interface if this is a new instance or
# an attribute has changed...
try:
#Check the port in order to provide debug if the connection fails
self.test_port_status(port=self.winrm_port, ip=self.ip_address)
except:pass
if force or not (self.winrm and \
self.winrm.hostname == self.ip_address and \
self.winrm.username == self.username and \
self.winrm.password == self.password):
if self.winrm:
self.winrm.close_shell()
self.winrm = winrm_connection.Winrm_Connection(
hostname = self.ip_address,
username = self.username,
password = self.password,
port = self.winrm_port,
protocol = self.winrm_protocol,
debug_method = self.debug,
verbose=True
)
def get_reservation(self):
res = None
try:
res = self.tester.get_reservation_for_instance(self)
except Exception, e:
self.update()
self.debug('Could not get reservation for instance in state:' +
str(self.state) + ", err:" + str(e))
return res
def connect_to_instance(self, wait_for_boot=180, timeout=120):
'''
Attempts to connect to an instance via ssh.
:params wait_for_boot: time to wait, allowing guest to boot before
attempting to poll for ports active status
:params timeout: -optional - time in seconds to wait when polling
port(s) status(s) before failure
'''
self.debug("{0}connect_to_instance starting.\nwait_for_boot:{1} "
"seconds\ntimeout from boot:{2}{3}"
.format(termline, wait_for_boot, timeout, termline))
try:
self.poll_for_port_status_with_boot_delay(waitforboot=wait_for_boot,
timeout=timeout)
except Exception, e:
self.debug('Warning failed to poll port status:' + str(e))
self.debug("Attempting to create connection to instance:" + self.id)
attempts = 0
start = time.time()
elapsed = 0
if self.winrm is not None:
self.winrm.close_shell()
self.winrm = None
while (elapsed < timeout):
attempts += 1
try:
self.update()
self.reset_winrm_connection()
self.debug('Try some sys...')
self.sys("whoami")
except Exception, se:
tb = self.tester.get_traceback()
self.debug('Caught exception attempting to connect '
'winrm shell:\n'+ str(tb) + str(se))
elapsed = int(time.time()-start)
self.debug('connect_to_instance: Attempts:' + str(attempts) +
', elapsed:'+str(elapsed)+'/'+str(timeout))
if self.winrm is not None:
self.winrm.close_shell()
self.winrm = None
time.sleep(5)
pass
else:
break
elapsed = int(time.time()-start)
if self.winrm is None:
self.get_connection_debug()
raise RuntimeError(str(self.id) +
":Failed establishing management connection to "
"instance, elapsed:" + str(elapsed) +
"/" + str(timeout))
self.debug('Connect_to_instance updating attached volumes/disk '
'info for vols: ' + str(self.attached_vols))
if self.brief:
self.update_system_info()
else:
self.update_system_and_disk_info()
self.init_attached_volumes()
self.debug("{0}connect_to_instance completed{1}"
.format(termline, termline))
def get_connection_debug(self):
# Add network debug/diag info here...
# First show arp cache from local machine
# todo Consider getting info from relevant euca components:
# - iptables info
# - route info
# - instance xml
try:
# Show local ARP info...
arp_out = "\nLocal ARP cache for instance ip: " \
+ str(self.ip_address) + "\n"
arp_fd = os.popen('arp ' + str(self.ip_address))
for line in arp_fd:
arp_out += line
self.debug(arp_out)
except Exception as AE:
self.log.debug('Failed to get arp info:' + str(AE))
try:
self.tester.get_console_output(self)
except Exception as CE:
self.log.debug('Failed to get console output:' + str(CE))
def update_root_device_diskdrive(self):
if not self.root_device_type == 'ebs':
return
for disk in self.diskdrives:
if disk.index == 0:
if disk.ebs_volume:
for vol in self.attached_vols:
if vol.id == disk.ebs_volume:
if not disk.md5:
disk.update_md5_info_from_ebs()
return
volume = self.tester.get_volume(volume_id=disk.ebs_volume)
if not isinstance(volume, EuVolume):
volume = EuVolume.make_euvol_from_vol(volume, self.tester)
volume.guestdev = disk.deviceid
volume.md5len = 1024
volume.md5 = self.get_dev_md5(disk.cygwin_scsi_drive, volume.md5len)
if not self.get_volume_from_attached_list_by_id(volume.id):
self.debug("{0} updating with root vol:{1}{2}"
.format(termline,
volume.id,
termline))
self.attached_vols.append(volume)
disk.update_md5_info_from_ebs()
return
def get_volume_from_attached_list_by_id(self, volume_id):
for vol in self.attached_vols:
if vol.id == volume_id:
return vol
def update_system_and_disk_info(self):
try:
self.update_system_info()
except Exception, sie:
tb = self.tester.get_traceback()
self.debug(str(tb) + "\nError updating system info:" + str(sie))
try:
self.update_disk_info()
self.update_root_device_diskdrive()
self.print_partition_summary()
self.print_logicaldisk_summary()
self.print_diskdrive_summary()
except Exception, ude:
tb = self.tester.get_traceback()
self.debug(str(tb) + "\nError updating disk info:" + str(ude))
def has_sudo(self):
return False
def debug(self,msg,traceback=1,method=None,frame=False):
'''
Used to print debug, defaults to print() but over ridden by self.debugmethod if not None
msg - mandatory -string, message to be printed
'''
if ( self.verbose is True ):
self.debugmethod(msg)
def sys(self, cmd, verbose=True, code=None, include_stderr=False, enable_debug=False, timeout=None):
'''
Issues a command against the ssh connection to this instance
Returns a list of the lines from stdout+stderr as a result of the command
cmd - mandatory - string, the command to be executed
verbose - optional - boolean flag to enable debug
timeout - optional - command timeout in seconds
'''
if (self.winrm is None):
raise Exception("WinInstance winrm connection is None")
return self.winrm.sys(command=cmd, include_stderr=include_stderr, timeout=timeout, verbose=verbose, code=code)
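    # Example (hedged) of issuing commands over the WinRM channel once
    # connect_to_instance() has succeeded:
    #
    #   output_lines = win_ins.sys('ipconfig /all', code=0)
    #   win_ins.sys('dir C:\\', code=0, timeout=30)
    #
    # code=0 asserts a zero remote exit status, the same convention the rest
    # of this class relies on (see assertFilePresent, rescan_disks, etc.).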
def test_rdp_port_status(self, ip=None, port=3389, timeout=10):
'''
Description: Attempts to test that the host is accepting tcp connections to the RDP port
'''
ip = ip or self.ip_address
return self.test_port_status(ip=ip, port=port, timeout=timeout)
def test_port_status(self, port, ip=None, timeout=5, tcp=True, verbose=True):
ip = ip or self.ip_address
return self.tester.test_port_status(ip, int(port), timeout=timeout, tcp=tcp, verbose=verbose)
def poll_for_port_status_with_boot_delay(self, interval=15, ports=[], socktimeout=5,timeout=180, waitforboot=300):
'''
        Make sure some time has passed since launch before polling ports and running the guest-side tests...
'''
launch_seconds = self.tester.get_instance_time_launched(self)
sleeptime = 0 if launch_seconds > waitforboot else (waitforboot - launch_seconds)
self.debug("Instance was launched "+str(launch_seconds)+" seconds ago, waiting:"+str(sleeptime)+" for instance to boot")
time.sleep(sleeptime)
return self.poll_for_ports_status(ports,
ip=self.ip_address,
interval=interval,
socktimeout=socktimeout,
timeout=timeout)
def wait_for_time_since_launch(self,waitforboot=420):
'''
When using larger instance store images, this can allow for the delays caused by image size/transfer.
'''
boot_seconds = self.tester.get_instance_time_launched(self)
sleeptime = 0 if boot_seconds > waitforboot else (waitforboot - boot_seconds)
self.debug("Instance was launched "+str(boot_seconds)+"/"+str(waitforboot) + " seconds ago, waiting:"+str(sleeptime)+" for instance to boot")
start = time.time()
elapsed = 0
print "Waiting for Windows to fully boot:",
while elapsed < sleeptime:
print "Waiting for Windows to fully boot:"+str(sleeptime-elapsed),
time.sleep(5)
elapsed=int(time.time()-start)
self.debug("test_wait_for_instance_boot: done waiting, instance up for "+str(waitforboot)+" seconds")
def poll_for_ports_status(self, ports=[], ip=None, interval=10, socktimeout=5, timeout=180):
ip = ip or self.ip_address
ports = ports or [self.rdp_port, self.winrm_port]
start = time.time()
elapsed = 0
attempt = 0
while elapsed < timeout:
attempt +=1
self.debug('test_poll_for_ports_status, ports: ' + ",".join(str(x) for x in ports) + ", attempt:" + str(attempt))
for port in ports:
if elapsed < timeout:
try:
self.debug('Trying ip:port:' + str(self.ip_address) + ':' + str(port) + ", elapsed:" + str(elapsed))
self.test_port_status(ip=ip, port=int(port), timeout=5)
return
except socket.error, se:
self.debug('test_ports_status failed socket error:'+str(se[0]))
#handle specific errors here, for now just for debug...
ecode=se[0]
if ecode == socket.errno.ETIMEDOUT or ecode == "timed out":
self.debug("test_poll_for_ports_status: Connect "+str(ip)+":" +str(port)+ " timed out retrying. Time remaining("+str(timeout-elapsed)+")")
except Exception, e:
tb = self.tester.get_traceback()
self.debug(tb)
self.debug('test_poll_for_ports_status:'+str(ip)+':'+str(port)+' FAILED after attempts:'+str(attempt)+', elapsed:'+str(elapsed)+', err:'+str(e) )
elapsed = int(time.time() -start)
if elapsed < timeout:
time.sleep(interval)
raise Exception('test_poll_for_ports_status:'+str(ip)+':'+str(port)+' FAILED after attempts:'+str(attempt)+', elapsed:'+str(elapsed)+' seconds')
def init_attached_volumes(self):
        self.debug('init_attached_volumes... attached_vols: ' + str(self.attached_vols))
syncdict = self.sync_attached_volumes_with_clouds_view()
if syncdict['errors']:
errmsg = 'Errors syncing guest volumes with cloud at init:' + ",".join(str(e) for e in syncdict['errors'])
errmsg += 'Failed to sync guest volumes with cloud at init:' + ",".join(str(x) for x in syncdict['badvols'])
self.debug(errmsg)
time.sleep(60)
raise Exception(errmsg)
def sync_attached_volumes_with_clouds_view(self):
self.debug(termline +
"Starting sync_attached_volumes_with_clouds_view"
+ termline )
badvols = []
errors = []
ret = {'errors':errors, 'badvols':badvols}
#Get a list of volumes that the cloud believes are currently attached
cloud_volumes = self.tester.get_volumes(attached_instance=self.id)
        #Make a copy of a list of volumes this instance thinks are currently attached
locallist = copy.copy(self.attached_vols)
self.debug('Cloud list:' + str(cloud_volumes))
self.debug('Local list:' + str(locallist))
for vol in cloud_volumes:
for local_vol in locallist:
if local_vol.id == vol.id:
locallist.remove(local_vol)
if not isinstance(vol, EuVolume):
vol = EuVolume.make_euvol_from_vol(vol, self.tester)
try:
self.update_volume_guest_info(volume=vol)
except Exception, e:
badvols.append(vol)
errors.append(vol.id + ' Error syncing with cloud:' + str (e) + '. \n')
for local_vol in locallist:
badvols.append(local_vol)
errors.append(local_vol.id + ' Error unattached volume found in guests attach list. \n')
self.debug(termline +
"Finishing sync_attached_volumes_with_clouds_view"
+ termline )
return ret
def update_system_info(self):
'''
Gather basic system info for this windows instance object and store in self.system_info
Example:
# print wins.system_info.OS_NAME
'Microsoft Windows 7 Professional'
'''
currentkey = None
swap = re.compile('([!@#$%^&*. ])')
info = self.sys('systeminfo')
if self.system_info:
system_info = self.system_info
else:
system_info = type('obj', (object,),{})
if info:
for line in info:
if re.match("^\w.+:", line):
linevals = line.split(':')
currentkey = linevals.pop(0)
#clean up the key string...
currentkey = re.sub('[()]', '', currentkey)
currentkey = re.sub(swap, '_', currentkey)
currentkey = currentkey.lower()
value = ":".join(str(x) for x in linevals) or ""
setattr(system_info, currentkey, str(value).strip())
elif currentkey:
#this is an additional value to our previous key
prev_value = getattr(system_info, currentkey)
if not isinstance(prev_value, types.ListType):
updated_value = [prev_value]
updated_value.append(str(line).strip())
setattr(system_info, currentkey, updated_value)
self.system_info = system_info
def get_cygwin_path(self, prefix="c:\\"):
if self.cygwin_path:
return self.cygwin_path
path = None
self.debug('Trying to find cygwin path...')
out = self.sys('dir ' + str(prefix) + ' /B')
for line in out:
if re.search('cygwin', line):
path = str(prefix) + str(line.strip()) + "\\"
self.cygwin_path = path
break
return path
def cygwin_curl(self, url, connect_timeout=30):
cygpath = self.get_cygwin_path()
if cygpath is None:
raise Exception('Could not find cygwin path on guest for curl?')
curl = cygpath + 'bin\curl.exe --connect-timeout ' + str(connect_timeout) + ' '
return self.sys(curl + str(url), code=0, timeout=connect_timeout)
def get_metadata(self, element_path='', prefix='latest/meta-data/', use_cygwin=True):
"""Return the lines of metadata from the element path provided"""
### If i can reach the metadata service ip use it to get metadata otherwise try the clc directly
try:
if use_cygwin:
return self.cygwin_curl("http://169.254.169.254/"+str(prefix)+str(element_path), connect_timeout=10)
else:
return self.sys("curl --connect-timeout 10 http://169.254.169.254/"+str(prefix)+str(element_path), code=0)
except:
if use_cygwin:
return self.cygwin_curl("http://" + self.tester.get_ec2_ip() + ":8773/"+str(prefix) + str(element_path))
else:
return self.sys("curl http://" + self.tester.get_ec2_ip() + ":8773/"+str(prefix) + str(element_path), code=0)
def print_diskdrive_summary(self,printmethod=None):
printmethod = printmethod or self.debug
if not self.diskdrives:
printmethod('No disk drives to print?')
return
disklist = copy.copy(self.diskdrives)
buf = (disklist.pop()).get_summary()
for disk in disklist:
buf += disk.get_summary(printheader=False)
printmethod(buf)
def print_partition_summary(self,printmethod=None):
printmethod = printmethod or self.debug
if not self.disk_partitions:
printmethod('No disk partitions to print?')
return
partlist = copy.copy(self.disk_partitions)
buf = (partlist.pop()).get_summary()
for part in partlist:
buf += part.get_summary(printheader=False)
printmethod(buf)
def print_logicaldisk_summary(self,printmethod=None):
printmethod = printmethod or self.debug
if not self.logicaldisks:
printmethod('No disk disk_partitions to print?')
return
disklist = copy.copy(self.logicaldisks)
buf = (disklist.pop()).get_summary()
for disk in disklist:
buf += disk.get_summary(printheader=False)
printmethod(buf)
def update_disk_info(self , forceupdate=False):
if self.diskdrives:
if not forceupdate and (time.time() - self.diskdrives[0].last_updated) <= self.disk_update_interval:
return
self.debug('Fetching updated disk info...')
self.diskdrives = []
self.disk_partitions = []
self.logicaldisks = []
self.diskdrives = self.get_updated_diskdrive_info()
self.disk_partitions = self.get_updated_partition_info()
self.logicaldisks = self.get_updated_logicaldisk_info()
self.associate_diskdrives_to_partitions()
self.associate_partitions_to_logicaldrives()
def get_updated_diskdrive_info(self):
'''
Populate self.diskdrives with WinInstanceDisk objects containing info parsed from wmic command.
        Since wmic doesn't seem to use delimiters this method attempts to derive the length of each column/header
        in order to parse out the info per disk.
        :param force: boolean. Will force an update, otherwise this method will wait a minimum of
self.disk_update_interval before updating again.
'''
#cmd = "wmic diskdrive get /format:textvaluelist.xsl"
self.debug('Getting updated diskdrive info...')
cmd = "wmic diskdrive list full"
diskdrives = []
for disk_dict in self.get_parsed_wmic_command_output(cmd):
try:
diskdrives.append(WinInstanceDiskDrive(self,disk_dict))
except Exception, e:
tb = self.tester.get_traceback()
self.debug('Error attempting to create WinInstanceDiskDrive from following dict:')
self.print_dict(dict=disk_dict)
raise Exception(str(tb) + "\n Error attempting to create WinInstanceDiskDrive:" + str(e))
self.debug('get_updated_diskdrive_info, Done')
return diskdrives
def get_updated_partition_info(self):
'''
Populate self.diskdrives with WinInstanceDisk objects containing info parsed from wmic command.
        Since wmic doesn't seem to use delimiters this method attempts to derive the length of each column/header
        in order to parse out the info per disk.
        :param force: boolean. Will force an update, otherwise this method will wait a minimum of
self.disk_update_interval before updating again.
'''
        self.debug('Getting updated partition info...')
cmd = "wmic partition list brief /format:textvaluelist.xsl"
disk_partitions = []
for part_dict in self.get_parsed_wmic_command_output(cmd):
try:
disk_partitions.append(WinInstanceDiskPartition(self,part_dict))
except Exception, e:
tb = self.tester.get_traceback()
self.debug('Error attempting to create WinInstanceDiskPartition from following dict:')
self.print_dict(dict=part_dict)
raise Exception(str(tb) + "\n Error attempting to create WinInstanceDiskPartition:" + str(e))
self.debug('get_updated_partition_info, Done')
return disk_partitions
def get_updated_logicaldisk_info(self):
self.debug('Getting updated logicaldisk info...')
cmd ='wmic logicaldisk list /format:textvaluelist.xsl'
logicaldisks = []
for part_dict in self.get_parsed_wmic_command_output(cmd):
try:
logicaldisks.append(WinInstanceLogicalDisk(self,part_dict))
except Exception, e:
tb = self.tester.get_traceback()
self.debug('Error attempting to create WinInstanceLogicalDisk from following dict:')
self.print_dict(dict=part_dict)
raise Exception(str(tb) + "\n Error attempting to create WinInstanceLogicalDisk:" + str(e))
self.debug('get_updated_logicaldisk_info, Done')
return logicaldisks
def associate_diskdrives_to_partitions(self):
for disk in self.diskdrives:
disk.disk_partitions = []
for part in self.disk_partitions:
if part.diskindex == disk.index:
disk.disk_partitions.append(part)
def associate_partitions_to_logicaldrives(self, verbose=False):
for part in self.disk_partitions:
drive_id = None
part.logicaldisks = []
cmd = 'wmic partition where (DeviceID="Disk #' + str(part.diskindex) + \
', Partition #' + str(part.index) + '") assoc /assocclass:Win32_LogicalDiskToPartition'
output = self.sys(cmd, verbose=verbose, code=0)
for line in output:
if re.search('Win32_LogicalDisk.DeviceID',line):
try:
drive_id = str(line.split()[0].split('=')[1]).replace('"','').strip()
except Exception, e:
tb = self.tester.get_traceback()
self.debug(str(tb)+ "\nError getting logical drive info:" + str(e))
if drive_id:
for disk in self.logicaldisks:
if re.match(disk.deviceid, drive_id):
part.logicaldisks.append(disk)
disk.partition = part
break
def get_cygwin_scsi_dev_for_windows_drive(self, windisk=None, drive_id=""):
'''
param windisk: WinInstanceDiskType object. windisk.deviceid is used to look up the associated cygwin device
param drive_id: String representing the deviceid. Can be used instead of passing a WinInstanceDiskType
'''
windisk_classname = ""
update = False
retries = 2
if windisk:
drive_id = windisk.deviceid
windisk_classname = str(windisk.__class__).split('.').pop()
            #If this is a disk drive allow a retry which sets the force update flag, otherwise don't force and retry
if isinstance(windisk,WinInstanceDiskDrive):
update = True
if not drive_id:
raise Exception('WinInstanceDiskType or string w/ device id not provided')
self.debug('Attempting to get cygwin dev for windows drive:' + str(drive_id))
self.update_cygwin_windows_device_map()
for retry in xrange(0, retries):
for device in self.cygwin_dev_map:
if re.search("dev", device):
win_dev = str(self.cygwin_dev_map[device].split('\\').pop()).strip().upper()
formated_drive_id = str(drive_id.split('\\').pop()).strip().upper()
#self.debug('Attempt to match:"' + str(win_dev) + '" with "' + str(formated_drive_id) + '"')
if formated_drive_id == win_dev:
#self.debug('Found match')
return device
if update:
self.update_cygwin_windows_device_map(force_update=True)
else:
break
self.debug('WARNING: Could not find cygwin device for type:"' + str(windisk_classname) + '", deviceid:' + str(drive_id))
return ""
def get_parsed_wmic_command_output(self, wmic_command, verbose=False):
'''
Attempts to parse a wmic command using "/format:textvaluelist.xsl" for key value format into a list of
dicts.
:param wmic_command: string representing the remote wmic command to be run
:returns : list of dict(s) created from the parsed key value output of the command.
Note keys will be in lowercase
'''
self.debug('get_parsed_wmic_command_output, command:' + str(wmic_command))
ret_dicts = []
output = self.sys(wmic_command, verbose=verbose, code=0)
newdict = {}
for line in output:
if not re.match(r"^\w",line):
#If there is a blank line(s) then the previous object is complete
if newdict:
ret_dicts.append(newdict)
newdict = {}
else:
splitline = line.split('=')
key = str(splitline.pop(0)).lower()
if len(splitline) > 1:
value = "=".join(str(x) for x in splitline)
else:
if splitline:
value = splitline.pop()
else:
value = ''
newdict[key] = value
return ret_dicts
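    # Illustrative input/output for the parser above (the wmic key/value
    # excerpt is a hypothetical example, not captured output):
    #
    #   DeviceID=\\.\PHYSICALDRIVE0
    #   Index=0
    #   <blank line terminates the object>
    #
    # would yield: [{'deviceid': '\\.\PHYSICALDRIVE0', 'index': '0'}]
    # Keys are lower-cased; values that themselves contain '=' are re-joined intact.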
def get_logicaldisk_ids(self, forceupdate=False):
'''
:param forceupdate: boolean, to force an update of logical disks detected on the guest. Otherwise updates are
throttled to self.disk_update_interval
:returns list of device ids (ie: [A:,C:,D:]
'''
ret = []
self.update_disk_info(forceupdate=forceupdate)
for disk in self.logicaldisks:
ret.append(disk.deviceid)
return ret
def get_diskdrive_ids(self, drivelist=None, forceupdate=False):
'''
        :param forceupdate: boolean, to force an update of disk drives detected on the guest. Otherwise updates are
throttled to self.disk_update_interval
:returns list of device ids ie: ['\\.\PHYSICALDRIVE0','\\.\PHYSICALDRIVE1,'\\.\PHYSICALDRIVE2']
'''
ret = []
if not drivelist:
self.update_disk_info(forceupdate=forceupdate)
drivelist = self.diskdrives
for disk in drivelist:
ret.append(disk.deviceid)
return ret
def get_diskdrive_by_deviceid(self, deviceid):
for disk in self.diskdrives:
if disk.deviceid == deviceid:
return disk
def found(self, command, regex):
""" Returns a Boolean of whether the result of the command contains the regex"""
result = self.sys(command)
for line in result:
found = re.search(regex,line)
if found:
return True
return False
def assertFilePresent(self,filepath):
'''
Raise exception if file not found at filepath on remote guest. dirs '\' need to be represented as '\\'
'''
self.sys('dir ' + str(filepath), code=0)
def assertCygwinFilePresent(self, filepath):
self.cygwin_cmd('ls ' + str(filepath), code=0)
def attach_volume(self, volume, dev=None, timeout=180, overwrite=False):
'''
        Method used to attach a volume to an instance and track its use by that instance
required - euvolume - the euvolume object being attached
required - tester - the eucaops/nephoria object/connection for this cloud
optional - dev - string to specify the dev path to 'request' when attaching the volume to
optional - timeout - integer- time allowed before failing
optional - overwrite - flag to indicate whether to overwrite head data of a non-zero filled volume upon attach for md5
'''
if not isinstance(volume, EuVolume):
volume = EuVolume.make_euvol_from_vol(volume)
return self.attach_euvolume(volume, dev=dev, timeout=timeout, overwrite=overwrite)
def attach_euvolume(self, euvolume, dev=None, timeout=180, overwrite=False):
'''
        Method used to attach a volume to an instance and track its use by that instance
required - euvolume - the euvolume object being attached
required - tester - the eucaops/nephoria object/connection for this cloud
optional - dev - string to specify the dev path to 'request' when attaching the volume to
optional - timeout - integer- time allowed before failing
optional - overwrite - flag to indicate whether to overwrite head data of a non-zero filled volume upon attach for md5
'''
if not isinstance(euvolume, EuVolume):
raise Exception("Volume needs to be of type euvolume, try attach_volume() instead?")
self.debug('Disk drive summary before attach attempt:')
self.print_logicaldisk_summary()
self.print_diskdrive_summary()
self.debug("Attempting to attach volume:"+str(euvolume.id)+" to instance:" +str(self.id)+" to dev:"+ str(dev))
#grab a snapshot of our devices before attach for comparison purposes
diskdrive_list_before = self.get_diskdrive_ids()
use_serial = False
for disk in self.diskdrives:
if re.search('vol-', disk.serialnumber):
use_serial = True
break
attached_dev = None
start= time.time()
elapsed = 0
if dev is None:
#update our block device prefix
dev = self.get_free_scsi_dev()
if (self.tester.attach_volume(self, euvolume, dev, pause=10,timeout=timeout)):
if euvolume.attach_data.device != dev:
raise Exception('Attached device:' + str(euvolume.attach_data.device) +
", does not equal requested dev:" + str(dev))
#Find device this volume is using on guest...
euvolume.guestdev = None
while (not euvolume.guestdev and elapsed < timeout):
#Since all hypervisors may not support serial number info, check for an incremental diff in the
# list of physical diskdrives on this guest.
self.debug("Checking for volume attachment on guest, elapsed time("+str(elapsed)+")")
diskdrive_list_after = self.get_diskdrive_ids(forceupdate=True)
self.print_logicaldisk_summary()
self.print_diskdrive_summary()
self.debug("dev_list_after:"+" ".join(diskdrive_list_after))
diff =list( set(diskdrive_list_after) - set(diskdrive_list_before) )
if len(diff) > 0:
self.debug('Got Diff in drives:' + str(diff))
for disk in self.diskdrives:
if re.search('vol-', disk.serialnumber):
use_serial = True
if euvolume.id == disk.ebs_volume:
attached_dev = disk.deviceid
euvolume.guestdev = attached_dev
self.debug("Volume:"+str(euvolume.id)+" guest device by serialnumber:"+str(euvolume.guestdev))
break
if not use_serial:
attached_dev = str(diff[0])
euvolume.guestdev = attached_dev.strip()
self.debug("Volume:"+str(euvolume.id)+"found guest device by diff:"+str(euvolume.guestdev))
if attached_dev:
euvolume.guestdev = attached_dev
attached_vol = self.get_volume_from_attached_list_by_id(euvolume.id)
self.attached_vols.append(euvolume)
self.debug(euvolume.id+": Requested dev:"+str(euvolume.attach_data.device)+", attached to guest device:"+str(euvolume.guestdev))
break
elapsed = int(time.time() - start)
time.sleep(2)
if not euvolume.guestdev or not attached_dev:
raise Exception('Device not found on guest after '+str(elapsed)+' seconds')
else:
self.debug('Failed to attach volume:'+str(euvolume.id)+' to instance:'+self.id)
raise Exception('Failed to attach volume:'+str(euvolume.id)+' to instance:'+self.id)
if (attached_dev is None):
self.debug("List after\n"+" ".join(diskdrive_list_after))
raise Exception('Volume:'+str(euvolume.id)+' attached, but not found on guest'+str(self.id)+' after '+str(elapsed)+' seconds?')
#Store the md5sum of this diskdrive in the euvolume...
disk = self.get_diskdrive_by_deviceid(attached_dev)
euvolume.md5len = 1024
euvolume.md5 = self.get_dev_md5(devpath=disk.cygwin_scsi_drive, length=euvolume.md5len)
#update the volume and instances information about the attachment...
self.update_volume_guest_info(volume=euvolume,md5=euvolume.md5, md5len=euvolume.md5len, guestdev=euvolume.guestdev)
self.debug('Success attaching volume:'+str(euvolume.id)+' to instance:'+self.id +
', cloud dev:'+str(euvolume.attach_data.device)+', attached dev:'+str(attached_dev) +
", elapsed:" + str(elapsed))
try:
self.rescan_disks(timeout=20)
except Exception, e:
self.debug('Warning. Error while trying to rescan disks after attaching volume. Error: ' + str(e))
euvolume.printself(printmethod=self.debug)
disk.print_self()
return attached_dev
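    # Hedged end-to-end sketch of the attach flow (the zone name and the
    # tester.create_volume call are illustrative assumptions):
    #
    #   vol = EuVolume.make_euvol_from_vol(
    #             tester.create_volume(zone='PARTI00', size=1), tester)
    #   guest_dev = win_ins.attach_euvolume(vol, timeout=180)
    #
    # On success the EuVolume carries guestdev/md5/md5len so later detach and
    # sync checks can re-identify the disk on the Windows guest.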
def get_guest_dev_for_volume(self, volume, forceupdate=False):
use_serial = False
self.update_disk_info(forceupdate=forceupdate)
for disk in self.diskdrives:
if re.search('vol-', disk.serialnumber):
use_serial = True
break
if not isinstance(volume, EuVolume):
volume = EuVolume.make_euvol_from_vol(volume=volume, tester=self.tester)
def get_disk_drive_by_id(self, deviceid):
self.update_system_info()
for disk in self.diskdrives:
if disk.deviceid == deviceid:
return disk
return None
def get_guestdevs_inuse_by_vols(self):
retlist = []
for vol in self.attached_vols:
retlist.append(vol.guestdev)
return retlist
def get_free_scsi_dev(self, prefix=None,maxdevs=16):
'''
The volume attach command requires a cloud level device name that is not currently associated with a volume
        Note: This is the device name from the cloud's perspective, not necessarily the guest's
        This method attempts to find a free device name to use in the command
        optional - prefix - string, pre-pended to the device search string
        optional - maxdevs - number used to specify the max device names to iterate over. Some virt envs have a limit of 16 devs.
'''
d='e'
in_use_cloud = ""
in_use_guest = ""
dev = None
if prefix is None:
prefix = self.block_device_prefix
cloudlist=self.tester.get_volumes(attached_instance=self.id)
for x in xrange(0,maxdevs):
inuse=False
#double up the letter identifier to avoid exceeding z
if d == 'z':
prefix= prefix+'e'
dev = "/dev/"+prefix+str(d)
for avol in self.attached_vols:
if avol.attach_data.device == dev:
inuse = True
in_use_guest += str(avol.id)+", "
continue
#Check to see if the cloud has a conflict with this device name...
for vol in cloudlist:
vol.update()
if (vol.attach_data is not None) and (vol.attach_data.device == dev):
inuse = True
in_use_cloud += str(vol.id)+", "
continue
if inuse is False:
self.debug("Instance:"+str(self.id)+" returning available cloud scsi dev:"+str(dev))
return str(dev)
else:
d = chr(ord('e') + x) #increment the letter we append to the device string prefix
dev = None
if dev is None:
raise Exception("Could not find a free scsi dev on instance:"+self.id+", maxdevs:"+str(maxdevs)+"\nCloud_devs:"+str(in_use_cloud)+"\nGuest_devs:"+str(in_use_guest))
def detach_euvolume(self, euvolume, waitfordev=True, timeout=180):
'''
        Method used to detach a volume from an instance and track its use by that instance
        required - euvolume - the euvolume object being detached
        waitfordev - boolean to indicate whether or not to poll guest instance for local device to be removed
optional - timeout - integer seconds to wait before timing out waiting for the volume to detach
'''
start = time.time()
elapsed = 0
found = True
for vol in self.attached_vols:
if vol.id == euvolume.id:
dev = vol.guestdev
if (self.tester.detach_volume(euvolume,timeout=timeout)):
if waitfordev:
self.debug("Cloud has detached" + str(vol.id) + ", Wait for device:"+str(dev)+" to be removed on guest...")
while (elapsed < timeout):
diskdrive_ids = []
try:
disk_drives = self.get_updated_diskdrive_info()
for disk in disk_drives:
if dev == disk.deviceid:
found = True
break
found = False
self.debug('Diskdrive associated with ' + str(vol.id) + ' has been removed from guest.')
#if device is not present remove it
self.attached_vols.remove(vol)
except Exception, de:
self.debug('Warning, error getting diskdrive id during detach:' + str(de))
if not found:
try:
self.rescan_disks(timeout=20)
except Exception, re:
self.debug('Warning: Error while trying to rescan disks after detaching volume:' + str(re))
try:
self.update_disk_info()
except Exception, ue:
self.debug('Warning: Error while trying to update disk info:' + str(ue))
try:
self.print_diskdrive_summary()
except: pass
self.debug('Volume:' + str(vol.id) + ', detached, and no longer found on guest at:' + str(dev))
vol.set_volume_detached_tags()
return True
time.sleep(10)
elapsed = int(time.time()-start)
diskdrive_ids = self.get_diskdrive_ids(drivelist=disk_drives)
self.debug('Current disk drives on guest:' + ",".join(str(x) for x in diskdrive_ids))
self.debug("Waiting for device '"+str(dev)+"' on guest to be removed.Elapsed:"+str(elapsed))
else:
self.attached_vols.remove(vol)
vol.set_volume_detached_tags()
return True
else:
raise Exception("Volume("+str(vol.id)+") failed to detach from device("+str(dev)+") on ("+str(self.id)+")")
raise Exception("Detach Volume("+str(euvolume.id)+") not found on ("+str(self.id)+")")
return False
def check_hostname(self):
if not hasattr(self, 'system_info'):
self.update_system_info()
if hasattr(self, 'system_info') and hasattr(self.system_info, 'host_name'):
if self.id.upper() == self.system_info.host_name.upper():
                self.debug('Hostname:' + str(self.system_info.host_name) + ", instance.id:" + str(self.id))
else:
raise Exception('check_hostname failed: hostname:' + str(self.system_info.host_name).upper() +
" != id:" + str(self.id).upper())
else:
raise Exception('check_hostname failed: System_info.hostname not populated')
def get_process_list_brief(self):
'''
        Returns a list of dicts representing the processes running on the remote guest. Each process is represented by a
        dict containing information about the process.
'''
cmd = "wmic process list brief /format:textvaluelist.xsl"
return self.get_parsed_wmic_command_output(cmd)
def get_process_list_full(self):
'''
        Returns a list of dicts representing the processes running on the remote guest. Each process is represented by a
        dict containing information about the process.
'''
cmd = "wmic process list full"
return self.get_parsed_wmic_command_output(cmd)
def get_process_by_name(self,process_name):
'''
        Attempts to lookup a process on the remote guest.
        param process_name: string. The name of the process to get info
returns a dict representing the information returned from the remote guest
'''
cmd = 'wmic process ' + str(process_name) + ' get /format:textvaluelist.xsl'
result = self.get_parsed_wmic_command_output(cmd)
if result:
return result[0]
def get_services_list_brief(self):
'''
Returns a list of dicts representing the services from the remote guest. Each service is represented by a
dict containing information about the service.
'''
cmd = 'wmic service list brief /format:textvaluelist.xsl'
return self.get_parsed_wmic_command_output(cmd)
def get_services_list_full(self):
'''
Returns a list of dicts representing the services from the remote guest. Each service is represented by a
dict containing information about the service.
'''
cmd = 'wmic service list full'
return self.get_parsed_wmic_command_output(cmd)
def get_service_by_name(self,service_name):
'''
Attempts to lookup a service on the remote guest.
param service_name: string. The name of the service to get info
returns a dict representing the information returned from the remote guest
'''
cmd = 'wmic service ' + str(service_name) + ' get /format:textvaluelist.xsl'
result = self.get_parsed_wmic_command_output(cmd)
if result:
return result[0]
def get_memtotal_in_mb(self):
return long(self.system_info.total_physical_memory.split()[0].replace(',',''))
def get_memtotal_in_gb(self):
return long(self.get_memtotal_in_mb()/1024)
def check_ram_against_vmtype(self, pad=32):
total_ram = self.get_memtotal_in_mb()
self.debug('Ram check: vm_ram:' + str(self.vmtype_info.ram)
+ "mb vs memtotal:" + str(total_ram)
+ "mb. Diff:" + str(self.vmtype_info.ram - total_ram)
+ "mb, pad:" + str(pad) + "mb")
if not ((self.vmtype_info.ram - total_ram) <= pad):
raise Exception('Ram check failed. vm_ram:' + str(self.vmtype_info.ram)
+ " vs memtotal:" + str(total_ram) + ". Diff is greater than allowed pad:" + str(pad) + "mb")
else:
self.debug('check_ram_against_vmtype, passed')
def check_ephemeral_against_vmtype(self):
gb = self.gigabyte
size = self.vmtype_info.disk
ephemeral_dev = self.get_ephemeral_dev()
block_size = self.get_blockdev_size_in_bytes(ephemeral_dev)
gbs = block_size / gb
self.debug('Ephemeral check: ephem_dev:'
+ str(ephemeral_dev)
+ ", bytes:"
+ str(block_size)
+ ", gbs:"
+ str(gbs)
+ ", vmtype size:"
+ str(size))
if gbs != size:
raise Exception('Ephemeral check failed. ' + str(ephemeral_dev) + ' Blocksize: '
+ str(gbs) + "gb (" + str(block_size) + "bytes)"
+ ' != vmtype size:' +str(size) + "gb")
else:
self.debug('check_ephemeral_against_vmtype, passed')
return ephemeral_dev
def get_ephemeral_dev(self):
"""
Attempts to find the block device path on this instance
:return: string representing path to ephemeral block device
"""
ephem_name = None
dev_prefixs = ['s','v','xd','xvd']
if not self.root_device_type == 'ebs':
try:
self.assertFilePresent('/dev/' + str(self.rootfs_device))
return self.rootfs_device
except:
ephem_name = 'da'
else:
ephem_name = 'db'
devs = self.get_dev_dir()
for prefix in dev_prefixs:
if str(prefix+ephem_name) in devs:
return str('/dev/'+prefix+ephem_name)
raise Exception('Could not find ephemeral device?')
def cygwin_cmd(self, cmd, timeout=120, verbose=False, code=None):
cmd = self.get_cygwin_path() + '\\bin\\bash.exe --login -c "' + str(cmd) + '"'
return self.sys(cmd,timeout=timeout, verbose=verbose, code=code)
def get_dev_md5(self, devpath, length, timeout=60):
self.assertCygwinFilePresent(devpath)
if length == 0:
md5 = str(self.cygwin_cmd('md5sum ' + devpath, timeout=timeout)[0]).split(' ')[0].strip()
else:
md5 = str(self.cygwin_cmd("head -c " + str(length) + " " + str(devpath) + " | md5sum")[0]).split(' ')[0].strip()
return md5
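    # Example (hedged) of the md5 fingerprinting used to correlate EBS volumes
    # with Windows disk drives; the cygwin device path is illustrative:
    #
    #   md5 = win_ins.get_dev_md5('/dev/sdb', length=1024)
    #
    # length=0 hashes the whole device, otherwise only the first N bytes are
    # hashed (1024 is the default used elsewhere in this class).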
def update_cygwin_windows_device_map(self, prefix='/dev/*', force_update=False):
cygwin_dev_map = {}
if not force_update:
if self.cygwin_dev_map:
if time.time() - self.cygwin_dev_map['last_updated'] <= 30:
cygwin_dev_map = self.cygwin_dev_map
if not cygwin_dev_map:
self.debug('Updating cygwin to windows device mapping...')
output = self.cygwin_cmd("for DEV in " + prefix + " ; do printf $DEV=$(cygpath -w $DEV); echo ''; done",
verbose=False, code=0)
for line in output:
if re.match(prefix, line):
split = line.split('=')
key = split.pop(0)
if split:
value = split.pop()
else:
value = ''
cygwin_dev_map[key]=value
cygwin_dev_map['last_updated'] = time.time()
self.cygwin_dev_map = cygwin_dev_map
self.debug('Updated cygwin to windows device mapping')
return cygwin_dev_map
def rescan_disks(self, timeout=20):
'''
Attempts to rescan disks on the guest. This may help expedite updates/discovery when attaching/detaching
volumes to the guest. This has also been found to hang post device removal so is used with a 20 second
command timeout as the default.
param timeout: integer. Seconds to wait on command before failing
'''
scriptname = 'eutester_diskpart_script'
self.sys('(echo rescan && echo list disk ) > ' + str(scriptname), code=0)
self.sys('diskpart /s ' + str(scriptname), code=0, timeout=timeout)
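    # The rescan works by writing a two-line diskpart script on the guest and
    # executing it; roughly equivalent to running this by hand:
    #
    #   (echo rescan && echo list disk) > eutester_diskpart_script
    #   diskpart /s eutester_diskpart_script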
def get_diskdrive_for_volume(self, volume):
if not self.is_volume_attached_to_this_instance(volume):
return None
ret_disk = None
for disk in self.diskdrives:
disk.update_ebs_info()
if disk.ebs_volume == volume.id:
ret_disk = disk
if not ret_disk:
ret_disk = self.find_diskdrive_for_volume_by_serial_number(volume, force_check=True)
if not ret_disk:
if hasattr(volume,'md5') and volume.md5:
ret_disk = self.find_diskdrive_for_volume_by_md5(volume, force_check=True)
return ret_disk
def find_diskdrive_for_volume_by_md5(self, volume, md5=None, length=None, force_check=False):
if not force_check and not self.is_volume_attached_to_this_instance(volume):
return None
if not isinstance(volume, EuVolume):
volume = EuVolume.make_euvol_from_vol(volume=volume,tester=self.tester)
md5 = md5 or volume.md5
if not md5:
return None
length = length or volume.md5len
for disk in self.diskdrives:
if disk.cygwin_scsi_drive:
disk_md5 = self.get_dev_md5(disk.cygwin_scsi_drive, length=length)
if disk_md5 == md5:
volume.guestdev = disk.deviceid
volume.md5 = disk_md5
volume.md5len = length
disk.ebs_volume = volume.id
return disk
return None
def find_diskdrive_for_volume_by_serial_number(self, volume, serial_number=None, force_check=False):
'''
        Attempt to iterate through all the diskdrives we're aware of. If a diskdrive is found with a serial_number
        associated with the volume, return that diskdrive obj.
example serial number format: vol-81C13EA4-dev-sdg
:param volume: volume obj to use for deriving the serial_number
:param serial_number: string. Optional. The string representing the serial # to match.
:returns WinInstanceDiskDrive if found, else None
'''
if not force_check and not self.is_volume_attached_to_this_instance(volume):
return None
if not serial_number:
serial_number = volume.id + volume.attach_data.device.replace('/','-')
for disk in self.diskdrives:
if disk.serialnumber == serial_number:
return disk
return None
def is_volume_attached_to_this_instance(self, volume):
'''
        Attempts to look up volume state per cloud to confirm the cloud believes the state of this volume is attached
        to this instance. This does not verify the guest/hypervisor also believes the volume is attached.
:param volume: volume obj.
:returns boolean
'''
volume.update()
if hasattr(volume, 'attach_data') and volume.attach_data and (volume.attach_data.instance_id == self.id):
self.debug('Volume:' + str(volume.id) + " is attached to this instance: " + str(self.id) + " per cloud perspective")
return True
else:
self.debug('Volume:' + str(volume.id) + " is NOT attached to this instance: " + str(self.id) + " per cloud perspective")
return False
def update_volume_guest_info(self, volume, md5=None, md5len=None, guestdev=None):
self.debug("{0} update_volume_guest_info: {1} {2}"
.format(termline, volume, termline))
if not self.is_volume_attached_to_this_instance(volume):
raise Exception('Volume not attached to this instance')
disk = None
if not self.get_volume_from_attached_list_by_id(volume.id):
self.attached_vols.append(volume)
volume.guestdev = guestdev or volume.guestdev
if md5:
if not md5len:
raise Exception('Must provide md5len if providing the md5')
volume.md5 = md5
volume.md5len = md5len
else:
disk = self.get_diskdrive_for_volume(volume)
if not disk:
raise Exception('Could not find diskdrive for volume when attempting to update volume guest info:' + str(volume))
volume.md5len = md5len or 1024
volume.md5 = self.get_dev_md5(disk.cygwin_scsi_drive, volume.md5len)
if not guestdev:
volume.guestdev = disk.deviceid
disk = disk or self.get_diskdrive_for_volume(volume)
disk.update_ebs_info()
volume.update_volume_attach_info_tags(md5=volume.md5, md5len=volume.md5len, instance_id=self.id, guestdev=volume.guestdev)
return volume
def get_unsynced_volumes(self, check_md5=True):
'''
Description: Returns list of volumes which are:
-in a state the cloud believes the vol is no longer attached
-the attached device has changed, or is not found.
If all euvols are shown as attached to this instance, and the last known local dev is present and/or a local device is found with matching md5 checksum
then the list will return 'None' as all volumes are successfully attached and state is in sync.
By default this method will iterate through all the known euvolumes attached to this euinstance.
A subset can be provided in the list argument 'euvol_list'.
Returns a list of euvolumes for which a corresponding guest device could not be found, or the cloud no longer believes is attached.
:param euvol_list: - optional - euvolume object list. Defaults to all self.attached_vols
:param md5length: - optional - defaults to the length given in each euvolume. Used to calc md5 checksum of devices
:param timerpervolume: -optional - time to wait for device to appear, per volume before failing
:param min_polls: - optional - minimum iterations to check guest devs before failing, despite timeout
        :param check_md5: - optional - find devices by md5 comparison. Default is to only perform this check when virtio_blk is in use.
'''
bad_list = []
retdict = self.sync_attached_volumes_with_clouds_view()
bad_list.extend(retdict['badvols'])
return bad_list
def reboot_instance_and_verify(self,
waitconnect=60,
timeout=600,
wait_for_ports=180,
connect=True,
checkvolstatus=False,
pad=5,
uptime_retries=3):
'''
Attempts to reboot an instance and verify it's state post reboot.
waitconnect-optional-integer representing seconds to wait before attempting to connect to instance after reboot
timeout-optional-integer, seconds. If a connection has failed, this timer is used to determine a retry
connect- optional - boolean to indicate whether an ssh session should be established once the expected state has been reached
checkvolstatus - optional -boolean to be used to check volume status post start up
'''
msg=""
newuptime = None
attempt = 0
def get_safe_uptime():
uptime = None
try:
uptime = self.get_uptime()
except: pass
return uptime
self.debug('Attempting to reboot instance:'+str(self.id)+', check attached volume state first')
uptime = self.tester.wait_for_result( get_safe_uptime, None, oper=operator.ne)
elapsed = 0
start = time.time()
if checkvolstatus:
#update the md5sums per volume before reboot
bad_vols=self.get_unsynced_volumes()
if bad_vols != []:
for bv in bad_vols:
self.debug(str(self.id)+'Unsynced volume found:'+str(bv.id))
raise Exception(str(self.id)+"Could not reboot using checkvolstatus flag due to unsync'd volumes")
self.debug('Rebooting now...')
self.reboot()
time.sleep(waitconnect)
try:
            self.poll_for_ports_status(ports=[self.rdp_port, self.winrm_port], timeout=wait_for_ports)
except:
self.debug('Failed to poll winrm and rdp ports after ' + str(wait_for_ports) + ' seconds, try to connect anyways...')
timeout=timeout - int(time.time()-start)
while (elapsed < timeout):
self.connect_to_instance(timeout=timeout)
#Wait for the system to provide a valid response for uptime, early connections may not
newuptime = self.tester.wait_for_result( get_safe_uptime, None, oper=operator.ne)
elapsed = int(time.time()-start)
#Check to see if new uptime is at least 'pad' less than before, allowing for some pad
if (newuptime - (uptime+elapsed)) > pad:
err_msg = "Instance uptime does not represent a reboot. Orig:"+str(uptime)+\
", New:"+str(newuptime)+", elapsed:"+str(elapsed)+"/"+str(timeout)
if elapsed > timeout:
raise Exception(err_msg)
else:
self.debug(err_msg)
else:
self.debug("Instance uptime indicates a reboot. Orig:"+str(uptime)+\
", New:"+str(newuptime)+", elapsed:"+str(elapsed))
break
if checkvolstatus:
badvols= self.get_unsynced_volumes()
if badvols != []:
for vol in badvols:
msg = msg+"\nVolume:"+vol.id+" Local Dev:"+vol.guestdev
raise Exception("Missing volumes post reboot:"+str(msg)+"\n")
self.debug(self.id+" reboot_instance_and_verify Success")
def get_uptime(self):
if not hasattr(self, 'system_info'):
self.update_system_info()
if hasattr(self.system_info, 'system_boot_time'):
return self._get_uptime_from_system_boot_time()
elif hasattr(self.system_info, 'system_up_time'):
return self._get_uptime_from_system_up_time()
else:
tb = self.tester.get_traceback()
raise Exception(str(tb) + '\nCould not get system boot or up time from system_info')
def _get_uptime_from_system_boot_time(self):
#11/18/2013, 3:15:39 PM
if not hasattr(self, 'system_info'):
self.update_system_info()
splitdate = self.system_info.system_boot_time.split()
datestring = splitdate[0]
timestring = splitdate[1]
ampm = splitdate[2]
month, day, year = datestring.replace(',',"").split('/')
hours, minutes, seconds = timestring.split(':')
if ampm == 'PM':
hours = int(hours) + 12
datetimestring = str(year) + " " + \
str(month) + " " + \
str(day) + " " + \
str(hours) + " " + \
str(minutes) + " " + \
str(seconds)
dt = datetime.strptime(datetimestring, "%Y %m %d %H %M %S")
return int(time.time() - time.mktime(dt.timetuple()))
def _get_uptime_from_system_up_time(self):
#0 Days, 0 Hours, 6 Minutes, 39 Seconds
if not hasattr(self, 'system_info'):
self.update_system_info()
uptime_string = self.system_info.system_up_time
days = 0
hours = 0
minutes = 0
seconds = 0
split = uptime_string.split(',')
for part in split:
time_string = ""
if re.search('Days', part, re.IGNORECASE):
time_string = str(part.split()[0]).strip()
days = int(time_string or 0)
elif re.search('Hours', part, re.IGNORECASE):
time_string = str(part.split()[0]).strip()
hours = int(time_string or 0)
elif re.search('Minutes', part, re.IGNORECASE):
time_string = str(part.split()[0]).strip()
minutes = int(time_string or 0)
elif re.search('Seconds', part, re.IGNORECASE):
time_string = str(part.split()[0]).strip()
seconds = int(time_string or 0)
self.debug("Days:" +str(days)+', Hours:'+ str(hours) + ", Minutes:" + str(minutes) + ", Seconds:" + str(seconds))
uptime = (days * 86400) + (hours * 3600) + (minutes * 60) + seconds
return uptime
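    # Worked example of the arithmetic above (values are illustrative):
    # "0 Days, 1 Hours, 6 Minutes, 39 Seconds" parses to
    #   (0 * 86400) + (1 * 3600) + (6 * 60) + 39 = 3999 seconds of uptime.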
def stop_instance_and_verify(self, timeout=200, state='stopped',
failstate='terminated', check_vols=True):
'''
Attempts to stop instance and verify the state has gone to
stopped state
:param timeout; -optional-time to wait on instance to go to state 'state' before failing
:param state: -optional-the expected state to signify success, default is stopped
:param failstate: -optional-a state transition that indicates failure, default is terminated
'''
self.debug(self.id+" Attempting to stop instance...")
start = time.time()
elapsed = 0
self.stop()
while (elapsed < timeout):
time.sleep(2)
self.update()
if self.state == state:
break
if self.state == failstate:
raise Exception(str(self.id) + " instance went to state:" +
str(self.state) + " while stopping")
elapsed = int(time.time()- start)
if elapsed % 10 == 0 :
self.debug(str(self.id) + " wait for stop, in state:" +
str(self.state) + ",time remaining:" +
str(elapsed) + "/" + str(timeout) )
if self.state != state:
raise Exception(self.id + " state: " + str(self.state) +
" expected:" + str(state) +
", after elapsed:" + str(elapsed))
if check_vols:
for volume in self.attached_vols:
                volume.update()
if volume.status != 'in-use':
raise Exception(str(self.id) + ', Volume ' +
str(volume.id) + ':' + str(volume.status)
+ ' state did not remain in-use '
'during stop')
self.debug(self.id + " stop_instance_and_verify Success")
def start_instance_and_verify(self, timeout=300, state = 'running',
failstates=['terminated'], failfasttime=30,
connect=True, checkvolstatus=True):
'''
Attempts to start instance and verify state, and reconnects ssh session
:param timeout: -optional-time to wait on instance to go to state
'state' before failing
:param state: -optional-the expected state to signify success,
default is running
:param failstate: -optional-a state transition that indicates failure,
default is terminated
:param connect: -optional - boolean to indicate whether an ssh
session should be established once the expected state
has been reached
:param checkvolstatus: -optional -boolean to be used to check volume
status post start up
'''
self.debug(self.id+" Attempting to start instance...")
if checkvolstatus:
for volume in self.attached_vols:
                volume.update()
if checkvolstatus:
if volume.status != 'in-use':
raise Exception(str(self.id) + ', Volume ' + str(volume.id) + ':' + str(volume.status)
+ ' state did not remain in-use during stop' )
self.debug("\n"+ str(self.id) + ": Printing Instance 'attached_vol' list:\n")
self.tester.show_volumes(self.attached_vols)
msg=""
start = time.time()
elapsed = 0
self.update()
#Add fail fast states...
if self.state == 'stopped':
failstates.extend(['stopped','stopping'])
self.start()
while (elapsed < timeout):
elapsed = int(time.time()- start)
self.update()
self.debug(str(self.id) + " wait for start, in state:" +
str(self.state) + ",time remaining:" + str(elapsed) +
"/"+str(timeout) )
if self.state == state:
break
if elapsed >= failfasttime:
for failstate in failstates:
if self.state == failstate:
raise Exception(str(self.id) +
" instance went to state:" +
str(self.state) + " while starting")
time.sleep(10)
if self.state != state:
raise Exception(self.id + " not in " + str(state) +
" state after elapsed:" + str(elapsed))
else:
self.debug(self.id + " went to state:" + str(state))
if connect:
self.connect_to_instance(timeout=timeout)
if checkvolstatus:
badvols= self.get_unsynced_volumes(check_md5=True)
if badvols != []:
for vol in badvols:
msg = msg + "\nVolume:" + vol.id + " Local Dev:" +\
vol.guestdev
raise Exception("Missing volumes post reboot:" + str(msg) +
"\n")
self.debug(self.id+" start_instance_and_verify Success")
| null | null | null | null | [
0
] |
821 | 2b7d9ded82fa980eeae06beb2d84d89612d53df1 | <mask token>
def est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):
initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov,
sitk.ScaleSkewVersor3DTransform(), sitk.
CenteredTransformInitializerFilter.MOMENTS)
lin_transformation = sitk.ImageRegistrationMethod()
lin_transformation.SetMetricAsMeanSquares()
lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)
lin_transformation.SetMetricSamplingPercentage(0.01)
if mov_mask:
lin_transformation.SetMetricMovingMask(mov_mask)
lin_transformation.SetOptimizerAsGradientDescent(learningRate=1,
numberOfIterations=400, convergenceMinimumValue=1e-06,
convergenceWindowSize=10)
lin_transformation.SetOptimizerScalesFromPhysicalShift()
lin_transformation.SetInitialTransform(initial_transform)
lin_xfm = lin_transformation
if show_parameters:
print(lin_xfm)
return lin_xfm
def est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):
reg_method = sitk.ImageRegistrationMethod()
transform_to_displacement_field_filter = (sitk.
TransformToDisplacementFieldFilter())
transform_to_displacement_field_filter.SetReferenceImage(im_ref)
initial_transform = sitk.DisplacementFieldTransform(
transform_to_displacement_field_filter.Execute(sitk.Transform()))
initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0,
varianceForTotalField=1.5)
reg_method.SetInitialTransform(initial_transform)
reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)
if fixed_mask is not None:
reg_method.SetMetricFixedMask(fixed_mask)
reg_method.SetInterpolator(sitk.sitkLinear)
reg_method.SetOptimizerAsGradientDescent(learningRate=1.0,
numberOfIterations=10, convergenceMinimumValue=1e-06,
convergenceWindowSize=10)
reg_method.SetOptimizerScalesFromPhysicalShift()
nl_xfm = reg_method
if show_parameters:
print(nl_xfm)
return nl_xfm
def apply_transf(im_ref, im_mov, trafo, show_parameters=False):
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(
im_mov, sitk.sitkFloat32))
if show_parameters:
print(transf)
print('--------')
print('Optimizer stop condition: {0}'.format(trafo.
GetOptimizerStopConditionDescription()))
print('Number of iterations: {0}'.format(trafo.GetOptimizerIteration())
)
print('--------')
return transf
<mask token>
def distances(mask_img, seg_img):
hausdorff = sitk.HausdorffDistanceImageFilter()
overlap = sitk.LabelOverlapMeasuresImageFilter()
hausdorff.Execute(mask_img, seg_img)
overlap.Execute(mask_img, seg_img)
jaccard = overlap.GetJaccardCoefficient()
dice = overlap.GetDiceCoefficient()
hausdorff_distance = hausdorff.GetHausdorffDistance()
print('The Hausdorff distance: {}'.format(hausdorff_distance))
print('The Dice coefficient: {}'.format(dice))
print('The Jaccard coefficient: {}'.format(jaccard))
return None
def train_classifier(slice_list, vector_list):
x_train_list = []
for image in slice_list:
image_array = sitk.GetArrayFromImage(image)
image_array.resize((512, 512, 512))
for z in range(image_array.shape[2]):
x_train_list.append(image_array[:, :, z].flatten())
x_train = np.asarray(x_train_list, dtype=np.uint8)
y_train = None
for i in range(0, len(vector_list)):
if i == 0:
y_train = vector_list[i]
else:
y_train = np.concatenate([y_train, vector_list[i]])
trained_forest = RandomForestClassifier(n_estimators=150)
trained_forest.fit(x_train, y_train)
return trained_forest
<mask token>
| <mask token>
def est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):
initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov,
sitk.ScaleSkewVersor3DTransform(), sitk.
CenteredTransformInitializerFilter.MOMENTS)
lin_transformation = sitk.ImageRegistrationMethod()
lin_transformation.SetMetricAsMeanSquares()
lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)
lin_transformation.SetMetricSamplingPercentage(0.01)
if mov_mask:
lin_transformation.SetMetricMovingMask(mov_mask)
lin_transformation.SetOptimizerAsGradientDescent(learningRate=1,
numberOfIterations=400, convergenceMinimumValue=1e-06,
convergenceWindowSize=10)
lin_transformation.SetOptimizerScalesFromPhysicalShift()
lin_transformation.SetInitialTransform(initial_transform)
lin_xfm = lin_transformation
if show_parameters:
print(lin_xfm)
return lin_xfm
def est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):
reg_method = sitk.ImageRegistrationMethod()
transform_to_displacement_field_filter = (sitk.
TransformToDisplacementFieldFilter())
transform_to_displacement_field_filter.SetReferenceImage(im_ref)
initial_transform = sitk.DisplacementFieldTransform(
transform_to_displacement_field_filter.Execute(sitk.Transform()))
initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0,
varianceForTotalField=1.5)
reg_method.SetInitialTransform(initial_transform)
reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)
if fixed_mask is not None:
reg_method.SetMetricFixedMask(fixed_mask)
reg_method.SetInterpolator(sitk.sitkLinear)
reg_method.SetOptimizerAsGradientDescent(learningRate=1.0,
numberOfIterations=10, convergenceMinimumValue=1e-06,
convergenceWindowSize=10)
reg_method.SetOptimizerScalesFromPhysicalShift()
nl_xfm = reg_method
if show_parameters:
print(nl_xfm)
return nl_xfm
def apply_transf(im_ref, im_mov, trafo, show_parameters=False):
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(
im_mov, sitk.sitkFloat32))
if show_parameters:
print(transf)
print('--------')
print('Optimizer stop condition: {0}'.format(trafo.
GetOptimizerStopConditionDescription()))
print('Number of iterations: {0}'.format(trafo.GetOptimizerIteration())
)
print('--------')
return transf
def seg_atlas(common_img, ct_list, seg_list):
seg = []
image_list = []
for i in range(len(ct_list)):
trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=
seg_list[i], show_parameters=False)
final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(common_img)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetTransform(final_trafo)
resampled_mask = resampler.Execute(seg_list[i])
resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)
seg.append(resampled_mask_data)
for i in range(len(seg)):
for j in range(i + 1, len(seg)):
arr1 = np.transpose(np.nonzero(seg[i]))
arr2 = np.transpose(np.nonzero(seg[j]))
arr1list = [tuple(e) for e in arr1.tolist()]
arr2list = [tuple(e) for e in arr2.tolist()]
arr1list.sort()
arr2list.sort()
intersections = list(set(arr1list).intersection(arr2list))
intersections.sort()
image_list.append(intersections)
intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(
image_list[2]))
intersection_list.sort()
image_array = sitk.GetArrayFromImage(common_img)
segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)
for x, y, z in intersection_list:
segmented_array[x, y, z] = 1
return segmented_array
def distances(mask_img, seg_img):
hausdorff = sitk.HausdorffDistanceImageFilter()
overlap = sitk.LabelOverlapMeasuresImageFilter()
hausdorff.Execute(mask_img, seg_img)
overlap.Execute(mask_img, seg_img)
jaccard = overlap.GetJaccardCoefficient()
dice = overlap.GetDiceCoefficient()
hausdorff_distance = hausdorff.GetHausdorffDistance()
print('The Hausdorff distance: {}'.format(hausdorff_distance))
print('The Dice coefficient: {}'.format(dice))
print('The Jaccard coefficient: {}'.format(jaccard))
return None
def train_classifier(slice_list, vector_list):
x_train_list = []
for image in slice_list:
image_array = sitk.GetArrayFromImage(image)
image_array.resize((512, 512, 512))
for z in range(image_array.shape[2]):
x_train_list.append(image_array[:, :, z].flatten())
x_train = np.asarray(x_train_list, dtype=np.uint8)
y_train = None
for i in range(0, len(vector_list)):
if i == 0:
y_train = vector_list[i]
else:
y_train = np.concatenate([y_train, vector_list[i]])
trained_forest = RandomForestClassifier(n_estimators=150)
trained_forest.fit(x_train, y_train)
return trained_forest
<mask token>
| <mask token>
def est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):
initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov,
sitk.ScaleSkewVersor3DTransform(), sitk.
CenteredTransformInitializerFilter.MOMENTS)
lin_transformation = sitk.ImageRegistrationMethod()
lin_transformation.SetMetricAsMeanSquares()
lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)
lin_transformation.SetMetricSamplingPercentage(0.01)
if mov_mask:
lin_transformation.SetMetricMovingMask(mov_mask)
lin_transformation.SetOptimizerAsGradientDescent(learningRate=1,
numberOfIterations=400, convergenceMinimumValue=1e-06,
convergenceWindowSize=10)
lin_transformation.SetOptimizerScalesFromPhysicalShift()
lin_transformation.SetInitialTransform(initial_transform)
lin_xfm = lin_transformation
if show_parameters:
print(lin_xfm)
return lin_xfm
def est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):
reg_method = sitk.ImageRegistrationMethod()
transform_to_displacement_field_filter = (sitk.
TransformToDisplacementFieldFilter())
transform_to_displacement_field_filter.SetReferenceImage(im_ref)
initial_transform = sitk.DisplacementFieldTransform(
transform_to_displacement_field_filter.Execute(sitk.Transform()))
initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0,
varianceForTotalField=1.5)
reg_method.SetInitialTransform(initial_transform)
reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)
if fixed_mask is not None:
reg_method.SetMetricFixedMask(fixed_mask)
reg_method.SetInterpolator(sitk.sitkLinear)
reg_method.SetOptimizerAsGradientDescent(learningRate=1.0,
numberOfIterations=10, convergenceMinimumValue=1e-06,
convergenceWindowSize=10)
reg_method.SetOptimizerScalesFromPhysicalShift()
nl_xfm = reg_method
if show_parameters:
print(nl_xfm)
return nl_xfm
def apply_transf(im_ref, im_mov, trafo, show_parameters=False):
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(
im_mov, sitk.sitkFloat32))
if show_parameters:
print(transf)
print('--------')
print('Optimizer stop condition: {0}'.format(trafo.
GetOptimizerStopConditionDescription()))
print('Number of iterations: {0}'.format(trafo.GetOptimizerIteration())
)
print('--------')
return transf
def seg_atlas(common_img, ct_list, seg_list):
seg = []
image_list = []
for i in range(len(ct_list)):
trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=
seg_list[i], show_parameters=False)
final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(common_img)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetTransform(final_trafo)
resampled_mask = resampler.Execute(seg_list[i])
resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)
seg.append(resampled_mask_data)
for i in range(len(seg)):
for j in range(i + 1, len(seg)):
arr1 = np.transpose(np.nonzero(seg[i]))
arr2 = np.transpose(np.nonzero(seg[j]))
arr1list = [tuple(e) for e in arr1.tolist()]
arr2list = [tuple(e) for e in arr2.tolist()]
arr1list.sort()
arr2list.sort()
intersections = list(set(arr1list).intersection(arr2list))
intersections.sort()
image_list.append(intersections)
intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(
image_list[2]))
intersection_list.sort()
image_array = sitk.GetArrayFromImage(common_img)
segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)
for x, y, z in intersection_list:
segmented_array[x, y, z] = 1
return segmented_array
def distances(mask_img, seg_img):
hausdorff = sitk.HausdorffDistanceImageFilter()
overlap = sitk.LabelOverlapMeasuresImageFilter()
hausdorff.Execute(mask_img, seg_img)
overlap.Execute(mask_img, seg_img)
jaccard = overlap.GetJaccardCoefficient()
dice = overlap.GetDiceCoefficient()
hausdorff_distance = hausdorff.GetHausdorffDistance()
print('The Hausdorff distance: {}'.format(hausdorff_distance))
print('The Dice coefficient: {}'.format(dice))
print('The Jaccard coefficient: {}'.format(jaccard))
return None
def train_classifier(slice_list, vector_list):
x_train_list = []
for image in slice_list:
image_array = sitk.GetArrayFromImage(image)
image_array.resize((512, 512, 512))
for z in range(image_array.shape[2]):
x_train_list.append(image_array[:, :, z].flatten())
x_train = np.asarray(x_train_list, dtype=np.uint8)
y_train = None
for i in range(0, len(vector_list)):
if i == 0:
y_train = vector_list[i]
else:
y_train = np.concatenate([y_train, vector_list[i]])
trained_forest = RandomForestClassifier(n_estimators=150)
trained_forest.fit(x_train, y_train)
return trained_forest
def slice_probability(ct_image, classifier):
test_list = []
max_list = []
im_array = sitk.GetArrayFromImage(ct_image)
im_array.resize((512, 512, 512))
for z in range(im_array.shape[2]):
test_list.append(im_array[:, :, z].flatten())
test_array = np.asarray(test_list, dtype=np.uint8)
probabilities = classifier.predict_proba(test_array)
max = np.amax(probabilities, axis=0)[1]
for i, prob in enumerate(probabilities):
if prob[1] == max:
max_list.append(i)
if len(max_list) == 1:
print('Slice {} has highest probability which is: {}'.format(
max_list[0], max))
else:
print('Slices {} have the highest probability which is: {}'.format(
max_list, max))
return None
| import SimpleITK as sitk
import numpy as np
from sklearn.ensemble import RandomForestClassifier
def est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):
initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov,
sitk.ScaleSkewVersor3DTransform(), sitk.
CenteredTransformInitializerFilter.MOMENTS)
lin_transformation = sitk.ImageRegistrationMethod()
lin_transformation.SetMetricAsMeanSquares()
lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)
lin_transformation.SetMetricSamplingPercentage(0.01)
if mov_mask:
lin_transformation.SetMetricMovingMask(mov_mask)
lin_transformation.SetOptimizerAsGradientDescent(learningRate=1,
numberOfIterations=400, convergenceMinimumValue=1e-06,
convergenceWindowSize=10)
lin_transformation.SetOptimizerScalesFromPhysicalShift()
lin_transformation.SetInitialTransform(initial_transform)
lin_xfm = lin_transformation
if show_parameters:
print(lin_xfm)
return lin_xfm
def est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):
reg_method = sitk.ImageRegistrationMethod()
transform_to_displacement_field_filter = (sitk.
TransformToDisplacementFieldFilter())
transform_to_displacement_field_filter.SetReferenceImage(im_ref)
initial_transform = sitk.DisplacementFieldTransform(
transform_to_displacement_field_filter.Execute(sitk.Transform()))
initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0,
varianceForTotalField=1.5)
reg_method.SetInitialTransform(initial_transform)
reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)
if fixed_mask is not None:
reg_method.SetMetricFixedMask(fixed_mask)
reg_method.SetInterpolator(sitk.sitkLinear)
reg_method.SetOptimizerAsGradientDescent(learningRate=1.0,
numberOfIterations=10, convergenceMinimumValue=1e-06,
convergenceWindowSize=10)
reg_method.SetOptimizerScalesFromPhysicalShift()
nl_xfm = reg_method
if show_parameters:
print(nl_xfm)
return nl_xfm
def apply_transf(im_ref, im_mov, trafo, show_parameters=False):
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(
im_mov, sitk.sitkFloat32))
if show_parameters:
print(transf)
print('--------')
print('Optimizer stop condition: {0}'.format(trafo.
GetOptimizerStopConditionDescription()))
print('Number of iterations: {0}'.format(trafo.GetOptimizerIteration())
)
print('--------')
return transf
def seg_atlas(common_img, ct_list, seg_list):
seg = []
image_list = []
for i in range(len(ct_list)):
trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=
seg_list[i], show_parameters=False)
final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(common_img)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetTransform(final_trafo)
resampled_mask = resampler.Execute(seg_list[i])
resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)
seg.append(resampled_mask_data)
for i in range(len(seg)):
for j in range(i + 1, len(seg)):
arr1 = np.transpose(np.nonzero(seg[i]))
arr2 = np.transpose(np.nonzero(seg[j]))
arr1list = [tuple(e) for e in arr1.tolist()]
arr2list = [tuple(e) for e in arr2.tolist()]
arr1list.sort()
arr2list.sort()
intersections = list(set(arr1list).intersection(arr2list))
intersections.sort()
image_list.append(intersections)
intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(
image_list[2]))
intersection_list.sort()
image_array = sitk.GetArrayFromImage(common_img)
segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)
for x, y, z in intersection_list:
segmented_array[x, y, z] = 1
return segmented_array
def distances(mask_img, seg_img):
hausdorff = sitk.HausdorffDistanceImageFilter()
overlap = sitk.LabelOverlapMeasuresImageFilter()
hausdorff.Execute(mask_img, seg_img)
overlap.Execute(mask_img, seg_img)
jaccard = overlap.GetJaccardCoefficient()
dice = overlap.GetDiceCoefficient()
hausdorff_distance = hausdorff.GetHausdorffDistance()
print('The Hausdorff distance: {}'.format(hausdorff_distance))
print('The Dice coefficient: {}'.format(dice))
print('The Jaccard coefficient: {}'.format(jaccard))
return None
def train_classifier(slice_list, vector_list):
x_train_list = []
for image in slice_list:
image_array = sitk.GetArrayFromImage(image)
image_array.resize((512, 512, 512))
for z in range(image_array.shape[2]):
x_train_list.append(image_array[:, :, z].flatten())
x_train = np.asarray(x_train_list, dtype=np.uint8)
y_train = None
for i in range(0, len(vector_list)):
if i == 0:
y_train = vector_list[i]
else:
y_train = np.concatenate([y_train, vector_list[i]])
trained_forest = RandomForestClassifier(n_estimators=150)
trained_forest.fit(x_train, y_train)
return trained_forest
def slice_probability(ct_image, classifier):
test_list = []
max_list = []
im_array = sitk.GetArrayFromImage(ct_image)
im_array.resize((512, 512, 512))
for z in range(im_array.shape[2]):
test_list.append(im_array[:, :, z].flatten())
test_array = np.asarray(test_list, dtype=np.uint8)
probabilities = classifier.predict_proba(test_array)
max = np.amax(probabilities, axis=0)[1]
for i, prob in enumerate(probabilities):
if prob[1] == max:
max_list.append(i)
if len(max_list) == 1:
print('Slice {} has highest probability which is: {}'.format(
max_list[0], max))
else:
print('Slices {} have the highest probability which is: {}'.format(
max_list, max))
return None
| import SimpleITK as sitk
import numpy as np
from sklearn.ensemble import RandomForestClassifier
# # Estimation function # #
# --------------------------- #
# Linear registration function
# --------------------------- #
# --- Input --- #
# im_ref : The common image [numpy.ndarray]
# im_mov : The group image [numpy.ndarray]
# mov_mask : List of GROUP masks [list]
# show_parameters : If you want to see the parameters, false by default [boolean]
# --- Output --- #
# lin_xfm : Estimated transformation parameters [itk.simple.Transform]
def est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):
initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov, sitk.ScaleSkewVersor3DTransform(),
sitk.CenteredTransformInitializerFilter.MOMENTS)
# Initialize registration
lin_transformation = sitk.ImageRegistrationMethod()
# Set metrics
lin_transformation.SetMetricAsMeanSquares()
lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)
lin_transformation.SetMetricSamplingPercentage(0.01)
# Set mask
if mov_mask:
lin_transformation.SetMetricMovingMask(mov_mask)
# Gradient Descent optimizer
lin_transformation.SetOptimizerAsGradientDescent(learningRate=1, numberOfIterations=400,
convergenceMinimumValue=1e-6, convergenceWindowSize=10)
lin_transformation.SetOptimizerScalesFromPhysicalShift()
# Set the initial transformation
lin_transformation.SetInitialTransform(initial_transform)
# Switching to preferred variable
lin_xfm = lin_transformation
if show_parameters:
print(lin_xfm)
return lin_xfm
# # Estimation function # #
# --------------------------- #
# Non-linear 'Demons' registration function
# --------------------------- #
# --- Input --- #
# im_ref : The common image [numpy.ndarray]
# fixed_mask : The mask of common image, default is None [numpy.ndarray]
# show_parameters : If you want to see the parameters, false by default [boolean]
# --- Output --- #
# nl_xfm : Estimated transformation parameters [itk.simple.Transform]
def est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):
# Initialize the registration
reg_method = sitk.ImageRegistrationMethod()
# Create initial identity transformation.
transform_to_displacement_field_filter = sitk.TransformToDisplacementFieldFilter()
transform_to_displacement_field_filter.SetReferenceImage(im_ref)
initial_transform = sitk.DisplacementFieldTransform(
transform_to_displacement_field_filter.Execute(sitk.Transform()))
# Regularization. The update field refers to fluid regularization; the total field to elastic regularization.
initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0, varianceForTotalField=1.5)
# Set the initial transformation
reg_method.SetInitialTransform(initial_transform)
# Set Demons registration
reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)
# Evaluate the metrics only in the mask
if fixed_mask is not None:
reg_method.SetMetricFixedMask(fixed_mask)
# Set a linear interpolator
reg_method.SetInterpolator(sitk.sitkLinear)
# Set a gradient descent optimizer
reg_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=10, convergenceMinimumValue=1e-6,
convergenceWindowSize=10)
reg_method.SetOptimizerScalesFromPhysicalShift()
# Switching to the preferred variable
nl_xfm = reg_method
if show_parameters:
print(nl_xfm)
return nl_xfm
# # Application function # #
# --------------------------- #
# Executes either the linear or the non-linear function
# --------------------------- #
# --- Input --- #
# im_ref : The common image [numpy.ndarray]
# im_mov : The group image [numpy.ndarray]
# trafo : The chosen transformation [numpy.ndarray]
# show_parameters : If you want to see the parameters, false by default [boolean]
# --- Output --- #
# final_image : Returns the registered image [numpy.ndarray]
def apply_transf(im_ref, im_mov, trafo, show_parameters=False):
# Perform registration (Executes it)
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(im_mov, sitk.sitkFloat32))
if show_parameters:
print(transf)
print("--------")
print("Optimizer stop condition: {0}".format(trafo.GetOptimizerStopConditionDescription()))
print("Number of iterations: {0}".format(trafo.GetOptimizerIteration()))
print("--------")
return transf
# # Atlas segmentation function # #
# --------------------------- #
# Atlas-based segmentation using the CT images in 'ct_list'
# and corresponding segmentation masks from 'seg_list'.
# After that, majority voting to return a segmentation mask.
# --------------------------- #
# --- Input --- #
# common_img : The chosen COMMON image [sitk-image]
# ct_list : List of GROUP images [list]
# seg_list : List of GROUP masks [list]
# --- Output --- #
# segmented_array : The segmentation as an array [numpy.ndarray]
def seg_atlas(common_img, ct_list, seg_list):
# Creating the necessary lists
seg = []
image_list = []
# # REGISTRATION # #
for i in range(len(ct_list)):
# Adjusting the settings and applying
trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=seg_list[i], show_parameters=False)
final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)
# Perform registration on mask image
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(common_img)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetTransform(final_trafo)
resampled_mask = resampler.Execute(seg_list[i])
resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)
seg.append(resampled_mask_data)
# # MAJORITY VOTING # #
for i in range(len(seg)):
for j in range(i + 1, len(seg)):
arr1 = np.transpose(np.nonzero(seg[i]))
arr2 = np.transpose(np.nonzero(seg[j]))
# Filling two lists
arr1list = [tuple(e) for e in arr1.tolist()]
arr2list = [tuple(e) for e in arr2.tolist()]
# Sorting both lists
arr1list.sort()
arr2list.sort()
# Creating necessary list & sorting
intersections = list(set(arr1list).intersection(arr2list))
intersections.sort()
image_list.append(intersections)
# Creating a list which contains the indexes of intersecting voxels
intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(image_list[2]))
# Sorting the list
intersection_list.sort()
# Fetches array from image
image_array = sitk.GetArrayFromImage(common_img)
# Creates an array for the points and fills it using indexes
segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)
for x, y, z in intersection_list:
segmented_array[x, y, z] = 1
return segmented_array
# # Similarity function # #
# --------------------------- #
# Calculates the following distances between images:
# 1. Jaccard coef.
# 2. Dice coef.
# 3. Hausdorff distance
# --------------------------- #
# --- Input --- #
# mask_img : The mask image [sikt-image]
# seg_img: The segmented image [sikt-image]
# --- Output --- #
# None
def distances(mask_img, seg_img):
# Creating the necessary filters
hausdorff = sitk.HausdorffDistanceImageFilter()
overlap = sitk.LabelOverlapMeasuresImageFilter()
# Execute filters
hausdorff.Execute(mask_img, seg_img)
overlap.Execute(mask_img, seg_img)
# Fetching the distances and appending to distance list
# Jaccard coef.
jaccard = overlap.GetJaccardCoefficient()
# Dice coef.
dice = overlap.GetDiceCoefficient()
# Hausdorff distance
hausdorff_distance = hausdorff.GetHausdorffDistance()
# Printing out the distances for user
print('The Hausdorff distance: {}'.format(
hausdorff_distance))
print('The Dice coefficient: {}'.format(dice))
print('The Jaccard coefficient: {}'.format(jaccard))
return None
# # Classifier Function # #
# --------------------------- #
# Trains a random forest classifier by reading 2d images and comparing
# them to a vector which has labels that correspond to if it contains
# the pubic symphysis. The labels are binary.
# --------------------------- #
# --- Input --- #
# slice_list : List of 2D slice images [list]
# vector_list : List of vectors with binary labels [list]
# --- Output --- #
# trained_forest : Trained random forest classifier [sklearn.ensemble.forest.RandomForestClassifier]
def train_classifier(slice_list, vector_list):
# Creating necessary list
x_train_list = []
# Reading in input data
for image in slice_list:
# Fetching arrays
image_array = sitk.GetArrayFromImage(image)
# Resizing
image_array.resize((512, 512, 512))
for z in range(image_array.shape[2]):
x_train_list.append(image_array[:, :, z].flatten())
x_train = np.asarray(x_train_list, dtype=np.uint8)
# Reading in training labels
y_train = None
for i in range(0, len(vector_list)):
if i == 0:
y_train = vector_list[i]
else:
y_train = np.concatenate([y_train, vector_list[i]])
# Train classifier
trained_forest = RandomForestClassifier(n_estimators=150)
trained_forest.fit(x_train, y_train)
return trained_forest
# # Classifier Function # #
# --------------------------- #
# Utilizes a trained random forest classifier by reading CT image and prints
# which slice has the highest probability of containing the pubic symphysis.
# --------------------------- #
# --- Input --- #
# ct_image : List of 2D axial slice images [list]
# classifier : Trained random forest classifier [sklearn.ensemble.forest.RandomForestClassifier]
# --- Output --- #
# None
def slice_probability(ct_image, classifier):
# Creating necessary lists
test_list = []
max_list = []
# Convert image to numpy array & resize
im_array = sitk.GetArrayFromImage(ct_image)
im_array.resize((512, 512, 512))
for z in range(im_array.shape[2]):
test_list.append(im_array[:, :, z].flatten())
test_array = np.asarray(test_list, dtype=np.uint8)
# Predict probabilities for each slice
probabilities = classifier.predict_proba(test_array)
# Fetching array with maximum probabilities
max = np.amax(probabilities, axis=0)[1]
for i, prob in enumerate(probabilities):
if prob[1] == max:
max_list.append(i)
# Print result to user
if len(max_list) == 1:
print("Slice {} has highest probability which is: {}".format(max_list[0], max))
else:
print("Slices {} have the highest probability which is: {}".format(max_list, max))
return None
| [
5,
6,
7,
8,
9
] |
822 | 1406b2ab78b52823a8f455c8e2719f6bd84bd168 | <mask token>
class Encoder(object):
def __init__(self, pin_x='P4', pin_y='P5', pin_mode=Pin.PULL_UP, scale=
1, min=0, max=100, reverse=False):
self.pin_x = pin_x if isinstance(pin_x, Pin) else Pin(pin_x, mode=
Pin.IN, pull=pin_mode)
self.pin_y = pin_y if isinstance(pin_y, Pin) else Pin(pin_y, mode=
Pin.IN, pull=pin_mode)
self.pin_mode = pin_mode
self.scale = scale
self.min = min
self.max = max
self.reverse = 1 if reverse else -1
self._pos = -1
self._readings = 0
self._state = 0
self.set_callbacks(self._callback)
def _callback(self, line):
self._readings = (self._readings << 2 | self.pin_x.value() << 1 |
self.pin_y.value()) & 15
self._state = ENC_STATES[self._readings] * self.reverse
if self._state:
self._pos = min(max(self.min, self._pos + self._state), self.max)
<mask token>
def position(self):
return self._pos * self.scale
<mask token>
def setMax(self, Max):
self.max = Max
def setMin(self, Min):
self.min = Min
def setScale(self, Scale):
self.scale = Scale
| <mask token>
class Encoder(object):
def __init__(self, pin_x='P4', pin_y='P5', pin_mode=Pin.PULL_UP, scale=
1, min=0, max=100, reverse=False):
self.pin_x = pin_x if isinstance(pin_x, Pin) else Pin(pin_x, mode=
Pin.IN, pull=pin_mode)
self.pin_y = pin_y if isinstance(pin_y, Pin) else Pin(pin_y, mode=
Pin.IN, pull=pin_mode)
self.pin_mode = pin_mode
self.scale = scale
self.min = min
self.max = max
self.reverse = 1 if reverse else -1
self._pos = -1
self._readings = 0
self._state = 0
self.set_callbacks(self._callback)
def _callback(self, line):
self._readings = (self._readings << 2 | self.pin_x.value() << 1 |
self.pin_y.value()) & 15
self._state = ENC_STATES[self._readings] * self.reverse
if self._state:
self._pos = min(max(self.min, self._pos + self._state), self.max)
def set_callbacks(self, callback=None):
self.irq_x = self.pin_x.callback(trigger=Pin.IRQ_FALLING | Pin.
IRQ_RISING, handler=callback)
self.irq_y = self.pin_y.callback(trigger=Pin.IRQ_FALLING | Pin.
IRQ_RISING, handler=callback)
def position(self):
return self._pos * self.scale
def reset(self):
self._pos = 0
def setMax(self, Max):
self.max = Max
def setMin(self, Min):
self.min = Min
def setScale(self, Scale):
self.scale = Scale
| <mask token>
ENC_STATES = 0, -1, 1, 0, 1, 0, 0, -1, -1, 0, 0, 1, 0, 1, -1, 0
class Encoder(object):
def __init__(self, pin_x='P4', pin_y='P5', pin_mode=Pin.PULL_UP, scale=
1, min=0, max=100, reverse=False):
self.pin_x = pin_x if isinstance(pin_x, Pin) else Pin(pin_x, mode=
Pin.IN, pull=pin_mode)
self.pin_y = pin_y if isinstance(pin_y, Pin) else Pin(pin_y, mode=
Pin.IN, pull=pin_mode)
self.pin_mode = pin_mode
self.scale = scale
self.min = min
self.max = max
self.reverse = 1 if reverse else -1
self._pos = -1
self._readings = 0
self._state = 0
self.set_callbacks(self._callback)
def _callback(self, line):
self._readings = (self._readings << 2 | self.pin_x.value() << 1 |
self.pin_y.value()) & 15
self._state = ENC_STATES[self._readings] * self.reverse
if self._state:
self._pos = min(max(self.min, self._pos + self._state), self.max)
def set_callbacks(self, callback=None):
self.irq_x = self.pin_x.callback(trigger=Pin.IRQ_FALLING | Pin.
IRQ_RISING, handler=callback)
self.irq_y = self.pin_y.callback(trigger=Pin.IRQ_FALLING | Pin.
IRQ_RISING, handler=callback)
def position(self):
return self._pos * self.scale
def reset(self):
self._pos = 0
def setMax(self, Max):
self.max = Max
def setMin(self, Min):
self.min = Min
def setScale(self, Scale):
self.scale = Scale
| <mask token>
from machine import Pin
ENC_STATES = 0, -1, 1, 0, 1, 0, 0, -1, -1, 0, 0, 1, 0, 1, -1, 0
class Encoder(object):
def __init__(self, pin_x='P4', pin_y='P5', pin_mode=Pin.PULL_UP, scale=
1, min=0, max=100, reverse=False):
self.pin_x = pin_x if isinstance(pin_x, Pin) else Pin(pin_x, mode=
Pin.IN, pull=pin_mode)
self.pin_y = pin_y if isinstance(pin_y, Pin) else Pin(pin_y, mode=
Pin.IN, pull=pin_mode)
self.pin_mode = pin_mode
self.scale = scale
self.min = min
self.max = max
self.reverse = 1 if reverse else -1
self._pos = -1
self._readings = 0
self._state = 0
self.set_callbacks(self._callback)
def _callback(self, line):
self._readings = (self._readings << 2 | self.pin_x.value() << 1 |
self.pin_y.value()) & 15
self._state = ENC_STATES[self._readings] * self.reverse
if self._state:
self._pos = min(max(self.min, self._pos + self._state), self.max)
def set_callbacks(self, callback=None):
self.irq_x = self.pin_x.callback(trigger=Pin.IRQ_FALLING | Pin.
IRQ_RISING, handler=callback)
self.irq_y = self.pin_y.callback(trigger=Pin.IRQ_FALLING | Pin.
IRQ_RISING, handler=callback)
def position(self):
return self._pos * self.scale
def reset(self):
self._pos = 0
def setMax(self, Max):
self.max = Max
def setMin(self, Min):
self.min = Min
def setScale(self, Scale):
self.scale = Scale
| # -*- coding: utf-8 -*-
"""MicroPython rotary encoder library."""
from machine import Pin
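# ENC_STATES maps a 4-bit transition code (previous X/Y pin levels in the high two bits, current levels in the low two bits) to a step of -1, 0 or +1.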
ENC_STATES = (0, -1, 1, 0, 1, 0, 0, -1, -1, 0, 0, 1, 0, 1, -1, 0)
class Encoder(object):
def __init__(self, pin_x='P4', pin_y='P5', pin_mode=Pin.PULL_UP,
scale=1, min=0, max=100, reverse=False):
self.pin_x = (pin_x if isinstance(pin_x, Pin) else
Pin(pin_x, mode=Pin.IN, pull=pin_mode))
self.pin_y = (pin_y if isinstance(pin_y, Pin) else
Pin(pin_y, mode=Pin.IN, pull=pin_mode))
self.pin_mode = pin_mode
self.scale = scale
self.min = min
self.max = max
self.reverse = 1 if reverse else -1
# The following variables are assigned to in the interrupt callback,
# so we have to allocate them here.
self._pos = -1
self._readings = 0
self._state = 0
self.set_callbacks(self._callback)
def _callback(self, line):
self._readings = (self._readings << 2 | self.pin_x.value() << 1 |
self.pin_y.value()) & 0x0f
self._state = ENC_STATES[self._readings] * self.reverse
if self._state:
self._pos = min(max(self.min, self._pos + self._state), self.max)
def set_callbacks(self, callback=None):
self.irq_x = self.pin_x.callback(
trigger=Pin.IRQ_FALLING | Pin.IRQ_RISING, handler=callback)
self.irq_y = self.pin_y.callback(
trigger=Pin.IRQ_FALLING | Pin.IRQ_RISING, handler=callback)
def position(self):
return self._pos * self.scale
def reset(self):
self._pos = 0
def setMax(self, Max):
self.max = Max
def setMin(self, Min):
self.min = Min
def setScale(self, Scale):
self.scale = Scale
| [
7,
9,
10,
11,
12
] |
823 | 1bab6b039462bb5762aa588d5ba7c3e74362d0a7 | <mask token>
| class Solution:
<mask token>
<mask token>
| class Solution:
def minRemoveToMakeValid(self, s: str) ->str:
bracketsToRemove = set()
stack = []
for i, c in enumerate(s):
if c not in '()':
continue
if c == '(':
stack.append(i)
elif not stack:
bracketsToRemove.add(i)
else:
stack.pop()
bracketsToRemove = bracketsToRemove.union(set(stack))
stringBuilder = []
for i, c in enumerate(s):
if i not in bracketsToRemove:
stringBuilder.append(c)
return ''.join(stringBuilder)
<mask token>
| class Solution:
def minRemoveToMakeValid(self, s: str) ->str:
bracketsToRemove = set()
stack = []
for i, c in enumerate(s):
if c not in '()':
continue
if c == '(':
stack.append(i)
elif not stack:
bracketsToRemove.add(i)
else:
stack.pop()
bracketsToRemove = bracketsToRemove.union(set(stack))
stringBuilder = []
for i, c in enumerate(s):
if i not in bracketsToRemove:
stringBuilder.append(c)
return ''.join(stringBuilder)
Solution().minRemoveToMakeValid('L(ee)(t(()coe')
| class Solution:
def minRemoveToMakeValid(self, s: str) -> str:
bracketsToRemove = set()
stack = []
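        # Indexes of unmatched '(' wait on the stack; indexes of unmatched ')' go straight into bracketsToRemove.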
for i, c in enumerate(s):
if c not in '()':
continue
if c == '(':
stack.append(i)
elif not stack:
bracketsToRemove.add(i)
else:
stack.pop()
bracketsToRemove = bracketsToRemove.union(set(stack))
stringBuilder = []
for i,c in enumerate(s):
if i not in bracketsToRemove:
stringBuilder.append(c)
return "".join(stringBuilder)
Solution().minRemoveToMakeValid('L(ee)(t(()coe')
| [
0,
1,
2,
3,
4
] |
824 | 75ddcdd4e80b962198ff9de1d996837927c3ac1a | <mask token>
| <mask token>
def truecase_is(string):
""" -> lower/title/upper/other """
if string.islower():
return 'l'
if string.istitle():
return 't'
if string.isupper():
return 'u'
return 'o'
<mask token>
def truecase_matching_is(str1, str2):
""" -> f(ull-string)/s(ub-string)/n(one) """
if str1 == str2:
return 'f'
if str1 in str2:
return 's'
return 'n'
def lowercase_matching_is(str1, str2):
return truecase_matching_is(str1.lower(), str2.lower())
| <mask token>
def truecase_is(string):
""" -> lower/title/upper/other """
if string.islower():
return 'l'
if string.istitle():
return 't'
if string.isupper():
return 'u'
return 'o'
def alnum_is(string):
""" -> alpha/digit/other """
if string.isalpha():
return 'a'
if string.isdigit():
return 'd'
return 'o'
def truecase_matching_is(str1, str2):
""" -> f(ull-string)/s(ub-string)/n(one) """
if str1 == str2:
return 'f'
if str1 in str2:
return 's'
return 'n'
def lowercase_matching_is(str1, str2):
return truecase_matching_is(str1.lower(), str2.lower())
| from __future__ import print_function, with_statement
<mask token>
def truecase_is(string):
""" -> lower/title/upper/other """
if string.islower():
return 'l'
if string.istitle():
return 't'
if string.isupper():
return 'u'
return 'o'
def alnum_is(string):
""" -> alpha/digit/other """
if string.isalpha():
return 'a'
if string.isdigit():
return 'd'
return 'o'
def truecase_matching_is(str1, str2):
""" -> f(ull-string)/s(ub-string)/n(one) """
if str1 == str2:
return 'f'
if str1 in str2:
return 's'
return 'n'
def lowercase_matching_is(str1, str2):
return truecase_matching_is(str1.lower(), str2.lower())
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, with_statement
"""
cosi299a- Cinderella
[email protected]
"""
def truecase_is(string):
""" -> lower/title/upper/other """
if string.islower():
return 'l'
if string.istitle():
return 't'
if string.isupper():
return 'u'
return 'o'
def alnum_is(string):
""" -> alpha/digit/other """ #assumption: only alnum strings analyzed
if string.isalpha():
return 'a'
if string.isdigit():
return 'd'
return 'o'
def truecase_matching_is(str1, str2):
""" -> f(ull-string)/s(ub-string)/n(one) """
if str1==str2:
return 'f'
if str1 in str2:
return 's'
return 'n'
def lowercase_matching_is(str1, str2):
return truecase_matching_is(str1.lower(),str2.lower())
| [
0,
3,
4,
5,
6
] |
825 | cdcb2710291e9897b874f63840193470ed58be49 | <mask token>
class PageInfoAjaxSpider(scrapy.Spider):
<mask token>
<mask token>
<mask token>
def start_requests(self):
url = (
            'https://s.search.bilibili.com/cate/search?callback=jqueryCallback_bili_8995260575257822&main_ver=v3&search_type=video&view_type=hot_rank&order=click&copy_right=-1&cate_id=130&page=1&pagesize=20&jsonp=jsonp&time_from=20190426&time_to=20190625&_=1561516363499'
)
yield Request(url, headers=self.headers)
<mask token>
| <mask token>
class PageInfoAjaxSpider(scrapy.Spider):
<mask token>
<mask token>
<mask token>
def start_requests(self):
url = (
            'https://s.search.bilibili.com/cate/search?callback=jqueryCallback_bili_8995260575257822&main_ver=v3&search_type=video&view_type=hot_rank&order=click&copy_right=-1&cate_id=130&page=1&pagesize=20&jsonp=jsonp&time_from=20190426&time_to=20190625&_=1561516363499'
)
yield Request(url, headers=self.headers)
def parse(self, response):
req_body = response.body
json_data = req_body.decode('utf-8')
pure_json_data = re.sub('jqueryCallback_bili_([0-9])*', '',
json_data, count=1)
pure_json_data = json.loads(pure_json_data[1:-1])
print(pure_json_data['numPages'])
| <mask token>
class PageInfoAjaxSpider(scrapy.Spider):
name = 'page_info_ajax'
allowed_domains = ['bilibili.com']
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
}
def start_requests(self):
url = (
            'https://s.search.bilibili.com/cate/search?callback=jqueryCallback_bili_8995260575257822&main_ver=v3&search_type=video&view_type=hot_rank&order=click&copy_right=-1&cate_id=130&page=1&pagesize=20&jsonp=jsonp&time_from=20190426&time_to=20190625&_=1561516363499'
)
yield Request(url, headers=self.headers)
def parse(self, response):
req_body = response.body
json_data = req_body.decode('utf-8')
pure_json_data = re.sub('jqueryCallback_bili_([0-9])*', '',
json_data, count=1)
pure_json_data = json.loads(pure_json_data[1:-1])
print(pure_json_data['numPages'])
| import json
import re
import scrapy
from scrapy import Request
class PageInfoAjaxSpider(scrapy.Spider):
name = 'page_info_ajax'
allowed_domains = ['bilibili.com']
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
}
def start_requests(self):
url = (
            'https://s.search.bilibili.com/cate/search?callback=jqueryCallback_bili_8995260575257822&main_ver=v3&search_type=video&view_type=hot_rank&order=click&copy_right=-1&cate_id=130&page=1&pagesize=20&jsonp=jsonp&time_from=20190426&time_to=20190625&_=1561516363499'
)
yield Request(url, headers=self.headers)
def parse(self, response):
req_body = response.body
json_data = req_body.decode('utf-8')
pure_json_data = re.sub('jqueryCallback_bili_([0-9])*', '',
json_data, count=1)
pure_json_data = json.loads(pure_json_data[1:-1])
print(pure_json_data['numPages'])
| # -*- coding: utf-8 -*-
import json
import re
import scrapy
from scrapy import Request
class PageInfoAjaxSpider(scrapy.Spider):
name = 'page_info_ajax'
allowed_domains = ['bilibili.com']
# start_urls = ['http://bilibili.com/']
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
}
def start_requests(self):
        url = 'https://s.search.bilibili.com/cate/search?callback=jqueryCallback_bili_8995260575257822&main_ver=v3&search_type=video&view_type=hot_rank&order=click&copy_right=-1&cate_id=130&page=1&pagesize=20&jsonp=jsonp&time_from=20190426&time_to=20190625&_=1561516363499'
yield Request(url, headers=self.headers)
def parse(self, response):
req_body = response.body
json_data = req_body.decode('utf-8')
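        # Strip the JSONP wrapper 'jqueryCallback_bili_<digits>(...)' so the remaining text parses as plain JSON.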
pure_json_data = re.sub(r'jqueryCallback_bili_([0-9])*', '', json_data, count=1)
pure_json_data = json.loads(pure_json_data[1:-1])
print(pure_json_data['numPages'])
| [
2,
3,
4,
5,
6
] |
826 | f4bfef2ee78b87184cc72666fade949f8f931fc3 | #### about enumerate
##s = input()
##for index, letter in enumerate(s):
##    print(index,':',letter)
#### same as
##for i in range(len(s)):
##    print (i,':', s[i])
#### the index where each word starts
##st = input()
##for index, symbol in enumerate(st):
##    if symbol == ' ' and index != len(st)-1 or index == 0 or index == len(st):
##        print(index)
#### a name and a word are entered; print the name without its first and last letters
##sname = input()
##for index in range(len(sname)):
##    if sname[index] == ' ':
##        print(sname[(index+2):len(sname)-1])
#### about replace
####a name and a string are entered; print the string without the first two letters of the name
##name = input("What's your name?")
##deal = input("How are you?")
##cutdeal = deal.replace(name[0], '')
##cutdeal = cutdeal.replace(name[1], '')
##print(cutdeal)
#### same as
##name = input("What's your name?")
##deal = input("How are you?")
##for index, symbol in enumerate(deal):
##    if symbol == name[0] or symbol == name[1]:
##        cutdeal = deal[:index-1] + deal[index+1:]
##print(cutdeal)
####about the while loop
##i = 1
##s = 0
##while s<500:
##    s+=i
##    i+=1
##print(i)
##
##s = input()
##while s: ## while s is a non-empty string
##    print(s.lower) ## all letters in lower case
####print the square root of each number until the user enters an empty string; stop on a negative number
##n = input("Enter a number. ")
##while n:
##    n = int(n)
##    if n<0:
##        break
##    print (n**1/2)
##    n = input("Enter a number. ")
####print the square root of each number until the user enters an empty string; ask for a positive number when a negative one is entered
##n = input("Enter a number. ")
##while n:
##    n = int(n)
##    if n<0:
##        n = input("Better enter a positive number. ")
##        continue
##    print (n**(1/2))
##    n = input("Enter a number. ")
####the user enters numbers until an empty string; print the sum of only those numbers greater than 100; if a number divisible by 500 is entered, stop asking for numbers
##n = input("Enter a number. ")
##m = 0
##while n:
##    n = int(n)
##    if n%500 == 0:
##        break
##    if n>100:
##        sum += n
##        n = input("Enter a number. ")
##        continue
##    n = input("Enter a number. ")
##print(sum)
| null | null | null | null | [
1
] |
827 | 6d0340a08701b0c4f34e9b833bca27cf455d682d |
# coding: utf-8
# # Read Bathy data from ERDDAP
# In[ ]:
get_ipython().system(u'conda install basemap --yes')
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import urllib
import netCDF4
from mpl_toolkits.basemap import Basemap
# In[2]:
# Definine the domain of interest
minlat = 42
maxlat = 45
minlon = -67
maxlon = -61.5
isub = 5
# Read data from: http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html
# using the netCDF output option
base_url='http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.nc?'
query='topo[(%f):%d:(%f)][(%f):%d:(%f)]' % (maxlat,isub,minlat,minlon,isub,maxlon)
url = base_url+query
print url
# In[3]:
# store data in NetCDF file
file='usgsCeSrtm30v6.nc'
urllib.urlretrieve (url, file)
# In[4]:
# open NetCDF data in
nc = netCDF4.Dataset(file)
ncv = nc.variables
print ncv.keys()
# In[5]:
lon = ncv['longitude'][:]
lat = ncv['latitude'][:]
lons, lats = np.meshgrid(lon,lat)
topo = ncv['topo'][:,:]
# In[ ]:
# Create map
m = Basemap(projection='mill', llcrnrlat=minlat,urcrnrlat=maxlat,llcrnrlon=minlon, urcrnrlon=maxlon,resolution='h')
fig1 = plt.figure(figsize=(10,8))
cs = m.pcolormesh(lons,lats,topo,cmap=plt.cm.jet,latlon=True)
m.drawcoastlines()
m.drawmapboundary()
plt.title('SMRT30 - Bathymetry/Topography')
cbar = plt.colorbar(orientation='horizontal', extend='both')
cbar.ax.set_xlabel('meters')
# Save figure (without 'white' borders)
plt.savefig('topo.png', bbox_inches='tight')
| null | null | null | null | [
0
] |
828 | 0f6737b9e9e9a13d75c20352e9ef9c1db6c0c8a3 | #! /usr/bin/env python
# import ros stuff
import rospy
from std_srvs.srv import *
#to check if the service is active
active_ = False
def unable_service(req):
"""
    This function sets the global variable declared above, which is
    used to enable or disable the service.
"""
global active_
active_ = req.data
res = SetBoolResponse()
res.success = True
res.message = 'Done!'
return res
def getInput():
"""
    This function gets the input, given by the user, for which of the 5
    proposed behaviors the robot must follow.
    If the input chosen by the user is the behavior that is already
    active, the function asks for the input again.
"""
global active_
#to disable the service
active_ = False
# reading the previous input
prev_input_ = rospy.get_param('/input')
input_ = prev_input_
#in order to make the user to choose one of the 5 possible inputs
while (prev_input_ == input_) or (input_ > 5 or input_ < 1):
if input_ > 5 or input_ < 1:
#in the case in which the user make another selection
print "Unknown input, please try again"
#propose to the user which are the real possibilities
print("Please select one of the following senteces\n")
print("1 - Move the robot randomly in the environment, by choosing one of six possible target positions\n")
print("2 - The user can chose the next target position\n")
print("3 - Start following the external walls\n")
print("4 - Stop the robot in the last position\n")
print("5 - Change the planning algorithm from move_base to bug0 and vice versa\n")
#read the input typed by the user
input_ = (int(raw_input("Please select a number between 1 and 5: ")))
#set the choice made by the user
if input_ >= 1 and input_ <= 5:
rospy.set_param('/input', input_)
def main():
"""
The main function allows the user to choose the robot's behavior.
    If the service is active, it calls the function getInput, which allows
    the user to make a new choice. If it is not, it checks whether the selected
    behavior is the second one and, in that case, replaces it with the fourth one.
"""
global active_
#init user_interface
rospy.init_node('user_interface')
#service that allows the user to choose a new input
srv_user_interface = rospy.Service('/user_interface_service', SetBool, unable_service)
rate = rospy.Rate(1)
while not rospy.is_shutdown():
#if the service is not active
if not active_:
rate.sleep()
#if the selected behavior is the second one
if rospy.get_param("/input") == 2:
#change it in the fourth behavior
rospy.set_param("/input",4)
continue
#if the service is active
else:
getInput() # allow the user to choose a new behaviour
rate.sleep()
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
| null | null | null | null | [
0
] |
829 | 0686dec7f3dc23f01ffff41f611a1bb597bb5352 | <mask token>
class Files(Base):
<mask token>
def upload_file(self, channel_id, files):
return self.client.post(self.endpoint, data={'channel_id':
channel_id}, files=files)
def get_file(self, file_id):
return self.client.get(self.endpoint + '/' + file_id)
def get_file_thumbnail(self, file_id):
return self.client.get(self.endpoint + '/' + file_id + '/thumbnail')
<mask token>
<mask token>
def get_file_metadata(self, file_id):
return self.client.get(self.endpoint + '/' + file_id + '/info')
| <mask token>
class Files(Base):
<mask token>
def upload_file(self, channel_id, files):
return self.client.post(self.endpoint, data={'channel_id':
channel_id}, files=files)
def get_file(self, file_id):
return self.client.get(self.endpoint + '/' + file_id)
def get_file_thumbnail(self, file_id):
return self.client.get(self.endpoint + '/' + file_id + '/thumbnail')
def get_file_preview(self, file_id):
return self.client.get(self.endpoint + '/' + file_id + '/preview')
<mask token>
def get_file_metadata(self, file_id):
return self.client.get(self.endpoint + '/' + file_id + '/info')
| <mask token>
class Files(Base):
endpoint = '/files'
def upload_file(self, channel_id, files):
return self.client.post(self.endpoint, data={'channel_id':
channel_id}, files=files)
def get_file(self, file_id):
return self.client.get(self.endpoint + '/' + file_id)
def get_file_thumbnail(self, file_id):
return self.client.get(self.endpoint + '/' + file_id + '/thumbnail')
def get_file_preview(self, file_id):
return self.client.get(self.endpoint + '/' + file_id + '/preview')
def get_public_file_link(self, file_id):
return self.client.get(self.endpoint + '/' + file_id + '/link')
def get_file_metadata(self, file_id):
return self.client.get(self.endpoint + '/' + file_id + '/info')
| from .base import Base
class Files(Base):
endpoint = '/files'
def upload_file(self, channel_id, files):
return self.client.post(self.endpoint, data={'channel_id':
channel_id}, files=files)
def get_file(self, file_id):
return self.client.get(self.endpoint + '/' + file_id)
def get_file_thumbnail(self, file_id):
return self.client.get(self.endpoint + '/' + file_id + '/thumbnail')
def get_file_preview(self, file_id):
return self.client.get(self.endpoint + '/' + file_id + '/preview')
def get_public_file_link(self, file_id):
return self.client.get(self.endpoint + '/' + file_id + '/link')
def get_file_metadata(self, file_id):
return self.client.get(self.endpoint + '/' + file_id + '/info')
| from .base import Base
class Files(Base):
endpoint = "/files"
def upload_file(self, channel_id, files):
return self.client.post(self.endpoint, data={"channel_id": channel_id}, files=files)
def get_file(self, file_id):
return self.client.get(
self.endpoint + "/" + file_id,
)
def get_file_thumbnail(self, file_id):
return self.client.get(
self.endpoint + "/" + file_id + "/thumbnail",
)
def get_file_preview(self, file_id):
return self.client.get(
self.endpoint + "/" + file_id + "/preview",
)
def get_public_file_link(self, file_id):
return self.client.get(
self.endpoint + "/" + file_id + "/link",
)
def get_file_metadata(self, file_id):
return self.client.get(
self.endpoint + "/" + file_id + "/info",
)
| [
5,
6,
8,
9,
10
] |
830 | c3d9ad49b62c56dfbd9674cb1ac5c206e6401a27 | <mask token>
class BlogBuilder(object):
<mask token>
def _generate_output(self):
"""Generate output that belongs in the destination file.
Subclasses must implement this method.
"""
raise NotImplementedError()
def write_to(self, filepath):
"""Write the output to the provided filepath."""
output = self._generate_output()
with open(filepath, 'wb') as out:
out.write(output.encode('utf-8'))
out.write(b'<!-- handrolled for excellence -->\n')
class FeedBuilder(BlogBuilder):
"""Transform blog metadata and posts into an Atom feed."""
def __init__(self, metadata):
self.metadata = metadata
self._feed = AtomFeed(**metadata)
def add(self, posts):
"""Add blog posts to the feed."""
for post in posts:
self._feed.add(FeedEntry(summary=post.summary, title=post.title,
title_type='html', url=post.url, updated=post.date))
def _generate_output(self):
return self._feed.to_string()
class ListPageBuilder(BlogBuilder):
"""Transform blog posts into a list page."""
def __init__(self, template):
self._template = template
self._blog_list = ''
self._posts = None
def add(self, posts):
"""Add the posts and generate a blog list."""
li_html = []
for post in posts:
li_html.append(u'<li><a href="{route}">{title}</a></li>'.format
(route=post.route, title=post.title))
self._blog_list = u'\n'.join(li_html)
self._posts = posts
def _generate_output(self):
context = {'blog_list': self._blog_list, 'posts': self._posts}
return self._template.render(context)
| <mask token>
class BlogExtension(Extension):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self, config):
super(BlogExtension, self).__init__(config)
self.posts = {}
self.atom_metadata = {}
self.atom_output = ''
self.list_template = None
self.list_output = None
self._resolver = None
self._should_generate = True
def on_pre_composition(self, director):
"""Check that all the required configuration exists."""
if not self._config.parser.has_section('blog'):
raise AbortError(_(
'A blog section is missing in the configuration file.'))
for metadata, option in self.required_metadata.items():
self._add_atom_metadata(metadata, option)
self.atom_output = self._get_option('atom_output')
if self._config.parser.has_option('blog', 'list_template'):
self.list_template = self._get_option('list_template')
self.list_output = self._get_option('list_output')
self._resolver = director.resolver
<mask token>
def on_post_composition(self, director):
"""Generate blog output."""
if not self._should_generate:
return
blog_posts = sorted(self.posts.values(), key=lambda p: p.date,
reverse=True)
self._generate_atom_feed(director, blog_posts)
if self.list_template is not None:
self._generate_list_page(director, blog_posts)
self._should_generate = False
def _is_post(self, frontmatter):
"""Check if the front matter looks like a blog post."""
is_post = frontmatter.get('blog', False)
if type(is_post) != bool:
raise AbortError(_(
'Invalid blog frontmatter (expects True or False): {blog_value}'
).format(blog_value=is_post))
return is_post
def _validate_post(self, source_file, frontmatter):
"""Validate that the post contains all the required fields."""
required = set(['date', 'title'])
fields = set(frontmatter.keys())
missing = required - fields
if missing:
raise AbortError(_(
'The blog post, {filename}, is missing required fields: {missing_fields}'
.format(filename=source_file, missing_fields=', '.join(
missing))))
def _generate_atom_feed(self, director, blog_posts):
"""Generate the atom feed."""
logger.info(_('Generating Atom XML feed ...'))
builder = FeedBuilder(self.atom_metadata)
builder.add(blog_posts)
output_file = os.path.join(director.outdir, self.atom_output)
builder.write_to(output_file)
def _generate_list_page(self, director, blog_posts):
"""Generate the list page."""
logger.info(_('Generating blog list page ...'))
template = director.catalog.get_template(self.list_template)
builder = ListPageBuilder(template)
builder.add(blog_posts)
output_file = os.path.join(director.outdir, self.list_output)
builder.write_to(output_file)
def _add_atom_metadata(self, name, option):
"""Add atom metadata from the config parser."""
self.atom_metadata[name] = self._get_option(option)
def _get_option(self, option):
"""Get an option out of the blog section."""
try:
return self._config.parser.get('blog', option)
except configparser.NoOptionError:
raise AbortError(_(
'The blog extension requires the {option} option.').format(
option=option))
class BlogBuilder(object):
"""A template pattern class for generating output related to a blog."""
def _generate_output(self):
"""Generate output that belongs in the destination file.
Subclasses must implement this method.
"""
raise NotImplementedError()
def write_to(self, filepath):
"""Write the output to the provided filepath."""
output = self._generate_output()
with open(filepath, 'wb') as out:
out.write(output.encode('utf-8'))
out.write(b'<!-- handrolled for excellence -->\n')
class FeedBuilder(BlogBuilder):
"""Transform blog metadata and posts into an Atom feed."""
def __init__(self, metadata):
self.metadata = metadata
self._feed = AtomFeed(**metadata)
def add(self, posts):
"""Add blog posts to the feed."""
for post in posts:
self._feed.add(FeedEntry(summary=post.summary, title=post.title,
title_type='html', url=post.url, updated=post.date))
def _generate_output(self):
return self._feed.to_string()
class ListPageBuilder(BlogBuilder):
"""Transform blog posts into a list page."""
def __init__(self, template):
self._template = template
self._blog_list = ''
self._posts = None
def add(self, posts):
"""Add the posts and generate a blog list."""
li_html = []
for post in posts:
li_html.append(u'<li><a href="{route}">{title}</a></li>'.format
(route=post.route, title=post.title))
self._blog_list = u'\n'.join(li_html)
self._posts = posts
def _generate_output(self):
context = {'blog_list': self._blog_list, 'posts': self._posts}
return self._template.render(context)
| <mask token>
class BlogPost(object):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class BlogExtension(Extension):
"""Track files marked as blog entries and generate a feed."""
handle_frontmatter_loaded = True
handle_pre_composition = True
handle_post_composition = True
required_metadata = {'author': 'atom_author', 'id': 'atom_id', 'title':
'atom_title', 'url': 'atom_url'}
def __init__(self, config):
super(BlogExtension, self).__init__(config)
self.posts = {}
self.atom_metadata = {}
self.atom_output = ''
self.list_template = None
self.list_output = None
self._resolver = None
self._should_generate = True
def on_pre_composition(self, director):
"""Check that all the required configuration exists."""
if not self._config.parser.has_section('blog'):
raise AbortError(_(
'A blog section is missing in the configuration file.'))
for metadata, option in self.required_metadata.items():
self._add_atom_metadata(metadata, option)
self.atom_output = self._get_option('atom_output')
if self._config.parser.has_option('blog', 'list_template'):
self.list_template = self._get_option('list_template')
self.list_output = self._get_option('list_output')
self._resolver = director.resolver
def on_frontmatter_loaded(self, source_file, frontmatter):
"""Record any new blog posts."""
if not self._is_post(frontmatter):
return
self._validate_post(source_file, frontmatter)
post = BlogPost(date=frontmatter['date'], source_file=source_file,
summary=frontmatter.get('summary', ''), title=frontmatter[
'title'], route=self._resolver.as_route(source_file), url=self.
_resolver.as_url(source_file), posts=self.posts)
frontmatter['post'] = post
if post != self.posts.get(source_file):
self.posts[source_file] = post
self._should_generate = True
def on_post_composition(self, director):
"""Generate blog output."""
if not self._should_generate:
return
blog_posts = sorted(self.posts.values(), key=lambda p: p.date,
reverse=True)
self._generate_atom_feed(director, blog_posts)
if self.list_template is not None:
self._generate_list_page(director, blog_posts)
self._should_generate = False
def _is_post(self, frontmatter):
"""Check if the front matter looks like a blog post."""
is_post = frontmatter.get('blog', False)
if type(is_post) != bool:
raise AbortError(_(
'Invalid blog frontmatter (expects True or False): {blog_value}'
).format(blog_value=is_post))
return is_post
def _validate_post(self, source_file, frontmatter):
"""Validate that the post contains all the required fields."""
required = set(['date', 'title'])
fields = set(frontmatter.keys())
missing = required - fields
if missing:
raise AbortError(_(
'The blog post, {filename}, is missing required fields: {missing_fields}'
.format(filename=source_file, missing_fields=', '.join(
missing))))
def _generate_atom_feed(self, director, blog_posts):
"""Generate the atom feed."""
logger.info(_('Generating Atom XML feed ...'))
builder = FeedBuilder(self.atom_metadata)
builder.add(blog_posts)
output_file = os.path.join(director.outdir, self.atom_output)
builder.write_to(output_file)
def _generate_list_page(self, director, blog_posts):
"""Generate the list page."""
logger.info(_('Generating blog list page ...'))
template = director.catalog.get_template(self.list_template)
builder = ListPageBuilder(template)
builder.add(blog_posts)
output_file = os.path.join(director.outdir, self.list_output)
builder.write_to(output_file)
def _add_atom_metadata(self, name, option):
"""Add atom metadata from the config parser."""
self.atom_metadata[name] = self._get_option(option)
def _get_option(self, option):
"""Get an option out of the blog section."""
try:
return self._config.parser.get('blog', option)
except configparser.NoOptionError:
raise AbortError(_(
'The blog extension requires the {option} option.').format(
option=option))
class BlogBuilder(object):
"""A template pattern class for generating output related to a blog."""
def _generate_output(self):
"""Generate output that belongs in the destination file.
Subclasses must implement this method.
"""
raise NotImplementedError()
def write_to(self, filepath):
"""Write the output to the provided filepath."""
output = self._generate_output()
with open(filepath, 'wb') as out:
out.write(output.encode('utf-8'))
out.write(b'<!-- handrolled for excellence -->\n')
class FeedBuilder(BlogBuilder):
"""Transform blog metadata and posts into an Atom feed."""
def __init__(self, metadata):
self.metadata = metadata
self._feed = AtomFeed(**metadata)
def add(self, posts):
"""Add blog posts to the feed."""
for post in posts:
self._feed.add(FeedEntry(summary=post.summary, title=post.title,
title_type='html', url=post.url, updated=post.date))
def _generate_output(self):
return self._feed.to_string()
class ListPageBuilder(BlogBuilder):
"""Transform blog posts into a list page."""
def __init__(self, template):
self._template = template
self._blog_list = ''
self._posts = None
def add(self, posts):
"""Add the posts and generate a blog list."""
li_html = []
for post in posts:
li_html.append(u'<li><a href="{route}">{title}</a></li>'.format
(route=post.route, title=post.title))
self._blog_list = u'\n'.join(li_html)
self._posts = posts
def _generate_output(self):
context = {'blog_list': self._blog_list, 'posts': self._posts}
return self._template.render(context)
| <mask token>
class BlogPost(object):
def __init__(self, **kwargs):
self.date = kwargs['date']
self.source_file = kwargs['source_file']
self.summary = smartypants.smartypants(kwargs['summary'])
self.title = smartypants.smartypants(kwargs['title'])
self.route = kwargs['route']
self.url = kwargs['url']
self._posts = kwargs['posts']
<mask token>
def __lt__(self, other):
return self.date < other.date
def __ne__(self, other):
return not self.__eq__(other)
<mask token>
@property
def next(self):
"""Get the next chronological blog post."""
posts_by_date = self.posts_by_date
index = bisect.bisect_left(posts_by_date, self)
if index + 1 == len(posts_by_date):
return None
return posts_by_date[index + 1]
<mask token>
<mask token>
class BlogExtension(Extension):
"""Track files marked as blog entries and generate a feed."""
handle_frontmatter_loaded = True
handle_pre_composition = True
handle_post_composition = True
required_metadata = {'author': 'atom_author', 'id': 'atom_id', 'title':
'atom_title', 'url': 'atom_url'}
def __init__(self, config):
super(BlogExtension, self).__init__(config)
self.posts = {}
self.atom_metadata = {}
self.atom_output = ''
self.list_template = None
self.list_output = None
self._resolver = None
self._should_generate = True
def on_pre_composition(self, director):
"""Check that all the required configuration exists."""
if not self._config.parser.has_section('blog'):
raise AbortError(_(
'A blog section is missing in the configuration file.'))
for metadata, option in self.required_metadata.items():
self._add_atom_metadata(metadata, option)
self.atom_output = self._get_option('atom_output')
if self._config.parser.has_option('blog', 'list_template'):
self.list_template = self._get_option('list_template')
self.list_output = self._get_option('list_output')
self._resolver = director.resolver
def on_frontmatter_loaded(self, source_file, frontmatter):
"""Record any new blog posts."""
if not self._is_post(frontmatter):
return
self._validate_post(source_file, frontmatter)
post = BlogPost(date=frontmatter['date'], source_file=source_file,
summary=frontmatter.get('summary', ''), title=frontmatter[
'title'], route=self._resolver.as_route(source_file), url=self.
_resolver.as_url(source_file), posts=self.posts)
frontmatter['post'] = post
if post != self.posts.get(source_file):
self.posts[source_file] = post
self._should_generate = True
def on_post_composition(self, director):
"""Generate blog output."""
if not self._should_generate:
return
blog_posts = sorted(self.posts.values(), key=lambda p: p.date,
reverse=True)
self._generate_atom_feed(director, blog_posts)
if self.list_template is not None:
self._generate_list_page(director, blog_posts)
self._should_generate = False
def _is_post(self, frontmatter):
"""Check if the front matter looks like a blog post."""
is_post = frontmatter.get('blog', False)
if type(is_post) != bool:
raise AbortError(_(
'Invalid blog frontmatter (expects True or False): {blog_value}'
).format(blog_value=is_post))
return is_post
def _validate_post(self, source_file, frontmatter):
"""Validate that the post contains all the required fields."""
required = set(['date', 'title'])
fields = set(frontmatter.keys())
missing = required - fields
if missing:
raise AbortError(_(
'The blog post, {filename}, is missing required fields: {missing_fields}'
.format(filename=source_file, missing_fields=', '.join(
missing))))
def _generate_atom_feed(self, director, blog_posts):
"""Generate the atom feed."""
logger.info(_('Generating Atom XML feed ...'))
builder = FeedBuilder(self.atom_metadata)
builder.add(blog_posts)
output_file = os.path.join(director.outdir, self.atom_output)
builder.write_to(output_file)
def _generate_list_page(self, director, blog_posts):
"""Generate the list page."""
logger.info(_('Generating blog list page ...'))
template = director.catalog.get_template(self.list_template)
builder = ListPageBuilder(template)
builder.add(blog_posts)
output_file = os.path.join(director.outdir, self.list_output)
builder.write_to(output_file)
def _add_atom_metadata(self, name, option):
"""Add atom metadata from the config parser."""
self.atom_metadata[name] = self._get_option(option)
def _get_option(self, option):
"""Get an option out of the blog section."""
try:
return self._config.parser.get('blog', option)
except configparser.NoOptionError:
raise AbortError(_(
'The blog extension requires the {option} option.').format(
option=option))
class BlogBuilder(object):
"""A template pattern class for generating output related to a blog."""
def _generate_output(self):
"""Generate output that belongs in the destination file.
Subclasses must implement this method.
"""
raise NotImplementedError()
def write_to(self, filepath):
"""Write the output to the provided filepath."""
output = self._generate_output()
with open(filepath, 'wb') as out:
out.write(output.encode('utf-8'))
out.write(b'<!-- handrolled for excellence -->\n')
class FeedBuilder(BlogBuilder):
"""Transform blog metadata and posts into an Atom feed."""
def __init__(self, metadata):
self.metadata = metadata
self._feed = AtomFeed(**metadata)
def add(self, posts):
"""Add blog posts to the feed."""
for post in posts:
self._feed.add(FeedEntry(summary=post.summary, title=post.title,
title_type='html', url=post.url, updated=post.date))
def _generate_output(self):
return self._feed.to_string()
class ListPageBuilder(BlogBuilder):
"""Transform blog posts into a list page."""
def __init__(self, template):
self._template = template
self._blog_list = ''
self._posts = None
def add(self, posts):
"""Add the posts and generate a blog list."""
li_html = []
for post in posts:
li_html.append(u'<li><a href="{route}">{title}</a></li>'.format
(route=post.route, title=post.title))
self._blog_list = u'\n'.join(li_html)
self._posts = posts
def _generate_output(self):
context = {'blog_list': self._blog_list, 'posts': self._posts}
return self._template.render(context)
| # Copyright (c) 2017, Matt Layman
import bisect
import configparser
import os
import smartypants
from werkzeug.contrib.atom import AtomFeed, FeedEntry
from handroll import logger
from handroll.exceptions import AbortError
from handroll.extensions.base import Extension
from handroll.i18n import _
class BlogPost(object):
def __init__(self, **kwargs):
self.date = kwargs['date']
self.source_file = kwargs['source_file']
self.summary = smartypants.smartypants(kwargs['summary'])
self.title = smartypants.smartypants(kwargs['title'])
self.route = kwargs['route']
self.url = kwargs['url']
# Having the posts enables a blog post to find its relationships.
self._posts = kwargs['posts']
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __lt__(self, other):
return self.date < other.date
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return 'BlogPost({}, {})'.format(self.source_file, self.date)
@property
def next(self):
"""Get the next chronological blog post."""
posts_by_date = self.posts_by_date
index = bisect.bisect_left(posts_by_date, self)
if index + 1 == len(posts_by_date):
return None
return posts_by_date[index + 1]
@property
def previous(self):
"""Get the previous chronological blog post."""
posts_by_date = self.posts_by_date
index = bisect.bisect_left(posts_by_date, self)
if index == 0:
return None
return posts_by_date[index - 1]
@property
def posts_by_date(self):
return sorted(self._posts.values(), key=lambda p: p.date)
class BlogExtension(Extension):
"""Track files marked as blog entries and generate a feed."""
handle_frontmatter_loaded = True
handle_pre_composition = True
handle_post_composition = True
required_metadata = {
'author': 'atom_author',
'id': 'atom_id',
'title': 'atom_title',
'url': 'atom_url',
}
def __init__(self, config):
super(BlogExtension, self).__init__(config)
self.posts = {}
self.atom_metadata = {}
self.atom_output = ''
self.list_template = None
self.list_output = None
self._resolver = None
self._should_generate = True
def on_pre_composition(self, director):
"""Check that all the required configuration exists."""
if not self._config.parser.has_section('blog'):
raise AbortError(
_('A blog section is missing in the configuration file.'))
# Collect atom feed configuration.
for metadata, option in self.required_metadata.items():
self._add_atom_metadata(metadata, option)
self.atom_output = self._get_option('atom_output')
# Collect HTML listing configuration.
if self._config.parser.has_option('blog', 'list_template'):
self.list_template = self._get_option('list_template')
self.list_output = self._get_option('list_output')
# Grab the resolver from the director for determining URLs for posts.
self._resolver = director.resolver
def on_frontmatter_loaded(self, source_file, frontmatter):
"""Record any new blog posts."""
if not self._is_post(frontmatter):
return
self._validate_post(source_file, frontmatter)
post = BlogPost(
date=frontmatter['date'],
source_file=source_file,
summary=frontmatter.get('summary', ''),
title=frontmatter['title'],
route=self._resolver.as_route(source_file),
url=self._resolver.as_url(source_file),
posts=self.posts,
)
frontmatter['post'] = post
if post != self.posts.get(source_file):
self.posts[source_file] = post
self._should_generate = True
def on_post_composition(self, director):
"""Generate blog output."""
if not self._should_generate:
return
blog_posts = sorted(
self.posts.values(), key=lambda p: p.date, reverse=True)
self._generate_atom_feed(director, blog_posts)
if self.list_template is not None:
self._generate_list_page(director, blog_posts)
self._should_generate = False
def _is_post(self, frontmatter):
"""Check if the front matter looks like a blog post."""
is_post = frontmatter.get('blog', False)
if type(is_post) != bool:
raise AbortError(
_('Invalid blog frontmatter (expects True or False): '
'{blog_value}').format(blog_value=is_post))
return is_post
def _validate_post(self, source_file, frontmatter):
"""Validate that the post contains all the required fields."""
required = set([
'date',
'title',
])
fields = set(frontmatter.keys())
missing = required - fields
if missing:
raise AbortError(_(
'The blog post, {filename}, '
'is missing required fields: {missing_fields}'.format(
filename=source_file, missing_fields=', '.join(missing))))
def _generate_atom_feed(self, director, blog_posts):
"""Generate the atom feed."""
logger.info(_('Generating Atom XML feed ...'))
builder = FeedBuilder(self.atom_metadata)
builder.add(blog_posts)
output_file = os.path.join(director.outdir, self.atom_output)
builder.write_to(output_file)
def _generate_list_page(self, director, blog_posts):
"""Generate the list page."""
logger.info(_('Generating blog list page ...'))
template = director.catalog.get_template(self.list_template)
builder = ListPageBuilder(template)
builder.add(blog_posts)
output_file = os.path.join(director.outdir, self.list_output)
builder.write_to(output_file)
def _add_atom_metadata(self, name, option):
"""Add atom metadata from the config parser."""
self.atom_metadata[name] = self._get_option(option)
def _get_option(self, option):
"""Get an option out of the blog section."""
try:
return self._config.parser.get('blog', option)
except configparser.NoOptionError:
raise AbortError(
_('The blog extension requires the {option} option.').format(
option=option))
class BlogBuilder(object):
"""A template pattern class for generating output related to a blog."""
def _generate_output(self):
"""Generate output that belongs in the destination file.
Subclasses must implement this method.
"""
raise NotImplementedError()
def write_to(self, filepath):
"""Write the output to the provided filepath."""
output = self._generate_output()
with open(filepath, 'wb') as out:
out.write(output.encode('utf-8'))
out.write(b'<!-- handrolled for excellence -->\n')
class FeedBuilder(BlogBuilder):
"""Transform blog metadata and posts into an Atom feed."""
def __init__(self, metadata):
self.metadata = metadata
self._feed = AtomFeed(**metadata)
def add(self, posts):
"""Add blog posts to the feed."""
for post in posts:
self._feed.add(FeedEntry(
summary=post.summary,
title=post.title,
title_type='html',
url=post.url,
updated=post.date,
))
def _generate_output(self):
return self._feed.to_string()
class ListPageBuilder(BlogBuilder):
"""Transform blog posts into a list page."""
def __init__(self, template):
self._template = template
self._blog_list = ''
self._posts = None
def add(self, posts):
"""Add the posts and generate a blog list."""
li_html = []
for post in posts:
li_html.append(
u'<li><a href="{route}">{title}</a></li>'.format(
route=post.route, title=post.title))
self._blog_list = u'\n'.join(li_html)
self._posts = posts
def _generate_output(self):
context = {
'blog_list': self._blog_list,
'posts': self._posts,
}
return self._template.render(context)
| [
13,
24,
28,
32,
38
] |
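# The builder classes above share a template-method pattern: write_to is fixed in
# BlogBuilder and subclasses only supply _generate_output. A self-contained sketch of
# that contract, independent of handroll and werkzeug; the GreetingBuilder subclass and
# the temp-file path are purely illustrative.
import os
import tempfile

class BlogBuilder(object):
    # Template method: subclasses decide the content, the base class handles the file.
    def _generate_output(self):
        raise NotImplementedError()

    def write_to(self, filepath):
        output = self._generate_output()
        with open(filepath, 'wb') as out:
            out.write(output.encode('utf-8'))
            out.write(b'<!-- handrolled for excellence -->\n')

class GreetingBuilder(BlogBuilder):
    def _generate_output(self):
        return u'<h1>Hello, blog</h1>\n'

path = os.path.join(tempfile.gettempdir(), 'greeting.html')
GreetingBuilder().write_to(path)
with open(path, 'rb') as f:
    print(f.read().decode('utf-8'))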
831 | 7fa7a632078ce4f0052e3cadf11d5efd47a1fad5 | <mask token>
class TILA_Config_LogList(bpy.types.UIList):
<mask token>
<mask token>
class TILA_Config_SatusList(bpy.types.UIList):
bl_idname = 'TILA_UL_Config_status_list'
def draw_item(self, context, layout, data, item, icon, active_data,
active_propname, index):
row = layout.row(align=True)
row.label(text=item.name, icon=item.icon)
class TILA_Config_Log:
def __init__(self, log, index_name):
self.log = log
self.index_name = index_name
def append(self, name, icon='BLANK1'):
element = self.log.add()
element.name = name
element.icon = icon
setattr(bpy.context.window_manager, self.index_name, len(self.log) - 1)
def info(self, name):
self.append(name, icon='INFO')
def warning(self, name):
self.append(name, icon='ERROR')
def error(self, name):
self.append(name, icon='CANCEL')
def start(self, name):
self.append(name, icon='TRIA_RIGHT')
def done(self, name):
self.append(name, icon='CHECKMARK')
| <mask token>
class TILA_Config_LogList(bpy.types.UIList):
<mask token>
def draw_item(self, context, layout, data, item, icon, active_data,
active_propname, index):
row = layout.row(align=True)
row.label(text=item.name, icon=item.icon)
class TILA_Config_SatusList(bpy.types.UIList):
bl_idname = 'TILA_UL_Config_status_list'
def draw_item(self, context, layout, data, item, icon, active_data,
active_propname, index):
row = layout.row(align=True)
row.label(text=item.name, icon=item.icon)
class TILA_Config_Log:
def __init__(self, log, index_name):
self.log = log
self.index_name = index_name
def append(self, name, icon='BLANK1'):
element = self.log.add()
element.name = name
element.icon = icon
setattr(bpy.context.window_manager, self.index_name, len(self.log) - 1)
def info(self, name):
self.append(name, icon='INFO')
def warning(self, name):
self.append(name, icon='ERROR')
def error(self, name):
self.append(name, icon='CANCEL')
def start(self, name):
self.append(name, icon='TRIA_RIGHT')
def done(self, name):
self.append(name, icon='CHECKMARK')
| <mask token>
class TILA_Config_LogList(bpy.types.UIList):
bl_idname = 'TILA_UL_Config_log_list'
def draw_item(self, context, layout, data, item, icon, active_data,
active_propname, index):
row = layout.row(align=True)
row.label(text=item.name, icon=item.icon)
class TILA_Config_SatusList(bpy.types.UIList):
bl_idname = 'TILA_UL_Config_status_list'
def draw_item(self, context, layout, data, item, icon, active_data,
active_propname, index):
row = layout.row(align=True)
row.label(text=item.name, icon=item.icon)
class TILA_Config_Log:
def __init__(self, log, index_name):
self.log = log
self.index_name = index_name
def append(self, name, icon='BLANK1'):
element = self.log.add()
element.name = name
element.icon = icon
setattr(bpy.context.window_manager, self.index_name, len(self.log) - 1)
def info(self, name):
self.append(name, icon='INFO')
def warning(self, name):
self.append(name, icon='ERROR')
def error(self, name):
self.append(name, icon='CANCEL')
def start(self, name):
self.append(name, icon='TRIA_RIGHT')
def done(self, name):
self.append(name, icon='CHECKMARK')
| <mask token>
class TILA_Config_LogElement(bpy.types.PropertyGroup):
name: bpy.props.StringProperty(default='')
icon: bpy.props.StringProperty(default='BLANK1')
class TILA_Config_LogList(bpy.types.UIList):
bl_idname = 'TILA_UL_Config_log_list'
def draw_item(self, context, layout, data, item, icon, active_data,
active_propname, index):
row = layout.row(align=True)
row.label(text=item.name, icon=item.icon)
class TILA_Config_SatusList(bpy.types.UIList):
bl_idname = 'TILA_UL_Config_status_list'
def draw_item(self, context, layout, data, item, icon, active_data,
active_propname, index):
row = layout.row(align=True)
row.label(text=item.name, icon=item.icon)
class TILA_Config_Log:
def __init__(self, log, index_name):
self.log = log
self.index_name = index_name
def append(self, name, icon='BLANK1'):
element = self.log.add()
element.name = name
element.icon = icon
setattr(bpy.context.window_manager, self.index_name, len(self.log) - 1)
def info(self, name):
self.append(name, icon='INFO')
def warning(self, name):
self.append(name, icon='ERROR')
def error(self, name):
self.append(name, icon='CANCEL')
def start(self, name):
self.append(name, icon='TRIA_RIGHT')
def done(self, name):
self.append(name, icon='CHECKMARK')
| import bpy
class TILA_Config_LogElement(bpy.types.PropertyGroup):
name: bpy.props.StringProperty(default='')
icon: bpy.props.StringProperty(default='BLANK1')
class TILA_Config_LogList(bpy.types.UIList):
bl_idname = "TILA_UL_Config_log_list"
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
row = layout.row(align=True)
row.label(text=item.name, icon=item.icon)
class TILA_Config_SatusList(bpy.types.UIList):
bl_idname = "TILA_UL_Config_status_list"
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
row = layout.row(align=True)
row.label(text=item.name, icon=item.icon)
class TILA_Config_Log():
def __init__(self, log, index_name):
self.log = log
self.index_name = index_name
def append(self, name, icon='BLANK1'):
element = self.log.add()
element.name = name
element.icon = icon
setattr(bpy.context.window_manager, self.index_name, len(self.log)-1)
def info(self, name):
self.append(name, icon='INFO')
def warning(self, name):
self.append(name, icon='ERROR')
def error(self, name):
self.append(name, icon='CANCEL')
def start(self, name):
self.append(name, icon='TRIA_RIGHT')
def done(self, name):
self.append(name, icon='CHECKMARK')
| [
12,
13,
14,
15,
17
] |
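# TILA_Config_Log above wraps a bpy collection property and keeps a window-manager index
# pointing at the newest entry. A Blender-independent mimic of that pattern, where a plain
# list and an index attribute stand in for the bpy-specific storage (names are illustrative).
class FakeLogEntry:
    def __init__(self):
        self.name = ''
        self.icon = 'BLANK1'

class FakeLogStore:
    def __init__(self):
        self.entries = []
        self.index = -1

    def add(self):
        entry = FakeLogEntry()
        self.entries.append(entry)
        return entry

    def append(self, name, icon='BLANK1'):
        entry = self.add()
        entry.name = name
        entry.icon = icon
        self.index = len(self.entries) - 1  # mirrors setattr(window_manager, index_name, ...)

log = FakeLogStore()
log.append('Installing dependencies', icon='TRIA_RIGHT')
log.append('Done', icon='CHECKMARK')
print(log.index, [(e.icon, e.name) for e in log.entries])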
832 | 77e4bbe625251254cdadaeeb23dddf51e729e747 | <mask token>
class DepartmentAdmin(admin.ModelAdmin):
<mask token>
<mask token>
<mask token>
<mask token>
def save_model(self, request, obj, form, change):
if obj.code == '':
obj.code = obj.name.replace(' ', '_')
obj.save()
class DepartmentInline(admin.TabularInline):
model = Department
extra = 0
fields = 'description',
class UniversityAdmin(admin.ModelAdmin):
inlines = [DepartmentInline]
search_fields = 'description',
def save_model(self, request, obj, form, change):
obj.code = obj.description.replace(' ', '_')
obj.save()
def change_view(self, request, object_id, extra_content=None):
self.exclude = '',
return super(UniversityAdmin, self).change_view(request, object_id)
def add_view(self, request, extra_content=None):
self.exclude = 'code',
return super(UniversityAdmin, self).add_view(request)
class CourseForm(forms.ModelForm):
class Meta:
Model = Course
def __init__(self, *args, **kwargs):
super(CourseForm, self).__init__(*args, **kwargs)
self.fields['prerequisite'].queryset = Course.objects.exclude(id__exact
=self.instance.id)
def clean(self):
cleaned_data = self.cleaned_data
if self.instance.pk is None:
if Course.objects.filter(code=cleaned_data['code'], university=
cleaned_data['university']).exists():
raise forms.ValidationError(
'The course already exists at this university.')
return cleaned_data
class CourseAdmin(admin.ModelAdmin):
form = CourseForm
list_display = 'code', 'university'
list_filter = 'university',
search_fields = 'code',
def save_model(self, request, obj, form, change):
if obj.code == '':
obj.code = obj.name.replace(' ', '_')
obj.save()
class dbAdmin(UserAdmin):
fieldsets = (None, {'fields': ('email', 'password')}), (_(
'Personal info'), {'fields': ('first_name', 'last_name')}), (_(
'Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}), (_('Important dates'), {'fields':
('last_login', 'date_joined')})
add_fieldsets = (None, {'classes': ('wide',), 'fields': ('email',
'password1', 'password2')}),
form = CustomUserChangeForm
add_form = CustomUserCreationForm
list_display = 'email', 'first_name', 'last_name', 'is_staff'
search_fields = 'email', 'first_name', 'last_name'
ordering = 'email',
<mask token>
| <mask token>
class ProgramAdmin(admin.ModelAdmin):
<mask token>
<mask token>
<mask token>
def get_university(self, obj):
return obj.department.university
<mask token>
<mask token>
<mask token>
def add_view(self, request, extra_content=None):
self.exclude = 'code',
return super(ProgramAdmin, self).add_view(request)
class ProgramInline(admin.TabularInline):
model = Program
extra = 0
fields = 'description',
class DepartmentAdmin(admin.ModelAdmin):
fieldsets = [(None, {'fields': ['description', 'university', 'tenured',
'nonTenured']})]
inlines = [ProgramInline]
search_fields = 'university__description', 'description'
list_filter = 'description', 'university'
def save_model(self, request, obj, form, change):
if obj.code == '':
obj.code = obj.name.replace(' ', '_')
obj.save()
class DepartmentInline(admin.TabularInline):
model = Department
extra = 0
fields = 'description',
class UniversityAdmin(admin.ModelAdmin):
inlines = [DepartmentInline]
search_fields = 'description',
def save_model(self, request, obj, form, change):
obj.code = obj.description.replace(' ', '_')
obj.save()
def change_view(self, request, object_id, extra_content=None):
self.exclude = '',
return super(UniversityAdmin, self).change_view(request, object_id)
def add_view(self, request, extra_content=None):
self.exclude = 'code',
return super(UniversityAdmin, self).add_view(request)
class CourseForm(forms.ModelForm):
class Meta:
Model = Course
def __init__(self, *args, **kwargs):
super(CourseForm, self).__init__(*args, **kwargs)
self.fields['prerequisite'].queryset = Course.objects.exclude(id__exact
=self.instance.id)
def clean(self):
cleaned_data = self.cleaned_data
if self.instance.pk is None:
if Course.objects.filter(code=cleaned_data['code'], university=
cleaned_data['university']).exists():
raise forms.ValidationError(
'The course already exists at this university.')
return cleaned_data
class CourseAdmin(admin.ModelAdmin):
form = CourseForm
list_display = 'code', 'university'
list_filter = 'university',
search_fields = 'code',
def save_model(self, request, obj, form, change):
if obj.code == '':
obj.code = obj.name.replace(' ', '_')
obj.save()
class dbAdmin(UserAdmin):
fieldsets = (None, {'fields': ('email', 'password')}), (_(
'Personal info'), {'fields': ('first_name', 'last_name')}), (_(
'Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}), (_('Important dates'), {'fields':
('last_login', 'date_joined')})
add_fieldsets = (None, {'classes': ('wide',), 'fields': ('email',
'password1', 'password2')}),
form = CustomUserChangeForm
add_form = CustomUserCreationForm
list_display = 'email', 'first_name', 'last_name', 'is_staff'
search_fields = 'email', 'first_name', 'last_name'
ordering = 'email',
<mask token>
| <mask token>
class ProgramAdmin(admin.ModelAdmin):
<mask token>
<mask token>
<mask token>
def get_university(self, obj):
return obj.department.university
<mask token>
<mask token>
def change_view(self, request, object_id, extra_content=None):
self.exclude = '',
return super(ProgramAdmin, self).change_view(request, object_id)
def add_view(self, request, extra_content=None):
self.exclude = 'code',
return super(ProgramAdmin, self).add_view(request)
class ProgramInline(admin.TabularInline):
model = Program
extra = 0
fields = 'description',
class DepartmentAdmin(admin.ModelAdmin):
fieldsets = [(None, {'fields': ['description', 'university', 'tenured',
'nonTenured']})]
inlines = [ProgramInline]
search_fields = 'university__description', 'description'
list_filter = 'description', 'university'
def save_model(self, request, obj, form, change):
if obj.code == '':
obj.code = obj.name.replace(' ', '_')
obj.save()
class DepartmentInline(admin.TabularInline):
model = Department
extra = 0
fields = 'description',
class UniversityAdmin(admin.ModelAdmin):
inlines = [DepartmentInline]
search_fields = 'description',
def save_model(self, request, obj, form, change):
obj.code = obj.description.replace(' ', '_')
obj.save()
def change_view(self, request, object_id, extra_content=None):
self.exclude = '',
return super(UniversityAdmin, self).change_view(request, object_id)
def add_view(self, request, extra_content=None):
self.exclude = 'code',
return super(UniversityAdmin, self).add_view(request)
class CourseForm(forms.ModelForm):
class Meta:
Model = Course
def __init__(self, *args, **kwargs):
super(CourseForm, self).__init__(*args, **kwargs)
self.fields['prerequisite'].queryset = Course.objects.exclude(id__exact
=self.instance.id)
def clean(self):
cleaned_data = self.cleaned_data
if self.instance.pk is None:
if Course.objects.filter(code=cleaned_data['code'], university=
cleaned_data['university']).exists():
raise forms.ValidationError(
'The course already exists at this university.')
return cleaned_data
class CourseAdmin(admin.ModelAdmin):
form = CourseForm
list_display = 'code', 'university'
list_filter = 'university',
search_fields = 'code',
def save_model(self, request, obj, form, change):
if obj.code == '':
obj.code = obj.name.replace(' ', '_')
obj.save()
class dbAdmin(UserAdmin):
fieldsets = (None, {'fields': ('email', 'password')}), (_(
'Personal info'), {'fields': ('first_name', 'last_name')}), (_(
'Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}), (_('Important dates'), {'fields':
('last_login', 'date_joined')})
add_fieldsets = (None, {'classes': ('wide',), 'fields': ('email',
'password1', 'password2')}),
form = CustomUserChangeForm
add_form = CustomUserCreationForm
list_display = 'email', 'first_name', 'last_name', 'is_staff'
search_fields = 'email', 'first_name', 'last_name'
ordering = 'email',
<mask token>
| <mask token>
class ProgramAdmin(admin.ModelAdmin):
list_display = 'description', 'get_university'
search_fields = 'description', 'department__university__code'
list_filter = 'department__university',
def get_university(self, obj):
return obj.department.university
def save_model(self, request, obj, form, change):
obj.code = obj.description.replace(' ', '_')
obj.save()
get_university.short_description = 'University'
def change_view(self, request, object_id, extra_content=None):
self.exclude = '',
return super(ProgramAdmin, self).change_view(request, object_id)
def add_view(self, request, extra_content=None):
self.exclude = 'code',
return super(ProgramAdmin, self).add_view(request)
class ProgramInline(admin.TabularInline):
model = Program
extra = 0
fields = 'description',
class DepartmentAdmin(admin.ModelAdmin):
fieldsets = [(None, {'fields': ['description', 'university', 'tenured',
'nonTenured']})]
inlines = [ProgramInline]
search_fields = 'university__description', 'description'
list_filter = 'description', 'university'
def save_model(self, request, obj, form, change):
if obj.code == '':
obj.code = obj.name.replace(' ', '_')
obj.save()
class DepartmentInline(admin.TabularInline):
model = Department
extra = 0
fields = 'description',
class UniversityAdmin(admin.ModelAdmin):
inlines = [DepartmentInline]
search_fields = 'description',
def save_model(self, request, obj, form, change):
obj.code = obj.description.replace(' ', '_')
obj.save()
def change_view(self, request, object_id, extra_content=None):
self.exclude = '',
return super(UniversityAdmin, self).change_view(request, object_id)
def add_view(self, request, extra_content=None):
self.exclude = 'code',
return super(UniversityAdmin, self).add_view(request)
class CourseForm(forms.ModelForm):
class Meta:
Model = Course
def __init__(self, *args, **kwargs):
super(CourseForm, self).__init__(*args, **kwargs)
self.fields['prerequisite'].queryset = Course.objects.exclude(id__exact
=self.instance.id)
def clean(self):
cleaned_data = self.cleaned_data
if self.instance.pk is None:
if Course.objects.filter(code=cleaned_data['code'], university=
cleaned_data['university']).exists():
raise forms.ValidationError(
'The course already exists at this university.')
return cleaned_data
class CourseAdmin(admin.ModelAdmin):
form = CourseForm
list_display = 'code', 'university'
list_filter = 'university',
search_fields = 'code',
def save_model(self, request, obj, form, change):
if obj.code == '':
obj.code = obj.name.replace(' ', '_')
obj.save()
class dbAdmin(UserAdmin):
fieldsets = (None, {'fields': ('email', 'password')}), (_(
'Personal info'), {'fields': ('first_name', 'last_name')}), (_(
'Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}), (_('Important dates'), {'fields':
('last_login', 'date_joined')})
add_fieldsets = (None, {'classes': ('wide',), 'fields': ('email',
'password1', 'password2')}),
form = CustomUserChangeForm
add_form = CustomUserCreationForm
list_display = 'email', 'first_name', 'last_name', 'is_staff'
search_fields = 'email', 'first_name', 'last_name'
ordering = 'email',
admin.site.register(dbUser, dbAdmin)
admin.site.register(University, UniversityAdmin)
admin.site.register(Program, ProgramAdmin)
admin.site.register(Department, DepartmentAdmin)
admin.site.register(Course, CourseAdmin)
| from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import ugettext_lazy as _
from django import forms
from programs.models import *
from programs.forms import CustomUserCreationForm, CustomUserChangeForm
import pdb
class ProgramAdmin(admin.ModelAdmin):
list_display = ('description','get_university')
search_fields=('description','department__university__code')
list_filter = ('department__university',)
def get_university(self,obj):
return obj.department.university
def save_model(self,request,obj,form,change):
obj.code = obj.description.replace(' ','_')
obj.save()
get_university.short_description = 'University'
def change_view(self,request,object_id,extra_content=None):
self.exclude = ('',)
return super(ProgramAdmin,self).change_view(request,object_id)
def add_view(self,request,extra_content=None):
self.exclude = ('code',)
return super(ProgramAdmin,self).add_view(request)
class ProgramInline(admin.TabularInline):
model = Program
extra = 0
fields = ('description',)
class DepartmentAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields':['description','university','tenured','nonTenured']}),
]
inlines = [ProgramInline]
search_fields = ('university__description','description')
list_filter = ('description','university')
def save_model(self,request,obj,form,change):
if obj.code == '':
obj.code = obj.name.replace(' ','_')
obj.save()
class DepartmentInline(admin.TabularInline):
model = Department
extra = 0
fields = ('description',)
class UniversityAdmin(admin.ModelAdmin):
inlines = [DepartmentInline]
search_fields = ('description',)
def save_model(self,request,obj,form,change):
obj.code = obj.description.replace(' ','_')
obj.save()
def change_view(self,request,object_id,extra_content=None):
self.exclude = ('',)
return super(UniversityAdmin,self).change_view(request,object_id)
def add_view(self,request,extra_content=None):
self.exclude = ('code',)
return super(UniversityAdmin,self).add_view(request)
class CourseForm(forms.ModelForm):
class Meta:
Model = Course
def __init__(self,*args,**kwargs):
super(CourseForm,self).__init__(*args,**kwargs)
self.fields['prerequisite'].queryset = Course.objects.exclude(id__exact=self.instance.id)
def clean(self):
#Need to handle validation for unique_together
cleaned_data = self.cleaned_data
if self.instance.pk is None:
if Course.objects.filter(code=cleaned_data['code'],university=cleaned_data['university']).exists():
raise forms.ValidationError('The course already exists at this university.')
return cleaned_data
class CourseAdmin(admin.ModelAdmin):
form = CourseForm
list_display = ('code','university',)
list_filter = ('university',)
search_fields = ('code',)
def save_model(self,request,obj,form,change):
if obj.code == '':
obj.code = obj.name.replace(' ','_')
obj.save()
class dbAdmin(UserAdmin):
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Personal info'), {'fields': ('first_name', 'last_name')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2')}
),
)
form = CustomUserChangeForm
add_form = CustomUserCreationForm
list_display = ('email', 'first_name', 'last_name', 'is_staff')
search_fields = ('email', 'first_name', 'last_name')
ordering = ('email',)
admin.site.register(dbUser, dbAdmin)
admin.site.register(University,UniversityAdmin)
admin.site.register(Program,ProgramAdmin)
admin.site.register(Department,DepartmentAdmin)
admin.site.register(Course,CourseAdmin)
| [
17,
23,
24,
27,
29
] |
833 | 58ca520a2f43cef26a95de446f9c7a82819b0b66 | <mask token>
class GetData:
key = (
'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'
)
url = (
'http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey='
+ key)
def main(self):
data = urllib.request.urlopen(self.url).read()
print(data)
f = open('sample.xml', 'wb')
f.write(data)
f.close()
<mask token>
| <mask token>
class GetData:
key = (
'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'
)
url = (
'http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey='
+ key)
def main(self):
data = urllib.request.urlopen(self.url).read()
print(data)
f = open('sample.xml', 'wb')
f.write(data)
f.close()
<mask token>
getData.main()
| <mask token>
class GetData:
key = (
'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'
)
url = (
'http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey='
+ key)
def main(self):
data = urllib.request.urlopen(self.url).read()
print(data)
f = open('sample.xml', 'wb')
f.write(data)
f.close()
getData = GetData()
getData.main()
| import urllib.request
class GetData:
key = (
'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'
)
url = (
'http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey='
+ key)
def main(self):
data = urllib.request.urlopen(self.url).read()
print(data)
f = open('sample.xml', 'wb')
f.write(data)
f.close()
getData = GetData()
getData.main()
| import urllib.request
class GetData:
key = 'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'
url = "http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey=" + key
def main(self):
data = urllib.request.urlopen(self.url).read()
print(data)
f = open("sample.xml", "wb")
f.write(data)
f.close()
getData = GetData()
getData.main()
| [
3,
4,
5,
6,
7
] |
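# Once GetData.main() has written sample.xml, the response can be inspected with the
# standard library. A minimal sketch; the actual tag names depend on the service's
# response schema, which this row does not show.
import xml.etree.ElementTree as ET

tree = ET.parse('sample.xml')  # file produced by GetData.main()
root = tree.getroot()

# Print the first couple of levels of the response; concrete tags are schema-dependent.
for child in root:
    print(child.tag)
    for grandchild in list(child)[:3]:
        print('  ', grandchild.tag, (grandchild.text or '').strip()[:40])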
834 | 9535973f9714926269490b8550a67c74d04d8f0a | <mask token>
@_f
@_p.types(None, _cs.GLuint, _cs.GLsizei, arrays.GLfloatArray)
def glDepthRangeArrayfvNV(first, count, v):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLfloat, _cs.GLfloat)
def glDepthRangeIndexedfNV(index, n, f):
pass
@_f
@_p.types(None, _cs.GLenum, _cs.GLuint)
def glDisableiNV(target, index):
pass
@_f
@_p.types(None, _cs.GLenum, _cs.GLuint)
def glEnableiNV(target, index):
pass
@_f
@_p.types(None, _cs.GLenum, _cs.GLuint, arrays.GLfloatArray)
def glGetFloati_vNV(target, index, data):
pass
@_f
@_p.types(_cs.GLboolean, _cs.GLenum, _cs.GLuint)
def glIsEnablediNV(target, index):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLsizei, arrays.GLintArray)
def glScissorArrayvNV(first, count, v):
pass
<mask token>
@_f
@_p.types(None, _cs.GLuint, arrays.GLintArray)
def glScissorIndexedvNV(index, v):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLsizei, arrays.GLfloatArray)
def glViewportArrayvNV(first, count, v):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glViewportIndexedfNV(index, x, y, w, h):
pass
@_f
@_p.types(None, _cs.GLuint, arrays.GLfloatArray)
def glViewportIndexedfvNV(index, v):
pass
| <mask token>
@_f
@_p.types(None, _cs.GLuint, _cs.GLsizei, arrays.GLfloatArray)
def glDepthRangeArrayfvNV(first, count, v):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLfloat, _cs.GLfloat)
def glDepthRangeIndexedfNV(index, n, f):
pass
@_f
@_p.types(None, _cs.GLenum, _cs.GLuint)
def glDisableiNV(target, index):
pass
@_f
@_p.types(None, _cs.GLenum, _cs.GLuint)
def glEnableiNV(target, index):
pass
@_f
@_p.types(None, _cs.GLenum, _cs.GLuint, arrays.GLfloatArray)
def glGetFloati_vNV(target, index, data):
pass
@_f
@_p.types(_cs.GLboolean, _cs.GLenum, _cs.GLuint)
def glIsEnablediNV(target, index):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLsizei, arrays.GLintArray)
def glScissorArrayvNV(first, count, v):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLint, _cs.GLint, _cs.GLsizei, _cs.GLsizei)
def glScissorIndexedNV(index, left, bottom, width, height):
pass
@_f
@_p.types(None, _cs.GLuint, arrays.GLintArray)
def glScissorIndexedvNV(index, v):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLsizei, arrays.GLfloatArray)
def glViewportArrayvNV(first, count, v):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glViewportIndexedfNV(index, x, y, w, h):
pass
@_f
@_p.types(None, _cs.GLuint, arrays.GLfloatArray)
def glViewportIndexedfvNV(index, v):
pass
| <mask token>
def _f(function):
return _p.createFunction(function, _p.PLATFORM.GLES2,
'GLES2_NV_viewport_array', error_checker=_errors._error_checker)
<mask token>
@_f
@_p.types(None, _cs.GLuint, _cs.GLsizei, arrays.GLfloatArray)
def glDepthRangeArrayfvNV(first, count, v):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLfloat, _cs.GLfloat)
def glDepthRangeIndexedfNV(index, n, f):
pass
@_f
@_p.types(None, _cs.GLenum, _cs.GLuint)
def glDisableiNV(target, index):
pass
@_f
@_p.types(None, _cs.GLenum, _cs.GLuint)
def glEnableiNV(target, index):
pass
@_f
@_p.types(None, _cs.GLenum, _cs.GLuint, arrays.GLfloatArray)
def glGetFloati_vNV(target, index, data):
pass
@_f
@_p.types(_cs.GLboolean, _cs.GLenum, _cs.GLuint)
def glIsEnablediNV(target, index):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLsizei, arrays.GLintArray)
def glScissorArrayvNV(first, count, v):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLint, _cs.GLint, _cs.GLsizei, _cs.GLsizei)
def glScissorIndexedNV(index, left, bottom, width, height):
pass
@_f
@_p.types(None, _cs.GLuint, arrays.GLintArray)
def glScissorIndexedvNV(index, v):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLsizei, arrays.GLfloatArray)
def glViewportArrayvNV(first, count, v):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glViewportIndexedfNV(index, x, y, w, h):
pass
@_f
@_p.types(None, _cs.GLuint, arrays.GLfloatArray)
def glViewportIndexedfvNV(index, v):
pass
| <mask token>
_EXTENSION_NAME = 'GLES2_NV_viewport_array'
def _f(function):
return _p.createFunction(function, _p.PLATFORM.GLES2,
'GLES2_NV_viewport_array', error_checker=_errors._error_checker)
GL_DEPTH_RANGE = _C('GL_DEPTH_RANGE', 2928)
GL_MAX_VIEWPORTS_NV = _C('GL_MAX_VIEWPORTS_NV', 33371)
GL_SCISSOR_BOX = _C('GL_SCISSOR_BOX', 3088)
GL_SCISSOR_TEST = _C('GL_SCISSOR_TEST', 3089)
GL_VIEWPORT = _C('GL_VIEWPORT', 2978)
GL_VIEWPORT_BOUNDS_RANGE_NV = _C('GL_VIEWPORT_BOUNDS_RANGE_NV', 33373)
GL_VIEWPORT_INDEX_PROVOKING_VERTEX_NV = _C(
'GL_VIEWPORT_INDEX_PROVOKING_VERTEX_NV', 33375)
GL_VIEWPORT_SUBPIXEL_BITS_NV = _C('GL_VIEWPORT_SUBPIXEL_BITS_NV', 33372)
@_f
@_p.types(None, _cs.GLuint, _cs.GLsizei, arrays.GLfloatArray)
def glDepthRangeArrayfvNV(first, count, v):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLfloat, _cs.GLfloat)
def glDepthRangeIndexedfNV(index, n, f):
pass
@_f
@_p.types(None, _cs.GLenum, _cs.GLuint)
def glDisableiNV(target, index):
pass
@_f
@_p.types(None, _cs.GLenum, _cs.GLuint)
def glEnableiNV(target, index):
pass
@_f
@_p.types(None, _cs.GLenum, _cs.GLuint, arrays.GLfloatArray)
def glGetFloati_vNV(target, index, data):
pass
@_f
@_p.types(_cs.GLboolean, _cs.GLenum, _cs.GLuint)
def glIsEnablediNV(target, index):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLsizei, arrays.GLintArray)
def glScissorArrayvNV(first, count, v):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLint, _cs.GLint, _cs.GLsizei, _cs.GLsizei)
def glScissorIndexedNV(index, left, bottom, width, height):
pass
@_f
@_p.types(None, _cs.GLuint, arrays.GLintArray)
def glScissorIndexedvNV(index, v):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLsizei, arrays.GLfloatArray)
def glViewportArrayvNV(first, count, v):
pass
@_f
@_p.types(None, _cs.GLuint, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat, _cs.GLfloat)
def glViewportIndexedfNV(index, x, y, w, h):
pass
@_f
@_p.types(None, _cs.GLuint, arrays.GLfloatArray)
def glViewportIndexedfvNV(index, v):
pass
| '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
from OpenGL.constant import Constant as _C
# End users want this...
from OpenGL.raw.GLES2 import _errors
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
_EXTENSION_NAME = 'GLES2_NV_viewport_array'
def _f(function):
return _p.createFunction(function, _p.PLATFORM.GLES2, 'GLES2_NV_viewport_array',
error_checker=_errors._error_checker)
GL_DEPTH_RANGE = _C('GL_DEPTH_RANGE', 0x0B70)
GL_MAX_VIEWPORTS_NV = _C('GL_MAX_VIEWPORTS_NV', 0x825B)
GL_SCISSOR_BOX = _C('GL_SCISSOR_BOX', 0x0C10)
GL_SCISSOR_TEST = _C('GL_SCISSOR_TEST', 0x0C11)
GL_VIEWPORT = _C('GL_VIEWPORT', 0x0BA2)
GL_VIEWPORT_BOUNDS_RANGE_NV = _C('GL_VIEWPORT_BOUNDS_RANGE_NV', 0x825D)
GL_VIEWPORT_INDEX_PROVOKING_VERTEX_NV=_C('GL_VIEWPORT_INDEX_PROVOKING_VERTEX_NV',0x825F)
GL_VIEWPORT_SUBPIXEL_BITS_NV=_C('GL_VIEWPORT_SUBPIXEL_BITS_NV',0x825C)
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLfloatArray)
def glDepthRangeArrayfvNV(first,count,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat,_cs.GLfloat)
def glDepthRangeIndexedfNV(index,n,f):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint)
def glDisableiNV(target,index):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint)
def glEnableiNV(target,index):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,arrays.GLfloatArray)
def glGetFloati_vNV(target,index,data):pass
@_f
@_p.types(_cs.GLboolean,_cs.GLenum,_cs.GLuint)
def glIsEnablediNV(target,index):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLintArray)
def glScissorArrayvNV(first,count,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei)
def glScissorIndexedNV(index,left,bottom,width,height):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLintArray)
def glScissorIndexedvNV(index,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLfloatArray)
def glViewportArrayvNV(first,count,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glViewportIndexedfNV(index,x,y,w,h):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLfloatArray)
def glViewportIndexedfvNV(index,v):pass
| [
11,
12,
13,
14,
16
] |
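# The intermediate versions above spell the enum values in decimal while the original file
# uses hexadecimal. A quick plain-Python check (no GL context needed) that the two
# spellings agree.
pairs = {
    'GL_DEPTH_RANGE': (2928, 0x0B70),
    'GL_MAX_VIEWPORTS_NV': (33371, 0x825B),
    'GL_VIEWPORT_SUBPIXEL_BITS_NV': (33372, 0x825C),
    'GL_VIEWPORT_BOUNDS_RANGE_NV': (33373, 0x825D),
    'GL_VIEWPORT_INDEX_PROVOKING_VERTEX_NV': (33375, 0x825F),
}
for name, (decimal_value, hex_value) in pairs.items():
    assert decimal_value == hex_value, name
print('all viewport-array enum spellings match')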
835 | 77d7fb49ed4c3e78b148cd446e9a5c6a0e6fac8b | <mask token>
def calc():
height = v_height.get()
base = v_base.get()
print(f'height is {height}')
print(f'Basal length is {base}')
length = math.isqrt(height * height + base * base)
    print('Length is {:.2f}'.format(length))
<mask token>
| <mask token>
GUI.title('My Cal Program')
GUI.geometry('500x500')
def calc():
height = v_height.get()
base = v_base.get()
print(f'height is {height}')
print(f'Basal length is {base}')
length = math.isqrt(height * height + base * base)
    print('Length is {:.2f}'.format(length))
<mask token>
L1.pack()
<mask token>
E1.pack(pady=8, ipady=7, ipadx=17)
<mask token>
L2.pack()
<mask token>
E2.pack(pady=8, ipady=7, ipadx=17)
<mask token>
B1.pack()
<mask token>
v_result.set('----Result----')
<mask token>
Result.pack()
GUI.mainloop()
| <mask token>
GUI = Tk()
GUI.title('My Cal Program')
GUI.geometry('500x500')
def calc():
height = v_height.get()
base = v_base.get()
print(f'height is {height}')
print(f'Basal length is {base}')
length = math.isqrt(height * height + base * base)
    print('Length is {:.2f}'.format(length))
<mask token>
v_height = IntVar()
v_base = IntVar()
L1 = Label(text='Please input height', foreground='red', font=(
'Angsana New', 15))
L1.pack()
E1 = ttk.Entry(GUI, textvariable=v_height)
E1.pack(pady=8, ipady=7, ipadx=17)
L2 = Label(text='Please input basal length', foreground='red', font=(
'Angsana New', 15))
L2.pack()
E2 = ttk.Entry(GUI, textvariable=v_base)
E2.pack(pady=8, ipady=7, ipadx=17)
B1 = ttk.Button(text='Calculate', command=calc)
B1.pack()
v_result = StringVar()
v_result.set('----Result----')
Result = ttk.Label(GUI, textvariable=v_result, foreground='green', font=(
'Angsana New', 15))
Result.pack()
GUI.mainloop()
| from tkinter import *
from tkinter import ttk
import math
GUI = Tk()
GUI.title('My Cal Program')
GUI.geometry('500x500')
def calc():
height = v_height.get()
base = v_base.get()
print(f'height is {height}')
print(f'Basal length is {base}')
length = math.isqrt(height * height + base * base)
    print('Length is {:.2f}'.format(length))
<mask token>
v_height = IntVar()
v_base = IntVar()
L1 = Label(text='Please input height', foreground='red', font=(
'Angsana New', 15))
L1.pack()
E1 = ttk.Entry(GUI, textvariable=v_height)
E1.pack(pady=8, ipady=7, ipadx=17)
L2 = Label(text='Please input basal length', foreground='red', font=(
'Angsana New', 15))
L2.pack()
E2 = ttk.Entry(GUI, textvariable=v_base)
E2.pack(pady=8, ipady=7, ipadx=17)
B1 = ttk.Button(text='Calculate', command=calc)
B1.pack()
v_result = StringVar()
v_result.set('----Result----')
Result = ttk.Label(GUI, textvariable=v_result, foreground='green', font=(
'Angsana New', 15))
Result.pack()
GUI.mainloop()
| #GUIcal.py
from tkinter import *
from tkinter import ttk
import math
GUI=Tk()
GUI.title('My Cal Program')
GUI.geometry('500x500')
def calc():
height=v_height.get()
    base=v_base.get()  # pull the value from v_base
print(f'height is {height}')
print(f'Basal length is {base}')
length= math.isqrt((height*height)+(base*base))
    print('Length is {:.2f}'.format(length))
### For attaching a picture
'''
IMG=PhotoImage(file='pythagorus-theorem.png').subsample(3)
IM1=Label(GUI,image=IMG)
IM1.pack()
'''
v_height=IntVar()
v_base=IntVar()
L1=Label(text='Please input height',foreground='red',font=('Angsana New',15))
L1.pack()
E1=ttk.Entry(GUI,textvariable=v_height)
E1.pack(pady=8,ipady=7,ipadx=17)
L2=Label(text='Please input basal length',foreground='red',font=('Angsana New',15))
L2.pack()
E2=ttk.Entry(GUI,textvariable=v_base)
E2.pack(pady=8,ipady=7,ipadx=17)
B1=ttk.Button(text='Calculate',command=calc)
B1.pack()
v_result=StringVar()
v_result.set('----Result----')
Result=ttk.Label(GUI,textvariable=v_result,foreground='green',font=('Angsana New',15))
Result.pack()
GUI.mainloop()
| [
1,
2,
3,
4,
5
] |
836 | 63069f03d17862b8ea6aa74d0acd1370bbea0dcb | <mask token>
class DataRoutes(CommonRouteExchangeService):
<mask token>
<mask token>
<mask token>
    def change_status_in_route(self, tree_route, status):
        """Replace the route status in a route
        :param tree_route: Route in XML format
        :param status: Route status 1 - ORIGINAL
                                    2 - PLANNED_FOR_VOYAGE
                                    3 - OPTIMIZED
                                    4 - CROSS_CHECKED
                                    5 - SAFETY_CHECKED
                                    6 - APPROVED
                                    7 - USED_FOR_MONITORING
                                    8 - INACTIVE
        :return: ElementTree
        """
tree_route_copy = copy.deepcopy(tree_route)
root = tree_route_copy.getroot()
root.find('.//*[@routeStatus]').attrib.update({'routeStatus': str(
status)})
return tree_route_copy
<mask token>
def convert_route_to_str(self, tree_route):
return Et.tostring(tree_route.getroot(), encoding='UTF-8')
| <mask token>
class DataRoutes(CommonRouteExchangeService):
<mask token>
    def get_route_from_file(self, path_route):
        """Read a route from a file
        :param path_route: Path to the route in XML format
        :return: ElementTree
        """
path_file = os.path.join(os.getcwd(), path_route)
return Et.parse(path_file)
    def change_uvid_in_route(self, tree_route, uvid):
        """Replace the UVID in a route
        :param tree_route: Route in XML format
        :param uvid: UVID
        :return: ElementTree
        """
tree_route_copy = copy.deepcopy(tree_route)
root = tree_route_copy.getroot()
root.find('.//*[@vesselVoyage]').attrib.update({'vesselVoyage': uvid})
return tree_route_copy
    def change_status_in_route(self, tree_route, status):
        """Replace the route status in a route
        :param tree_route: Route in XML format
        :param status: Route status 1 - ORIGINAL
                                    2 - PLANNED_FOR_VOYAGE
                                    3 - OPTIMIZED
                                    4 - CROSS_CHECKED
                                    5 - SAFETY_CHECKED
                                    6 - APPROVED
                                    7 - USED_FOR_MONITORING
                                    8 - INACTIVE
        :return: ElementTree
        """
tree_route_copy = copy.deepcopy(tree_route)
root = tree_route_copy.getroot()
root.find('.//*[@routeStatus]').attrib.update({'routeStatus': str(
status)})
return tree_route_copy
<mask token>
def convert_route_to_str(self, tree_route):
return Et.tostring(tree_route.getroot(), encoding='UTF-8')
| <mask token>
class DataRoutes(CommonRouteExchangeService):
    """Class for working with route data"""
    def get_route_from_file(self, path_route):
        """Read a route from a file
        :param path_route: Path to the route in XML format
        :return: ElementTree
        """
path_file = os.path.join(os.getcwd(), path_route)
return Et.parse(path_file)
    def change_uvid_in_route(self, tree_route, uvid):
        """Replace the UVID in a route
        :param tree_route: Route in XML format
        :param uvid: UVID
        :return: ElementTree
        """
tree_route_copy = copy.deepcopy(tree_route)
root = tree_route_copy.getroot()
root.find('.//*[@vesselVoyage]').attrib.update({'vesselVoyage': uvid})
return tree_route_copy
    def change_status_in_route(self, tree_route, status):
        """Replace the route status in a route
        :param tree_route: Route in XML format
        :param status: Route status 1 - ORIGINAL
                                    2 - PLANNED_FOR_VOYAGE
                                    3 - OPTIMIZED
                                    4 - CROSS_CHECKED
                                    5 - SAFETY_CHECKED
                                    6 - APPROVED
                                    7 - USED_FOR_MONITORING
                                    8 - INACTIVE
        :return: ElementTree
        """
tree_route_copy = copy.deepcopy(tree_route)
root = tree_route_copy.getroot()
root.find('.//*[@routeStatus]').attrib.update({'routeStatus': str(
status)})
return tree_route_copy
    def change_route_name_in_route(self, tree_route, route_name):
        """Replace routeName in a route
        :param tree_route: Route in XML format
        :param route_name: Route name
        :return: ElementTree
        """
tree_route_copy = copy.deepcopy(tree_route)
root = tree_route_copy.getroot()
root.find('.//*[@routeName]').attrib.update({'routeName': route_name})
return tree_route_copy
def convert_route_to_str(self, tree_route):
return Et.tostring(tree_route.getroot(), encoding='UTF-8')
| import os
import xml.etree.ElementTree as Et
import copy
from .common import CommonRouteExchangeService
class DataRoutes(CommonRouteExchangeService):
    """Class for working with route data"""
    def get_route_from_file(self, path_route):
        """Read a route from a file
        :param path_route: Path to the route in XML format
        :return: ElementTree
        """
path_file = os.path.join(os.getcwd(), path_route)
return Et.parse(path_file)
    def change_uvid_in_route(self, tree_route, uvid):
        """Replace the UVID in a route
        :param tree_route: Route in XML format
        :param uvid: UVID
        :return: ElementTree
        """
tree_route_copy = copy.deepcopy(tree_route)
root = tree_route_copy.getroot()
root.find('.//*[@vesselVoyage]').attrib.update({'vesselVoyage': uvid})
return tree_route_copy
    def change_status_in_route(self, tree_route, status):
        """Replace the route status in a route
        :param tree_route: Route in XML format
        :param status: Route status 1 - ORIGINAL
                                    2 - PLANNED_FOR_VOYAGE
                                    3 - OPTIMIZED
                                    4 - CROSS_CHECKED
                                    5 - SAFETY_CHECKED
                                    6 - APPROVED
                                    7 - USED_FOR_MONITORING
                                    8 - INACTIVE
        :return: ElementTree
        """
tree_route_copy = copy.deepcopy(tree_route)
root = tree_route_copy.getroot()
root.find('.//*[@routeStatus]').attrib.update({'routeStatus': str(
status)})
return tree_route_copy
    def change_route_name_in_route(self, tree_route, route_name):
        """Replace routeName in a route
        :param tree_route: Route in XML format
        :param route_name: Route name
        :return: ElementTree
        """
tree_route_copy = copy.deepcopy(tree_route)
root = tree_route_copy.getroot()
root.find('.//*[@routeName]').attrib.update({'routeName': route_name})
return tree_route_copy
def convert_route_to_str(self, tree_route):
return Et.tostring(tree_route.getroot(), encoding='UTF-8')
| null | [
3,
5,
7,
8
] |
837 | 41eef711c79fb084c9780b6d2638d863266e569d | <mask token>
def answer():
question = input('Ask me anything: ')
print(random.choice(responses))
<mask token>
| <mask token>
def answer():
question = input('Ask me anything: ')
print(random.choice(responses))
answer()
<mask token>
while secondQuestion == str('Yes'):
answer()
secondQuestion = input('Another question? Yes/No: ')
else:
print('Thank you for asking the wise magic 8 ball')
| <mask token>
responses = ['Seems so', 'Never', 'Untrue', 'Always no matter what',
'You decide your fate', 'Not sure', 'Yep', 'Nope', 'Maybe', 'Nein',
'Qui', 'Ask the person next to you', 'That question is not for me']
def answer():
question = input('Ask me anything: ')
print(random.choice(responses))
answer()
secondQuestion = input('Another question? Yes/No: ')
while secondQuestion == str('Yes'):
answer()
secondQuestion = input('Another question? Yes/No: ')
else:
print('Thank you for asking the wise magic 8 ball')
| import random
responses = ['Seems so', 'Never', 'Untrue', 'Always no matter what',
'You decide your fate', 'Not sure', 'Yep', 'Nope', 'Maybe', 'Nein',
'Qui', 'Ask the person next to you', 'That question is not for me']
def answer():
question = input('Ask me anything: ')
print(random.choice(responses))
answer()
secondQuestion = input('Another question? Yes/No: ')
while secondQuestion == str('Yes'):
answer()
secondQuestion = input('Another question? Yes/No: ')
else:
print('Thank you for asking the wise magic 8 ball')
| import random
responses = ['Seems so','Never','Untrue','Always no matter what','You decide your fate','Not sure','Yep','Nope','Maybe','Nein','Qui','Ask the person next to you','That question is not for me']
def answer():
question = input('Ask me anything: ')
print(random.choice(responses))
answer()
secondQuestion = (input('Another question? Yes/No: '))
while secondQuestion == str('Yes'):
answer()
secondQuestion = (input('Another question? Yes/No: '))
else:
print('Thank you for asking the wise magic 8 ball')
| [
1,
2,
3,
4,
5
] |
838 | f4bc5663ab2b2a6dbb41a2fc3d7ca67100b455a4 | <mask token>
| <mask token>
if display is None or 'localhost' in display:
matplotlib.use('agg')
<mask token>
parser.add_argument('--n-samples', type=int, default=5000)
parser.add_argument('--use-localization', action='store_true')
parser.add_argument('--dataset', type=str, default='')
parser.add_argument('--model', type=str, default='')
parser.add_argument('--fname-prefix', type=str, default='sac')
parser.add_argument('--spatial-encoding', type=str, default='ssp', choices=
['ssp', 'hex-ssp', 'periodic-hex-ssp', 'grid-ssp', 'ind-ssp',
'orth-proj-ssp', 'rec-ssp', 'rec-hex-ssp', 'rec-ind-ssp',
'sub-toroid-ssp', 'var-sub-toroid-ssp', 'random', '2d', '2d-normalized',
'one-hot', 'hex-trig', 'trig', 'random-trig', 'random-rotated-trig',
'random-proj', 'legendre', 'learned', 'learned-normalized',
'frozen-learned', 'frozen-learned-normalized', 'pc-gauss', 'pc-dog',
'tile-coding'])
parser.add_argument('--frozen-model', type=str, default='', help=
'model to use frozen encoding weights from')
parser.add_argument('--pc-gauss-sigma', type=float, default=0.25)
parser.add_argument('--pc-diff-sigma', type=float, default=0.5)
parser.add_argument('--hex-freq-coef', type=float, default=2.5, help=
'constant to scale frequencies by')
parser.add_argument('--n-tiles', type=int, default=8, help=
'number of layers for tile coding')
parser.add_argument('--n-bins', type=int, default=8, help=
'number of bins for tile coding')
parser.add_argument('--ssp-scaling', type=float, default=1.0)
parser.add_argument('--grid-ssp-min', type=float, default=0.25, help=
'minimum plane wave scale')
parser.add_argument('--grid-ssp-max', type=float, default=2.0, help=
'maximum plane wave scale')
parser.add_argument('--phi', type=float, default=0.5, help=
'phi as a fraction of pi for orth-proj-ssp')
parser.add_argument('--n-proj', type=int, default=3, help=
'projection dimension for sub toroids')
parser.add_argument('--scale-ratio', type=float, default=0, help=
'ratio between sub toroid scales')
parser.add_argument('--hilbert-points', type=int, default=1, choices=[0, 1,
2, 3], help=
'pc centers. 0: random uniform. 1: hilbert curve. 2: evenly spaced grid. 3: hex grid'
)
parser.add_argument('--seed', type=int, default=13)
parser.add_argument('--dropout-p', type=float, default=0.5)
parser.add_argument('--dim', type=int, default=512)
parser.add_argument('--train-split', type=float, default=0.8, help=
'Training fraction of the train/test split')
parser.add_argument('--allow-cache', action='store_true', help=
'once the dataset has been generated, it will be saved to a file to be loaded faster'
)
parser.add_argument('--trajectory-length', type=int, default=100)
parser.add_argument('--minibatch-size', type=int, default=10)
parser.add_argument('--n-image-bins', type=int, default=20)
parser.add_argument('--n-hd-cells', type=int, default=0, help=
'If non-zero, use linear and angular velocity as well as HD cell output')
parser.add_argument('--sin-cos-ang', type=int, default=1, choices=[0, 1],
help=
'Use the sin and cos of the angular velocity if angular velocities are used'
)
parser.add_argument('--use-lmu', action='store_true')
parser.add_argument('--lmu-order', type=int, default=6)
parser.add_argument('--no-cache-load', action='store_true', help=
'do not load from cache')
<mask token>
torch.manual_seed(args.seed)
np.random.seed(args.seed)
<mask token>
print('Generating Heatmap Vectors')
for i, x in enumerate(xs):
for j, y in enumerate(ys):
heatmap_vectors[i, j, :] = encoding_func(x=x, y=y)
heatmap_vectors[i, j, :] /= np.linalg.norm(heatmap_vectors[i, j, :])
print('Heatmap Vector Generation Complete')
<mask token>
if args.n_hd_cells > 0:
hd_encoding_func = hd_gauss_encoding_func(dim=args.n_hd_cells, sigma=
0.25, use_softmax=False, rng=np.random.RandomState(args.seed))
if args.sin_cos_ang:
input_size = 3
else:
input_size = 2
model = SSPPathIntegrationModel(input_size=input_size, unroll_length=
rollout_length, sp_dim=dim + args.n_hd_cells, dropout_p=args.
dropout_p, use_lmu=args.use_lmu, order=args.lmu_order)
else:
hd_encoding_func = None
model = SSPPathIntegrationModel(input_size=2, unroll_length=
rollout_length, sp_dim=dim, dropout_p=args.dropout_p, use_lmu=args.
use_lmu, order=args.lmu_order)
model.load_state_dict(torch.load(args.model), strict=False)
model.eval()
<mask token>
if 'ssp' in args.spatial_encoding:
encoding_specific = args.ssp_scaling
elif args.spatial_encoding == 'frozen-learned':
encoding_specific = args.frozen_model
elif args.spatial_encoding == 'pc-gauss' or args.spatial_encoding == 'pc-gauss-softmax':
encoding_specific = args.pc_gauss_sigma
elif args.spatial_encoding == 'pc-dog':
encoding_specific = '{}-{}'.format(args.pc_gauss_sigma, args.pc_diff_sigma)
elif args.spatial_encoding == 'hex-trig':
encoding_specific = args.hex_freq_coef
if 'tf' in args.dataset:
cache_fname = 'dataset_cache/tf_{}_{}_{}_{}_{}_{}.npz'.format(args.
spatial_encoding, args.dim, args.seed, args.n_samples, args.
n_hd_cells, encoding_specific)
else:
cache_fname = 'dataset_cache/{}_{}_{}_{}_{}_{}.npz'.format(args.
spatial_encoding, args.dim, args.seed, args.n_samples, args.
n_hd_cells, encoding_specific)
if os.path.exists(cache_fname) and not args.no_cache_load:
print('Generating Train and Test Loaders from Cache')
trainloader, testloader = load_from_cache(cache_fname, batch_size=
batch_size, n_samples=n_samples)
else:
print('Generating Train and Test Loaders')
if 'tf' in args.dataset:
assert args.sin_cos_ang == 1
trainloader, testloader = tf_train_test_loaders(data,
n_train_samples=n_samples, n_test_samples=n_samples,
rollout_length=rollout_length, batch_size=batch_size, encoding=
args.spatial_encoding, encoding_func=encoding_func,
encoding_dim=args.dim, train_split=args.train_split, hd_dim=
args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang
=args.sin_cos_ang)
elif args.n_hd_cells > 0:
trainloader, testloader = angular_train_test_loaders(data,
n_train_samples=n_samples, n_test_samples=n_samples,
rollout_length=rollout_length, batch_size=batch_size, encoding=
args.spatial_encoding, encoding_func=encoding_func,
encoding_dim=args.dim, train_split=args.train_split, hd_dim=
args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang
=args.sin_cos_ang)
else:
trainloader, testloader = train_test_loaders(data, n_train_samples=
n_samples, n_test_samples=n_samples, rollout_length=
rollout_length, batch_size=batch_size, encoding=args.
spatial_encoding, encoding_func=encoding_func, encoding_dim=
args.dim, train_split=args.train_split)
if args.allow_cache:
if not os.path.exists('dataset_cache'):
os.makedirs('dataset_cache')
np.savez(cache_fname, train_velocity_inputs=trainloader.dataset.
velocity_inputs, train_ssp_inputs=trainloader.dataset.
ssp_inputs, train_ssp_outputs=trainloader.dataset.ssp_outputs,
test_velocity_inputs=testloader.dataset.velocity_inputs,
test_ssp_inputs=testloader.dataset.ssp_inputs, test_ssp_outputs
=testloader.dataset.ssp_outputs)
print('Train and Test Loaders Generation Complete')
<mask token>
print('Testing')
with torch.no_grad():
for i, data in enumerate(testloader):
velocity_inputs, ssp_inputs, ssp_outputs = data
ssp_pred, lstm_outputs, dense_outputs = model.forward_activations(
velocity_inputs, ssp_inputs)
predictions = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))
coords = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))
lstm_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],
model.lstm_hidden_size))
dense_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],
model.linear_hidden_size))
assert rollout_length == ssp_pred.shape[0]
print('Computing predicted locations and true locations')
for ri in range(rollout_length):
pred = ssp_pred.detach().numpy()[ri, :, :args.dim]
predictions[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
] = ssp_to_loc_v(pred, heatmap_vectors, xs, ys)
coord = ssp_outputs.detach().numpy()[:, ri, :args.dim]
coords[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
] = ssp_to_loc_v(coord, heatmap_vectors, xs, ys)
lstm_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
] = lstm_outputs.detach().numpy()[ri, :, :]
dense_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[
1], :] = dense_outputs.detach().numpy()[ri, :, :]
print(np.max(predictions))
print(np.min(predictions))
<mask token>
print(grid_scores_60_truth, grid_scores_90_truth,
grid_scores_60_separation_truth, grid_scores_90_separation_truth)
<mask token>
np.savez(fname, grid_scores_60_pred=grid_scores_60_pred,
grid_scores_90_pred=grid_scores_90_pred, grid_scores_60_separation_pred
=grid_scores_60_separation_pred, grid_scores_90_separation_pred=
grid_scores_90_separation_pred, grid_scores_60_truth=
grid_scores_60_truth, grid_scores_90_truth=grid_scores_90_truth,
grid_scores_60_separation_truth=grid_scores_60_separation_truth,
grid_scores_90_separation_truth=grid_scores_90_separation_truth,
grid_scores_60_dense_pred=grid_scores_60_dense_pred,
grid_scores_90_dense_pred=grid_scores_90_dense_pred,
grid_scores_60_separation_dense_pred=
grid_scores_60_separation_dense_pred,
grid_scores_90_separation_dense_pred=
grid_scores_90_separation_dense_pred, grid_scores_60_dense_truth=
grid_scores_60_dense_truth, grid_scores_90_dense_truth=
grid_scores_90_dense_truth, grid_scores_60_separation_dense_truth=
grid_scores_60_separation_dense_truth,
grid_scores_90_separation_dense_truth=grid_scores_90_separation_dense_truth
)
| <mask token>
display = os.environ.get('DISPLAY')
if display is None or 'localhost' in display:
matplotlib.use('agg')
<mask token>
parser = argparse.ArgumentParser(
'Compute grid scores for a path integration model')
parser.add_argument('--n-samples', type=int, default=5000)
parser.add_argument('--use-localization', action='store_true')
parser.add_argument('--dataset', type=str, default='')
parser.add_argument('--model', type=str, default='')
parser.add_argument('--fname-prefix', type=str, default='sac')
parser.add_argument('--spatial-encoding', type=str, default='ssp', choices=
['ssp', 'hex-ssp', 'periodic-hex-ssp', 'grid-ssp', 'ind-ssp',
'orth-proj-ssp', 'rec-ssp', 'rec-hex-ssp', 'rec-ind-ssp',
'sub-toroid-ssp', 'var-sub-toroid-ssp', 'random', '2d', '2d-normalized',
'one-hot', 'hex-trig', 'trig', 'random-trig', 'random-rotated-trig',
'random-proj', 'legendre', 'learned', 'learned-normalized',
'frozen-learned', 'frozen-learned-normalized', 'pc-gauss', 'pc-dog',
'tile-coding'])
parser.add_argument('--frozen-model', type=str, default='', help=
'model to use frozen encoding weights from')
parser.add_argument('--pc-gauss-sigma', type=float, default=0.25)
parser.add_argument('--pc-diff-sigma', type=float, default=0.5)
parser.add_argument('--hex-freq-coef', type=float, default=2.5, help=
'constant to scale frequencies by')
parser.add_argument('--n-tiles', type=int, default=8, help=
'number of layers for tile coding')
parser.add_argument('--n-bins', type=int, default=8, help=
'number of bins for tile coding')
parser.add_argument('--ssp-scaling', type=float, default=1.0)
parser.add_argument('--grid-ssp-min', type=float, default=0.25, help=
'minimum plane wave scale')
parser.add_argument('--grid-ssp-max', type=float, default=2.0, help=
'maximum plane wave scale')
parser.add_argument('--phi', type=float, default=0.5, help=
'phi as a fraction of pi for orth-proj-ssp')
parser.add_argument('--n-proj', type=int, default=3, help=
'projection dimension for sub toroids')
parser.add_argument('--scale-ratio', type=float, default=0, help=
'ratio between sub toroid scales')
parser.add_argument('--hilbert-points', type=int, default=1, choices=[0, 1,
2, 3], help=
'pc centers. 0: random uniform. 1: hilbert curve. 2: evenly spaced grid. 3: hex grid'
)
parser.add_argument('--seed', type=int, default=13)
parser.add_argument('--dropout-p', type=float, default=0.5)
parser.add_argument('--dim', type=int, default=512)
parser.add_argument('--train-split', type=float, default=0.8, help=
'Training fraction of the train/test split')
parser.add_argument('--allow-cache', action='store_true', help=
'once the dataset has been generated, it will be saved to a file to be loaded faster'
)
parser.add_argument('--trajectory-length', type=int, default=100)
parser.add_argument('--minibatch-size', type=int, default=10)
parser.add_argument('--n-image-bins', type=int, default=20)
parser.add_argument('--n-hd-cells', type=int, default=0, help=
'If non-zero, use linear and angular velocity as well as HD cell output')
parser.add_argument('--sin-cos-ang', type=int, default=1, choices=[0, 1],
help=
'Use the sin and cos of the angular velocity if angular velocities are used'
)
parser.add_argument('--use-lmu', action='store_true')
parser.add_argument('--lmu-order', type=int, default=6)
parser.add_argument('--no-cache-load', action='store_true', help=
'do not load from cache')
args = parser.parse_args()
ssp_scaling = args.ssp_scaling
torch.manual_seed(args.seed)
np.random.seed(args.seed)
data = np.load(args.dataset)
limit_low = 0
limit_high = 2.2
res = 128
encoding_func, dim = get_encoding_function(args, limit_low=limit_low,
limit_high=limit_high)
xs = np.linspace(limit_low, limit_high, res)
ys = np.linspace(limit_low, limit_high, res)
heatmap_vectors = np.zeros((len(xs), len(ys), dim))
print('Generating Heatmap Vectors')
for i, x in enumerate(xs):
for j, y in enumerate(ys):
heatmap_vectors[i, j, :] = encoding_func(x=x, y=y)
heatmap_vectors[i, j, :] /= np.linalg.norm(heatmap_vectors[i, j, :])
print('Heatmap Vector Generation Complete')
n_samples = args.n_samples
rollout_length = args.trajectory_length
batch_size = args.minibatch_size
if args.n_hd_cells > 0:
hd_encoding_func = hd_gauss_encoding_func(dim=args.n_hd_cells, sigma=
0.25, use_softmax=False, rng=np.random.RandomState(args.seed))
if args.sin_cos_ang:
input_size = 3
else:
input_size = 2
model = SSPPathIntegrationModel(input_size=input_size, unroll_length=
rollout_length, sp_dim=dim + args.n_hd_cells, dropout_p=args.
dropout_p, use_lmu=args.use_lmu, order=args.lmu_order)
else:
hd_encoding_func = None
model = SSPPathIntegrationModel(input_size=2, unroll_length=
rollout_length, sp_dim=dim, dropout_p=args.dropout_p, use_lmu=args.
use_lmu, order=args.lmu_order)
model.load_state_dict(torch.load(args.model), strict=False)
model.eval()
encoding_specific = ''
if 'ssp' in args.spatial_encoding:
encoding_specific = args.ssp_scaling
elif args.spatial_encoding == 'frozen-learned':
encoding_specific = args.frozen_model
elif args.spatial_encoding == 'pc-gauss' or args.spatial_encoding == 'pc-gauss-softmax':
encoding_specific = args.pc_gauss_sigma
elif args.spatial_encoding == 'pc-dog':
encoding_specific = '{}-{}'.format(args.pc_gauss_sigma, args.pc_diff_sigma)
elif args.spatial_encoding == 'hex-trig':
encoding_specific = args.hex_freq_coef
if 'tf' in args.dataset:
cache_fname = 'dataset_cache/tf_{}_{}_{}_{}_{}_{}.npz'.format(args.
spatial_encoding, args.dim, args.seed, args.n_samples, args.
n_hd_cells, encoding_specific)
else:
cache_fname = 'dataset_cache/{}_{}_{}_{}_{}_{}.npz'.format(args.
spatial_encoding, args.dim, args.seed, args.n_samples, args.
n_hd_cells, encoding_specific)
if os.path.exists(cache_fname) and not args.no_cache_load:
print('Generating Train and Test Loaders from Cache')
trainloader, testloader = load_from_cache(cache_fname, batch_size=
batch_size, n_samples=n_samples)
else:
print('Generating Train and Test Loaders')
if 'tf' in args.dataset:
assert args.sin_cos_ang == 1
trainloader, testloader = tf_train_test_loaders(data,
n_train_samples=n_samples, n_test_samples=n_samples,
rollout_length=rollout_length, batch_size=batch_size, encoding=
args.spatial_encoding, encoding_func=encoding_func,
encoding_dim=args.dim, train_split=args.train_split, hd_dim=
args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang
=args.sin_cos_ang)
elif args.n_hd_cells > 0:
trainloader, testloader = angular_train_test_loaders(data,
n_train_samples=n_samples, n_test_samples=n_samples,
rollout_length=rollout_length, batch_size=batch_size, encoding=
args.spatial_encoding, encoding_func=encoding_func,
encoding_dim=args.dim, train_split=args.train_split, hd_dim=
args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang
=args.sin_cos_ang)
else:
trainloader, testloader = train_test_loaders(data, n_train_samples=
n_samples, n_test_samples=n_samples, rollout_length=
rollout_length, batch_size=batch_size, encoding=args.
spatial_encoding, encoding_func=encoding_func, encoding_dim=
args.dim, train_split=args.train_split)
if args.allow_cache:
if not os.path.exists('dataset_cache'):
os.makedirs('dataset_cache')
np.savez(cache_fname, train_velocity_inputs=trainloader.dataset.
velocity_inputs, train_ssp_inputs=trainloader.dataset.
ssp_inputs, train_ssp_outputs=trainloader.dataset.ssp_outputs,
test_velocity_inputs=testloader.dataset.velocity_inputs,
test_ssp_inputs=testloader.dataset.ssp_inputs, test_ssp_outputs
=testloader.dataset.ssp_outputs)
print('Train and Test Loaders Generation Complete')
starts = [0.2] * 10
ends = np.linspace(0.4, 1.0, num=10)
masks_parameters = zip(starts, ends.tolist())
latest_epoch_scorer = scores.GridScorer(nbins=args.n_image_bins,
coords_range=((0, 2.2), (0, 2.2)), mask_parameters=masks_parameters)
fname_lstm_pred = '{}_{}samples_lstm_pred.pdf'.format(args.fname_prefix,
args.n_samples)
fname_lstm_truth = '{}_{}samples_lstm_truth.pdf'.format(args.fname_prefix,
args.n_samples)
fname_dense_pred = '{}_{}samples_dense_pred.pdf'.format(args.fname_prefix,
args.n_samples)
fname_dense_truth = '{}_{}samples_dense_truth.pdf'.format(args.fname_prefix,
args.n_samples)
print('Testing')
with torch.no_grad():
for i, data in enumerate(testloader):
velocity_inputs, ssp_inputs, ssp_outputs = data
ssp_pred, lstm_outputs, dense_outputs = model.forward_activations(
velocity_inputs, ssp_inputs)
predictions = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))
coords = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))
lstm_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],
model.lstm_hidden_size))
dense_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],
model.linear_hidden_size))
assert rollout_length == ssp_pred.shape[0]
print('Computing predicted locations and true locations')
for ri in range(rollout_length):
pred = ssp_pred.detach().numpy()[ri, :, :args.dim]
predictions[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
] = ssp_to_loc_v(pred, heatmap_vectors, xs, ys)
coord = ssp_outputs.detach().numpy()[:, ri, :args.dim]
coords[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
] = ssp_to_loc_v(coord, heatmap_vectors, xs, ys)
lstm_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
] = lstm_outputs.detach().numpy()[ri, :, :]
dense_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[
1], :] = dense_outputs.detach().numpy()[ri, :, :]
print(np.max(predictions))
print(np.min(predictions))
(grid_scores_60_pred, grid_scores_90_pred, grid_scores_60_separation_pred,
grid_scores_90_separation_pred) = (utils.get_scores_and_plot(scorer=
latest_epoch_scorer, data_abs_xy=predictions, activations=
lstm_activations, directory='output_grid_scores', filename=fname_lstm_pred)
)
(grid_scores_60_truth, grid_scores_90_truth,
grid_scores_60_separation_truth, grid_scores_90_separation_truth) = (utils
.get_scores_and_plot(scorer=latest_epoch_scorer, data_abs_xy=coords,
activations=lstm_activations, directory='output_grid_scores', filename=
fname_lstm_truth))
(grid_scores_60_dense_pred, grid_scores_90_dense_pred,
grid_scores_60_separation_dense_pred, grid_scores_90_separation_dense_pred
) = (utils.get_scores_and_plot(scorer=latest_epoch_scorer, data_abs_xy=
predictions, activations=dense_activations, directory=
'output_grid_scores', filename=fname_dense_pred))
(grid_scores_60_dense_truth, grid_scores_90_dense_truth,
grid_scores_60_separation_dense_truth,
grid_scores_90_separation_dense_truth) = (utils.get_scores_and_plot(
scorer=latest_epoch_scorer, data_abs_xy=coords, activations=
dense_activations, directory='output_grid_scores', filename=
fname_dense_truth))
print(grid_scores_60_truth, grid_scores_90_truth,
grid_scores_60_separation_truth, grid_scores_90_separation_truth)
fname = 'output_grid_scores/{}_{}samples.npz'.format(args.fname_prefix,
args.n_samples)
np.savez(fname, grid_scores_60_pred=grid_scores_60_pred,
grid_scores_90_pred=grid_scores_90_pred, grid_scores_60_separation_pred
=grid_scores_60_separation_pred, grid_scores_90_separation_pred=
grid_scores_90_separation_pred, grid_scores_60_truth=
grid_scores_60_truth, grid_scores_90_truth=grid_scores_90_truth,
grid_scores_60_separation_truth=grid_scores_60_separation_truth,
grid_scores_90_separation_truth=grid_scores_90_separation_truth,
grid_scores_60_dense_pred=grid_scores_60_dense_pred,
grid_scores_90_dense_pred=grid_scores_90_dense_pred,
grid_scores_60_separation_dense_pred=
grid_scores_60_separation_dense_pred,
grid_scores_90_separation_dense_pred=
grid_scores_90_separation_dense_pred, grid_scores_60_dense_truth=
grid_scores_60_dense_truth, grid_scores_90_dense_truth=
grid_scores_90_dense_truth, grid_scores_60_separation_dense_truth=
grid_scores_60_separation_dense_truth,
grid_scores_90_separation_dense_truth=grid_scores_90_separation_dense_truth
)
| import matplotlib
import os
display = os.environ.get('DISPLAY')
if display is None or 'localhost' in display:
matplotlib.use('agg')
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from datasets import train_test_loaders, angular_train_test_loaders, tf_train_test_loaders, load_from_cache
from models import SSPPathIntegrationModel
from datetime import datetime
from tensorboardX import SummaryWriter
import json
from spatial_semantic_pointers.utils import get_heatmap_vectors, ssp_to_loc, ssp_to_loc_v
from spatial_semantic_pointers.plots import plot_predictions, plot_predictions_v
import matplotlib.pyplot as plt
from path_integration_utils import pc_to_loc_v, encoding_func_from_model, pc_gauss_encoding_func, ssp_encoding_func, hd_gauss_encoding_func, hex_trig_encoding_func
from ssp_navigation.utils.encodings import get_encoding_function
import grid_scoring.scores as scores
import grid_scoring.utils as utils
from path_integration_utils import encoding_func_from_model, pc_gauss_encoding_func
parser = argparse.ArgumentParser(
'Compute grid scores for a path integration model')
parser.add_argument('--n-samples', type=int, default=5000)
parser.add_argument('--use-localization', action='store_true')
parser.add_argument('--dataset', type=str, default='')
parser.add_argument('--model', type=str, default='')
parser.add_argument('--fname-prefix', type=str, default='sac')
parser.add_argument('--spatial-encoding', type=str, default='ssp', choices=
['ssp', 'hex-ssp', 'periodic-hex-ssp', 'grid-ssp', 'ind-ssp',
'orth-proj-ssp', 'rec-ssp', 'rec-hex-ssp', 'rec-ind-ssp',
'sub-toroid-ssp', 'var-sub-toroid-ssp', 'random', '2d', '2d-normalized',
'one-hot', 'hex-trig', 'trig', 'random-trig', 'random-rotated-trig',
'random-proj', 'legendre', 'learned', 'learned-normalized',
'frozen-learned', 'frozen-learned-normalized', 'pc-gauss', 'pc-dog',
'tile-coding'])
parser.add_argument('--frozen-model', type=str, default='', help=
'model to use frozen encoding weights from')
parser.add_argument('--pc-gauss-sigma', type=float, default=0.25)
parser.add_argument('--pc-diff-sigma', type=float, default=0.5)
parser.add_argument('--hex-freq-coef', type=float, default=2.5, help=
'constant to scale frequencies by')
parser.add_argument('--n-tiles', type=int, default=8, help=
'number of layers for tile coding')
parser.add_argument('--n-bins', type=int, default=8, help=
'number of bins for tile coding')
parser.add_argument('--ssp-scaling', type=float, default=1.0)
parser.add_argument('--grid-ssp-min', type=float, default=0.25, help=
'minimum plane wave scale')
parser.add_argument('--grid-ssp-max', type=float, default=2.0, help=
'maximum plane wave scale')
parser.add_argument('--phi', type=float, default=0.5, help=
'phi as a fraction of pi for orth-proj-ssp')
parser.add_argument('--n-proj', type=int, default=3, help=
'projection dimension for sub toroids')
parser.add_argument('--scale-ratio', type=float, default=0, help=
'ratio between sub toroid scales')
parser.add_argument('--hilbert-points', type=int, default=1, choices=[0, 1,
2, 3], help=
'pc centers. 0: random uniform. 1: hilbert curve. 2: evenly spaced grid. 3: hex grid'
)
parser.add_argument('--seed', type=int, default=13)
parser.add_argument('--dropout-p', type=float, default=0.5)
parser.add_argument('--dim', type=int, default=512)
parser.add_argument('--train-split', type=float, default=0.8, help=
'Training fraction of the train/test split')
parser.add_argument('--allow-cache', action='store_true', help=
'once the dataset has been generated, it will be saved to a file to be loaded faster'
)
parser.add_argument('--trajectory-length', type=int, default=100)
parser.add_argument('--minibatch-size', type=int, default=10)
parser.add_argument('--n-image-bins', type=int, default=20)
parser.add_argument('--n-hd-cells', type=int, default=0, help=
'If non-zero, use linear and angular velocity as well as HD cell output')
parser.add_argument('--sin-cos-ang', type=int, default=1, choices=[0, 1],
help=
'Use the sin and cos of the angular velocity if angular velocities are used'
)
parser.add_argument('--use-lmu', action='store_true')
parser.add_argument('--lmu-order', type=int, default=6)
parser.add_argument('--no-cache-load', action='store_true', help=
'do not load from cache')
args = parser.parse_args()
ssp_scaling = args.ssp_scaling
torch.manual_seed(args.seed)
np.random.seed(args.seed)
data = np.load(args.dataset)
limit_low = 0
limit_high = 2.2
res = 128
encoding_func, dim = get_encoding_function(args, limit_low=limit_low,
limit_high=limit_high)
xs = np.linspace(limit_low, limit_high, res)
ys = np.linspace(limit_low, limit_high, res)
heatmap_vectors = np.zeros((len(xs), len(ys), dim))
print('Generating Heatmap Vectors')
for i, x in enumerate(xs):
for j, y in enumerate(ys):
heatmap_vectors[i, j, :] = encoding_func(x=x, y=y)
heatmap_vectors[i, j, :] /= np.linalg.norm(heatmap_vectors[i, j, :])
print('Heatmap Vector Generation Complete')
n_samples = args.n_samples
rollout_length = args.trajectory_length
batch_size = args.minibatch_size
if args.n_hd_cells > 0:
hd_encoding_func = hd_gauss_encoding_func(dim=args.n_hd_cells, sigma=
0.25, use_softmax=False, rng=np.random.RandomState(args.seed))
if args.sin_cos_ang:
input_size = 3
else:
input_size = 2
model = SSPPathIntegrationModel(input_size=input_size, unroll_length=
rollout_length, sp_dim=dim + args.n_hd_cells, dropout_p=args.
dropout_p, use_lmu=args.use_lmu, order=args.lmu_order)
else:
hd_encoding_func = None
model = SSPPathIntegrationModel(input_size=2, unroll_length=
rollout_length, sp_dim=dim, dropout_p=args.dropout_p, use_lmu=args.
use_lmu, order=args.lmu_order)
model.load_state_dict(torch.load(args.model), strict=False)
model.eval()
encoding_specific = ''
if 'ssp' in args.spatial_encoding:
encoding_specific = args.ssp_scaling
elif args.spatial_encoding == 'frozen-learned':
encoding_specific = args.frozen_model
elif args.spatial_encoding == 'pc-gauss' or args.spatial_encoding == 'pc-gauss-softmax':
encoding_specific = args.pc_gauss_sigma
elif args.spatial_encoding == 'pc-dog':
encoding_specific = '{}-{}'.format(args.pc_gauss_sigma, args.pc_diff_sigma)
elif args.spatial_encoding == 'hex-trig':
encoding_specific = args.hex_freq_coef
if 'tf' in args.dataset:
cache_fname = 'dataset_cache/tf_{}_{}_{}_{}_{}_{}.npz'.format(args.
spatial_encoding, args.dim, args.seed, args.n_samples, args.
n_hd_cells, encoding_specific)
else:
cache_fname = 'dataset_cache/{}_{}_{}_{}_{}_{}.npz'.format(args.
spatial_encoding, args.dim, args.seed, args.n_samples, args.
n_hd_cells, encoding_specific)
if os.path.exists(cache_fname) and not args.no_cache_load:
print('Generating Train and Test Loaders from Cache')
trainloader, testloader = load_from_cache(cache_fname, batch_size=
batch_size, n_samples=n_samples)
else:
print('Generating Train and Test Loaders')
if 'tf' in args.dataset:
assert args.sin_cos_ang == 1
trainloader, testloader = tf_train_test_loaders(data,
n_train_samples=n_samples, n_test_samples=n_samples,
rollout_length=rollout_length, batch_size=batch_size, encoding=
args.spatial_encoding, encoding_func=encoding_func,
encoding_dim=args.dim, train_split=args.train_split, hd_dim=
args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang
=args.sin_cos_ang)
elif args.n_hd_cells > 0:
trainloader, testloader = angular_train_test_loaders(data,
n_train_samples=n_samples, n_test_samples=n_samples,
rollout_length=rollout_length, batch_size=batch_size, encoding=
args.spatial_encoding, encoding_func=encoding_func,
encoding_dim=args.dim, train_split=args.train_split, hd_dim=
args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang
=args.sin_cos_ang)
else:
trainloader, testloader = train_test_loaders(data, n_train_samples=
n_samples, n_test_samples=n_samples, rollout_length=
rollout_length, batch_size=batch_size, encoding=args.
spatial_encoding, encoding_func=encoding_func, encoding_dim=
args.dim, train_split=args.train_split)
if args.allow_cache:
if not os.path.exists('dataset_cache'):
os.makedirs('dataset_cache')
np.savez(cache_fname, train_velocity_inputs=trainloader.dataset.
velocity_inputs, train_ssp_inputs=trainloader.dataset.
ssp_inputs, train_ssp_outputs=trainloader.dataset.ssp_outputs,
test_velocity_inputs=testloader.dataset.velocity_inputs,
test_ssp_inputs=testloader.dataset.ssp_inputs, test_ssp_outputs
=testloader.dataset.ssp_outputs)
print('Train and Test Loaders Generation Complete')
starts = [0.2] * 10
ends = np.linspace(0.4, 1.0, num=10)
masks_parameters = zip(starts, ends.tolist())
latest_epoch_scorer = scores.GridScorer(nbins=args.n_image_bins,
coords_range=((0, 2.2), (0, 2.2)), mask_parameters=masks_parameters)
fname_lstm_pred = '{}_{}samples_lstm_pred.pdf'.format(args.fname_prefix,
args.n_samples)
fname_lstm_truth = '{}_{}samples_lstm_truth.pdf'.format(args.fname_prefix,
args.n_samples)
fname_dense_pred = '{}_{}samples_dense_pred.pdf'.format(args.fname_prefix,
args.n_samples)
fname_dense_truth = '{}_{}samples_dense_truth.pdf'.format(args.fname_prefix,
args.n_samples)
print('Testing')
with torch.no_grad():
for i, data in enumerate(testloader):
velocity_inputs, ssp_inputs, ssp_outputs = data
ssp_pred, lstm_outputs, dense_outputs = model.forward_activations(
velocity_inputs, ssp_inputs)
predictions = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))
coords = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))
lstm_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],
model.lstm_hidden_size))
dense_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],
model.linear_hidden_size))
assert rollout_length == ssp_pred.shape[0]
print('Computing predicted locations and true locations')
for ri in range(rollout_length):
pred = ssp_pred.detach().numpy()[ri, :, :args.dim]
predictions[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
] = ssp_to_loc_v(pred, heatmap_vectors, xs, ys)
coord = ssp_outputs.detach().numpy()[:, ri, :args.dim]
coords[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
] = ssp_to_loc_v(coord, heatmap_vectors, xs, ys)
lstm_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
] = lstm_outputs.detach().numpy()[ri, :, :]
dense_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[
1], :] = dense_outputs.detach().numpy()[ri, :, :]
print(np.max(predictions))
print(np.min(predictions))
(grid_scores_60_pred, grid_scores_90_pred, grid_scores_60_separation_pred,
grid_scores_90_separation_pred) = (utils.get_scores_and_plot(scorer=
latest_epoch_scorer, data_abs_xy=predictions, activations=
lstm_activations, directory='output_grid_scores', filename=fname_lstm_pred)
)
(grid_scores_60_truth, grid_scores_90_truth,
grid_scores_60_separation_truth, grid_scores_90_separation_truth) = (utils
.get_scores_and_plot(scorer=latest_epoch_scorer, data_abs_xy=coords,
activations=lstm_activations, directory='output_grid_scores', filename=
fname_lstm_truth))
(grid_scores_60_dense_pred, grid_scores_90_dense_pred,
grid_scores_60_separation_dense_pred, grid_scores_90_separation_dense_pred
) = (utils.get_scores_and_plot(scorer=latest_epoch_scorer, data_abs_xy=
predictions, activations=dense_activations, directory=
'output_grid_scores', filename=fname_dense_pred))
(grid_scores_60_dense_truth, grid_scores_90_dense_truth,
grid_scores_60_separation_dense_truth,
grid_scores_90_separation_dense_truth) = (utils.get_scores_and_plot(
scorer=latest_epoch_scorer, data_abs_xy=coords, activations=
dense_activations, directory='output_grid_scores', filename=
fname_dense_truth))
print(grid_scores_60_truth, grid_scores_90_truth,
grid_scores_60_separation_truth, grid_scores_90_separation_truth)
fname = 'output_grid_scores/{}_{}samples.npz'.format(args.fname_prefix,
args.n_samples)
np.savez(fname, grid_scores_60_pred=grid_scores_60_pred,
grid_scores_90_pred=grid_scores_90_pred, grid_scores_60_separation_pred
=grid_scores_60_separation_pred, grid_scores_90_separation_pred=
grid_scores_90_separation_pred, grid_scores_60_truth=
grid_scores_60_truth, grid_scores_90_truth=grid_scores_90_truth,
grid_scores_60_separation_truth=grid_scores_60_separation_truth,
grid_scores_90_separation_truth=grid_scores_90_separation_truth,
grid_scores_60_dense_pred=grid_scores_60_dense_pred,
grid_scores_90_dense_pred=grid_scores_90_dense_pred,
grid_scores_60_separation_dense_pred=
grid_scores_60_separation_dense_pred,
grid_scores_90_separation_dense_pred=
grid_scores_90_separation_dense_pred, grid_scores_60_dense_truth=
grid_scores_60_dense_truth, grid_scores_90_dense_truth=
grid_scores_90_dense_truth, grid_scores_60_separation_dense_truth=
grid_scores_60_separation_dense_truth,
grid_scores_90_separation_dense_truth=grid_scores_90_separation_dense_truth
)
| # Compute grid scores using the new dataset format
import matplotlib
import os
# allow code to work on machines without a display or in a screen session
display = os.environ.get('DISPLAY')
if display is None or 'localhost' in display:
matplotlib.use('agg')
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from datasets import train_test_loaders, angular_train_test_loaders, tf_train_test_loaders, load_from_cache
from models import SSPPathIntegrationModel
from datetime import datetime
from tensorboardX import SummaryWriter
import json
from spatial_semantic_pointers.utils import get_heatmap_vectors, ssp_to_loc, ssp_to_loc_v
from spatial_semantic_pointers.plots import plot_predictions, plot_predictions_v
import matplotlib.pyplot as plt
from path_integration_utils import pc_to_loc_v, encoding_func_from_model, pc_gauss_encoding_func, ssp_encoding_func, \
hd_gauss_encoding_func, hex_trig_encoding_func
from ssp_navigation.utils.encodings import get_encoding_function
import grid_scoring.scores as scores
import grid_scoring.utils as utils
# from grid_scoring.run_network import run_and_gather_activations, run_and_gather_localization_activations
from path_integration_utils import encoding_func_from_model, pc_gauss_encoding_func
parser = argparse.ArgumentParser('Compute grid scores for a path integration model')
parser.add_argument('--n-samples', type=int, default=5000)
parser.add_argument('--use-localization', action='store_true')
# TODO: use these parameters
parser.add_argument('--dataset', type=str, default='')
parser.add_argument('--model', type=str, default='')
parser.add_argument('--fname-prefix', type=str, default='sac')
parser.add_argument('--spatial-encoding', type=str, default='ssp',
choices=[
'ssp', 'hex-ssp', 'periodic-hex-ssp', 'grid-ssp', 'ind-ssp', 'orth-proj-ssp',
'rec-ssp', 'rec-hex-ssp', 'rec-ind-ssp', 'sub-toroid-ssp', 'var-sub-toroid-ssp',
'random', '2d', '2d-normalized', 'one-hot', 'hex-trig',
'trig', 'random-trig', 'random-rotated-trig', 'random-proj', 'legendre',
'learned', 'learned-normalized', 'frozen-learned', 'frozen-learned-normalized',
'pc-gauss', 'pc-dog', 'tile-coding'
])
# choices=['ssp', '2d', 'frozen-learned', 'pc-gauss', 'pc-dog', 'pc-gauss-softmax', 'hex-trig', 'hex-trig-all-freq'])
parser.add_argument('--frozen-model', type=str, default='', help='model to use frozen encoding weights from')
parser.add_argument('--pc-gauss-sigma', type=float, default=0.25)
parser.add_argument('--pc-diff-sigma', type=float, default=0.5)
parser.add_argument('--hex-freq-coef', type=float, default=2.5, help='constant to scale frequencies by')
parser.add_argument('--n-tiles', type=int, default=8, help='number of layers for tile coding')
parser.add_argument('--n-bins', type=int, default=8, help='number of bins for tile coding')
parser.add_argument('--ssp-scaling', type=float, default=1.0)
parser.add_argument('--grid-ssp-min', type=float, default=0.25, help='minimum plane wave scale')
parser.add_argument('--grid-ssp-max', type=float, default=2.0, help='maximum plane wave scale')
parser.add_argument('--phi', type=float, default=0.5, help='phi as a fraction of pi for orth-proj-ssp')
parser.add_argument('--n-proj', type=int, default=3, help='projection dimension for sub toroids')
parser.add_argument('--scale-ratio', type=float, default=0, help='ratio between sub toroid scales')
parser.add_argument('--hilbert-points', type=int, default=1, choices=[0, 1, 2, 3],
help='pc centers. 0: random uniform. 1: hilbert curve. 2: evenly spaced grid. 3: hex grid')
parser.add_argument('--seed', type=int, default=13)
parser.add_argument('--dropout-p', type=float, default=0.5)
parser.add_argument('--dim', type=int, default=512)
parser.add_argument('--train-split', type=float, default=0.8, help='Training fraction of the train/test split')
parser.add_argument('--allow-cache', action='store_true',
help='once the dataset has been generated, it will be saved to a file to be loaded faster')
parser.add_argument('--trajectory-length', type=int, default=100)
parser.add_argument('--minibatch-size', type=int, default=10)
parser.add_argument('--n-image-bins', type=int, default=20)
parser.add_argument('--n-hd-cells', type=int, default=0, help='If non-zero, use linear and angular velocity as well as HD cell output')
parser.add_argument('--sin-cos-ang', type=int, default=1, choices=[0, 1],
help='Use the sin and cos of the angular velocity if angular velocities are used')
parser.add_argument('--use-lmu', action='store_true')
parser.add_argument('--lmu-order', type=int, default=6)
parser.add_argument('--no-cache-load', action='store_true', help='do not load from cache')
args = parser.parse_args()
ssp_scaling = args.ssp_scaling
torch.manual_seed(args.seed)
np.random.seed(args.seed)
data = np.load(args.dataset)
# only used for frozen-learned and other custom encoding functions
# encoding_func = None
limit_low = 0 #* args.ssp_scaling
limit_high = 2.2 #* args.ssp_scaling
res = 128 #256
encoding_func, dim = get_encoding_function(args, limit_low=limit_low, limit_high=limit_high)
xs = np.linspace(limit_low, limit_high, res)
ys = np.linspace(limit_low, limit_high, res)
# FIXME: inefficient but will work for now
heatmap_vectors = np.zeros((len(xs), len(ys), dim))
print("Generating Heatmap Vectors")
for i, x in enumerate(xs):
for j, y in enumerate(ys):
heatmap_vectors[i, j, :] = encoding_func(
# batch dim
# np.array(
# [[x, y]]
# )
# no batch dim
# np.array(
# [x, y]
# )
# new signature
x=x, y=y
)
heatmap_vectors[i, j, :] /= np.linalg.norm(heatmap_vectors[i, j, :])
print("Heatmap Vector Generation Complete")
n_samples = args.n_samples
rollout_length = args.trajectory_length
batch_size = args.minibatch_size
if args.n_hd_cells > 0:
hd_encoding_func = hd_gauss_encoding_func(dim=args.n_hd_cells, sigma=0.25, use_softmax=False, rng=np.random.RandomState(args.seed))
if args.sin_cos_ang:
input_size = 3
else:
input_size = 2
model = SSPPathIntegrationModel(
input_size=input_size, unroll_length=rollout_length,
sp_dim=dim + args.n_hd_cells, dropout_p=args.dropout_p, use_lmu=args.use_lmu, order=args.lmu_order
)
else:
hd_encoding_func = None
model = SSPPathIntegrationModel(
input_size=2, unroll_length=rollout_length,
sp_dim=dim, dropout_p=args.dropout_p, use_lmu=args.use_lmu, order=args.lmu_order
)
# model = SSPPathIntegrationModel(unroll_length=rollout_length, sp_dim=dim, dropout_p=args.dropout_p)
model.load_state_dict(torch.load(args.model), strict=False)
model.eval()
# encoding specific cache string
encoding_specific = ''
if 'ssp' in args.spatial_encoding:
encoding_specific = args.ssp_scaling
elif args.spatial_encoding == 'frozen-learned':
encoding_specific = args.frozen_model
elif args.spatial_encoding == 'pc-gauss' or args.spatial_encoding == 'pc-gauss-softmax':
encoding_specific = args.pc_gauss_sigma
elif args.spatial_encoding == 'pc-dog':
encoding_specific = '{}-{}'.format(args.pc_gauss_sigma, args.pc_diff_sigma)
elif args.spatial_encoding == 'hex-trig':
encoding_specific = args.hex_freq_coef
if 'tf' in args.dataset:
cache_fname = 'dataset_cache/tf_{}_{}_{}_{}_{}_{}.npz'.format(
args.spatial_encoding, args.dim, args.seed, args.n_samples, args.n_hd_cells, encoding_specific
)
else:
cache_fname = 'dataset_cache/{}_{}_{}_{}_{}_{}.npz'.format(
args.spatial_encoding, args.dim, args.seed, args.n_samples, args.n_hd_cells, encoding_specific
)
# if the file exists, load it from cache
if os.path.exists(cache_fname) and not args.no_cache_load:
print("Generating Train and Test Loaders from Cache")
trainloader, testloader = load_from_cache(cache_fname, batch_size=batch_size, n_samples=n_samples)
else:
print("Generating Train and Test Loaders")
if 'tf' in args.dataset:
# tfrecord dataset only supports using the sin and cos of angular velocity
assert args.sin_cos_ang == 1
trainloader, testloader = tf_train_test_loaders(
data,
n_train_samples=n_samples,
n_test_samples=n_samples,
rollout_length=rollout_length,
batch_size=batch_size,
encoding=args.spatial_encoding,
encoding_func=encoding_func,
encoding_dim=args.dim,
train_split=args.train_split,
hd_dim=args.n_hd_cells,
hd_encoding_func=hd_encoding_func,
sin_cos_ang=args.sin_cos_ang,
)
else:
if args.n_hd_cells > 0:
trainloader, testloader = angular_train_test_loaders(
data,
n_train_samples=n_samples,
n_test_samples=n_samples,
rollout_length=rollout_length,
batch_size=batch_size,
encoding=args.spatial_encoding,
encoding_func=encoding_func,
encoding_dim=args.dim,
train_split=args.train_split,
hd_dim=args.n_hd_cells,
hd_encoding_func=hd_encoding_func,
sin_cos_ang=args.sin_cos_ang,
)
else:
trainloader, testloader = train_test_loaders(
data,
n_train_samples=n_samples,
n_test_samples=n_samples,
rollout_length=rollout_length,
batch_size=batch_size,
encoding=args.spatial_encoding,
encoding_func=encoding_func,
encoding_dim=args.dim,
train_split=args.train_split,
)
if args.allow_cache:
if not os.path.exists('dataset_cache'):
os.makedirs('dataset_cache')
np.savez(
cache_fname,
train_velocity_inputs=trainloader.dataset.velocity_inputs,
train_ssp_inputs=trainloader.dataset.ssp_inputs,
train_ssp_outputs=trainloader.dataset.ssp_outputs,
test_velocity_inputs=testloader.dataset.velocity_inputs,
test_ssp_inputs=testloader.dataset.ssp_inputs,
test_ssp_outputs=testloader.dataset.ssp_outputs,
)
print("Train and Test Loaders Generation Complete")
starts = [0.2] * 10
ends = np.linspace(0.4, 1.0, num=10)
masks_parameters = zip(starts, ends.tolist())
latest_epoch_scorer = scores.GridScorer(
nbins=args.n_image_bins,
coords_range=((0, 2.2), (0, 2.2)), # data_reader.get_coord_range(),
mask_parameters=masks_parameters,
)
fname_lstm_pred = '{}_{}samples_lstm_pred.pdf'.format(args.fname_prefix, args.n_samples)
fname_lstm_truth = '{}_{}samples_lstm_truth.pdf'.format(args.fname_prefix, args.n_samples)
fname_dense_pred = '{}_{}samples_dense_pred.pdf'.format(args.fname_prefix, args.n_samples)
fname_dense_truth = '{}_{}samples_dense_truth.pdf'.format(args.fname_prefix, args.n_samples)
# Run and gather activations
print("Testing")
with torch.no_grad():
# Everything is in one batch, so this loop will only happen once
for i, data in enumerate(testloader):
velocity_inputs, ssp_inputs, ssp_outputs = data
ssp_pred, lstm_outputs, dense_outputs = model.forward_activations(velocity_inputs, ssp_inputs)
predictions = np.zeros((ssp_pred.shape[0]*ssp_pred.shape[1], 2))
coords = np.zeros((ssp_pred.shape[0]*ssp_pred.shape[1], 2))
lstm_activations = np.zeros((ssp_pred.shape[0]*ssp_pred.shape[1], model.lstm_hidden_size))
dense_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], model.linear_hidden_size))
assert rollout_length == ssp_pred.shape[0]
# # For each neuron, contains the average activity at each spatial bin
# # Computing for both ground truth and predicted location
# rate_maps_pred = np.zeros((model.lstm_hidden_size, len(xs), len(ys)))
# rate_maps_truth = np.zeros((model.lstm_hidden_size, len(xs), len(ys)))
print("Computing predicted locations and true locations")
# Using all data, one chunk at a time
for ri in range(rollout_length):
# trim out head direction info if that was included by only looking up to args.encoding_dim
# computing 'predicted' coordinates, where the agent thinks it is
pred = ssp_pred.detach().numpy()[ri, :, :args.dim]
# pred = pred / pred.sum(axis=1)[:, np.newaxis]
predictions[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :] = ssp_to_loc_v(
pred,
heatmap_vectors, xs, ys
)
# computing 'ground truth' coordinates, where the agent should be
coord = ssp_outputs.detach().numpy()[:, ri, :args.dim]
# coord = coord / coord.sum(axis=1)[:, np.newaxis]
coords[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :] = ssp_to_loc_v(
coord,
heatmap_vectors, xs, ys
)
# reshaping activations and converting to numpy array
lstm_activations[ri*ssp_pred.shape[1]:(ri+1)*ssp_pred.shape[1], :] = lstm_outputs.detach().numpy()[ri, :, :]
dense_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :] = dense_outputs.detach().numpy()[ri, :, :]
# predictions = predictions / args.ssp_scaling
# coords = coords / args.ssp_scaling
print(np.max(predictions))
print(np.min(predictions))
grid_scores_60_pred, grid_scores_90_pred, grid_scores_60_separation_pred, grid_scores_90_separation_pred = utils.get_scores_and_plot(
scorer=latest_epoch_scorer,
data_abs_xy=predictions, #res['pos_xy'],
activations=lstm_activations, #res['bottleneck'],
directory='output_grid_scores', #FLAGS.saver_results_directory,
filename=fname_lstm_pred,
)
grid_scores_60_truth, grid_scores_90_truth, grid_scores_60_separation_truth, grid_scores_90_separation_truth = utils.get_scores_and_plot(
scorer=latest_epoch_scorer,
data_abs_xy=coords, #res['pos_xy'],
activations=lstm_activations, #res['bottleneck'],
directory='output_grid_scores', #FLAGS.saver_results_directory,
filename=fname_lstm_truth,
)
grid_scores_60_dense_pred, grid_scores_90_dense_pred, grid_scores_60_separation_dense_pred, grid_scores_90_separation_dense_pred = utils.get_scores_and_plot(
scorer=latest_epoch_scorer,
data_abs_xy=predictions, #res['pos_xy'],
activations=dense_activations, #res['bottleneck'],
directory='output_grid_scores', #FLAGS.saver_results_directory,
filename=fname_dense_pred,
)
grid_scores_60_dense_truth, grid_scores_90_dense_truth, grid_scores_60_separation_dense_truth, grid_scores_90_separation_dense_truth = utils.get_scores_and_plot(
scorer=latest_epoch_scorer,
data_abs_xy=coords, #res['pos_xy'],
activations=dense_activations, #res['bottleneck'],
directory='output_grid_scores', #FLAGS.saver_results_directory,
filename=fname_dense_truth,
)
print(grid_scores_60_truth, grid_scores_90_truth, grid_scores_60_separation_truth, grid_scores_90_separation_truth)
# Saving to make grid score values easy to compare for different variations
fname = 'output_grid_scores/{}_{}samples.npz'.format(args.fname_prefix, args.n_samples)
np.savez(
fname,
grid_scores_60_pred=grid_scores_60_pred,
grid_scores_90_pred=grid_scores_90_pred,
grid_scores_60_separation_pred=grid_scores_60_separation_pred,
grid_scores_90_separation_pred=grid_scores_90_separation_pred,
grid_scores_60_truth=grid_scores_60_truth,
grid_scores_90_truth=grid_scores_90_truth,
grid_scores_60_separation_truth=grid_scores_60_separation_truth,
grid_scores_90_separation_truth=grid_scores_90_separation_truth,
grid_scores_60_dense_pred=grid_scores_60_dense_pred,
grid_scores_90_dense_pred=grid_scores_90_dense_pred,
grid_scores_60_separation_dense_pred=grid_scores_60_separation_dense_pred,
grid_scores_90_separation_dense_pred=grid_scores_90_separation_dense_pred,
grid_scores_60_dense_truth=grid_scores_60_dense_truth,
grid_scores_90_dense_truth=grid_scores_90_dense_truth,
grid_scores_60_separation_dense_truth=grid_scores_60_separation_dense_truth,
grid_scores_90_separation_dense_truth=grid_scores_90_separation_dense_truth,
)
| [
0,
1,
2,
3,
4
] |
839 | 8c6f890631e9696a7907975b5d0bb71d03b380da | <mask token>
| <mask token>
cv2.imshow('image1', img[0:int(img_height / 2), 0:int(img_width / 2)])
cv2.imshow('image2', img[int(img_height / 2):img_height, 0:int(img_width / 2)])
cv2.imshow('image3', img[0:int(img_height / 2), int(img_width / 2):img_width])
cv2.imshow('image4', img[int(img_height / 2):img_height, int(img_width / 2)
:img_width])
cv2.waitKey(0)
cv2.destroyAllWindows()
| <mask token>
img = cv2.imread('Scan1.jpg')
img_height, img_width, dim = img.shape
cv2.imshow('image1', img[0:int(img_height / 2), 0:int(img_width / 2)])
cv2.imshow('image2', img[int(img_height / 2):img_height, 0:int(img_width / 2)])
cv2.imshow('image3', img[0:int(img_height / 2), int(img_width / 2):img_width])
cv2.imshow('image4', img[int(img_height / 2):img_height, int(img_width / 2)
:img_width])
cv2.waitKey(0)
cv2.destroyAllWindows()
| import cv2
import numpy as np
img = cv2.imread('Scan1.jpg')
img_height, img_width, dim = img.shape
cv2.imshow('image1', img[0:int(img_height / 2), 0:int(img_width / 2)])
cv2.imshow('image2', img[int(img_height / 2):img_height, 0:int(img_width / 2)])
cv2.imshow('image3', img[0:int(img_height / 2), int(img_width / 2):img_width])
cv2.imshow('image4', img[int(img_height / 2):img_height, int(img_width / 2)
:img_width])
cv2.waitKey(0)
cv2.destroyAllWindows()
| null | [0, 1, 2, 3] |
840 | c804391cc199a242d1b54ece8487ef74065a40ad |
def max_product(n):
lst, lstnums, res, num = [], [], [], 1
for i in range(0, n+1):
lstnums.append(i)
for j in str(i):
num *= int(j)
lst.append(num)
num = 1
maxlst = max(lst)
for i in range(len(lst)):
if lst[i] == maxlst:
res.append(lstnums[i])
return res
| null | null | null | null | [0] |
841 | c43b899234ffff09225153dcaf097591c7176430 | <mask token>
class ParticipantAdmin(admin.ModelAdmin):
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class ParticipantAdmin(admin.ModelAdmin):
fieldsets = [('Personal information', {'fields': ['email', 'name',
'institution', 'assistant']}), ('Asistance', {'fields': [
'assistant', 'participant_hash']}), ('Contribution', {'fields': [
'contribution', 'title', 'abstract', 'link']})]
list_display = 'email', 'name', 'assistant', 'contribution', 'title'
list_filter = ['assistant', 'contribution']
<mask token>
| <mask token>
class ParticipantAdmin(admin.ModelAdmin):
fieldsets = [('Personal information', {'fields': ['email', 'name',
'institution', 'assistant']}), ('Asistance', {'fields': [
'assistant', 'participant_hash']}), ('Contribution', {'fields': [
'contribution', 'title', 'abstract', 'link']})]
list_display = 'email', 'name', 'assistant', 'contribution', 'title'
list_filter = ['assistant', 'contribution']
admin.site.register(Participant, ParticipantAdmin)
| from django.contrib import admin
from .models import Participant
class ParticipantAdmin(admin.ModelAdmin):
fieldsets = [('Personal information', {'fields': ['email', 'name',
'institution', 'assistant']}), ('Asistance', {'fields': [
'assistant', 'participant_hash']}), ('Contribution', {'fields': [
'contribution', 'title', 'abstract', 'link']})]
list_display = 'email', 'name', 'assistant', 'contribution', 'title'
list_filter = ['assistant', 'contribution']
admin.site.register(Participant, ParticipantAdmin)
| from django.contrib import admin
# Register your models here.
from .models import Participant
class ParticipantAdmin(admin.ModelAdmin):
fieldsets = [
("Personal information", {'fields': ['email', 'name', 'institution', 'assistant']}),
("Asistance", {'fields': ['assistant', 'participant_hash']}),
("Contribution", {'fields': ['contribution', 'title', 'abstract', 'link']}),
]
list_display = ('email', 'name', 'assistant', 'contribution', 'title')
list_filter = ['assistant', 'contribution']
admin.site.register(Participant, ParticipantAdmin)
| [1, 2, 3, 4, 5] |
842 | 95422348c8db9753830cc0a7c8785c05b44886b1 | <mask token>
| <mask token>
def enable_download(driver, directory):
"""
:param driver: Selenium web driver
:param directory: Directory to store the file
This function allows the Selenium web driver to store the file in the given directory.
"""
driver.command_executor._commands['send_command'
] = 'POST', '/session/$sessionId/chromium/send_command'
params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior':
'allow', 'downloadPath': directory}}
driver.execute('send_command', params)
| <mask token>
YEAR = dt.today().year
BINARY_LOCATION = {'binary_location':
'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe'}
CHROME_DRIVER_PATH = (
'C:\\Users\\pavithra\\Downloads\\chromedriver_win32\\chromedriver.exe')
EXTRACTED_DIR = (
'C:\\Users\\pavithra\\Documents\\fintuple-automation-projects\\BseBhavCopy\\dailybhavcopy\\dailybhavcopy\\csv_files'
)
ZIP_DIR = (
'C:\\Users\\pavithra\\Documents\\fintuple-automation-projects\\BseBhavCopy\\dailybhavcopy\\dailybhavcopy\\zip_files'
)
HEADLESS_OPTIONS = {'headless': '--headless', 'window_size':
'--window-size=1920x1080'}
DOWNLOAD_PREFERENCES = {'download.default_directory': EXTRACTED_DIR,
'download.prompt_for_download': False}
def enable_download(driver, directory):
"""
:param driver: Selenium web driver
:param directory: Directory to store the file
This function allows the Selenium web driver to store the file in the given directory.
"""
driver.command_executor._commands['send_command'
] = 'POST', '/session/$sessionId/chromium/send_command'
params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior':
'allow', 'downloadPath': directory}}
driver.execute('send_command', params)
| from datetime import datetime as dt
YEAR = dt.today().year
BINARY_LOCATION = {'binary_location':
'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe'}
CHROME_DRIVER_PATH = (
'C:\\Users\\pavithra\\Downloads\\chromedriver_win32\\chromedriver.exe')
EXTRACTED_DIR = (
'C:\\Users\\pavithra\\Documents\\fintuple-automation-projects\\BseBhavCopy\\dailybhavcopy\\dailybhavcopy\\csv_files'
)
ZIP_DIR = (
'C:\\Users\\pavithra\\Documents\\fintuple-automation-projects\\BseBhavCopy\\dailybhavcopy\\dailybhavcopy\\zip_files'
)
HEADLESS_OPTIONS = {'headless': '--headless', 'window_size':
'--window-size=1920x1080'}
DOWNLOAD_PREFERENCES = {'download.default_directory': EXTRACTED_DIR,
'download.prompt_for_download': False}
def enable_download(driver, directory):
"""
:param driver: Selenium web driver
:param directory: Directory to store the file
This function allows the Selenium web driver to store the file in the given directory.
"""
driver.command_executor._commands['send_command'
] = 'POST', '/session/$sessionId/chromium/send_command'
params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior':
'allow', 'downloadPath': directory}}
driver.execute('send_command', params)
| from datetime import datetime as dt
YEAR = dt.today().year
BINARY_LOCATION = {'binary_location': 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe'}
CHROME_DRIVER_PATH = r'C:\Users\pavithra\Downloads\chromedriver_win32\chromedriver.exe'
EXTRACTED_DIR = r'C:\Users\pavithra\Documents\fintuple-automation-projects\BseBhavCopy\dailybhavcopy\dailybhavcopy' \
r'\csv_files'
ZIP_DIR = r'C:\Users\pavithra\Documents\fintuple-automation-projects\BseBhavCopy\dailybhavcopy\dailybhavcopy\zip_files'
HEADLESS_OPTIONS = {'headless': '--headless',
'window_size': '--window-size=1920x1080'}
DOWNLOAD_PREFERENCES = {'download.default_directory': EXTRACTED_DIR,
'download.prompt_for_download': False}
def enable_download(driver, directory):
"""
:param driver: Selenium web driver
:param directory: Directory to store the file
This function allows the Selenium web driver to store the file in the given directory.
"""
driver.command_executor._commands["send_command"] = ("POST", '/session/$sessionId/chromium/send_command')
params = {'cmd': 'Page.setDownloadBehavior',
'params': {'behavior': 'allow',
'downloadPath': directory}}
driver.execute("send_command", params)
| [0, 1, 2, 3, 4] |
843 | 96cb2754db2740767dfb145078ed17969e85123d | <mask token>
| <mask token>
main()
| from .parapred import main
main()
| null | null | [0, 1, 2] |
844 | 77f94ecd205ae9f240f25d959a6d5cd9cf844d86 | <mask token>
class TextColors:
BUY = '\x1b[92m'
WARNING = '\x1b[93m'
SELL_LOSS = '\x1b[91m'
SELL_PROFIT = '\x1b[32m'
DIM = '\x1b[2m\x1b[35m'
DEFAULT = '\x1b[39m'
YELLOW = '\x1b[33m'
TURQUOISE = '\x1b[36m'
UNDERLINE = '\x1b[4m'
END = '\x1b[0m'
ITALICS = '\x1b[3m'
<mask token>
def get_price(client_api):
initial_price = {}
tickers = [line.strip() for line in open(TICKERS_LIST)]
prices = client_api.get_ticker()
for coin in prices['ticker']:
for item in tickers:
if item + PAIR_WITH == coin['symbol'] and all(item + PAIR_WITH
not in coin['symbol'] for item in EX_PAIRS):
initial_price[coin['symbol']] = {'symbol': coin['symbol'],
'price': coin['last'], 'time': datetime.now(),
'price_list': [], 'change_price': 0.0, 'cov': 0.0}
return initial_price
<mask token>
def get_prices_high_low(list_coins, interval):
if WINDOWS:
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
prices_low_high = {}
hist_data = asyncio.run(get_historical_data(ticker_list=list_coins,
interval=interval))
for item in hist_data:
coin_symbol = item['symbol']
h_p = []
l_p = []
try:
for i in item['data']['data']:
close_time = i[0]
open_price = float(i[1])
close_price = float(i[2])
high_price = float(i[3])
low_price = float(i[4])
volume = float(i[5])
quote_volume = i[6]
h_p.append(high_price)
l_p.append(low_price)
except Exception as e:
print(f'Exception {e}')
continue
prices_low_high[coin_symbol] = {'symbol': coin_symbol, 'high_price':
h_p, 'low_price': l_p, 'current_potential': 0.0}
return prices_low_high
def do_work():
while True:
init_price = get_price(client)
coins = get_prices_high_low(init_price, INTERVAL)
print(
f'{TextColors.TURQUOISE}The Snail is checking for potential profit and buy signals{TextColors.DEFAULT}'
)
if os.path.exists(f'signals/snail_scan{signal_file_type}'):
os.remove(f'signals/snail_scan{signal_file_type}')
current_potential_list = []
held_coins_list = {}
if TEST_MODE:
coin_path = 'test_coins_bought.json'
elif BVT:
coin_path = 'coins_bought.json'
else:
coin_path = 'live_coins_bought.json'
if os.path.isfile(coin_path) and os.stat(coin_path).st_size != 0:
with open(coin_path) as file:
held_coins_list = json.load(file)
for coin in coins:
if len(coins[coin]['high_price']) == LIMIT:
high_price = float(max(coins[coin]['high_price']))
low_price = float(min(coins[coin]['low_price']))
last_price = float(init_price[coin + PAIR_WITH]['price'])
range = high_price - low_price
potential = low_price / high_price * 100
buy_above = low_price * 1.0
buy_below = high_price - range * percent_below
max_potential = potential * 0.98
min_potential = potential * 0.6
safe_potential = potential - 12
current_range = high_price - last_price
current_potential = high_price / last_price * 100 - 100
coins[coin]['current_potential'] = current_potential
movement = low_price / range
if MOVEMENT:
if (profit_min < current_potential < profit_max and
last_price < buy_below and movement >= TAKE_PROFIT and
coin not in held_coins_list):
current_potential_list.append(coins[coin])
elif profit_min < current_potential < profit_max and last_price < buy_below and coin not in held_coins_list:
current_potential_list.append(coins[coin])
if current_potential_list:
exchange = ccxt.binance()
macd_list = []
for i in current_potential_list:
coin = i['symbol'] + PAIR_WITH
current_potential = i['current_potential']
macd1 = exchange.fetch_ohlcv(coin, timeframe='1m', limit=36)
macd5 = exchange.fetch_ohlcv(coin, timeframe='5m', limit=36)
macd15 = exchange.fetch_ohlcv(coin, timeframe='15m', limit=36)
try:
macd1day = exchange.fetch_ohlcv(coin, timeframe='1d',
limit=36)
except Exception as e:
print(f'{coin} Exception {e}')
continue
macdbtc = exchange.fetch_ohlcv('BTCUSDT', timeframe='1m',
limit=36)
df1 = pd.DataFrame(macd1, columns=['time', 'open', 'high',
'low', 'close', 'volume'])
df5 = pd.DataFrame(macd5, columns=['time', 'open', 'high',
'low', 'close', 'volume'])
df15 = pd.DataFrame(macd15, columns=['time', 'open', 'high',
'low', 'close', 'volume'])
df1day = pd.DataFrame(macd1day, columns=['time', 'open',
'high', 'low', 'close', 'volume'])
dfbtc = pd.DataFrame(macdbtc, columns=['time', 'open',
'high', 'low', 'close', 'volume'])
time.sleep(1)
try:
macd1 = df1.ta.macd(fast=12, slow=26)
macd5 = df5.ta.macd(fast=12, slow=26)
macd15 = df15.ta.macd(fast=12, slow=26)
macd1day = df1day.ta.macd(fast=12, slow=26)
macdbtc = dfbtc.ta.macd(fast=12, slow=26)
get_hist1 = macd1.iloc[35, 1]
get_hist5 = macd5.iloc[35, 1]
get_hist15 = macd15.iloc[35, 1]
get_hist1day = macd1day.iloc[35, 1]
get_histbtc = macdbtc.iloc[35, 1]
except Exception as e:
print(f'{coin} Exception {e}')
continue
if all_info:
if (get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >=
0 and get_hist1day >= 0 and get_histbtc >= 0):
print(
f'MACD HIST {coin} {current_potential:2f}% {TextColors.SELL_PROFIT}{get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}{TextColors.DEFAULT}'
)
else:
print(
f'MACD HIST {coin} {current_potential:2f}% {get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}'
)
if (get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >= 0 and
get_hist1day >= 0 and get_histbtc >= 0):
print(
f"""{TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}
"""
)
macd_list.append(coins[coin])
if macd_list:
sort_list = sorted(macd_list, key=lambda x: x[
f'current_potential'], reverse=True)
for i in sort_list:
coin = i['symbol']
current_potential = i['current_potential']
last_price = float(init_price[coin + PAIR_WITH]['price'])
high_price = float(max(coins[coin]['high_price']))
low_price = float(min(coins[coin]['low_price']))
range = high_price - low_price
potential = low_price / high_price * 100
buy_above = low_price * 1.0
buy_below = high_price - range * percent_below
current_range = high_price - last_price
if all_info:
print(
f"""
Price: ${last_price:.3f}
High: ${high_price:.3f}
Day Max Range: ${range:.3f}
Current Range: ${current_range:.3f}
Buy Below: ${buy_below:.3f}
Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}"""
)
with open(f'signals/snail_scan{signal_file_type}', 'a+'
) as f:
f.write(str(coin + PAIR_WITH) + '\n')
snail_coins = len(current_potential_list)
macd_coins = len(macd_list)
snail_discord = (
f'Snail found {snail_coins} coins and MACD approved {macd_coins}'
)
if DISCORD:
msg_discord(snail_discord)
print(
f'{TextColors.TURQUOISE}Snail found {snail_coins} coins and MACD approved {macd_coins} coins. L: {LIMIT}days Min: {profit_min}% Risk: {percent_below * 100}% {TextColors.DEFAULT}'
)
time.sleep(180)
| <mask token>
if CREATE_TICKER_LIST:
TICKERS_LIST = 'tickers_all_USDT.txt'
else:
TICKERS_LIST = 'tickers_all_USDT.txt'
<mask token>
if OLORIN:
signal_file_type = '.buy'
else:
signal_file_type = '.exs'
<mask token>
class TextColors:
BUY = '\x1b[92m'
WARNING = '\x1b[93m'
SELL_LOSS = '\x1b[91m'
SELL_PROFIT = '\x1b[32m'
DIM = '\x1b[2m\x1b[35m'
DEFAULT = '\x1b[39m'
YELLOW = '\x1b[33m'
TURQUOISE = '\x1b[36m'
UNDERLINE = '\x1b[4m'
END = '\x1b[0m'
ITALICS = '\x1b[3m'
def msg_discord(msg):
message = msg + '\n\n'
mUrl = 'https://discordapp.com/api/webhooks/' + DISCORD_WEBHOOK
data = {'content': message}
response = requests.post(mUrl, json=data)
def get_price(client_api):
initial_price = {}
tickers = [line.strip() for line in open(TICKERS_LIST)]
prices = client_api.get_ticker()
for coin in prices['ticker']:
for item in tickers:
if item + PAIR_WITH == coin['symbol'] and all(item + PAIR_WITH
not in coin['symbol'] for item in EX_PAIRS):
initial_price[coin['symbol']] = {'symbol': coin['symbol'],
'price': coin['last'], 'time': datetime.now(),
'price_list': [], 'change_price': 0.0, 'cov': 0.0}
return initial_price
async def create_urls(ticker_list, interval) ->dict:
coins_urls = {}
if INTERVAL == '1day':
st = datetime.now() - timedelta(days=float(LIMIT))
et = datetime.now()
start_time = int(st.timestamp())
stop_time = int(et.timestamp())
for coin in ticker_list:
if type(coin) == dict:
if all(item + PAIR_WITH not in coin['symbol'] for item in EX_PAIRS
):
coins_urls[coin['symbol']] = {'symbol': coin['symbol'],
'url':
f"https://api.kucoin.com/api/v1/market/candles?symbol{coin['symbol']}&type={interval}&startAt={start_time}&endAt={stop_time}"
}
else:
coins_urls[coin] = {'symbol': coin, 'url':
f'https://api.kucoin.com/api/v1/market/candles?symbol={coin}&type={interval}&startAt={start_time}&endAt={stop_time}'
}
return coins_urls
async def get(session: aiohttp.ClientSession, url) ->dict:
data = {}
symbol = re.findall('=\\w+', url)[0][1:]
try:
resp = await session.request('GET', url=url)
data['symbol'] = symbol
data['data'] = await resp.json()
except Exception as e:
print(e)
return data
async def get_historical_data(ticker_list, interval):
urls = await create_urls(ticker_list=ticker_list, interval=interval)
if WINDOWS:
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
async with aiohttp.ClientSession() as session:
tasks = []
for url in urls:
link = urls[url]['url']
tasks.append(get(session=session, url=link))
response = await asyncio.gather(*tasks, return_exceptions=True)
return response
def get_prices_high_low(list_coins, interval):
if WINDOWS:
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
prices_low_high = {}
hist_data = asyncio.run(get_historical_data(ticker_list=list_coins,
interval=interval))
for item in hist_data:
coin_symbol = item['symbol']
h_p = []
l_p = []
try:
for i in item['data']['data']:
close_time = i[0]
open_price = float(i[1])
close_price = float(i[2])
high_price = float(i[3])
low_price = float(i[4])
volume = float(i[5])
quote_volume = i[6]
h_p.append(high_price)
l_p.append(low_price)
except Exception as e:
print(f'Exception {e}')
continue
prices_low_high[coin_symbol] = {'symbol': coin_symbol, 'high_price':
h_p, 'low_price': l_p, 'current_potential': 0.0}
return prices_low_high
def do_work():
while True:
init_price = get_price(client)
coins = get_prices_high_low(init_price, INTERVAL)
print(
f'{TextColors.TURQUOISE}The Snail is checking for potential profit and buy signals{TextColors.DEFAULT}'
)
if os.path.exists(f'signals/snail_scan{signal_file_type}'):
os.remove(f'signals/snail_scan{signal_file_type}')
current_potential_list = []
held_coins_list = {}
if TEST_MODE:
coin_path = 'test_coins_bought.json'
elif BVT:
coin_path = 'coins_bought.json'
else:
coin_path = 'live_coins_bought.json'
if os.path.isfile(coin_path) and os.stat(coin_path).st_size != 0:
with open(coin_path) as file:
held_coins_list = json.load(file)
for coin in coins:
if len(coins[coin]['high_price']) == LIMIT:
high_price = float(max(coins[coin]['high_price']))
low_price = float(min(coins[coin]['low_price']))
last_price = float(init_price[coin + PAIR_WITH]['price'])
range = high_price - low_price
potential = low_price / high_price * 100
buy_above = low_price * 1.0
buy_below = high_price - range * percent_below
max_potential = potential * 0.98
min_potential = potential * 0.6
safe_potential = potential - 12
current_range = high_price - last_price
current_potential = high_price / last_price * 100 - 100
coins[coin]['current_potential'] = current_potential
movement = low_price / range
if MOVEMENT:
if (profit_min < current_potential < profit_max and
last_price < buy_below and movement >= TAKE_PROFIT and
coin not in held_coins_list):
current_potential_list.append(coins[coin])
elif profit_min < current_potential < profit_max and last_price < buy_below and coin not in held_coins_list:
current_potential_list.append(coins[coin])
if current_potential_list:
exchange = ccxt.binance()
macd_list = []
for i in current_potential_list:
coin = i['symbol'] + PAIR_WITH
current_potential = i['current_potential']
macd1 = exchange.fetch_ohlcv(coin, timeframe='1m', limit=36)
macd5 = exchange.fetch_ohlcv(coin, timeframe='5m', limit=36)
macd15 = exchange.fetch_ohlcv(coin, timeframe='15m', limit=36)
try:
macd1day = exchange.fetch_ohlcv(coin, timeframe='1d',
limit=36)
except Exception as e:
print(f'{coin} Exception {e}')
continue
macdbtc = exchange.fetch_ohlcv('BTCUSDT', timeframe='1m',
limit=36)
df1 = pd.DataFrame(macd1, columns=['time', 'open', 'high',
'low', 'close', 'volume'])
df5 = pd.DataFrame(macd5, columns=['time', 'open', 'high',
'low', 'close', 'volume'])
df15 = pd.DataFrame(macd15, columns=['time', 'open', 'high',
'low', 'close', 'volume'])
df1day = pd.DataFrame(macd1day, columns=['time', 'open',
'high', 'low', 'close', 'volume'])
dfbtc = pd.DataFrame(macdbtc, columns=['time', 'open',
'high', 'low', 'close', 'volume'])
time.sleep(1)
try:
macd1 = df1.ta.macd(fast=12, slow=26)
macd5 = df5.ta.macd(fast=12, slow=26)
macd15 = df15.ta.macd(fast=12, slow=26)
macd1day = df1day.ta.macd(fast=12, slow=26)
macdbtc = dfbtc.ta.macd(fast=12, slow=26)
get_hist1 = macd1.iloc[35, 1]
get_hist5 = macd5.iloc[35, 1]
get_hist15 = macd15.iloc[35, 1]
get_hist1day = macd1day.iloc[35, 1]
get_histbtc = macdbtc.iloc[35, 1]
except Exception as e:
print(f'{coin} Exception {e}')
continue
if all_info:
if (get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >=
0 and get_hist1day >= 0 and get_histbtc >= 0):
print(
f'MACD HIST {coin} {current_potential:2f}% {TextColors.SELL_PROFIT}{get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}{TextColors.DEFAULT}'
)
else:
print(
f'MACD HIST {coin} {current_potential:2f}% {get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}'
)
if (get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >= 0 and
get_hist1day >= 0 and get_histbtc >= 0):
print(
f"""{TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}
"""
)
macd_list.append(coins[coin])
if macd_list:
sort_list = sorted(macd_list, key=lambda x: x[
f'current_potential'], reverse=True)
for i in sort_list:
coin = i['symbol']
current_potential = i['current_potential']
last_price = float(init_price[coin + PAIR_WITH]['price'])
high_price = float(max(coins[coin]['high_price']))
low_price = float(min(coins[coin]['low_price']))
range = high_price - low_price
potential = low_price / high_price * 100
buy_above = low_price * 1.0
buy_below = high_price - range * percent_below
current_range = high_price - last_price
if all_info:
print(
f"""
Price: ${last_price:.3f}
High: ${high_price:.3f}
Day Max Range: ${range:.3f}
Current Range: ${current_range:.3f}
Buy Below: ${buy_below:.3f}
Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}"""
)
with open(f'signals/snail_scan{signal_file_type}', 'a+'
) as f:
f.write(str(coin + PAIR_WITH) + '\n')
snail_coins = len(current_potential_list)
macd_coins = len(macd_list)
snail_discord = (
f'Snail found {snail_coins} coins and MACD approved {macd_coins}'
)
if DISCORD:
msg_discord(snail_discord)
print(
f'{TextColors.TURQUOISE}Snail found {snail_coins} coins and MACD approved {macd_coins} coins. L: {LIMIT}days Min: {profit_min}% Risk: {percent_below * 100}% {TextColors.DEFAULT}'
)
time.sleep(180)
| <mask token>
args = parse_args()
DEFAULT_CONFIG_FILE = 'config.yml'
DEFAULT_CREDS_FILE = 'creds.yml'
config_file = args.config if args.config else DEFAULT_CONFIG_FILE
creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
parsed_creds = load_config(creds_file)
parsed_config = load_config(config_file)
PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
EX_PAIRS = parsed_config['trading_options']['FIATS']
TEST_MODE = parsed_config['script_options']['TEST_MODE']
TAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']
DISCORD_WEBHOOK = load_discord_creds(parsed_creds)
access_key, secret_key, passphrase_key = load_correct_creds(parsed_creds)
client = Client(access_key, secret_key, passphrase_key)
CREATE_TICKER_LIST = True
ticker_type = 'all'
if CREATE_TICKER_LIST:
TICKERS_LIST = 'tickers_all_USDT.txt'
else:
TICKERS_LIST = 'tickers_all_USDT.txt'
BVT = False
OLORIN = True
if OLORIN:
signal_file_type = '.buy'
else:
signal_file_type = '.exs'
WINDOWS = True
DISCORD = True
LIMIT = 4
INTERVAL = '1day'
profit_min = 15
profit_max = 100
percent_below = 0.7
MOVEMENT = True
all_info = False
class TextColors:
BUY = '\x1b[92m'
WARNING = '\x1b[93m'
SELL_LOSS = '\x1b[91m'
SELL_PROFIT = '\x1b[32m'
DIM = '\x1b[2m\x1b[35m'
DEFAULT = '\x1b[39m'
YELLOW = '\x1b[33m'
TURQUOISE = '\x1b[36m'
UNDERLINE = '\x1b[4m'
END = '\x1b[0m'
ITALICS = '\x1b[3m'
def msg_discord(msg):
message = msg + '\n\n'
mUrl = 'https://discordapp.com/api/webhooks/' + DISCORD_WEBHOOK
data = {'content': message}
response = requests.post(mUrl, json=data)
def get_price(client_api):
initial_price = {}
tickers = [line.strip() for line in open(TICKERS_LIST)]
prices = client_api.get_ticker()
for coin in prices['ticker']:
for item in tickers:
if item + PAIR_WITH == coin['symbol'] and all(item + PAIR_WITH
not in coin['symbol'] for item in EX_PAIRS):
initial_price[coin['symbol']] = {'symbol': coin['symbol'],
'price': coin['last'], 'time': datetime.now(),
'price_list': [], 'change_price': 0.0, 'cov': 0.0}
return initial_price
async def create_urls(ticker_list, interval) ->dict:
coins_urls = {}
if INTERVAL == '1day':
st = datetime.now() - timedelta(days=float(LIMIT))
et = datetime.now()
start_time = int(st.timestamp())
stop_time = int(et.timestamp())
for coin in ticker_list:
if type(coin) == dict:
if all(item + PAIR_WITH not in coin['symbol'] for item in EX_PAIRS
):
coins_urls[coin['symbol']] = {'symbol': coin['symbol'],
'url':
f"https://api.kucoin.com/api/v1/market/candles?symbol{coin['symbol']}&type={interval}&startAt={start_time}&endAt={stop_time}"
}
else:
coins_urls[coin] = {'symbol': coin, 'url':
f'https://api.kucoin.com/api/v1/market/candles?symbol={coin}&type={interval}&startAt={start_time}&endAt={stop_time}'
}
return coins_urls
async def get(session: aiohttp.ClientSession, url) ->dict:
data = {}
symbol = re.findall('=\\w+', url)[0][1:]
try:
resp = await session.request('GET', url=url)
data['symbol'] = symbol
data['data'] = await resp.json()
except Exception as e:
print(e)
return data
async def get_historical_data(ticker_list, interval):
urls = await create_urls(ticker_list=ticker_list, interval=interval)
if WINDOWS:
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
async with aiohttp.ClientSession() as session:
tasks = []
for url in urls:
link = urls[url]['url']
tasks.append(get(session=session, url=link))
response = await asyncio.gather(*tasks, return_exceptions=True)
return response
def get_prices_high_low(list_coins, interval):
if WINDOWS:
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
prices_low_high = {}
hist_data = asyncio.run(get_historical_data(ticker_list=list_coins,
interval=interval))
for item in hist_data:
coin_symbol = item['symbol']
h_p = []
l_p = []
try:
for i in item['data']['data']:
close_time = i[0]
open_price = float(i[1])
close_price = float(i[2])
high_price = float(i[3])
low_price = float(i[4])
volume = float(i[5])
quote_volume = i[6]
h_p.append(high_price)
l_p.append(low_price)
except Exception as e:
print(f'Exception {e}')
continue
prices_low_high[coin_symbol] = {'symbol': coin_symbol, 'high_price':
h_p, 'low_price': l_p, 'current_potential': 0.0}
return prices_low_high
def do_work():
while True:
init_price = get_price(client)
coins = get_prices_high_low(init_price, INTERVAL)
print(
f'{TextColors.TURQUOISE}The Snail is checking for potential profit and buy signals{TextColors.DEFAULT}'
)
if os.path.exists(f'signals/snail_scan{signal_file_type}'):
os.remove(f'signals/snail_scan{signal_file_type}')
current_potential_list = []
held_coins_list = {}
if TEST_MODE:
coin_path = 'test_coins_bought.json'
elif BVT:
coin_path = 'coins_bought.json'
else:
coin_path = 'live_coins_bought.json'
if os.path.isfile(coin_path) and os.stat(coin_path).st_size != 0:
with open(coin_path) as file:
held_coins_list = json.load(file)
for coin in coins:
if len(coins[coin]['high_price']) == LIMIT:
high_price = float(max(coins[coin]['high_price']))
low_price = float(min(coins[coin]['low_price']))
last_price = float(init_price[coin + PAIR_WITH]['price'])
range = high_price - low_price
potential = low_price / high_price * 100
buy_above = low_price * 1.0
buy_below = high_price - range * percent_below
max_potential = potential * 0.98
min_potential = potential * 0.6
safe_potential = potential - 12
current_range = high_price - last_price
current_potential = high_price / last_price * 100 - 100
coins[coin]['current_potential'] = current_potential
movement = low_price / range
if MOVEMENT:
if (profit_min < current_potential < profit_max and
last_price < buy_below and movement >= TAKE_PROFIT and
coin not in held_coins_list):
current_potential_list.append(coins[coin])
elif profit_min < current_potential < profit_max and last_price < buy_below and coin not in held_coins_list:
current_potential_list.append(coins[coin])
if current_potential_list:
exchange = ccxt.binance()
macd_list = []
for i in current_potential_list:
coin = i['symbol'] + PAIR_WITH
current_potential = i['current_potential']
macd1 = exchange.fetch_ohlcv(coin, timeframe='1m', limit=36)
macd5 = exchange.fetch_ohlcv(coin, timeframe='5m', limit=36)
macd15 = exchange.fetch_ohlcv(coin, timeframe='15m', limit=36)
try:
macd1day = exchange.fetch_ohlcv(coin, timeframe='1d',
limit=36)
except Exception as e:
print(f'{coin} Exception {e}')
continue
macdbtc = exchange.fetch_ohlcv('BTCUSDT', timeframe='1m',
limit=36)
df1 = pd.DataFrame(macd1, columns=['time', 'open', 'high',
'low', 'close', 'volume'])
df5 = pd.DataFrame(macd5, columns=['time', 'open', 'high',
'low', 'close', 'volume'])
df15 = pd.DataFrame(macd15, columns=['time', 'open', 'high',
'low', 'close', 'volume'])
df1day = pd.DataFrame(macd1day, columns=['time', 'open',
'high', 'low', 'close', 'volume'])
dfbtc = pd.DataFrame(macdbtc, columns=['time', 'open',
'high', 'low', 'close', 'volume'])
time.sleep(1)
try:
macd1 = df1.ta.macd(fast=12, slow=26)
macd5 = df5.ta.macd(fast=12, slow=26)
macd15 = df15.ta.macd(fast=12, slow=26)
macd1day = df1day.ta.macd(fast=12, slow=26)
macdbtc = dfbtc.ta.macd(fast=12, slow=26)
get_hist1 = macd1.iloc[35, 1]
get_hist5 = macd5.iloc[35, 1]
get_hist15 = macd15.iloc[35, 1]
get_hist1day = macd1day.iloc[35, 1]
get_histbtc = macdbtc.iloc[35, 1]
except Exception as e:
print(f'{coin} Exception {e}')
continue
if all_info:
if (get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >=
0 and get_hist1day >= 0 and get_histbtc >= 0):
print(
f'MACD HIST {coin} {current_potential:2f}% {TextColors.SELL_PROFIT}{get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}{TextColors.DEFAULT}'
)
else:
print(
f'MACD HIST {coin} {current_potential:2f}% {get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}'
)
if (get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >= 0 and
get_hist1day >= 0 and get_histbtc >= 0):
print(
f"""{TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}
"""
)
macd_list.append(coins[coin])
if macd_list:
sort_list = sorted(macd_list, key=lambda x: x[
f'current_potential'], reverse=True)
for i in sort_list:
coin = i['symbol']
current_potential = i['current_potential']
last_price = float(init_price[coin + PAIR_WITH]['price'])
high_price = float(max(coins[coin]['high_price']))
low_price = float(min(coins[coin]['low_price']))
range = high_price - low_price
potential = low_price / high_price * 100
buy_above = low_price * 1.0
buy_below = high_price - range * percent_below
current_range = high_price - last_price
if all_info:
print(
f"""
Price: ${last_price:.3f}
High: ${high_price:.3f}
Day Max Range: ${range:.3f}
Current Range: ${current_range:.3f}
Buy Below: ${buy_below:.3f}
Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}"""
)
with open(f'signals/snail_scan{signal_file_type}', 'a+'
) as f:
f.write(str(coin + PAIR_WITH) + '\n')
snail_coins = len(current_potential_list)
macd_coins = len(macd_list)
snail_discord = (
f'Snail found {snail_coins} coins and MACD approved {macd_coins}'
)
if DISCORD:
msg_discord(snail_discord)
print(
f'{TextColors.TURQUOISE}Snail found {snail_coins} coins and MACD approved {macd_coins} coins. L: {LIMIT}days Min: {profit_min}% Risk: {percent_below * 100}% {TextColors.DEFAULT}'
)
time.sleep(180)
| <mask token>
import os
import re
import aiohttp
import asyncio
import time
import json
from datetime import datetime, timedelta
from kucoin.client import Client
from helpers.parameters import parse_args, load_config
import pandas as pd
import pandas_ta as ta
import ccxt
from tradingview_ta import TA_Handler, Interval, Exchange
import requests
from helpers.handle_creds import load_correct_creds, load_discord_creds
args = parse_args()
DEFAULT_CONFIG_FILE = 'config.yml'
DEFAULT_CREDS_FILE = 'creds.yml'
config_file = args.config if args.config else DEFAULT_CONFIG_FILE
creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
parsed_creds = load_config(creds_file)
parsed_config = load_config(config_file)
PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
EX_PAIRS = parsed_config['trading_options']['FIATS']
TEST_MODE = parsed_config['script_options']['TEST_MODE']
TAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']
DISCORD_WEBHOOK = load_discord_creds(parsed_creds)
access_key, secret_key, passphrase_key = load_correct_creds(parsed_creds)
client = Client(access_key, secret_key, passphrase_key)
CREATE_TICKER_LIST = True
ticker_type = 'all'
if CREATE_TICKER_LIST:
TICKERS_LIST = 'tickers_all_USDT.txt'
else:
TICKERS_LIST = 'tickers_all_USDT.txt'
BVT = False
OLORIN = True
if OLORIN:
signal_file_type = '.buy'
else:
signal_file_type = '.exs'
WINDOWS = True
DISCORD = True
LIMIT = 4
INTERVAL = '1day'
profit_min = 15
profit_max = 100
percent_below = 0.7
MOVEMENT = True
all_info = False
class TextColors:
BUY = '\x1b[92m'
WARNING = '\x1b[93m'
SELL_LOSS = '\x1b[91m'
SELL_PROFIT = '\x1b[32m'
DIM = '\x1b[2m\x1b[35m'
DEFAULT = '\x1b[39m'
YELLOW = '\x1b[33m'
TURQUOISE = '\x1b[36m'
UNDERLINE = '\x1b[4m'
END = '\x1b[0m'
ITALICS = '\x1b[3m'
def msg_discord(msg):
message = msg + '\n\n'
mUrl = 'https://discordapp.com/api/webhooks/' + DISCORD_WEBHOOK
data = {'content': message}
response = requests.post(mUrl, json=data)
def get_price(client_api):
initial_price = {}
tickers = [line.strip() for line in open(TICKERS_LIST)]
prices = client_api.get_ticker()
for coin in prices['ticker']:
for item in tickers:
if item + PAIR_WITH == coin['symbol'] and all(item + PAIR_WITH
not in coin['symbol'] for item in EX_PAIRS):
initial_price[coin['symbol']] = {'symbol': coin['symbol'],
'price': coin['last'], 'time': datetime.now(),
'price_list': [], 'change_price': 0.0, 'cov': 0.0}
return initial_price
async def create_urls(ticker_list, interval) ->dict:
coins_urls = {}
if INTERVAL == '1day':
st = datetime.now() - timedelta(days=float(LIMIT))
et = datetime.now()
start_time = int(st.timestamp())
stop_time = int(et.timestamp())
for coin in ticker_list:
if type(coin) == dict:
if all(item + PAIR_WITH not in coin['symbol'] for item in EX_PAIRS
):
coins_urls[coin['symbol']] = {'symbol': coin['symbol'],
'url':
f"https://api.kucoin.com/api/v1/market/candles?symbol{coin['symbol']}&type={interval}&startAt={start_time}&endAt={stop_time}"
}
else:
coins_urls[coin] = {'symbol': coin, 'url':
f'https://api.kucoin.com/api/v1/market/candles?symbol={coin}&type={interval}&startAt={start_time}&endAt={stop_time}'
}
return coins_urls
async def get(session: aiohttp.ClientSession, url) ->dict:
data = {}
symbol = re.findall('=\\w+', url)[0][1:]
try:
resp = await session.request('GET', url=url)
data['symbol'] = symbol
data['data'] = await resp.json()
except Exception as e:
print(e)
return data
async def get_historical_data(ticker_list, interval):
urls = await create_urls(ticker_list=ticker_list, interval=interval)
if WINDOWS:
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
async with aiohttp.ClientSession() as session:
tasks = []
for url in urls:
link = urls[url]['url']
tasks.append(get(session=session, url=link))
response = await asyncio.gather(*tasks, return_exceptions=True)
return response
def get_prices_high_low(list_coins, interval):
if WINDOWS:
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
prices_low_high = {}
hist_data = asyncio.run(get_historical_data(ticker_list=list_coins,
interval=interval))
for item in hist_data:
coin_symbol = item['symbol']
h_p = []
l_p = []
try:
for i in item['data']['data']:
close_time = i[0]
open_price = float(i[1])
close_price = float(i[2])
high_price = float(i[3])
low_price = float(i[4])
volume = float(i[5])
quote_volume = i[6]
h_p.append(high_price)
l_p.append(low_price)
except Exception as e:
print(f'Exception {e}')
continue
prices_low_high[coin_symbol] = {'symbol': coin_symbol, 'high_price':
h_p, 'low_price': l_p, 'current_potential': 0.0}
return prices_low_high
def do_work():
while True:
init_price = get_price(client)
coins = get_prices_high_low(init_price, INTERVAL)
print(
f'{TextColors.TURQUOISE}The Snail is checking for potential profit and buy signals{TextColors.DEFAULT}'
)
if os.path.exists(f'signals/snail_scan{signal_file_type}'):
os.remove(f'signals/snail_scan{signal_file_type}')
current_potential_list = []
held_coins_list = {}
if TEST_MODE:
coin_path = 'test_coins_bought.json'
elif BVT:
coin_path = 'coins_bought.json'
else:
coin_path = 'live_coins_bought.json'
if os.path.isfile(coin_path) and os.stat(coin_path).st_size != 0:
with open(coin_path) as file:
held_coins_list = json.load(file)
for coin in coins:
if len(coins[coin]['high_price']) == LIMIT:
high_price = float(max(coins[coin]['high_price']))
low_price = float(min(coins[coin]['low_price']))
last_price = float(init_price[coin + PAIR_WITH]['price'])
range = high_price - low_price
potential = low_price / high_price * 100
buy_above = low_price * 1.0
buy_below = high_price - range * percent_below
max_potential = potential * 0.98
min_potential = potential * 0.6
safe_potential = potential - 12
current_range = high_price - last_price
current_potential = high_price / last_price * 100 - 100
coins[coin]['current_potential'] = current_potential
movement = low_price / range
if MOVEMENT:
if (profit_min < current_potential < profit_max and
last_price < buy_below and movement >= TAKE_PROFIT and
coin not in held_coins_list):
current_potential_list.append(coins[coin])
elif profit_min < current_potential < profit_max and last_price < buy_below and coin not in held_coins_list:
current_potential_list.append(coins[coin])
if current_potential_list:
exchange = ccxt.binance()
macd_list = []
for i in current_potential_list:
coin = i['symbol'] + PAIR_WITH
current_potential = i['current_potential']
macd1 = exchange.fetch_ohlcv(coin, timeframe='1m', limit=36)
macd5 = exchange.fetch_ohlcv(coin, timeframe='5m', limit=36)
macd15 = exchange.fetch_ohlcv(coin, timeframe='15m', limit=36)
try:
macd1day = exchange.fetch_ohlcv(coin, timeframe='1d',
limit=36)
except Exception as e:
print(f'{coin} Exception {e}')
continue
macdbtc = exchange.fetch_ohlcv('BTCUSDT', timeframe='1m',
limit=36)
df1 = pd.DataFrame(macd1, columns=['time', 'open', 'high',
'low', 'close', 'volume'])
df5 = pd.DataFrame(macd5, columns=['time', 'open', 'high',
'low', 'close', 'volume'])
df15 = pd.DataFrame(macd15, columns=['time', 'open', 'high',
'low', 'close', 'volume'])
df1day = pd.DataFrame(macd1day, columns=['time', 'open',
'high', 'low', 'close', 'volume'])
dfbtc = pd.DataFrame(macdbtc, columns=['time', 'open',
'high', 'low', 'close', 'volume'])
time.sleep(1)
try:
macd1 = df1.ta.macd(fast=12, slow=26)
macd5 = df5.ta.macd(fast=12, slow=26)
macd15 = df15.ta.macd(fast=12, slow=26)
macd1day = df1day.ta.macd(fast=12, slow=26)
macdbtc = dfbtc.ta.macd(fast=12, slow=26)
get_hist1 = macd1.iloc[35, 1]
get_hist5 = macd5.iloc[35, 1]
get_hist15 = macd15.iloc[35, 1]
get_hist1day = macd1day.iloc[35, 1]
get_histbtc = macdbtc.iloc[35, 1]
except Exception as e:
print(f'{coin} Exception {e}')
continue
if all_info:
if (get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >=
0 and get_hist1day >= 0 and get_histbtc >= 0):
print(
f'MACD HIST {coin} {current_potential:2f}% {TextColors.SELL_PROFIT}{get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}{TextColors.DEFAULT}'
)
else:
print(
f'MACD HIST {coin} {current_potential:2f}% {get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}'
)
if (get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >= 0 and
get_hist1day >= 0 and get_histbtc >= 0):
print(
f"""{TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}
"""
)
macd_list.append(coins[coin])
if macd_list:
sort_list = sorted(macd_list, key=lambda x: x[
f'current_potential'], reverse=True)
for i in sort_list:
coin = i['symbol']
current_potential = i['current_potential']
last_price = float(init_price[coin + PAIR_WITH]['price'])
high_price = float(max(coins[coin]['high_price']))
low_price = float(min(coins[coin]['low_price']))
range = high_price - low_price
potential = low_price / high_price * 100
buy_above = low_price * 1.0
buy_below = high_price - range * percent_below
current_range = high_price - last_price
if all_info:
print(
f"""
Price: ${last_price:.3f}
High: ${high_price:.3f}
Day Max Range: ${range:.3f}
Current Range: ${current_range:.3f}
Buy Below: ${buy_below:.3f}
Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}"""
)
with open(f'signals/snail_scan{signal_file_type}', 'a+'
) as f:
f.write(str(coin + PAIR_WITH) + '\n')
snail_coins = len(current_potential_list)
macd_coins = len(macd_list)
snail_discord = (
f'Snail found {snail_coins} coins and MACD approved {macd_coins}'
)
if DISCORD:
msg_discord(snail_discord)
print(
f'{TextColors.TURQUOISE}Snail found {snail_coins} coins and MACD approved {macd_coins} coins. L: {LIMIT}days Min: {profit_min}% Risk: {percent_below * 100}% {TextColors.DEFAULT}'
)
time.sleep(180)
| """
The Snail v 2
"Buy the dips! ... then wait"
STRATEGY
1. Selects coins that are X% (percent_below) below their X day (LIMIT) maximum
2. ** NEW ** Finds movement (MOVEMENT) range over X Days
- if MOVEMENT* > TAKE_PROFIT coins pass to 3
3. Check coins are not already owned
4. Uses MACD to check if coins are currently on an uptrend
5. Adds coins that pass all above tests to Signal file for the Bot to buy (ordered by Potential Profit from High to Low)
* MOVEMENT
Looks at the fluctuation in price over LIMIT days and compares to your TAKE_PROFIT settings.
i.e. if your TAKE_PROFIT is 3%, but the movement is only 1%, then you wont hit TP and will be left holding the coin
This can be turned off if you want.
STRATEGY SETTINGS
LIMIT = 4
INTERVAL = '1d'
profit_min = 15
profit_max = 100 # only required if you want to limit max profit
percent_below = 0.6 # change risk level: 0.7 = 70% below high_price, 0.5 = 50% below high_price
MOVEMENT = True #
OTHER SETTINGS
BVT or OLORIN Fork.
Set True / False for compatibility
WINDOWS (WINDOWS OS)
Set True / False for compatibility
DISCORD
send message to Discord - Set True / False
CONFIG.YML SETTINGS
CHANGE_IN_PRICE: 100 REQUIRED
Do NOT use pausebotmod as it will prevent the_snail from buying - The Snail buys the dips
Developed by scoobie
Thanks to
@vyacheslav for optimising the code with async and adding list sorting,
@Kevin.Butters for the meticulous testing and reporting,
@OlorinSledge for the coding advice and a great fork
DISCLAIMER
CHECK YOU HAVE ALL THE REQUIRED IMPORTS INSTALLED
Developed for OlorinSledge fork - no support for any others as I don't use them.
Troubleshooting and help - please use the #troubleshooting channel
Settings - the settings in this file are what I currently use, please don't DM me for the 'best' settings - for me, these are the best so far.
There's a lot of options to adjust the strategy, test them out and share your results in #bot-strategies so others can learn from them too
Hope the Snail makes you rich!
"""
import os
import re
import aiohttp
import asyncio
import time
import json
from datetime import datetime, timedelta
from kucoin.client import Client
from helpers.parameters import parse_args, load_config
import pandas as pd
import pandas_ta as ta
import ccxt
from tradingview_ta import TA_Handler, Interval, Exchange
import requests
# Load creds modules
from helpers.handle_creds import (
load_correct_creds, load_discord_creds
)
# Settings
args = parse_args()
DEFAULT_CONFIG_FILE = 'config.yml'
DEFAULT_CREDS_FILE = 'creds.yml'
config_file = args.config if args.config else DEFAULT_CONFIG_FILE
creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
parsed_creds = load_config(creds_file)
parsed_config = load_config(config_file)
# Load trading vars
PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
EX_PAIRS = parsed_config['trading_options']['FIATS']
TEST_MODE = parsed_config['script_options']['TEST_MODE']
TAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']
DISCORD_WEBHOOK = load_discord_creds(parsed_creds)
# Load creds for correct environment
access_key, secret_key, passphrase_key = load_correct_creds(parsed_creds)
client = Client(access_key, secret_key, passphrase_key)
# If True, an updated list of coins will be generated from the site - http://edgesforledges.com/watchlists/binance.
# If False, then the list you create in TICKERS_LIST = 'tickers.txt' will be used.
CREATE_TICKER_LIST = True
# When creating a ticker list from the source site:
# http://edgesforledges.com you can use the parameter (all or innovation-zone).
# ticker_type = 'innovation-zone'
ticker_type = 'all'
if CREATE_TICKER_LIST:
TICKERS_LIST = 'tickers_all_USDT.txt'
else:
TICKERS_LIST = 'tickers_all_USDT.txt'
# System Settings
BVT = False
OLORIN = True # if not using Olorin Sledge Fork set to False
if OLORIN:
signal_file_type = '.buy'
else:
signal_file_type = '.exs'
# if using Windows OS set to True, else set to False
WINDOWS = True
# send message to discord
DISCORD = True
# Strategy Settings
LIMIT = 4
INTERVAL = '1day'
profit_min = 15
profit_max = 100 # only required if you want to limit max profit
percent_below = 0.7 # change risk level: 0.7 = 70% below high_price, 0.5 = 50% below high_price
MOVEMENT = True
# Display Setttings
all_info = False
class TextColors:
BUY = '\033[92m'
WARNING = '\033[93m'
SELL_LOSS = '\033[91m'
SELL_PROFIT = '\033[32m'
DIM = '\033[2m\033[35m'
DEFAULT = '\033[39m'
YELLOW = '\033[33m'
TURQUOISE = '\033[36m'
UNDERLINE = '\033[4m'
END = '\033[0m'
ITALICS = '\033[3m'
def msg_discord(msg):
message = msg + '\n\n'
mUrl = "https://discordapp.com/api/webhooks/"+DISCORD_WEBHOOK
data = {"content": message}
response = requests.post(mUrl, json=data)
def get_price(client_api):
initial_price = {}
tickers = [line.strip() for line in open(TICKERS_LIST)]
prices = client_api.get_ticker()
for coin in prices['ticker']:
for item in tickers:
if item + PAIR_WITH == coin['symbol'] and all(item + PAIR_WITH not in coin['symbol'] for item in EX_PAIRS):
initial_price[coin['symbol']] = {'symbol': coin['symbol'],
'price': coin['last'],
'time': datetime.now(),
'price_list': [],
'change_price': 0.0,
'cov': 0.0}
return initial_price
async def create_urls(ticker_list, interval) -> dict:
coins_urls = {}
if INTERVAL == '1day':
st = datetime.now() - timedelta(days=float(LIMIT))
et = datetime.now()
start_time = int(st.timestamp())
stop_time = int(et.timestamp())
for coin in ticker_list:
if type(coin) == dict:
if all(item + PAIR_WITH not in coin['symbol'] for item in EX_PAIRS):
coins_urls[coin['symbol']] = {'symbol': coin['symbol'],
'url': f"https://api.kucoin.com/api/v1/market/candles?symbol"
f"{coin['symbol']}&type={interval}&startAt={start_time}&endAt={stop_time}"}
else:
coins_urls[coin] = {'symbol': coin,
'url': f"https://api.kucoin.com/api/v1/market/candles?symbol={coin}&type={interval}&startAt={start_time}&endAt={stop_time}"}
return coins_urls
async def get(session: aiohttp.ClientSession, url) -> dict:
data = {}
symbol = re.findall(r'=\w+', url)[0][1:]
try:
resp = await session.request('GET', url=url)
data['symbol'] = symbol
# data['last_price'] = await get_last_price(session=session, symbol=symbol)
data['data'] = await resp.json()
except Exception as e:
print(e)
return data
async def get_historical_data(ticker_list, interval):
urls = await create_urls(ticker_list=ticker_list, interval=interval)
if WINDOWS:
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
async with aiohttp.ClientSession() as session:
tasks = []
for url in urls:
link = urls[url]['url']
tasks.append(get(session=session, url=link))
response = await asyncio.gather(*tasks, return_exceptions=True)
return response
def get_prices_high_low(list_coins, interval):
if WINDOWS:
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
prices_low_high = {}
hist_data = asyncio.run(get_historical_data(ticker_list=list_coins, interval=interval))
for item in hist_data:
coin_symbol = item['symbol']
h_p = []
l_p = []
try:
for i in item['data']['data']:
close_time = i[0]
open_price = float(i[1])
close_price = float(i[2])
high_price = float(i[3])
low_price = float(i[4])
volume = float(i[5])
quote_volume = i[6]
h_p.append(high_price)
l_p.append(low_price)
except Exception as e:
print(f'Exception {e}')
continue
prices_low_high[coin_symbol] = {'symbol': coin_symbol, 'high_price': h_p, 'low_price': l_p, 'current_potential': 0.0}
return prices_low_high
def do_work():
while True:
init_price = get_price(client)
coins = get_prices_high_low(init_price, INTERVAL)
print(f'{TextColors.TURQUOISE}The Snail is checking for potential profit and buy signals{TextColors.DEFAULT}')
if os.path.exists(f'signals/snail_scan{signal_file_type}'):
os.remove(f'signals/snail_scan{signal_file_type}')
current_potential_list = []
held_coins_list = {}
if TEST_MODE:
coin_path = 'test_coins_bought.json'
elif BVT:
coin_path = 'coins_bought.json'
else:
coin_path = 'live_coins_bought.json'
if os.path.isfile(coin_path) and os.stat(coin_path).st_size != 0:
with open(coin_path) as file:
held_coins_list = json.load(file)
for coin in coins:
if len(coins[coin]['high_price']) == LIMIT:
high_price = float(max(coins[coin]['high_price']))
low_price = float(min(coins[coin]['low_price']))
last_price = float(init_price[coin + PAIR_WITH]['price'])
# Calculation
range = high_price - low_price
potential = (low_price / high_price) * 100
buy_above = low_price * 1.00
buy_below = high_price - (range * percent_below) # percent below affects Risk
max_potential = potential * 0.98
min_potential = potential * 0.6
safe_potential = potential - 12
current_range = high_price - last_price
current_potential = ((high_price / last_price) * 100) - 100
coins[coin]['current_potential'] = current_potential
movement = (low_price / range)
# print(f'{coin} {potential:.2f}% {movement:.2f}%')
if MOVEMENT:
if profit_min < current_potential < profit_max and last_price < buy_below and movement >= TAKE_PROFIT and coin not in held_coins_list:
current_potential_list.append(coins[coin])
else:
if profit_min < current_potential < profit_max and last_price < buy_below and coin not in held_coins_list:
current_potential_list.append(coins[coin])
if current_potential_list:
# print(current_potential_list)
exchange = ccxt.binance()
macd_list = []
for i in current_potential_list:
coin = i['symbol'] + PAIR_WITH
current_potential = i['current_potential']
macd1 = exchange.fetch_ohlcv(coin, timeframe='1m', limit=36)
macd5 = exchange.fetch_ohlcv(coin, timeframe='5m', limit=36)
macd15 = exchange.fetch_ohlcv(coin, timeframe='15m', limit=36)
try:
macd1day = exchange.fetch_ohlcv(coin, timeframe='1d', limit=36)
except Exception as e:
print(f'{coin} Exception {e}')
continue
macdbtc = exchange.fetch_ohlcv('BTCUSDT', timeframe='1m', limit=36)
df1 = pd.DataFrame(macd1, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
df5 = pd.DataFrame(macd5, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
df15 = pd.DataFrame(macd15, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
df1day = pd.DataFrame(macd1day, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
dfbtc = pd.DataFrame(macdbtc, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
# Wait for 1 sec to prevent kucoin query limit
time.sleep(1)
try:
macd1 = df1.ta.macd(fast=12, slow=26)
macd5 = df5.ta.macd(fast=12, slow=26)
macd15 = df15.ta.macd(fast=12, slow=26)
macd1day = df1day.ta.macd(fast=12, slow=26)
macdbtc = dfbtc.ta.macd(fast=12, slow=26)
get_hist1 = macd1.iloc[35, 1]
get_hist5 = macd5.iloc[35, 1]
get_hist15 = macd15.iloc[35, 1]
get_hist1day = macd1day.iloc[35, 1]
get_histbtc = macdbtc.iloc[35, 1]
except Exception as e:
print(f'{coin} Exception {e}')
continue
if all_info:
if get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >= 0 and get_hist1day >= 0 and get_histbtc >= 0:
print(f'MACD HIST {coin} {current_potential:2f}% {TextColors.SELL_PROFIT}{get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}{TextColors.DEFAULT}')
else:
print(f'MACD HIST {coin} {current_potential:2f}% {get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}')
if get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >= 0 and get_hist1day >= 0 and get_histbtc >= 0:
# Add to coins for Snail to scan
print(f'{TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}\n')
macd_list.append(coins[coin])
# else:
# print(f'Do NOT buy {coin}')
if macd_list:
# print(macd_list)
sort_list = sorted(macd_list, key=lambda x: x[f'current_potential'], reverse=True)
for i in sort_list:
coin = i['symbol']
current_potential = i['current_potential']
last_price = float(init_price[coin + PAIR_WITH]['price'])
# print(f'list {coin} {last_price}')
high_price = float(max(coins[coin]['high_price']))
# print(f'list {coin} {high_price}')
low_price = float(min(coins[coin]['low_price']))
# print(f'list {coin} {low_price}')
range = high_price - low_price
potential = (low_price / high_price) * 100
buy_above = low_price * 1.00
buy_below = high_price - (range * percent_below)
current_range = high_price - last_price
if all_info:
print(f'\nPrice: ${last_price:.3f}\n'
f'High: ${high_price:.3f}\n'
# f'Plan: TP {TP}% TTP {TTP}%\n'
f'Day Max Range: ${range:.3f}\n'
f'Current Range: ${current_range:.3f} \n'
# f'Daily Range: ${range:.3f}\n'
# f'Current Range ${current_range:.3f} \n'
# f'Potential profit before safety: {potential:.0f}%\n'
# f'Buy above: ${buy_above:.3f}\n'
f'Buy Below: ${buy_below:.3f}\n'
f'Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}'
# f'Max Profit {max_potential:.2f}%\n'
# f'Min Profit {min_potential:.2f}%\n'
)
# print(f'Adding {TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} to buy list')
# add to signal
with open(f'signals/snail_scan{signal_file_type}', 'a+') as f:
f.write(str(coin + PAIR_WITH) + '\n')
# else:
# print(f'{TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} may not be profitable at this time')
snail_coins = len(current_potential_list)
macd_coins = len(macd_list)
snail_discord = f'Snail found {snail_coins} coins and MACD approved {macd_coins}'
if DISCORD:
msg_discord(snail_discord)
print(f'{TextColors.TURQUOISE}Snail found {snail_coins} coins and MACD approved {macd_coins} coins. L: {LIMIT}days Min: {profit_min}% Risk: {percent_below * 100}% {TextColors.DEFAULT}')
time.sleep(180)
| [5, 7, 8, 9, 10] |
845 | ea07cb640e76ced8be92b55ee14e1d3058e073c9 | <mask token>
| from .variational_legacy import *
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from .variational_legacy import *
| null | null | [0, 1, 2] |
846 | 351b2c2a18473e6ac541a96165c69c836ea101de | <mask token>
| class Solution:
<mask token>
| class Solution:
def countKDifference(self, nums: List[int], k: int) ->int:
def abs(x, y):
if x - y >= 0:
return x - y
else:
return y - x
ret = 0
for i in range(len(nums)):
for j in range(i, len(nums)):
if abs(nums[i], nums[j]) == k:
ret += 1
return ret
| #
# @lc app=leetcode.cn id=2006 lang=python3
#
# [2006] 差的绝对值为 K 的数对数目
#
# @lc code=start
class Solution:
def countKDifference(self, nums: List[int], k: int) -> int:
def abs(x,y):
if(x-y>=0):
return x-y
else:
return y-x
ret = 0
for i in range(len(nums)):
for j in range(i,len(nums)):
if(abs(nums[i],nums[j])==k):
ret += 1
return ret
# @lc code=end
| null | [0, 1, 2, 3] |
847 | 0fbf8efd39f583581c46fcd3f84c65a7787145cd | <mask token>
class PolicyEstimator:
<mask token>
class ValueEstimator:
def __init__(self, reuse=False, trainable=True):
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.
uint8, name='X')
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')
x = tf.to_float(self.states) / 255.0
with tf.variable_scope('shared', reuse=reuse):
fc1 = build_shared_network(x, add_summaries=not reuse)
with tf.variable_scope('value_net'):
self.logits = tf.layers.dense(fc1, 1, activation=None)
self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name=
'logits')
self.losses = tf.squared_difference(self.logits, self.targets)
self.loss = tf.reduce_sum(self.losses, name='loss')
self.predictions = {'logits': self.logits}
prefix = tf.get_variable_scope().name
tf.summary.scalar(self.loss.name, self.loss)
tf.summary.scalar('{}/max_value'.format(prefix), tf.reduce_max(
self.logits))
tf.summary.scalar('{}/min_value'.format(prefix), tf.reduce_min(
self.logits))
tf.summary.scalar('{}/mean_value'.format(prefix), tf.
reduce_mean(self.logits))
tf.summary.scalar('{}/reward_max'.format(prefix), tf.reduce_max
(self.targets))
tf.summary.scalar('{}/reward_min'.format(prefix), tf.reduce_min
(self.targets))
tf.summary.scalar('{}/reward_mean'.format(prefix), tf.
reduce_mean(self.targets))
tf.summary.histogram('{}/reward_targets'.format(prefix), self.
targets)
tf.summary.histogram('{}/values'.format(prefix), self.logits)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99,
0.0, 1e-06)
self.grads_and_vars = self.optimizer.compute_gradients(self
.loss)
self.grads_and_vars = [[grad, var] for grad, var in self.
grads_and_vars if grad is not None]
self.train_op = self.optimizer.apply_gradients(self.
grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if 'policy_net' in s.name or
'shared' in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name]
self.summaries = tf.summary.merge(summaries)
| <mask token>
class PolicyEstimator:
def __init__(self, num_ouptuts, reuse=False, trainable=True):
self.num_outputs = num_ouptuts
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.
uint8, name='X')
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')
self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name=
'actions')
x = tf.to_float(self.states) / 255.0
batch_size = tf.shape(self.states)[0]
with tf.variable_scope('shared', reuse=reuse):
fc1 = build_shared_network(x, add_summaries=not reuse)
with tf.variable_scope('policy_net'):
self.logits = tf.layers.dense(fc1, num_ouptuts, activation=None)
self.probs = tf.nn.softmax(self.logits) + 1e-08
self.predictions = {'logits': self.logits, 'probs': self.probs}
self.entropy = -tf.reduce_sum(self.probs * tf.log(self.probs),
1, name='entropy')
self.entropy_mean = tf.reduce_mean(self.entropy, name=
'entropy_mean')
gather_indices = tf.range(batch_size) * tf.shape(self.probs)[1
] + self.actions
self.picked_action_probs = tf.gather(tf.reshape(self.probs, [-1
]), gather_indices)
self.losses = -(tf.log(self.picked_action_probs) * self.targets +
0.01 * self.entropy)
self.loss = tf.reduce_sum(self.losses, name='loss')
tf.summary.scalar(self.loss.op.name, self.loss)
tf.summary.scalar(self.entropy_mean.op.name, self.entropy_mean)
tf.summary.histogram(self.entropy.op.name, self.entropy)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99,
0.0, 1e-06)
self.grads_and_vars = self.optimizer.compute_gradients(self
.loss)
self.grads_and_vars = [[grad, var] for grad, var in self.
grads_and_vars if grad is not None]
self.train_op = self.optimizer.apply_gradients(self.
grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if 'policy_net' in s.
name or 'shared' in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name
]
self.summaries = tf.summary.merge(summaries)
class ValueEstimator:
def __init__(self, reuse=False, trainable=True):
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.
uint8, name='X')
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')
x = tf.to_float(self.states) / 255.0
with tf.variable_scope('shared', reuse=reuse):
fc1 = build_shared_network(x, add_summaries=not reuse)
with tf.variable_scope('value_net'):
self.logits = tf.layers.dense(fc1, 1, activation=None)
self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name=
'logits')
self.losses = tf.squared_difference(self.logits, self.targets)
self.loss = tf.reduce_sum(self.losses, name='loss')
self.predictions = {'logits': self.logits}
prefix = tf.get_variable_scope().name
tf.summary.scalar(self.loss.name, self.loss)
tf.summary.scalar('{}/max_value'.format(prefix), tf.reduce_max(
self.logits))
tf.summary.scalar('{}/min_value'.format(prefix), tf.reduce_min(
self.logits))
tf.summary.scalar('{}/mean_value'.format(prefix), tf.
reduce_mean(self.logits))
tf.summary.scalar('{}/reward_max'.format(prefix), tf.reduce_max
(self.targets))
tf.summary.scalar('{}/reward_min'.format(prefix), tf.reduce_min
(self.targets))
tf.summary.scalar('{}/reward_mean'.format(prefix), tf.
reduce_mean(self.targets))
tf.summary.histogram('{}/reward_targets'.format(prefix), self.
targets)
tf.summary.histogram('{}/values'.format(prefix), self.logits)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99,
0.0, 1e-06)
self.grads_and_vars = self.optimizer.compute_gradients(self
.loss)
self.grads_and_vars = [[grad, var] for grad, var in self.
grads_and_vars if grad is not None]
self.train_op = self.optimizer.apply_gradients(self.
grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if 'policy_net' in s.name or
'shared' in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name]
self.summaries = tf.summary.merge(summaries)
| <mask token>
def build_shared_network(x, add_summaries=False):
conv1 = tf.layers.conv2d(x, 16, 8, 4, activation=tf.nn.relu, name='conv1')
conv2 = tf.layers.conv2d(conv1, 32, 4, 2, activation=tf.nn.relu, name=
'conv2')
fc1 = tf.layers.dense(tf.layers.flatten(conv2), 256, name='fc1')
if add_summaries:
tf.contrib.layers.summarize_activation(conv1)
tf.contrib.layers.summarize_activation(conv2)
tf.contrib.layers.summarize_activation(fc1)
return fc1
class PolicyEstimator:
def __init__(self, num_ouptuts, reuse=False, trainable=True):
self.num_outputs = num_ouptuts
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.
uint8, name='X')
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')
self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name=
'actions')
x = tf.to_float(self.states) / 255.0
batch_size = tf.shape(self.states)[0]
with tf.variable_scope('shared', reuse=reuse):
fc1 = build_shared_network(x, add_summaries=not reuse)
with tf.variable_scope('policy_net'):
self.logits = tf.layers.dense(fc1, num_ouptuts, activation=None)
self.probs = tf.nn.softmax(self.logits) + 1e-08
self.predictions = {'logits': self.logits, 'probs': self.probs}
self.entropy = -tf.reduce_sum(self.probs * tf.log(self.probs),
1, name='entropy')
self.entropy_mean = tf.reduce_mean(self.entropy, name=
'entropy_mean')
gather_indices = tf.range(batch_size) * tf.shape(self.probs)[1
] + self.actions
self.picked_action_probs = tf.gather(tf.reshape(self.probs, [-1
]), gather_indices)
self.losses = -(tf.log(self.picked_action_probs) * self.targets +
0.01 * self.entropy)
self.loss = tf.reduce_sum(self.losses, name='loss')
tf.summary.scalar(self.loss.op.name, self.loss)
tf.summary.scalar(self.entropy_mean.op.name, self.entropy_mean)
tf.summary.histogram(self.entropy.op.name, self.entropy)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99,
0.0, 1e-06)
self.grads_and_vars = self.optimizer.compute_gradients(self
.loss)
self.grads_and_vars = [[grad, var] for grad, var in self.
grads_and_vars if grad is not None]
self.train_op = self.optimizer.apply_gradients(self.
grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if 'policy_net' in s.
name or 'shared' in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name
]
self.summaries = tf.summary.merge(summaries)
class ValueEstimator:
def __init__(self, reuse=False, trainable=True):
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.
uint8, name='X')
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')
x = tf.to_float(self.states) / 255.0
with tf.variable_scope('shared', reuse=reuse):
fc1 = build_shared_network(x, add_summaries=not reuse)
with tf.variable_scope('value_net'):
self.logits = tf.layers.dense(fc1, 1, activation=None)
self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name=
'logits')
self.losses = tf.squared_difference(self.logits, self.targets)
self.loss = tf.reduce_sum(self.losses, name='loss')
self.predictions = {'logits': self.logits}
prefix = tf.get_variable_scope().name
tf.summary.scalar(self.loss.name, self.loss)
tf.summary.scalar('{}/max_value'.format(prefix), tf.reduce_max(
self.logits))
tf.summary.scalar('{}/min_value'.format(prefix), tf.reduce_min(
self.logits))
tf.summary.scalar('{}/mean_value'.format(prefix), tf.
reduce_mean(self.logits))
tf.summary.scalar('{}/reward_max'.format(prefix), tf.reduce_max
(self.targets))
tf.summary.scalar('{}/reward_min'.format(prefix), tf.reduce_min
(self.targets))
tf.summary.scalar('{}/reward_mean'.format(prefix), tf.
reduce_mean(self.targets))
tf.summary.histogram('{}/reward_targets'.format(prefix), self.
targets)
tf.summary.histogram('{}/values'.format(prefix), self.logits)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99,
0.0, 1e-06)
self.grads_and_vars = self.optimizer.compute_gradients(self
.loss)
self.grads_and_vars = [[grad, var] for grad, var in self.
grads_and_vars if grad is not None]
self.train_op = self.optimizer.apply_gradients(self.
grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if 'policy_net' in s.name or
'shared' in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name]
self.summaries = tf.summary.merge(summaries)
| import tensorflow as tf
def build_shared_network(x, add_summaries=False):
conv1 = tf.layers.conv2d(x, 16, 8, 4, activation=tf.nn.relu, name='conv1')
conv2 = tf.layers.conv2d(conv1, 32, 4, 2, activation=tf.nn.relu, name=
'conv2')
fc1 = tf.layers.dense(tf.layers.flatten(conv2), 256, name='fc1')
if add_summaries:
tf.contrib.layers.summarize_activation(conv1)
tf.contrib.layers.summarize_activation(conv2)
tf.contrib.layers.summarize_activation(fc1)
return fc1
class PolicyEstimator:
def __init__(self, num_ouptuts, reuse=False, trainable=True):
self.num_outputs = num_ouptuts
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.
uint8, name='X')
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')
self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name=
'actions')
x = tf.to_float(self.states) / 255.0
batch_size = tf.shape(self.states)[0]
with tf.variable_scope('shared', reuse=reuse):
fc1 = build_shared_network(x, add_summaries=not reuse)
with tf.variable_scope('policy_net'):
self.logits = tf.layers.dense(fc1, num_ouptuts, activation=None)
self.probs = tf.nn.softmax(self.logits) + 1e-08
self.predictions = {'logits': self.logits, 'probs': self.probs}
self.entropy = -tf.reduce_sum(self.probs * tf.log(self.probs),
1, name='entropy')
self.entropy_mean = tf.reduce_mean(self.entropy, name=
'entropy_mean')
gather_indices = tf.range(batch_size) * tf.shape(self.probs)[1
] + self.actions
self.picked_action_probs = tf.gather(tf.reshape(self.probs, [-1
]), gather_indices)
self.losses = -(tf.log(self.picked_action_probs) * self.targets +
0.01 * self.entropy)
self.loss = tf.reduce_sum(self.losses, name='loss')
tf.summary.scalar(self.loss.op.name, self.loss)
tf.summary.scalar(self.entropy_mean.op.name, self.entropy_mean)
tf.summary.histogram(self.entropy.op.name, self.entropy)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99,
0.0, 1e-06)
self.grads_and_vars = self.optimizer.compute_gradients(self
.loss)
self.grads_and_vars = [[grad, var] for grad, var in self.
grads_and_vars if grad is not None]
self.train_op = self.optimizer.apply_gradients(self.
grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if 'policy_net' in s.
name or 'shared' in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name
]
self.summaries = tf.summary.merge(summaries)
class ValueEstimator:
def __init__(self, reuse=False, trainable=True):
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.
uint8, name='X')
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')
x = tf.to_float(self.states) / 255.0
with tf.variable_scope('shared', reuse=reuse):
fc1 = build_shared_network(x, add_summaries=not reuse)
with tf.variable_scope('value_net'):
self.logits = tf.layers.dense(fc1, 1, activation=None)
self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name=
'logits')
self.losses = tf.squared_difference(self.logits, self.targets)
self.loss = tf.reduce_sum(self.losses, name='loss')
self.predictions = {'logits': self.logits}
prefix = tf.get_variable_scope().name
tf.summary.scalar(self.loss.name, self.loss)
tf.summary.scalar('{}/max_value'.format(prefix), tf.reduce_max(
self.logits))
tf.summary.scalar('{}/min_value'.format(prefix), tf.reduce_min(
self.logits))
tf.summary.scalar('{}/mean_value'.format(prefix), tf.
reduce_mean(self.logits))
tf.summary.scalar('{}/reward_max'.format(prefix), tf.reduce_max
(self.targets))
tf.summary.scalar('{}/reward_min'.format(prefix), tf.reduce_min
(self.targets))
tf.summary.scalar('{}/reward_mean'.format(prefix), tf.
reduce_mean(self.targets))
tf.summary.histogram('{}/reward_targets'.format(prefix), self.
targets)
tf.summary.histogram('{}/values'.format(prefix), self.logits)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99,
0.0, 1e-06)
self.grads_and_vars = self.optimizer.compute_gradients(self
.loss)
self.grads_and_vars = [[grad, var] for grad, var in self.
grads_and_vars if grad is not None]
self.train_op = self.optimizer.apply_gradients(self.
grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if 'policy_net' in s.name or
'shared' in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name]
self.summaries = tf.summary.merge(summaries)
| import tensorflow as tf
def build_shared_network(x, add_summaries=False):
conv1 = tf.layers.conv2d(x, 16, 8, 4, activation=tf.nn.relu, name="conv1")
conv2 = tf.layers.conv2d(conv1, 32, 4, 2, activation=tf.nn.relu, name="conv2")
fc1 = tf.layers.dense(tf.layers.flatten(conv2), 256, name="fc1")
if add_summaries:
tf.contrib.layers.summarize_activation(conv1)
tf.contrib.layers.summarize_activation(conv2)
tf.contrib.layers.summarize_activation(fc1)
return fc1
class PolicyEstimator():
def __init__(self, num_ouptuts, reuse=False, trainable=True):
self.num_outputs = num_ouptuts
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name="X")
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name="Y")
self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name="actions")
x = tf.to_float(self.states) / 255.0
batch_size = tf.shape(self.states)[0]
with tf.variable_scope("shared", reuse=reuse):
fc1 = build_shared_network(x, add_summaries=(not reuse))
with tf.variable_scope("policy_net"):
self.logits = tf.layers.dense(fc1, num_ouptuts, activation=None)
self.probs = tf.nn.softmax(self.logits) + 1e-8
self.predictions = {"logits": self.logits, "probs": self.probs}
self.entropy = -tf.reduce_sum(self.probs * tf.log(self.probs), 1, name="entropy")
self.entropy_mean = tf.reduce_mean(self.entropy, name="entropy_mean")
            # Treat the 2-D probs array as a flat list: each row's starting offset plus the action value (not one-hot) gives that action's position
            # Then use tf.gather to pull out only the probability of the chosen action
gather_indices = tf.range(batch_size) * tf.shape(self.probs)[1] + self.actions
self.picked_action_probs = tf.gather(tf.reshape(self.probs, [-1]), gather_indices)
self.losses = - (tf.log(self.picked_action_probs) * self.targets + 0.01*self.entropy)
self.loss = tf.reduce_sum(self.losses, name="loss")
tf.summary.scalar(self.loss.op.name, self.loss)
tf.summary.scalar(self.entropy_mean.op.name, self.entropy_mean)
tf.summary.histogram(self.entropy.op.name, self.entropy)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)
self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
            # Apparently done to keep training from breaking when a gradient is None
self.grads_and_vars = [[grad, var] for grad, var in self.grads_and_vars if grad is not None]
            # train_op is not actually used here; the worker applies the gradients, so this could probably be removed
self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if "policy_net" in s.name or "shared" in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name]
self.summaries = tf.summary.merge(summaries)
class ValueEstimator():
def __init__(self, reuse=False, trainable=True):
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name="X")
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name="Y")
x = tf.to_float(self.states) / 255.0
with tf.variable_scope("shared", reuse=reuse):
fc1 = build_shared_network(x, add_summaries=(not reuse))
with tf.variable_scope("value_net"):
self.logits = tf.layers.dense(fc1, 1, activation=None)
            # squeeze drops dimensions of size 1, e.g. [1, 2, 3] squeeze => [2, 3]
self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name="logits")
self.losses = tf.squared_difference(self.logits, self.targets)
self.loss = tf.reduce_sum(self.losses, name="loss")
self.predictions = { "logits": self.logits }
prefix = tf.get_variable_scope().name
tf.summary.scalar(self.loss.name, self.loss)
tf.summary.scalar("{}/max_value".format(prefix), tf.reduce_max(self.logits))
tf.summary.scalar("{}/min_value".format(prefix), tf.reduce_min(self.logits))
tf.summary.scalar("{}/mean_value".format(prefix), tf.reduce_mean(self.logits))
tf.summary.scalar("{}/reward_max".format(prefix), tf.reduce_max(self.targets))
tf.summary.scalar("{}/reward_min".format(prefix), tf.reduce_min(self.targets))
tf.summary.scalar("{}/reward_mean".format(prefix), tf.reduce_mean(self.targets))
tf.summary.histogram("{}/reward_targets".format(prefix), self.targets)
tf.summary.histogram("{}/values".format(prefix), self.logits)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)
self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
self.grads_and_vars = [[grad, var] for grad, var in self.grads_and_vars if grad is not None]
self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if "policy_net" in s.name or "shared" in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name]
self.summaries = tf.summary.merge(summaries)
| [
3,
4,
5,
6,
7
] |
848 | 4d63a5f09164b78faa731af6dce41969edc2c4f5 | <mask token>
class Question:
<mask token>
<mask token>
def findSecond(self, sentenceDoc, verb, children):
for child in children:
if child.dep_ == 'attr' or child.dep_ == 'nsubj':
temp = self.nounArray.findWord(child.orth_)
subjectChildren = []
for ch in child.children:
subjectChildren.append(ch)
if not subjectChildren:
subjectChildren = children
subjectChildren.remove(child)
self.findThird(sentenceDoc, temp, verb, subjectChildren, False)
break
<mask token>
def writeOtter(self, first, second, third):
self.file.write('-rdf("' + first + '", "' + second + '", "' + third +
'").\n')
| <mask token>
class Question:
<mask token>
def findFirst(self, sentence):
sentenceDoc = self.nlp(sentence)
for word in sentenceDoc:
if word.dep_ == 'ROOT':
verb = self.verbArray.findWord(word.orth_)
children = []
for ch in word.children:
children.append(ch)
self.findSecond(sentenceDoc, verb, children)
break
def findSecond(self, sentenceDoc, verb, children):
for child in children:
if child.dep_ == 'attr' or child.dep_ == 'nsubj':
temp = self.nounArray.findWord(child.orth_)
subjectChildren = []
for ch in child.children:
subjectChildren.append(ch)
if not subjectChildren:
subjectChildren = children
subjectChildren.remove(child)
self.findThird(sentenceDoc, temp, verb, subjectChildren, False)
break
<mask token>
def writeOtter(self, first, second, third):
self.file.write('-rdf("' + first + '", "' + second + '", "' + third +
'").\n')
| <mask token>
class Question:
def __init__(self, nlp, otter, nounArray, verbArray):
self.nlp = nlp
self.nounArray = nounArray
self.verbArray = verbArray
self.file = otter
def findFirst(self, sentence):
sentenceDoc = self.nlp(sentence)
for word in sentenceDoc:
if word.dep_ == 'ROOT':
verb = self.verbArray.findWord(word.orth_)
children = []
for ch in word.children:
children.append(ch)
self.findSecond(sentenceDoc, verb, children)
break
def findSecond(self, sentenceDoc, verb, children):
for child in children:
if child.dep_ == 'attr' or child.dep_ == 'nsubj':
temp = self.nounArray.findWord(child.orth_)
subjectChildren = []
for ch in child.children:
subjectChildren.append(ch)
if not subjectChildren:
subjectChildren = children
subjectChildren.remove(child)
self.findThird(sentenceDoc, temp, verb, subjectChildren, False)
break
def findThird(self, sentenceDoc, subject, verb, children, flag):
for child in children:
if child.dep_ == 'appos' or child.dep_ == 'pobj':
temp = self.nounArray.findWord(child.orth_)
if temp is None:
w = datastructure.Word(child.orth_)
w.addType(child.pos_)
w.addUri(wordUri.findUri(w))
print(subject.uri, '- ' + verb.uri + ' -', w.uri)
self.writeOtter(subject.uri, verb.uri, w.uri)
else:
print(subject.uri, '- ' + verb.uri + ' -', temp.uri)
self.writeOtter(subject.uri, verb.uri, temp.uri)
if child.dep_ == 'prep' or child.dep_ == 'acomp':
if not flag:
verb = datastructure.Word(child.orth_)
verb.addType(child.pos_)
verb.addUri(wordUri.findUri(verb))
verbChildren = []
for ch in child.children:
verbChildren.append(ch)
self.findThird(sentenceDoc, subject, verb, verbChildren, True)
def writeOtter(self, first, second, third):
self.file.write('-rdf("' + first + '", "' + second + '", "' + third +
'").\n')
| import datastructure
import wordUri
class Question:
def __init__(self, nlp, otter, nounArray, verbArray):
self.nlp = nlp
self.nounArray = nounArray
self.verbArray = verbArray
self.file = otter
def findFirst(self, sentence):
sentenceDoc = self.nlp(sentence)
for word in sentenceDoc:
if word.dep_ == 'ROOT':
verb = self.verbArray.findWord(word.orth_)
children = []
for ch in word.children:
children.append(ch)
self.findSecond(sentenceDoc, verb, children)
break
def findSecond(self, sentenceDoc, verb, children):
for child in children:
if child.dep_ == 'attr' or child.dep_ == 'nsubj':
temp = self.nounArray.findWord(child.orth_)
subjectChildren = []
for ch in child.children:
subjectChildren.append(ch)
if not subjectChildren:
subjectChildren = children
subjectChildren.remove(child)
self.findThird(sentenceDoc, temp, verb, subjectChildren, False)
break
def findThird(self, sentenceDoc, subject, verb, children, flag):
for child in children:
if child.dep_ == 'appos' or child.dep_ == 'pobj':
temp = self.nounArray.findWord(child.orth_)
if temp is None:
w = datastructure.Word(child.orth_)
w.addType(child.pos_)
w.addUri(wordUri.findUri(w))
print(subject.uri, '- ' + verb.uri + ' -', w.uri)
self.writeOtter(subject.uri, verb.uri, w.uri)
else:
print(subject.uri, '- ' + verb.uri + ' -', temp.uri)
self.writeOtter(subject.uri, verb.uri, temp.uri)
if child.dep_ == 'prep' or child.dep_ == 'acomp':
if not flag:
verb = datastructure.Word(child.orth_)
verb.addType(child.pos_)
verb.addUri(wordUri.findUri(verb))
verbChildren = []
for ch in child.children:
verbChildren.append(ch)
self.findThird(sentenceDoc, subject, verb, verbChildren, True)
def writeOtter(self, first, second, third):
self.file.write('-rdf("' + first + '", "' + second + '", "' + third +
'").\n')
| import datastructure
import wordUri
class Question:
def __init__(self, nlp, otter, nounArray, verbArray):
self.nlp = nlp
self.nounArray = nounArray
self.verbArray = verbArray
self.file = otter
def findFirst(self, sentence):
sentenceDoc = self.nlp(sentence)
for word in sentenceDoc:
if word.dep_ == "ROOT":
verb = self.verbArray.findWord(word.orth_)
children = []
for ch in word.children:
children.append(ch)
self.findSecond(sentenceDoc, verb, children)
break
def findSecond(self, sentenceDoc, verb, children):
for child in children:
if child.dep_ == "attr" or child.dep_ == "nsubj":
temp = self.nounArray.findWord(child.orth_)
subjectChildren = []
for ch in child.children:
subjectChildren.append(ch)
if not subjectChildren:
subjectChildren = children
subjectChildren.remove(child)
self.findThird(sentenceDoc, temp, verb, subjectChildren, False)
break
def findThird(self, sentenceDoc, subject, verb, children, flag):
for child in children:
if child.dep_ == "appos" or child.dep_ == "pobj":
temp = self.nounArray.findWord(child.orth_)
if temp is None:
w = datastructure.Word(child.orth_)
w.addType(child.pos_)
w.addUri(wordUri.findUri(w))
#w.addUri(w.word + "URI")
print(subject.uri, "- " + verb.uri + " -", w.uri)
self.writeOtter(subject.uri, verb.uri, w.uri)
else:
print(subject.uri, "- " + verb.uri + " -", temp.uri)
self.writeOtter(subject.uri, verb.uri, temp.uri)
#self.recoursiveFind(sentenceDoc, subject, verb, child)
if child.dep_ == "prep" or child.dep_ == "acomp":
if not flag:
verb = datastructure.Word(child.orth_)
verb.addType(child.pos_)
verb.addUri(wordUri.findUri(verb))
verbChildren = []
for ch in child.children:
verbChildren.append(ch)
self.findThird(sentenceDoc, subject, verb, verbChildren, True)
def writeOtter(self, first, second, third):
self.file.write("-rdf(\"" + first + "\", \"" + second + "\", \"" + third + "\").\n")
| [
3,
4,
6,
7,
8
] |
849 | 6e01e36170f3f08f2030dbd4dd91019936fb9f5c | <mask token>
@routes.route('/signin', name='signin')
class SigninEndpoint(PoolHTTPEndpoint):
<mask token>
<mask token>
@back_to.setter
def back_to(self, value: typing.Optional[str]):
self.request.session['back_to'] = value
def render_template(self, context: typing.Dict[str, typing.Any]={}
) ->Response:
assert self.pool is not None
if self.pool.username_attributes:
email = (AuxiliaryIdentityAttribute.EMAIL in self.pool.
username_attributes)
phone_number = (AuxiliaryIdentityAttribute.PHONE_NUMBER in self
.pool.username_attributes)
if email and phone_number:
label = 'E-mail address or phone number'
elif email:
label = 'E-mail address'
elif phone_number:
label = 'Phone number'
else:
raise AssertionError()
else:
label = 'User name'
context['username_label'] = label
return self.templates(self.template, context=context)
async def get(self):
assert self.pool is not None
back_to = self.request.query_params.get('back_to')
reauth = bool_val(self.request.query_params.get('reauth'))
if self.request.user.is_authenticated and not reauth:
return RedirectResponse(back_to or self.success_page_url)
parsed_back_to = urlparse(back_to)
if (parsed_back_to.scheme and parsed_back_to.scheme != self.request
.url.scheme or parsed_back_to.hostname and parsed_back_to.
hostname != self.request.url.hostname):
raise HTTPException(status_code=HTTP_400_BAD_REQUEST)
if back_to is not None:
self.back_to = back_to
return self.render_template(context={'form': {'reauth': reauth}})
async def post(self):
assert self.pool is not None
form = await self.request.form()
try:
user = await async_(lambda : self.pool.query_user(form[
'username']).one())()
self.request.app.state.kdf.verify(user.password, form['password'])
except Exception as e:
logger.debug(f"failed login attempt: {form['username']} - {e!r}")
return self.render_template(context={'form': form, 'alerts': [
'No user registered with that user name and password.']})
self.per_pool_session['user_id'] = user.id
return RedirectResponse(self.back_to or self.success_page_url,
status_code=302)
@routes.route('/signin/success', name='signin_success')
class SignedinEndpoint(PoolHTTPEndpoint):
template = 'pools/signin_success.html'
async def get(self):
return self.templates(self.template)
@routes.route('/signout', name='signout', methods=['post'])
class SignOutEndpoint(PoolHTTPEndpoint):
async def post(self):
form = await self.request.form()
client_id = form.get('client_id')
try:
client = await async_(self.pool.clients.filter_by(
oauth2_client_id=client_id).one)()
except orm_exc.NoResultFound as e:
raise HTTPException(status_code=HTTP_404_NOT_FOUND) from e
back_to = form.get('back_to')
if back_to is None or back_to not in client.logout_uris:
back_to = self.request.url_for('pools:signout_success', pool=
self.pool.key)
if self.request.user.is_authenticated:
del self.per_pool_session['user_id']
return RedirectResponse(back_to, status_code=302)
@routes.route('/signout/success', name='signout_success')
class SignedOutEndpoint(PoolHTTPEndpoint):
async def get(self):
return self.templates('pools/signout_success.html')
@routes.route('/', name='index')
class IndexEndpoint(PoolHTTPEndpoint):
async def get(self):
return self.templates('pools/index.html')
@routes.route('/.well-known/jwks.json', name='signin_success')
class JWKSEndpoint(PoolHTTPEndpoint):
async def get(self):
keys = []
if isinstance(self.request.app.state.jwt_config.key, dict):
public_jwk = build_jwt_public_key_from_private_key(self.request
.app.state.jwt_config.key)
public_jwk['use'] = 'sig'
keys.append(public_jwk)
return JSONResponse({'keys': keys})
| <mask token>
class PoolHTTPEndpoint(ContextualHTTPEndpoint):
<mask token>
<mask token>
@property
def per_pool_session(self) ->typing.Dict[str, typing.Any]:
pool = self.pool
if pool is not None:
return self.request.scope['session'].setdefault(pool.key, {})
else:
return self.request.scope['session']
async def dispatch(self):
if self.request.get(POOL_KEY) is None:
raise HTTPException(status_code=HTTP_404_NOT_FOUND)
await super().dispatch()
<mask token>
<mask token>
@routes.route('/signin', name='signin')
class SigninEndpoint(PoolHTTPEndpoint):
template = 'pools/signin.html'
@property
def back_to(self) ->typing.Optional[str]:
return self.request.session.get('back_to')
@back_to.setter
def back_to(self, value: typing.Optional[str]):
self.request.session['back_to'] = value
def render_template(self, context: typing.Dict[str, typing.Any]={}
) ->Response:
assert self.pool is not None
if self.pool.username_attributes:
email = (AuxiliaryIdentityAttribute.EMAIL in self.pool.
username_attributes)
phone_number = (AuxiliaryIdentityAttribute.PHONE_NUMBER in self
.pool.username_attributes)
if email and phone_number:
label = 'E-mail address or phone number'
elif email:
label = 'E-mail address'
elif phone_number:
label = 'Phone number'
else:
raise AssertionError()
else:
label = 'User name'
context['username_label'] = label
return self.templates(self.template, context=context)
async def get(self):
assert self.pool is not None
back_to = self.request.query_params.get('back_to')
reauth = bool_val(self.request.query_params.get('reauth'))
if self.request.user.is_authenticated and not reauth:
return RedirectResponse(back_to or self.success_page_url)
parsed_back_to = urlparse(back_to)
if (parsed_back_to.scheme and parsed_back_to.scheme != self.request
.url.scheme or parsed_back_to.hostname and parsed_back_to.
hostname != self.request.url.hostname):
raise HTTPException(status_code=HTTP_400_BAD_REQUEST)
if back_to is not None:
self.back_to = back_to
return self.render_template(context={'form': {'reauth': reauth}})
async def post(self):
assert self.pool is not None
form = await self.request.form()
try:
user = await async_(lambda : self.pool.query_user(form[
'username']).one())()
self.request.app.state.kdf.verify(user.password, form['password'])
except Exception as e:
logger.debug(f"failed login attempt: {form['username']} - {e!r}")
return self.render_template(context={'form': form, 'alerts': [
'No user registered with that user name and password.']})
self.per_pool_session['user_id'] = user.id
return RedirectResponse(self.back_to or self.success_page_url,
status_code=302)
@routes.route('/signin/success', name='signin_success')
class SignedinEndpoint(PoolHTTPEndpoint):
template = 'pools/signin_success.html'
async def get(self):
return self.templates(self.template)
@routes.route('/signout', name='signout', methods=['post'])
class SignOutEndpoint(PoolHTTPEndpoint):
async def post(self):
form = await self.request.form()
client_id = form.get('client_id')
try:
client = await async_(self.pool.clients.filter_by(
oauth2_client_id=client_id).one)()
except orm_exc.NoResultFound as e:
raise HTTPException(status_code=HTTP_404_NOT_FOUND) from e
back_to = form.get('back_to')
if back_to is None or back_to not in client.logout_uris:
back_to = self.request.url_for('pools:signout_success', pool=
self.pool.key)
if self.request.user.is_authenticated:
del self.per_pool_session['user_id']
return RedirectResponse(back_to, status_code=302)
@routes.route('/signout/success', name='signout_success')
class SignedOutEndpoint(PoolHTTPEndpoint):
async def get(self):
return self.templates('pools/signout_success.html')
@routes.route('/', name='index')
class IndexEndpoint(PoolHTTPEndpoint):
async def get(self):
return self.templates('pools/index.html')
@routes.route('/.well-known/jwks.json', name='signin_success')
class JWKSEndpoint(PoolHTTPEndpoint):
async def get(self):
keys = []
if isinstance(self.request.app.state.jwt_config.key, dict):
public_jwk = build_jwt_public_key_from_private_key(self.request
.app.state.jwt_config.key)
public_jwk['use'] = 'sig'
keys.append(public_jwk)
return JSONResponse({'keys': keys})
| <mask token>
class PoolHTTPEndpoint(ContextualHTTPEndpoint):
@property
def templates(self):
return lambda name, context={}, *args, **kwargs: typing.cast(
WithTemplates, self.request).templates(name, {**context, 'pool':
self.request.scope.get(POOL_KEY)}, *args, **kwargs)
<mask token>
@property
def per_pool_session(self) ->typing.Dict[str, typing.Any]:
pool = self.pool
if pool is not None:
return self.request.scope['session'].setdefault(pool.key, {})
else:
return self.request.scope['session']
async def dispatch(self):
if self.request.get(POOL_KEY) is None:
raise HTTPException(status_code=HTTP_404_NOT_FOUND)
await super().dispatch()
<mask token>
<mask token>
@routes.route('/signin', name='signin')
class SigninEndpoint(PoolHTTPEndpoint):
template = 'pools/signin.html'
@property
def back_to(self) ->typing.Optional[str]:
return self.request.session.get('back_to')
@back_to.setter
def back_to(self, value: typing.Optional[str]):
self.request.session['back_to'] = value
def render_template(self, context: typing.Dict[str, typing.Any]={}
) ->Response:
assert self.pool is not None
if self.pool.username_attributes:
email = (AuxiliaryIdentityAttribute.EMAIL in self.pool.
username_attributes)
phone_number = (AuxiliaryIdentityAttribute.PHONE_NUMBER in self
.pool.username_attributes)
if email and phone_number:
label = 'E-mail address or phone number'
elif email:
label = 'E-mail address'
elif phone_number:
label = 'Phone number'
else:
raise AssertionError()
else:
label = 'User name'
context['username_label'] = label
return self.templates(self.template, context=context)
async def get(self):
assert self.pool is not None
back_to = self.request.query_params.get('back_to')
reauth = bool_val(self.request.query_params.get('reauth'))
if self.request.user.is_authenticated and not reauth:
return RedirectResponse(back_to or self.success_page_url)
parsed_back_to = urlparse(back_to)
if (parsed_back_to.scheme and parsed_back_to.scheme != self.request
.url.scheme or parsed_back_to.hostname and parsed_back_to.
hostname != self.request.url.hostname):
raise HTTPException(status_code=HTTP_400_BAD_REQUEST)
if back_to is not None:
self.back_to = back_to
return self.render_template(context={'form': {'reauth': reauth}})
async def post(self):
assert self.pool is not None
form = await self.request.form()
try:
user = await async_(lambda : self.pool.query_user(form[
'username']).one())()
self.request.app.state.kdf.verify(user.password, form['password'])
except Exception as e:
logger.debug(f"failed login attempt: {form['username']} - {e!r}")
return self.render_template(context={'form': form, 'alerts': [
'No user registered with that user name and password.']})
self.per_pool_session['user_id'] = user.id
return RedirectResponse(self.back_to or self.success_page_url,
status_code=302)
@routes.route('/signin/success', name='signin_success')
class SignedinEndpoint(PoolHTTPEndpoint):
template = 'pools/signin_success.html'
async def get(self):
return self.templates(self.template)
@routes.route('/signout', name='signout', methods=['post'])
class SignOutEndpoint(PoolHTTPEndpoint):
async def post(self):
form = await self.request.form()
client_id = form.get('client_id')
try:
client = await async_(self.pool.clients.filter_by(
oauth2_client_id=client_id).one)()
except orm_exc.NoResultFound as e:
raise HTTPException(status_code=HTTP_404_NOT_FOUND) from e
back_to = form.get('back_to')
if back_to is None or back_to not in client.logout_uris:
back_to = self.request.url_for('pools:signout_success', pool=
self.pool.key)
if self.request.user.is_authenticated:
del self.per_pool_session['user_id']
return RedirectResponse(back_to, status_code=302)
@routes.route('/signout/success', name='signout_success')
class SignedOutEndpoint(PoolHTTPEndpoint):
async def get(self):
return self.templates('pools/signout_success.html')
@routes.route('/', name='index')
class IndexEndpoint(PoolHTTPEndpoint):
async def get(self):
return self.templates('pools/index.html')
@routes.route('/.well-known/jwks.json', name='signin_success')
class JWKSEndpoint(PoolHTTPEndpoint):
async def get(self):
keys = []
if isinstance(self.request.app.state.jwt_config.key, dict):
public_jwk = build_jwt_public_key_from_private_key(self.request
.app.state.jwt_config.key)
public_jwk['use'] = 'sig'
keys.append(public_jwk)
return JSONResponse({'keys': keys})
| import logging
import typing
from urllib.parse import urlparse
from sqlalchemy.orm import exc as orm_exc
from starlette.exceptions import HTTPException
from starlette.responses import JSONResponse, RedirectResponse, Response
from starlette.routing import Router
from starlette.status import HTTP_400_BAD_REQUEST, HTTP_404_NOT_FOUND
from ...executor import async_
from ...middlewares import WithTemplates
from ...utils import ContextualHTTPEndpoint
from ..application import POOL_KEY
from ..models import AuxiliaryIdentityAttribute, UserPool
from ..utils import build_jwt_public_key_from_private_key
logger = logging.getLogger(__name__)
routes = Router()
class PoolHTTPEndpoint(ContextualHTTPEndpoint):
@property
def templates(self):
return lambda name, context={}, *args, **kwargs: typing.cast(
WithTemplates, self.request).templates(name, {**context, 'pool':
self.request.scope.get(POOL_KEY)}, *args, **kwargs)
@property
def pool(self) ->typing.Optional[UserPool]:
return typing.cast(typing.Optional[UserPool], self.request.get(
POOL_KEY))
@property
def per_pool_session(self) ->typing.Dict[str, typing.Any]:
pool = self.pool
if pool is not None:
return self.request.scope['session'].setdefault(pool.key, {})
else:
return self.request.scope['session']
async def dispatch(self):
if self.request.get(POOL_KEY) is None:
raise HTTPException(status_code=HTTP_404_NOT_FOUND)
await super().dispatch()
@property
def success_page_url(self):
return self.request.url_for('pools:signin_success', pool=self.pool.key)
def bool_val(v: typing.Optional[str]) ->bool:
return v not in ('false', 'no', '0', None)
@routes.route('/signin', name='signin')
class SigninEndpoint(PoolHTTPEndpoint):
template = 'pools/signin.html'
@property
def back_to(self) ->typing.Optional[str]:
return self.request.session.get('back_to')
@back_to.setter
def back_to(self, value: typing.Optional[str]):
self.request.session['back_to'] = value
def render_template(self, context: typing.Dict[str, typing.Any]={}
) ->Response:
assert self.pool is not None
if self.pool.username_attributes:
email = (AuxiliaryIdentityAttribute.EMAIL in self.pool.
username_attributes)
phone_number = (AuxiliaryIdentityAttribute.PHONE_NUMBER in self
.pool.username_attributes)
if email and phone_number:
label = 'E-mail address or phone number'
elif email:
label = 'E-mail address'
elif phone_number:
label = 'Phone number'
else:
raise AssertionError()
else:
label = 'User name'
context['username_label'] = label
return self.templates(self.template, context=context)
async def get(self):
assert self.pool is not None
back_to = self.request.query_params.get('back_to')
reauth = bool_val(self.request.query_params.get('reauth'))
if self.request.user.is_authenticated and not reauth:
return RedirectResponse(back_to or self.success_page_url)
parsed_back_to = urlparse(back_to)
if (parsed_back_to.scheme and parsed_back_to.scheme != self.request
.url.scheme or parsed_back_to.hostname and parsed_back_to.
hostname != self.request.url.hostname):
raise HTTPException(status_code=HTTP_400_BAD_REQUEST)
if back_to is not None:
self.back_to = back_to
return self.render_template(context={'form': {'reauth': reauth}})
async def post(self):
assert self.pool is not None
form = await self.request.form()
try:
user = await async_(lambda : self.pool.query_user(form[
'username']).one())()
self.request.app.state.kdf.verify(user.password, form['password'])
except Exception as e:
logger.debug(f"failed login attempt: {form['username']} - {e!r}")
return self.render_template(context={'form': form, 'alerts': [
'No user registered with that user name and password.']})
self.per_pool_session['user_id'] = user.id
return RedirectResponse(self.back_to or self.success_page_url,
status_code=302)
@routes.route('/signin/success', name='signin_success')
class SignedinEndpoint(PoolHTTPEndpoint):
template = 'pools/signin_success.html'
async def get(self):
return self.templates(self.template)
@routes.route('/signout', name='signout', methods=['post'])
class SignOutEndpoint(PoolHTTPEndpoint):
async def post(self):
form = await self.request.form()
client_id = form.get('client_id')
try:
client = await async_(self.pool.clients.filter_by(
oauth2_client_id=client_id).one)()
except orm_exc.NoResultFound as e:
raise HTTPException(status_code=HTTP_404_NOT_FOUND) from e
back_to = form.get('back_to')
if back_to is None or back_to not in client.logout_uris:
back_to = self.request.url_for('pools:signout_success', pool=
self.pool.key)
if self.request.user.is_authenticated:
del self.per_pool_session['user_id']
return RedirectResponse(back_to, status_code=302)
@routes.route('/signout/success', name='signout_success')
class SignedOutEndpoint(PoolHTTPEndpoint):
async def get(self):
return self.templates('pools/signout_success.html')
@routes.route('/', name='index')
class IndexEndpoint(PoolHTTPEndpoint):
async def get(self):
return self.templates('pools/index.html')
@routes.route('/.well-known/jwks.json', name='signin_success')
class JWKSEndpoint(PoolHTTPEndpoint):
async def get(self):
keys = []
if isinstance(self.request.app.state.jwt_config.key, dict):
public_jwk = build_jwt_public_key_from_private_key(self.request
.app.state.jwt_config.key)
public_jwk['use'] = 'sig'
keys.append(public_jwk)
return JSONResponse({'keys': keys})
| # Copyright (c) 2020 Open Collector, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import logging
import typing
from urllib.parse import urlparse
from sqlalchemy.orm import exc as orm_exc
from starlette.exceptions import HTTPException
from starlette.responses import JSONResponse, RedirectResponse, Response
from starlette.routing import Router
from starlette.status import HTTP_400_BAD_REQUEST, HTTP_404_NOT_FOUND
from ...executor import async_
from ...middlewares import WithTemplates
from ...utils import ContextualHTTPEndpoint
from ..application import POOL_KEY
from ..models import AuxiliaryIdentityAttribute, UserPool
from ..utils import build_jwt_public_key_from_private_key
logger = logging.getLogger(__name__)
routes = Router()
class PoolHTTPEndpoint(ContextualHTTPEndpoint):
@property
def templates(self):
return lambda name, context={}, *args, **kwargs: (
typing.cast(WithTemplates, self.request).templates(
name,
{**context, "pool": self.request.scope.get(POOL_KEY)},
*args,
**kwargs,
)
)
@property
def pool(self) -> typing.Optional[UserPool]:
return typing.cast(typing.Optional[UserPool], self.request.get(POOL_KEY))
@property
def per_pool_session(self) -> typing.Dict[str, typing.Any]:
pool = self.pool
if pool is not None:
return self.request.scope["session"].setdefault(pool.key, {})
else:
return self.request.scope["session"]
async def dispatch(self):
if self.request.get(POOL_KEY) is None:
raise HTTPException(status_code=HTTP_404_NOT_FOUND)
await super().dispatch()
@property
def success_page_url(self):
return self.request.url_for("pools:signin_success", pool=self.pool.key)
def bool_val(v: typing.Optional[str]) -> bool:
return v not in ("false", "no", "0", None)
@routes.route("/signin", name="signin")
class SigninEndpoint(PoolHTTPEndpoint):
template = "pools/signin.html"
@property
def back_to(self) -> typing.Optional[str]:
return self.request.session.get("back_to")
@back_to.setter
def back_to(self, value: typing.Optional[str]):
self.request.session["back_to"] = value
def render_template(self, context: typing.Dict[str, typing.Any] = {}) -> Response:
assert self.pool is not None
if self.pool.username_attributes:
email = AuxiliaryIdentityAttribute.EMAIL in self.pool.username_attributes
phone_number = (
AuxiliaryIdentityAttribute.PHONE_NUMBER in self.pool.username_attributes
)
if email and phone_number:
label = "E-mail address or phone number"
elif email:
label = "E-mail address"
elif phone_number:
label = "Phone number"
else:
raise AssertionError()
else:
label = "User name"
context["username_label"] = label
return self.templates(self.template, context=context)
async def get(self):
assert self.pool is not None
back_to = self.request.query_params.get("back_to")
reauth = bool_val(self.request.query_params.get("reauth"))
if self.request.user.is_authenticated and not reauth:
return RedirectResponse(back_to or self.success_page_url)
parsed_back_to = urlparse(back_to)
if (
parsed_back_to.scheme and parsed_back_to.scheme != self.request.url.scheme
) or (
parsed_back_to.hostname
and parsed_back_to.hostname != self.request.url.hostname
):
raise HTTPException(status_code=HTTP_400_BAD_REQUEST)
if back_to is not None:
self.back_to = back_to
return self.render_template(context={"form": {"reauth": reauth}})
async def post(self):
assert self.pool is not None
form = await self.request.form()
try:
user = await async_(lambda: self.pool.query_user(form["username"]).one())()
self.request.app.state.kdf.verify(user.password, form["password"])
except Exception as e:
logger.debug(f"failed login attempt: {form['username']} - {e!r}")
return self.render_template(
context={
"form": form,
"alerts": ["No user registered with that user name and password."],
}
)
self.per_pool_session["user_id"] = user.id
return RedirectResponse(self.back_to or self.success_page_url, status_code=302)
@routes.route("/signin/success", name="signin_success")
class SignedinEndpoint(PoolHTTPEndpoint):
template = "pools/signin_success.html"
async def get(self):
return self.templates(self.template)
@routes.route("/signout", name="signout", methods=["post"])
class SignOutEndpoint(PoolHTTPEndpoint):
async def post(self):
form = await self.request.form()
client_id = form.get("client_id")
try:
client = await async_(
self.pool.clients.filter_by(oauth2_client_id=client_id).one
)()
except orm_exc.NoResultFound as e:
raise HTTPException(status_code=HTTP_404_NOT_FOUND) from e
back_to = form.get("back_to")
if back_to is None or back_to not in client.logout_uris:
back_to = self.request.url_for("pools:signout_success", pool=self.pool.key)
if self.request.user.is_authenticated:
del self.per_pool_session["user_id"]
return RedirectResponse(back_to, status_code=302)
@routes.route("/signout/success", name="signout_success")
class SignedOutEndpoint(PoolHTTPEndpoint):
async def get(self):
return self.templates("pools/signout_success.html")
@routes.route("/", name="index")
class IndexEndpoint(PoolHTTPEndpoint):
async def get(self):
return self.templates("pools/index.html")
@routes.route("/.well-known/jwks.json", name="signin_success")
class JWKSEndpoint(PoolHTTPEndpoint):
async def get(self):
keys = []
if isinstance(self.request.app.state.jwt_config.key, dict):
public_jwk = build_jwt_public_key_from_private_key(
self.request.app.state.jwt_config.key
)
public_jwk["use"] = "sig"
keys.append(public_jwk)
return JSONResponse(
{
"keys": keys,
}
)
| [
9,
13,
14,
19,
20
] |
850 | 87504fb88cbbf810ad8bab08bc59284d2cf37cce | <mask token>
class Solution(object):
<mask token>
| <mask token>
class Solution(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i in range(0, len(nums), 1):
index = abs(nums[i]) - 1
nums[index] = -abs(nums[index])
return [(i + 1) for i in range(0, len(nums), 1) if nums[i] > 0]
| class Solution(object):
<mask token>
class Solution(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i in range(0, len(nums), 1):
index = abs(nums[i]) - 1
nums[index] = -abs(nums[index])
return [(i + 1) for i in range(0, len(nums), 1) if nums[i] > 0]
| class Solution(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
ns = [0] * len(nums)
for i in range(0, len(nums), 1):
ns[nums[i] - 1] = 1
ret = []
for j in range(0, len(ns), 1):
if ns[j] == 0:
ret.append(j + 1)
return ret
class Solution(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i in range(0, len(nums), 1):
index = abs(nums[i]) - 1
nums[index] = -abs(nums[index])
return [(i + 1) for i in range(0, len(nums), 1) if nums[i] > 0]
| class Solution(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
ns = [0]*len(nums)
for i in range(0, len(nums), 1):
ns[nums[i]-1] = 1
ret = []
for j in range(0, len(ns), 1):
if(ns[j] == 0): ret.append(j+1)
return ret
class Solution(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i in range(0, len(nums), 1):
index = abs(nums[i]) - 1
nums[index] = - abs(nums[index])
return [i + 1 for i in range(0, len(nums), 1) if nums[i] > 0] | [
1,
2,
3,
4,
5
] |
851 | a7add26a919a41e52ae41c6b4c4079eadaa8aa1d | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('main', '0036_auto_20180516_1818')]
operations = [migrations.AddField(model_name='promotion', name='image',
field=models.ImageField(default=1, upload_to='images/promotion',
verbose_name='Image 1318x790'), preserve_default=False)]
| from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('main', '0036_auto_20180516_1818')]
operations = [migrations.AddField(model_name='promotion', name='image',
field=models.ImageField(default=1, upload_to='images/promotion',
verbose_name='Image 1318x790'), preserve_default=False)]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-16 12:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0036_auto_20180516_1818'),
]
operations = [
migrations.AddField(
model_name='promotion',
name='image',
field=models.ImageField(default=1, upload_to='images/promotion', verbose_name='Image 1318x790'),
preserve_default=False,
),
]
| [
0,
1,
2,
3,
4
] |
852 | a52743fc911beb7e51644073131b25c177d4ad29 | <mask token>
| import brainlit.algorithms.generate_fragments
from brainlit.algorithms.generate_fragments import *
| null | null | null | [
0,
1
] |
853 | 44a9bb4d74d2e694f252d8726647bca13baa4df5 | <mask token>
class BaseHandler(tornado.web.RequestHandler):
<mask token>
class CondaHandler(BaseHandler):
def get(self, filePath):
with open('packages/conda/' + filePath) as f:
data = json.load(f)
condaPackages = {}
packages = data['packages']
for package in packages:
name = packages[package]['name']
version = packages[package]['version']
try:
if version not in condaPackages[name]['versions']:
condaPackages[name]['versions'].append(version)
except:
condaPackages[name] = {'versions': [version]}
self.write(json.dumps(condaPackages))
class SubmitHandler(BaseHandler):
def post(self):
data = tornado.escape.json_decode(self.request.body)
print(data)
folderPath = str(data['id'])
utils.mkdir(folderPath)
self.write('testing')
<mask token>
| <mask token>
class BaseHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Access-Control-Allow-Headers', 'x-requested-with')
class CondaHandler(BaseHandler):
def get(self, filePath):
with open('packages/conda/' + filePath) as f:
data = json.load(f)
condaPackages = {}
packages = data['packages']
for package in packages:
name = packages[package]['name']
version = packages[package]['version']
try:
if version not in condaPackages[name]['versions']:
condaPackages[name]['versions'].append(version)
except:
condaPackages[name] = {'versions': [version]}
self.write(json.dumps(condaPackages))
class SubmitHandler(BaseHandler):
def post(self):
data = tornado.escape.json_decode(self.request.body)
print(data)
folderPath = str(data['id'])
utils.mkdir(folderPath)
self.write('testing')
def make_app():
return tornado.web.Application([('/packages/(.*)', CondaHandler), (
'/submit', SubmitHandler)])
<mask token>
| <mask token>
class BaseHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Access-Control-Allow-Headers', 'x-requested-with')
class CondaHandler(BaseHandler):
def get(self, filePath):
with open('packages/conda/' + filePath) as f:
data = json.load(f)
condaPackages = {}
packages = data['packages']
for package in packages:
name = packages[package]['name']
version = packages[package]['version']
try:
if version not in condaPackages[name]['versions']:
condaPackages[name]['versions'].append(version)
except:
condaPackages[name] = {'versions': [version]}
self.write(json.dumps(condaPackages))
class SubmitHandler(BaseHandler):
def post(self):
data = tornado.escape.json_decode(self.request.body)
print(data)
folderPath = str(data['id'])
utils.mkdir(folderPath)
self.write('testing')
def make_app():
return tornado.web.Application([('/packages/(.*)', CondaHandler), (
'/submit', SubmitHandler)])
if __name__ == '__main__':
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
| import tornado.ioloop
import tornado.web
import json
import utils
class BaseHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Access-Control-Allow-Headers', 'x-requested-with')
class CondaHandler(BaseHandler):
def get(self, filePath):
with open('packages/conda/' + filePath) as f:
data = json.load(f)
condaPackages = {}
packages = data['packages']
for package in packages:
name = packages[package]['name']
version = packages[package]['version']
try:
if version not in condaPackages[name]['versions']:
condaPackages[name]['versions'].append(version)
except:
condaPackages[name] = {'versions': [version]}
self.write(json.dumps(condaPackages))
class SubmitHandler(BaseHandler):
def post(self):
data = tornado.escape.json_decode(self.request.body)
print(data)
folderPath = str(data['id'])
utils.mkdir(folderPath)
self.write('testing')
def make_app():
return tornado.web.Application([('/packages/(.*)', CondaHandler), (
'/submit', SubmitHandler)])
if __name__ == '__main__':
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
| import tornado.ioloop
import tornado.web
import json
import utils
class BaseHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "x-requested-with")
class CondaHandler(BaseHandler):
def get(self, filePath):
with open("packages/conda/" + filePath) as f:
data = json.load(f)
condaPackages = {}
packages = data["packages"]
for package in packages:
name = packages[package]["name"]
version = packages[package]["version"]
try:
if version not in condaPackages[name]["versions"]:
condaPackages[name]["versions"].append(version)
except:
condaPackages[name] = {
"versions": [
version
]
}
self.write(json.dumps(condaPackages))
class SubmitHandler(BaseHandler):
def post(self):
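        # Decode the submitted JSON, create a working folder named after its id, and acknowledge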
data = tornado.escape.json_decode(self.request.body)
print(data)
folderPath = str(data['id'])
utils.mkdir(folderPath)
self.write('testing')
def make_app():
return tornado.web.Application([
(r"/packages/(.*)", CondaHandler),
(r"/submit", SubmitHandler)
])
if __name__ == "__main__":
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start() | [
5,
7,
8,
9,
10
] |
854 | ba8cb18544e4ded8b229bfb9cc4b28599119414f | <mask token>
| <mask token>
def gen_diffusion_flux_pyst_mpi_kernel_2d(real_t, mpi_construct,
ghost_exchange_communicator):
diffusion_flux_pyst_kernel = gen_diffusion_flux_pyst_kernel_2d(real_t=
real_t, reset_ghost_zone=False)
kernel_support = 1
gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support = kernel_support
check_valid_ghost_size_and_kernel_support(ghost_size=
ghost_exchange_communicator.ghost_size, kernel_support=
gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support)
y_next, x_next = mpi_construct.next_grid_along
y_previous, x_previous = mpi_construct.previous_grid_along
set_fixed_val_kernel_2d = gen_set_fixed_val_pyst_kernel_2d(real_t=real_t)
def diffusion_flux_pyst_mpi_kernel_2d(diffusion_flux, field, prefactor):
diffusion_flux_pyst_mpi_kernel_2d.kernel_support = (
gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support)
ghost_size = ghost_exchange_communicator.ghost_size
ghost_exchange_communicator.exchange_scalar_field_init(field)
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[ghost_size
:-ghost_size, ghost_size:-ghost_size], field=field[ghost_size:-
ghost_size, ghost_size:-ghost_size], prefactor=prefactor)
ghost_exchange_communicator.exchange_finalise()
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[ghost_size -
kernel_support:ghost_size + 2 * kernel_support, ghost_size:-
ghost_size], field=field[ghost_size - kernel_support:ghost_size +
2 * kernel_support, ghost_size:-ghost_size], prefactor=prefactor)
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[-(
ghost_size + 2 * kernel_support):field.shape[0] - (ghost_size -
kernel_support), ghost_size:-ghost_size], field=field[-(
ghost_size + 2 * kernel_support):field.shape[0] - (ghost_size -
kernel_support), ghost_size:-ghost_size], prefactor=prefactor)
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[:,
ghost_size - kernel_support:ghost_size + 2 * kernel_support],
field=field[:, ghost_size - kernel_support:ghost_size + 2 *
kernel_support], prefactor=prefactor)
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[:, -(
ghost_size + 2 * kernel_support):field.shape[1] - (ghost_size -
kernel_support)], field=field[:, -(ghost_size + 2 *
kernel_support):field.shape[1] - (ghost_size - kernel_support)],
prefactor=prefactor)
boundary_width = 1
if x_previous == MPI.PROC_NULL:
set_fixed_val_kernel_2d(field=diffusion_flux[:, :ghost_size +
boundary_width], fixed_val=0.0)
if x_next == MPI.PROC_NULL:
set_fixed_val_kernel_2d(field=diffusion_flux[:, -ghost_size -
boundary_width:], fixed_val=0.0)
if y_previous == MPI.PROC_NULL:
set_fixed_val_kernel_2d(field=diffusion_flux[:ghost_size +
boundary_width, :], fixed_val=0.0)
if y_next == MPI.PROC_NULL:
set_fixed_val_kernel_2d(field=diffusion_flux[-ghost_size -
boundary_width:, :], fixed_val=0.0)
return diffusion_flux_pyst_mpi_kernel_2d
| <mask token>
from sopht.numeric.eulerian_grid_ops.stencil_ops_2d import gen_diffusion_flux_pyst_kernel_2d, gen_set_fixed_val_pyst_kernel_2d
from sopht_mpi.utils.mpi_utils import check_valid_ghost_size_and_kernel_support
from mpi4py import MPI
def gen_diffusion_flux_pyst_mpi_kernel_2d(real_t, mpi_construct,
ghost_exchange_communicator):
diffusion_flux_pyst_kernel = gen_diffusion_flux_pyst_kernel_2d(real_t=
real_t, reset_ghost_zone=False)
kernel_support = 1
gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support = kernel_support
check_valid_ghost_size_and_kernel_support(ghost_size=
ghost_exchange_communicator.ghost_size, kernel_support=
gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support)
y_next, x_next = mpi_construct.next_grid_along
y_previous, x_previous = mpi_construct.previous_grid_along
set_fixed_val_kernel_2d = gen_set_fixed_val_pyst_kernel_2d(real_t=real_t)
def diffusion_flux_pyst_mpi_kernel_2d(diffusion_flux, field, prefactor):
diffusion_flux_pyst_mpi_kernel_2d.kernel_support = (
gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support)
ghost_size = ghost_exchange_communicator.ghost_size
ghost_exchange_communicator.exchange_scalar_field_init(field)
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[ghost_size
:-ghost_size, ghost_size:-ghost_size], field=field[ghost_size:-
ghost_size, ghost_size:-ghost_size], prefactor=prefactor)
ghost_exchange_communicator.exchange_finalise()
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[ghost_size -
kernel_support:ghost_size + 2 * kernel_support, ghost_size:-
ghost_size], field=field[ghost_size - kernel_support:ghost_size +
2 * kernel_support, ghost_size:-ghost_size], prefactor=prefactor)
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[-(
ghost_size + 2 * kernel_support):field.shape[0] - (ghost_size -
kernel_support), ghost_size:-ghost_size], field=field[-(
ghost_size + 2 * kernel_support):field.shape[0] - (ghost_size -
kernel_support), ghost_size:-ghost_size], prefactor=prefactor)
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[:,
ghost_size - kernel_support:ghost_size + 2 * kernel_support],
field=field[:, ghost_size - kernel_support:ghost_size + 2 *
kernel_support], prefactor=prefactor)
diffusion_flux_pyst_kernel(diffusion_flux=diffusion_flux[:, -(
ghost_size + 2 * kernel_support):field.shape[1] - (ghost_size -
kernel_support)], field=field[:, -(ghost_size + 2 *
kernel_support):field.shape[1] - (ghost_size - kernel_support)],
prefactor=prefactor)
boundary_width = 1
if x_previous == MPI.PROC_NULL:
set_fixed_val_kernel_2d(field=diffusion_flux[:, :ghost_size +
boundary_width], fixed_val=0.0)
if x_next == MPI.PROC_NULL:
set_fixed_val_kernel_2d(field=diffusion_flux[:, -ghost_size -
boundary_width:], fixed_val=0.0)
if y_previous == MPI.PROC_NULL:
set_fixed_val_kernel_2d(field=diffusion_flux[:ghost_size +
boundary_width, :], fixed_val=0.0)
if y_next == MPI.PROC_NULL:
set_fixed_val_kernel_2d(field=diffusion_flux[-ghost_size -
boundary_width:, :], fixed_val=0.0)
return diffusion_flux_pyst_mpi_kernel_2d
| """MPI-supported kernels for computing diffusion flux in 2D."""
from sopht.numeric.eulerian_grid_ops.stencil_ops_2d import (
gen_diffusion_flux_pyst_kernel_2d,
gen_set_fixed_val_pyst_kernel_2d,
)
from sopht_mpi.utils.mpi_utils import check_valid_ghost_size_and_kernel_support
from mpi4py import MPI
def gen_diffusion_flux_pyst_mpi_kernel_2d(
real_t, mpi_construct, ghost_exchange_communicator
):
    # Note: currently I'm generating these for arbitrary size arrays; we can optimise this
    # more by generating fixed size for the interior stencil and arbitrary size for
# boundary crunching
diffusion_flux_pyst_kernel = gen_diffusion_flux_pyst_kernel_2d(
real_t=real_t, reset_ghost_zone=False
)
kernel_support = 1
# define this here so that ghost size and kernel support is checked during
# generation phase itself
gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support = kernel_support
check_valid_ghost_size_and_kernel_support(
ghost_size=ghost_exchange_communicator.ghost_size,
kernel_support=gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support,
)
# for setting values at physical domain boundary
y_next, x_next = mpi_construct.next_grid_along
y_previous, x_previous = mpi_construct.previous_grid_along
set_fixed_val_kernel_2d = gen_set_fixed_val_pyst_kernel_2d(real_t=real_t)
def diffusion_flux_pyst_mpi_kernel_2d(
diffusion_flux,
field,
prefactor,
):
# define kernel support for kernel
diffusion_flux_pyst_mpi_kernel_2d.kernel_support = (
gen_diffusion_flux_pyst_mpi_kernel_2d.kernel_support
)
# define variable for use later
ghost_size = ghost_exchange_communicator.ghost_size
# begin ghost comm.
ghost_exchange_communicator.exchange_scalar_field_init(field)
# crunch interior stencil
diffusion_flux_pyst_kernel(
diffusion_flux=diffusion_flux[
ghost_size:-ghost_size, ghost_size:-ghost_size
],
field=field[ghost_size:-ghost_size, ghost_size:-ghost_size],
prefactor=prefactor,
)
# finalise ghost comm.
ghost_exchange_communicator.exchange_finalise()
# crunch boundary numbers
# NOTE: we pass in arrays of width 3 * kernel support size because the
# interior stencil computation leaves out a width of kernel_support.
# Since the support needed by the kernel is kernel_support on each side,
# we need to pass an array of width 3 * kernel_support, starting from
# index +/-(ghost_size - kernel_support) on the lower and upper end.
# Pystencils then automatically sets the kernel comp. bounds and
# crunches numbers in the kernel_support thickness zone at the boundary.
# Start of Y axis
diffusion_flux_pyst_kernel(
diffusion_flux=diffusion_flux[
ghost_size - kernel_support : ghost_size + 2 * kernel_support,
ghost_size:-ghost_size,
],
field=field[
ghost_size - kernel_support : ghost_size + 2 * kernel_support,
ghost_size:-ghost_size,
],
prefactor=prefactor,
)
# End of Y axis
diffusion_flux_pyst_kernel(
diffusion_flux=diffusion_flux[
-(ghost_size + 2 * kernel_support) : field.shape[0]
- (ghost_size - kernel_support),
ghost_size:-ghost_size,
],
field=field[
-(ghost_size + 2 * kernel_support) : field.shape[0]
- (ghost_size - kernel_support),
ghost_size:-ghost_size,
],
prefactor=prefactor,
)
# Start of X axis
diffusion_flux_pyst_kernel(
diffusion_flux=diffusion_flux[
:,
ghost_size - kernel_support : ghost_size + 2 * kernel_support,
],
field=field[
:,
ghost_size - kernel_support : ghost_size + 2 * kernel_support,
],
prefactor=prefactor,
)
# End of X axis
diffusion_flux_pyst_kernel(
diffusion_flux=diffusion_flux[
:,
-(ghost_size + 2 * kernel_support) : field.shape[1]
- (ghost_size - kernel_support),
],
field=field[
:,
-(ghost_size + 2 * kernel_support) : field.shape[1]
- (ghost_size - kernel_support),
],
prefactor=prefactor,
)
        # Set physical domain boundary diffusion flux to zero based on the neighboring block
boundary_width = 1
if x_previous == MPI.PROC_NULL:
set_fixed_val_kernel_2d(
field=diffusion_flux[:, : ghost_size + boundary_width],
fixed_val=0.0,
)
if x_next == MPI.PROC_NULL:
set_fixed_val_kernel_2d(
field=diffusion_flux[:, -ghost_size - boundary_width :],
fixed_val=0.0,
)
if y_previous == MPI.PROC_NULL:
set_fixed_val_kernel_2d(
field=diffusion_flux[: ghost_size + boundary_width, :],
fixed_val=0.0,
)
if y_next == MPI.PROC_NULL:
set_fixed_val_kernel_2d(
field=diffusion_flux[-ghost_size - boundary_width :, :],
fixed_val=0.0,
)
return diffusion_flux_pyst_mpi_kernel_2d
| null | [
0,
1,
2,
3
] |
855 | 7754974e79202b2df4ab9a7f69948483042a67cc | <mask token>
| <mask token>
S.sendemail("""
this is a test!
""")
| <mask token>
S = smtpsend.Smtpsent(SUBJECT='Test')
S.sendemail("""
this is a test!
""")
| import smtpsend
S = smtpsend.Smtpsent(SUBJECT='Test')
S.sendemail("""
this is a test!
""")
| #! /usr/bin/env python
import smtpsend
S = smtpsend.Smtpsent(SUBJECT='Test')
S.sendemail('''
this is a test!
''')
| [
0,
1,
2,
3,
4
] |
856 | 987d6c769a4f593405e889ed2b0e3f9955900406 | <mask token>
| <mask token>
if settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path('__debug__/', include(debug_toolbar.urls))
] + urlpatterns
| <mask token>
schema_view = get_swagger_view(title='API')
<mask token>
urlpatterns = [path('django-admin/', admin.site.urls), path('', schema_view
), path('auth/login/', auth_views.LoginView.as_view(template_name=
'auth/login.html')), path('auth/logout/', auth_views.LogoutView.as_view
()), path('api/auth/', include('apps.auth.urls')), path('api/polls/',
include('apps.polls.urls'))]
if settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path('__debug__/', include(debug_toolbar.urls))
] + urlpatterns
| from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from rest_framework_swagger.views import get_swagger_view
schema_view = get_swagger_view(title='API')
from django.contrib.auth import views as auth_views
urlpatterns = [path('django-admin/', admin.site.urls), path('', schema_view
), path('auth/login/', auth_views.LoginView.as_view(template_name=
'auth/login.html')), path('auth/logout/', auth_views.LogoutView.as_view
()), path('api/auth/', include('apps.auth.urls')), path('api/polls/',
include('apps.polls.urls'))]
if settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path('__debug__/', include(debug_toolbar.urls))
] + urlpatterns
| from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from rest_framework_swagger.views import get_swagger_view
schema_view = get_swagger_view(title='API')
from django.contrib.auth import views as auth_views
urlpatterns = [
path('django-admin/', admin.site.urls),
path('', schema_view),
path('auth/login/', auth_views.LoginView.as_view(template_name='auth/login.html')),
path('auth/logout/', auth_views.LogoutView.as_view()),
path('api/auth/', include('apps.auth.urls')),
path('api/polls/', include('apps.polls.urls')),
]
if settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
path('__debug__/', include(debug_toolbar.urls))
] + urlpatterns
| [
0,
1,
2,
3,
4
] |
857 | b808daf8d1fbe3cc585db57e1049a502d3ca46f5 | <mask token>
def formatName(name):
arr = re.split(' |-', name)
print(arr)
gweight = ''
gname = []
gnumber = ''
for word in arr:
if any(str.isdigit(c) for c in word):
for weight in weights:
pos = word.find(weight)
if pos != -1:
gweight = weight
gnumber = word[:pos]
break
else:
gnumber = word
elif any(word == weight for weight in weights):
gweight = word
elif word != '':
gname.append(word)
return gnumber, gweight, ' '.join(gname)
def cleanName(name):
return re.sub('[^a-z0-9]', '', name.lower())
def rmSpecialCharacters(df):
df['noSpace'] = df['noSpace'].apply(cleanName)
def rmDuplicate(df):
df.drop_duplicates(subset='noSpace', keep='first', inplace=True)
df.index = range(len(df.index))
def splitMedicine(df):
df_temp = df['name'].apply(formatName)
new_df = pd.DataFrame([[a, b, c] for a, b, c in df_temp.values],
columns=['number', 'weight', 'short name'])
return new_df
<mask token>
| <mask token>
warnings.filterwarnings('ignore')
<mask token>
def formatName(name):
arr = re.split(' |-', name)
print(arr)
gweight = ''
gname = []
gnumber = ''
for word in arr:
if any(str.isdigit(c) for c in word):
for weight in weights:
pos = word.find(weight)
if pos != -1:
gweight = weight
gnumber = word[:pos]
break
else:
gnumber = word
elif any(word == weight for weight in weights):
gweight = word
elif word != '':
gname.append(word)
return gnumber, gweight, ' '.join(gname)
def cleanName(name):
return re.sub('[^a-z0-9]', '', name.lower())
def rmSpecialCharacters(df):
df['noSpace'] = df['noSpace'].apply(cleanName)
def rmDuplicate(df):
df.drop_duplicates(subset='noSpace', keep='first', inplace=True)
df.index = range(len(df.index))
def splitMedicine(df):
df_temp = df['name'].apply(formatName)
new_df = pd.DataFrame([[a, b, c] for a, b, c in df_temp.values],
columns=['number', 'weight', 'short name'])
return new_df
<mask token>
df.drop_duplicates(subset='name', keep='first', inplace=True)
<mask token>
rmSpecialCharacters(nonTiengViet_df)
rmDuplicate(nonTiengViet_df)
<mask token>
print(formatName('10mg Dextrose in Water Parenteral Solution for ..'))
splitMedicine(nonTiengViet_df)
<mask token>
rmSpecialCharacters(nonTiengViet_df)
rmDuplicate(nonTiengViet_df)
print(nonTiengViet_df.describe)
print(nonTiengViet_df.tail(5))
nonTiengViet_df.to_json('PreProcessData.json')
| <mask token>
warnings.filterwarnings('ignore')
data_path = (
'/Users/trietnguyen/Documents/Thesis/Thesis-2020/References/Crawler/summaryDataJson.json'
)
weights = ['mg', 'ml', '%']
def formatName(name):
arr = re.split(' |-', name)
print(arr)
gweight = ''
gname = []
gnumber = ''
for word in arr:
if any(str.isdigit(c) for c in word):
for weight in weights:
pos = word.find(weight)
if pos != -1:
gweight = weight
gnumber = word[:pos]
break
else:
gnumber = word
elif any(word == weight for weight in weights):
gweight = word
elif word != '':
gname.append(word)
return gnumber, gweight, ' '.join(gname)
def cleanName(name):
return re.sub('[^a-z0-9]', '', name.lower())
def rmSpecialCharacters(df):
df['noSpace'] = df['noSpace'].apply(cleanName)
def rmDuplicate(df):
df.drop_duplicates(subset='noSpace', keep='first', inplace=True)
df.index = range(len(df.index))
def splitMedicine(df):
df_temp = df['name'].apply(formatName)
new_df = pd.DataFrame([[a, b, c] for a, b, c in df_temp.values],
columns=['number', 'weight', 'short name'])
return new_df
df = pd.read_json(data_path, orient='records')
df.drop_duplicates(subset='name', keep='first', inplace=True)
df.index = range(len(df.index))
nonTiengViet_df = df.loc[df['name'].str.contains('[^\\x00-\\x7F]+') == False]
nonTiengViet_df['noSpace'] = nonTiengViet_df.name
rm_character = ['-', '"', '/', ' ', ',', '.']
rmSpecialCharacters(nonTiengViet_df)
rmDuplicate(nonTiengViet_df)
nonTiengViet_df = nonTiengViet_df.sort_values(by=['noSpace'], ascending=True)
nonTiengViet_df.index = range(len(nonTiengViet_df.index))
print(formatName('10mg Dextrose in Water Parenteral Solution for ..'))
splitMedicine(nonTiengViet_df)
new_df = splitMedicine(nonTiengViet_df)
nonTiengViet_df['shortname'] = new_df['short name']
nonTiengViet_df['number'] = new_df['number']
nonTiengViet_df['weight'] = new_df['weight']
nonTiengViet_df['noSpace'] = nonTiengViet_df.shortname
rm_character = ['-', '"', '/', ' ', ',', '.']
rmSpecialCharacters(nonTiengViet_df)
rmDuplicate(nonTiengViet_df)
print(nonTiengViet_df.describe)
print(nonTiengViet_df.tail(5))
nonTiengViet_df.to_json('PreProcessData.json')
| import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
import warnings
import re
warnings.filterwarnings('ignore')
data_path = (
'/Users/trietnguyen/Documents/Thesis/Thesis-2020/References/Crawler/summaryDataJson.json'
)
weights = ['mg', 'ml', '%']
def formatName(name):
arr = re.split(' |-', name)
print(arr)
gweight = ''
gname = []
gnumber = ''
for word in arr:
if any(str.isdigit(c) for c in word):
for weight in weights:
pos = word.find(weight)
if pos != -1:
gweight = weight
gnumber = word[:pos]
break
else:
gnumber = word
elif any(word == weight for weight in weights):
gweight = word
elif word != '':
gname.append(word)
return gnumber, gweight, ' '.join(gname)
def cleanName(name):
return re.sub('[^a-z0-9]', '', name.lower())
def rmSpecialCharacters(df):
df['noSpace'] = df['noSpace'].apply(cleanName)
def rmDuplicate(df):
df.drop_duplicates(subset='noSpace', keep='first', inplace=True)
df.index = range(len(df.index))
def splitMedicine(df):
df_temp = df['name'].apply(formatName)
new_df = pd.DataFrame([[a, b, c] for a, b, c in df_temp.values],
columns=['number', 'weight', 'short name'])
return new_df
df = pd.read_json(data_path, orient='records')
df.drop_duplicates(subset='name', keep='first', inplace=True)
df.index = range(len(df.index))
nonTiengViet_df = df.loc[df['name'].str.contains('[^\\x00-\\x7F]+') == False]
nonTiengViet_df['noSpace'] = nonTiengViet_df.name
rm_character = ['-', '"', '/', ' ', ',', '.']
rmSpecialCharacters(nonTiengViet_df)
rmDuplicate(nonTiengViet_df)
nonTiengViet_df = nonTiengViet_df.sort_values(by=['noSpace'], ascending=True)
nonTiengViet_df.index = range(len(nonTiengViet_df.index))
print(formatName('10mg Dextrose in Water Parenteral Solution for ..'))
splitMedicine(nonTiengViet_df)
new_df = splitMedicine(nonTiengViet_df)
nonTiengViet_df['shortname'] = new_df['short name']
nonTiengViet_df['number'] = new_df['number']
nonTiengViet_df['weight'] = new_df['weight']
nonTiengViet_df['noSpace'] = nonTiengViet_df.shortname
rm_character = ['-', '"', '/', ' ', ',', '.']
rmSpecialCharacters(nonTiengViet_df)
rmDuplicate(nonTiengViet_df)
print(nonTiengViet_df.describe)
print(nonTiengViet_df.tail(5))
nonTiengViet_df.to_json('PreProcessData.json')
| import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
import warnings
import re
warnings.filterwarnings("ignore")
data_path = '/Users/trietnguyen/Documents/Thesis/Thesis-2020/References/Crawler/summaryDataJson.json'
weights = ['mg', 'ml', '%']
def formatName(name):
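    # Split a raw medicine name into (dosage number, weight unit, remaining name words)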
arr = re.split(' |-', name)
print(arr)
gweight = ''
gname = []
gnumber = ''
for word in arr:
        if any(str.isdigit(c) for c in word): # two cases: e.g. 200 and 200mg
for weight in weights:
pos = word.find(weight)
if pos != -1:
gweight = weight
gnumber = word[:pos]
break
else:
gnumber = word
elif any(word == weight for weight in weights):
gweight = word
elif word != '':
gname.append(word)
return (gnumber, gweight ,' '.join(gname))
def cleanName(name):
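    # Lowercase the name and strip every character that is not a-z or 0-9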
return re.sub(r'[^a-z0-9]', '', name.lower())
def rmSpecialCharacters(df):
df['noSpace'] = df['noSpace'].apply(cleanName)
def rmDuplicate(df):
df.drop_duplicates(subset ='noSpace',
keep = 'first', inplace = True)
df.index = range(len(df.index))
def splitMedicine(df):
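    # Apply formatName to every name and return a dataframe with number / weight / short name columns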
df_temp = df['name'].apply(formatName)
new_df = pd.DataFrame([[a, b, c] for a,b,c in df_temp.values], columns=['number', 'weight', 'short name'])
return new_df
#Read data
df = pd.read_json(data_path, orient='records')
df.drop_duplicates(subset ="name",
keep = 'first', inplace = True)
df.index = range(len(df.index))
# Remove medicines whose names contain Vietnamese (non-ASCII) characters
nonTiengViet_df = df.loc[df['name'].str.contains(r'[^\x00-\x7F]+') == False]
#print(nonTiengViet_df.head(10))
# Remove duplicates by stripping all whitespace from the medicine name; if the name and composition match, drop it
nonTiengViet_df['noSpace'] = nonTiengViet_df.name
rm_character = ['-', '\"', '/', ' ', ',', '.']
rmSpecialCharacters(nonTiengViet_df)
rmDuplicate(nonTiengViet_df)
# sort dataframe:
nonTiengViet_df = nonTiengViet_df.sort_values(by=['noSpace'], ascending=True)
nonTiengViet_df.index = range(len(nonTiengViet_df.index))
# Split the medicine name on [' ', '-']
# Find tokens that contain a number, e.g. 200, 200mg, 0.1mg/ml, 150 ...
#
print(formatName('10mg Dextrose in Water Parenteral Solution for ..'))
splitMedicine(nonTiengViet_df)
new_df = splitMedicine(nonTiengViet_df)
nonTiengViet_df['shortname'] = new_df['short name']
nonTiengViet_df['number'] = new_df['number']
nonTiengViet_df['weight'] = new_df['weight']
nonTiengViet_df['noSpace'] = nonTiengViet_df.shortname
rm_character = ['-', '\"', '/', ' ', ',', '.']
rmSpecialCharacters(nonTiengViet_df)
rmDuplicate(nonTiengViet_df)
print(nonTiengViet_df.describe)
print(nonTiengViet_df.tail(5))
nonTiengViet_df.to_json(r'PreProcessData.json')
| [
5,
6,
7,
8,
9
] |
858 | a649139a600cb506056a20e00089a07ec9244394 | <mask token>
class Config(object):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def _init_colors(self):
"""Initialize colors to their defaults."""
self.clr_primary = None
self.clr_secondary = 'green'
self.clr_tertiary = 'cyan'
self.clr_quaternary = 'yellow'
self.clr_bold = 'cyan'
self.clr_code = 'cyan'
self.clr_error = 'red'
self.clr_header = 'yellow'
self.clr_link = 'green'
self.clr_list = 'cyan'
self.clr_message = None
self.clr_num_comments = 'green'
self.clr_num_points = 'green'
self.clr_tag = 'cyan'
self.clr_time = 'yellow'
self.clr_title = None
self.clr_tooltip = None
self.clr_user = 'cyan'
self.clr_view_link = 'magenta'
self.clr_view_index = 'magenta'
<mask token>
def authenticate(self, enterprise=False, enterprise_auth=
enterprise_login, overwrite=False):
"""Log into GitHub.
Adapted from https://github.com/sigmavirus24/github-cli.
:type enterprise: bool
:param enterprise: Determines whether to configure GitHub Enterprise.
Default: False.
:type overwrite: bool
        :param overwrite: indicates whether we want to overwrite the current
set of credentials. Default: False.
"""
if self.api is not None and not overwrite:
return
config = self.get_github_config_path(self.CONFIG)
parser = configparser.RawConfigParser()
if os.path.isfile(config) and os.access(config, os.R_OK | os.W_OK
) and not overwrite:
with open(config) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
self.authenticate_cached_credentials(config, parser)
else:
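            # No usable cached credentials (or overwrite requested): prompt the user interactively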
login_kwargs = {'two_factor_callback': self.request_two_factor_code
}
if enterprise:
self.login = enterprise_auth
while not self.enterprise_url:
self.enterprise_url = input('Enterprise URL: ')
if click.confirm('Do you want to verify SSL certs?',
default=True):
self.verify_ssl = True
else:
self.verify_ssl = False
login_kwargs.update({'url': self.enterprise_url, 'verify':
self.verify_ssl})
while not self.user_login:
self.user_login = input('User Login: ')
login_kwargs.update({'username': self.user_login})
if click.confirm(
'Do you want to log in with a password [Y] or a personal access token [n]?'
, default=True):
user_pass = None
while not user_pass:
user_pass = self.getpass('Password: ')
login_kwargs.update({'password': user_pass})
try:
if not enterprise:
auth = self.authorize(self.user_login, user_pass,
scopes=['user', 'repo'], note='gitsome',
note_url=
'https://github.com/donnemartin/gitsome',
two_factor_callback=self.request_two_factor_code)
self.user_token = auth.token
else:
self.user_pass = user_pass
except (UnprocessableEntity, AuthenticationFailed):
click.secho('Error creating token.', fg=self.clr_error)
click.secho(
"""Visit the following page and verify you do not have an existing token named "gitsome":
https://github.com/settings/tokens
If a token already exists, update your ~/.gitsomeconfig file with your token:
user_token = TOKEN
You can also generate a new token."""
, fg=self.clr_message)
self.print_auth_error()
return
else:
while not self.user_token:
self.user_token = input('Token: ')
login_kwargs.update({'token': self.user_token})
self.api = self.login(**login_kwargs)
if self.user_feed:
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_FEED, self
.user_feed)
def check_auth(self):
"""Check if the current authorization is valid.
This method uses the ratelimit_remaining api to check whether
the currently authenticated user's credentials are valid without
deducting from the rate limit. The ratelimit_remaining api does not
seem to be available for GitHub Enterprise.
github3.py's method check_authorization seems to only work given
an authorization created by a registered application.
TODO: Determine a better way to check the authorization for
GitHub Enterprise.
:type enterprise: bool
:param enterprise: Determines whether we are authenticating with
GitHub Enterprise.
"""
if self.enterprise_url is not None:
return True
try:
if self.api is not None:
self.api.ratelimit_remaining
return True
else:
self.print_auth_error()
except AuthenticationFailed:
self.print_auth_error()
return False
def get_github_config_path(self, config_file_name):
"""Attempt to find the github config file.
Adapted from https://github.com/sigmavirus24/github-cli.
:type config_file_name: str
:param config_file_name: The config file name.
:rtype: str
:return: The github config file path.
"""
home = os.path.abspath(os.environ.get('HOME', ''))
config_file_path = os.path.join(home, config_file_name)
return config_file_path
<mask token>
def load_configs(self, config_funcs):
"""Load the specified config from ~/.gitsomeconfig.
        :type config_funcs: list
        :param config_funcs: The config methods to run.
"""
config_file_path = self.get_github_config_path(self.CONFIG)
parser = configparser.RawConfigParser()
try:
with open(config_file_path) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
for config_func in config_funcs:
config_func(parser)
except IOError:
return None
def load_config_colors(self, parser):
"""Load the color config from ~/.gitsomeconfig.
:type parser: :class:`ConfigParser.RawConfigParser`
:param parser: An instance of `ConfigParser.RawConfigParser`.
"""
self.load_colors(parser)
def load_colors(self, parser):
"""Load all colors from ~/.gitsomeconfig.
:type parser: :class:`ConfigParser.RawConfigParser`
:param parser: An instance of `ConfigParser.RawConfigParser`.
"""
self.clr_primary = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_PRIMARY, default=self.clr_primary, color_config=True)
self.clr_secondary = self.load_config(parser=parser, cfg_label=self
.CONFIG_CLR_SECONDARY, default=self.clr_secondary, color_config
=True)
self.clr_tertiary = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TERTIARY, default=self.clr_tertiary, color_config=True)
self.clr_quaternary = self.load_config(parser=parser, cfg_label=
self.CONFIG_CLR_QUATERNARY, default=self.clr_quaternary,
color_config=True)
self.clr_bold = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_BOLD, default=self.clr_bold, color_config=True)
self.clr_code = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_CODE, default=self.clr_code, color_config=True)
self.clr_code = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_ERROR, default=self.clr_code, color_config=True)
self.clr_header = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_HEADER, default=self.clr_header, color_config=True)
self.clr_link = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_LINK, default=self.clr_link, color_config=True)
self.clr_list = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_LIST, default=self.clr_list, color_config=True)
self.clr_message = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_MESSAGE, default=self.clr_message, color_config=True)
self.clr_num_comments = self.load_config(parser=parser, cfg_label=
self.CONFIG_CLR_NUM_COMMENTS, default=self.clr_num_comments,
color_config=True)
self.clr_num_points = self.load_config(parser=parser, cfg_label=
self.CONFIG_CLR_NUM_POINTS, default=self.clr_num_points,
color_config=True)
self.clr_tag = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TAG, default=self.clr_tag, color_config=True)
self.clr_time = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TIME, default=self.clr_time, color_config=True)
self.clr_title = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TITLE, default=self.clr_title, color_config=True)
self.clr_tooltip = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TOOLTIP, default=self.clr_tooltip, color_config=True)
self.clr_user = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_USER, default=self.clr_user, color_config=True)
self.clr_view_link = self.load_config(parser=parser, cfg_label=self
.CONFIG_CLR_VIEW_LINK, default=self.clr_view_link, color_config
=True)
self.clr_view_index = self.load_config(parser=parser, cfg_label=
self.CONFIG_CLR_VIEW_INDEX, default=self.clr_view_index,
color_config=True)
def load_urls(self, view_in_browser):
"""Load the current set of urls from ~/.gitsomeconfigurl.
:type view_in_browser: bool
:param view_in_browser: Determines whether to view the urls in a
browser.
:rtype: list
:return: Collection of urls.
"""
config = self.get_github_config_path(self.CONFIG_URL)
parser = configparser.RawConfigParser()
with open(config) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
urls = parser.get(self.CONFIG_URL_SECTION, self.CONFIG_URL_LIST)
urls = urls.strip()
excludes = ['[', ']', "'"]
for exclude in excludes:
urls = urls.replace(exclude, '')
if not view_in_browser:
urls = urls.replace('https://github.com/', '')
return urls.split(', ')
def print_auth_error(self):
"""Print a message the authorization has failed."""
click.secho('Authentication error.', fg=self.clr_error)
click.secho(
'Update your credentials in ~/.gitsomeconfig or run:\n gh configure'
, fg=self.clr_message)
<mask token>
def request_two_factor_code(self):
"""Request two factor authentication code.
Callback if two factor authentication is requested.
:rtype: str
:return: The user input two factor authentication code.
"""
code = ''
while not code:
code = input('Enter 2FA code: ')
return code
def save_config(self):
"""Saves the config to ~/.gitsomeconfig."""
if self.check_auth():
config = self.get_github_config_path(self.CONFIG)
parser = configparser.RawConfigParser()
parser.add_section(self.CONFIG_SECTION)
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_LOGIN, self.
user_login)
if self.user_token is not None:
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_TOKEN,
self.user_token)
if self.user_feed is not None:
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_FEED, self
.user_feed)
if self.enterprise_url is not None:
parser.set(self.CONFIG_SECTION, self.CONFIG_ENTERPRISE_URL,
self.enterprise_url)
if self.user_pass is not None:
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_PASS,
self.user_pass)
else:
parser.remove_option(self.CONFIG_SECTION, self.CONFIG_USER_PASS
)
parser.set(self.CONFIG_SECTION, self.CONFIG_VERIFY_SSL, self.
verify_ssl)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_PRIMARY, self.
clr_primary)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_SECONDARY, self
.clr_secondary)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TERTIARY, self.
clr_tertiary)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_QUATERNARY,
self.clr_quaternary)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_BOLD, self.clr_bold
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_CODE, self.clr_code
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_ERROR, self.
clr_error)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_HEADER, self.
clr_header)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_LINK, self.clr_link
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_LIST, self.clr_list
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_MESSAGE, self.
clr_message)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_NUM_COMMENTS,
self.clr_num_comments)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_NUM_POINTS,
self.clr_num_points)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TAG, self.clr_tag)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TIME, self.clr_time
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TITLE, self.
clr_title)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TOOLTIP, self.
clr_tooltip)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_USER, self.clr_user
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_VIEW_LINK, self
.clr_view_link)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_VIEW_INDEX,
self.clr_view_index)
with open(config, 'w+') as config_file:
parser.write(config_file)
def save_urls(self):
"""Save the current set of urls to ~/.gitsomeconfigurl."""
config = self.get_github_config_path(self.CONFIG_URL)
parser = configparser.RawConfigParser()
try:
parser.add_section(self.CONFIG_URL_SECTION)
except configparser.DuplicateSectionError:
pass
parser.set(self.CONFIG_URL_SECTION, self.CONFIG_URL_LIST, self.urls)
with open(config, 'w+') as config_file:
parser.write(config_file)
<mask token>
| <mask token>
class Config(object):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self):
self.api = None
self.user_login = None
self.user_pass = None
self.user_token = None
self.user_feed = None
self.enterprise_url = None
self.verify_ssl = True
self.urls = []
self._init_colors()
self.load_configs([self.load_config_colors])
self.login = login
self.authorize = authorize
self.getpass = getpass
def _init_colors(self):
"""Initialize colors to their defaults."""
self.clr_primary = None
self.clr_secondary = 'green'
self.clr_tertiary = 'cyan'
self.clr_quaternary = 'yellow'
self.clr_bold = 'cyan'
self.clr_code = 'cyan'
self.clr_error = 'red'
self.clr_header = 'yellow'
self.clr_link = 'green'
self.clr_list = 'cyan'
self.clr_message = None
self.clr_num_comments = 'green'
self.clr_num_points = 'green'
self.clr_tag = 'cyan'
self.clr_time = 'yellow'
self.clr_title = None
self.clr_tooltip = None
self.clr_user = 'cyan'
self.clr_view_link = 'magenta'
self.clr_view_index = 'magenta'
<mask token>
def authenticate(self, enterprise=False, enterprise_auth=
enterprise_login, overwrite=False):
"""Log into GitHub.
Adapted from https://github.com/sigmavirus24/github-cli.
:type enterprise: bool
:param enterprise: Determines whether to configure GitHub Enterprise.
Default: False.
:type overwrite: bool
        :param overwrite: indicates whether we want to overwrite the current
set of credentials. Default: False.
"""
if self.api is not None and not overwrite:
return
config = self.get_github_config_path(self.CONFIG)
parser = configparser.RawConfigParser()
if os.path.isfile(config) and os.access(config, os.R_OK | os.W_OK
) and not overwrite:
with open(config) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
self.authenticate_cached_credentials(config, parser)
else:
login_kwargs = {'two_factor_callback': self.request_two_factor_code
}
if enterprise:
self.login = enterprise_auth
while not self.enterprise_url:
self.enterprise_url = input('Enterprise URL: ')
if click.confirm('Do you want to verify SSL certs?',
default=True):
self.verify_ssl = True
else:
self.verify_ssl = False
login_kwargs.update({'url': self.enterprise_url, 'verify':
self.verify_ssl})
while not self.user_login:
self.user_login = input('User Login: ')
login_kwargs.update({'username': self.user_login})
if click.confirm(
'Do you want to log in with a password [Y] or a personal access token [n]?'
, default=True):
user_pass = None
while not user_pass:
user_pass = self.getpass('Password: ')
login_kwargs.update({'password': user_pass})
try:
if not enterprise:
auth = self.authorize(self.user_login, user_pass,
scopes=['user', 'repo'], note='gitsome',
note_url=
'https://github.com/donnemartin/gitsome',
two_factor_callback=self.request_two_factor_code)
self.user_token = auth.token
else:
self.user_pass = user_pass
except (UnprocessableEntity, AuthenticationFailed):
click.secho('Error creating token.', fg=self.clr_error)
click.secho(
"""Visit the following page and verify you do not have an existing token named "gitsome":
https://github.com/settings/tokens
If a token already exists, update your ~/.gitsomeconfig file with your token:
user_token = TOKEN
You can also generate a new token."""
, fg=self.clr_message)
self.print_auth_error()
return
else:
while not self.user_token:
self.user_token = input('Token: ')
login_kwargs.update({'token': self.user_token})
self.api = self.login(**login_kwargs)
if self.user_feed:
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_FEED, self
.user_feed)
def check_auth(self):
"""Check if the current authorization is valid.
This method uses the ratelimit_remaining api to check whether
the currently authenticated user's credentials are valid without
deducting from the rate limit. The ratelimit_remaining api does not
seem to be available for GitHub Enterprise.
github3.py's method check_authorization seems to only work given
an authorization created by a registered application.
TODO: Determine a better way to check the authorization for
GitHub Enterprise.
:type enterprise: bool
:param enterprise: Determines whether we are authenticating with
GitHub Enterprise.
"""
if self.enterprise_url is not None:
return True
try:
if self.api is not None:
self.api.ratelimit_remaining
return True
else:
self.print_auth_error()
except AuthenticationFailed:
self.print_auth_error()
return False
def get_github_config_path(self, config_file_name):
"""Attempt to find the github config file.
Adapted from https://github.com/sigmavirus24/github-cli.
:type config_file_name: str
:param config_file_name: The config file name.
:rtype: str
:return: The github config file path.
"""
home = os.path.abspath(os.environ.get('HOME', ''))
config_file_path = os.path.join(home, config_file_name)
return config_file_path
<mask token>
def load_configs(self, config_funcs):
"""Load the specified config from ~/.gitsomeconfig.
        :type config_funcs: list
        :param config_funcs: The config methods to run.
"""
config_file_path = self.get_github_config_path(self.CONFIG)
parser = configparser.RawConfigParser()
try:
with open(config_file_path) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
for config_func in config_funcs:
config_func(parser)
except IOError:
return None
def load_config_colors(self, parser):
"""Load the color config from ~/.gitsomeconfig.
:type parser: :class:`ConfigParser.RawConfigParser`
:param parser: An instance of `ConfigParser.RawConfigParser`.
"""
self.load_colors(parser)
def load_colors(self, parser):
"""Load all colors from ~/.gitsomeconfig.
:type parser: :class:`ConfigParser.RawConfigParser`
:param parser: An instance of `ConfigParser.RawConfigParser`.
"""
self.clr_primary = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_PRIMARY, default=self.clr_primary, color_config=True)
self.clr_secondary = self.load_config(parser=parser, cfg_label=self
.CONFIG_CLR_SECONDARY, default=self.clr_secondary, color_config
=True)
self.clr_tertiary = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TERTIARY, default=self.clr_tertiary, color_config=True)
self.clr_quaternary = self.load_config(parser=parser, cfg_label=
self.CONFIG_CLR_QUATERNARY, default=self.clr_quaternary,
color_config=True)
self.clr_bold = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_BOLD, default=self.clr_bold, color_config=True)
self.clr_code = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_CODE, default=self.clr_code, color_config=True)
self.clr_code = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_ERROR, default=self.clr_code, color_config=True)
self.clr_header = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_HEADER, default=self.clr_header, color_config=True)
self.clr_link = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_LINK, default=self.clr_link, color_config=True)
self.clr_list = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_LIST, default=self.clr_list, color_config=True)
self.clr_message = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_MESSAGE, default=self.clr_message, color_config=True)
self.clr_num_comments = self.load_config(parser=parser, cfg_label=
self.CONFIG_CLR_NUM_COMMENTS, default=self.clr_num_comments,
color_config=True)
self.clr_num_points = self.load_config(parser=parser, cfg_label=
self.CONFIG_CLR_NUM_POINTS, default=self.clr_num_points,
color_config=True)
self.clr_tag = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TAG, default=self.clr_tag, color_config=True)
self.clr_time = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TIME, default=self.clr_time, color_config=True)
self.clr_title = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TITLE, default=self.clr_title, color_config=True)
self.clr_tooltip = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TOOLTIP, default=self.clr_tooltip, color_config=True)
self.clr_user = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_USER, default=self.clr_user, color_config=True)
self.clr_view_link = self.load_config(parser=parser, cfg_label=self
.CONFIG_CLR_VIEW_LINK, default=self.clr_view_link, color_config
=True)
self.clr_view_index = self.load_config(parser=parser, cfg_label=
self.CONFIG_CLR_VIEW_INDEX, default=self.clr_view_index,
color_config=True)
def load_urls(self, view_in_browser):
"""Load the current set of urls from ~/.gitsomeconfigurl.
:type view_in_browser: bool
:param view_in_browser: Determines whether to view the urls in a
browser.
:rtype: list
:return: Collection of urls.
"""
config = self.get_github_config_path(self.CONFIG_URL)
parser = configparser.RawConfigParser()
with open(config) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
urls = parser.get(self.CONFIG_URL_SECTION, self.CONFIG_URL_LIST)
urls = urls.strip()
excludes = ['[', ']', "'"]
for exclude in excludes:
urls = urls.replace(exclude, '')
if not view_in_browser:
urls = urls.replace('https://github.com/', '')
return urls.split(', ')
def print_auth_error(self):
"""Print a message the authorization has failed."""
click.secho('Authentication error.', fg=self.clr_error)
click.secho(
'Update your credentials in ~/.gitsomeconfig or run:\n gh configure'
, fg=self.clr_message)
def prompt_news_feed(self):
"""Prompt the user to enter a news feed url."""
if click.confirm(
"""No feed url detected.
Calling gh events without an argument
displays the logged in user's news feed.
Do you want gitsome to track your news feed?"""
, default=True):
click.secho(
"""Visit the following url while logged into GitHub:
https://github.com
Enter the url found under "Subscribe to your news feed"."""
, fg=self.clr_message)
self.user_feed = ''
while not self.user_feed:
self.user_feed = input('URL: ')
def request_two_factor_code(self):
"""Request two factor authentication code.
Callback if two factor authentication is requested.
:rtype: str
:return: The user input two factor authentication code.
"""
code = ''
while not code:
code = input('Enter 2FA code: ')
return code
def save_config(self):
"""Saves the config to ~/.gitsomeconfig."""
if self.check_auth():
config = self.get_github_config_path(self.CONFIG)
parser = configparser.RawConfigParser()
parser.add_section(self.CONFIG_SECTION)
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_LOGIN, self.
user_login)
if self.user_token is not None:
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_TOKEN,
self.user_token)
if self.user_feed is not None:
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_FEED, self
.user_feed)
if self.enterprise_url is not None:
parser.set(self.CONFIG_SECTION, self.CONFIG_ENTERPRISE_URL,
self.enterprise_url)
if self.user_pass is not None:
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_PASS,
self.user_pass)
else:
parser.remove_option(self.CONFIG_SECTION, self.CONFIG_USER_PASS
)
parser.set(self.CONFIG_SECTION, self.CONFIG_VERIFY_SSL, self.
verify_ssl)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_PRIMARY, self.
clr_primary)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_SECONDARY, self
.clr_secondary)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TERTIARY, self.
clr_tertiary)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_QUATERNARY,
self.clr_quaternary)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_BOLD, self.clr_bold
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_CODE, self.clr_code
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_ERROR, self.
clr_error)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_HEADER, self.
clr_header)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_LINK, self.clr_link
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_LIST, self.clr_list
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_MESSAGE, self.
clr_message)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_NUM_COMMENTS,
self.clr_num_comments)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_NUM_POINTS,
self.clr_num_points)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TAG, self.clr_tag)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TIME, self.clr_time
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TITLE, self.
clr_title)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TOOLTIP, self.
clr_tooltip)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_USER, self.clr_user
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_VIEW_LINK, self
.clr_view_link)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_VIEW_INDEX,
self.clr_view_index)
with open(config, 'w+') as config_file:
parser.write(config_file)
def save_urls(self):
"""Save the current set of urls to ~/.gitsomeconfigurl."""
config = self.get_github_config_path(self.CONFIG_URL)
parser = configparser.RawConfigParser()
try:
parser.add_section(self.CONFIG_URL_SECTION)
except configparser.DuplicateSectionError:
pass
parser.set(self.CONFIG_URL_SECTION, self.CONFIG_URL_LIST, self.urls)
with open(config, 'w+') as config_file:
parser.write(config_file)
<mask token>
| <mask token>
class Config(object):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self):
self.api = None
self.user_login = None
self.user_pass = None
self.user_token = None
self.user_feed = None
self.enterprise_url = None
self.verify_ssl = True
self.urls = []
self._init_colors()
self.load_configs([self.load_config_colors])
self.login = login
self.authorize = authorize
self.getpass = getpass
def _init_colors(self):
"""Initialize colors to their defaults."""
self.clr_primary = None
self.clr_secondary = 'green'
self.clr_tertiary = 'cyan'
self.clr_quaternary = 'yellow'
self.clr_bold = 'cyan'
self.clr_code = 'cyan'
self.clr_error = 'red'
self.clr_header = 'yellow'
self.clr_link = 'green'
self.clr_list = 'cyan'
self.clr_message = None
self.clr_num_comments = 'green'
self.clr_num_points = 'green'
self.clr_tag = 'cyan'
self.clr_time = 'yellow'
self.clr_title = None
self.clr_tooltip = None
self.clr_user = 'cyan'
self.clr_view_link = 'magenta'
self.clr_view_index = 'magenta'
<mask token>
def authenticate(self, enterprise=False, enterprise_auth=
enterprise_login, overwrite=False):
"""Log into GitHub.
Adapted from https://github.com/sigmavirus24/github-cli.
:type enterprise: bool
:param enterprise: Determines whether to configure GitHub Enterprise.
Default: False.
:type overwrite: bool
        :param overwrite: indicates whether we want to overwrite the current
set of credentials. Default: False.
"""
if self.api is not None and not overwrite:
return
config = self.get_github_config_path(self.CONFIG)
parser = configparser.RawConfigParser()
if os.path.isfile(config) and os.access(config, os.R_OK | os.W_OK
) and not overwrite:
with open(config) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
self.authenticate_cached_credentials(config, parser)
else:
login_kwargs = {'two_factor_callback': self.request_two_factor_code
}
if enterprise:
self.login = enterprise_auth
while not self.enterprise_url:
self.enterprise_url = input('Enterprise URL: ')
if click.confirm('Do you want to verify SSL certs?',
default=True):
self.verify_ssl = True
else:
self.verify_ssl = False
login_kwargs.update({'url': self.enterprise_url, 'verify':
self.verify_ssl})
while not self.user_login:
self.user_login = input('User Login: ')
login_kwargs.update({'username': self.user_login})
if click.confirm(
'Do you want to log in with a password [Y] or a personal access token [n]?'
, default=True):
user_pass = None
while not user_pass:
user_pass = self.getpass('Password: ')
login_kwargs.update({'password': user_pass})
try:
if not enterprise:
auth = self.authorize(self.user_login, user_pass,
scopes=['user', 'repo'], note='gitsome',
note_url=
'https://github.com/donnemartin/gitsome',
two_factor_callback=self.request_two_factor_code)
self.user_token = auth.token
else:
self.user_pass = user_pass
except (UnprocessableEntity, AuthenticationFailed):
click.secho('Error creating token.', fg=self.clr_error)
click.secho(
"""Visit the following page and verify you do not have an existing token named "gitsome":
https://github.com/settings/tokens
If a token already exists, update your ~/.gitsomeconfig file with your token:
user_token = TOKEN
You can also generate a new token."""
, fg=self.clr_message)
self.print_auth_error()
return
else:
while not self.user_token:
self.user_token = input('Token: ')
login_kwargs.update({'token': self.user_token})
self.api = self.login(**login_kwargs)
if self.user_feed:
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_FEED, self
.user_feed)
def check_auth(self):
"""Check if the current authorization is valid.
This method uses the ratelimit_remaining api to check whether
the currently authenticated user's credentials are valid without
deducting from the rate limit. The ratelimit_remaining api does not
seem to be available for GitHub Enterprise.
github3.py's method check_authorization seems to only work given
an authorization created by a registered application.
TODO: Determine a better way to check the authorization for
GitHub Enterprise.
:type enterprise: bool
:param enterprise: Determines whether we are authenticating with
GitHub Enterprise.
"""
if self.enterprise_url is not None:
return True
try:
if self.api is not None:
self.api.ratelimit_remaining
return True
else:
self.print_auth_error()
except AuthenticationFailed:
self.print_auth_error()
return False
def get_github_config_path(self, config_file_name):
"""Attempt to find the github config file.
Adapted from https://github.com/sigmavirus24/github-cli.
:type config_file_name: str
:param config_file_name: The config file name.
:rtype: str
:return: The github config file path.
"""
home = os.path.abspath(os.environ.get('HOME', ''))
config_file_path = os.path.join(home, config_file_name)
return config_file_path
<mask token>
def load_configs(self, config_funcs):
"""Load the specified config from ~/.gitsomeconfig.
        :type config_funcs: list
        :param config_funcs: The config methods to run.
"""
config_file_path = self.get_github_config_path(self.CONFIG)
parser = configparser.RawConfigParser()
try:
with open(config_file_path) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
for config_func in config_funcs:
config_func(parser)
except IOError:
return None
def load_config_colors(self, parser):
"""Load the color config from ~/.gitsomeconfig.
:type parser: :class:`ConfigParser.RawConfigParser`
:param parser: An instance of `ConfigParser.RawConfigParser`.
"""
self.load_colors(parser)
def load_colors(self, parser):
"""Load all colors from ~/.gitsomeconfig.
:type parser: :class:`ConfigParser.RawConfigParser`
:param parser: An instance of `ConfigParser.RawConfigParser`.
"""
self.clr_primary = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_PRIMARY, default=self.clr_primary, color_config=True)
self.clr_secondary = self.load_config(parser=parser, cfg_label=self
.CONFIG_CLR_SECONDARY, default=self.clr_secondary, color_config
=True)
self.clr_tertiary = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TERTIARY, default=self.clr_tertiary, color_config=True)
self.clr_quaternary = self.load_config(parser=parser, cfg_label=
self.CONFIG_CLR_QUATERNARY, default=self.clr_quaternary,
color_config=True)
self.clr_bold = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_BOLD, default=self.clr_bold, color_config=True)
self.clr_code = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_CODE, default=self.clr_code, color_config=True)
self.clr_code = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_ERROR, default=self.clr_code, color_config=True)
self.clr_header = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_HEADER, default=self.clr_header, color_config=True)
self.clr_link = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_LINK, default=self.clr_link, color_config=True)
self.clr_list = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_LIST, default=self.clr_list, color_config=True)
self.clr_message = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_MESSAGE, default=self.clr_message, color_config=True)
self.clr_num_comments = self.load_config(parser=parser, cfg_label=
self.CONFIG_CLR_NUM_COMMENTS, default=self.clr_num_comments,
color_config=True)
self.clr_num_points = self.load_config(parser=parser, cfg_label=
self.CONFIG_CLR_NUM_POINTS, default=self.clr_num_points,
color_config=True)
self.clr_tag = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TAG, default=self.clr_tag, color_config=True)
self.clr_time = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TIME, default=self.clr_time, color_config=True)
self.clr_title = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TITLE, default=self.clr_title, color_config=True)
self.clr_tooltip = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TOOLTIP, default=self.clr_tooltip, color_config=True)
self.clr_user = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_USER, default=self.clr_user, color_config=True)
self.clr_view_link = self.load_config(parser=parser, cfg_label=self
.CONFIG_CLR_VIEW_LINK, default=self.clr_view_link, color_config
=True)
self.clr_view_index = self.load_config(parser=parser, cfg_label=
self.CONFIG_CLR_VIEW_INDEX, default=self.clr_view_index,
color_config=True)
def load_urls(self, view_in_browser):
"""Load the current set of urls from ~/.gitsomeconfigurl.
:type view_in_browser: bool
:param view_in_browser: Determines whether to view the urls in a
browser.
:rtype: list
:return: Collection of urls.
"""
config = self.get_github_config_path(self.CONFIG_URL)
parser = configparser.RawConfigParser()
with open(config) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
urls = parser.get(self.CONFIG_URL_SECTION, self.CONFIG_URL_LIST)
urls = urls.strip()
excludes = ['[', ']', "'"]
for exclude in excludes:
urls = urls.replace(exclude, '')
if not view_in_browser:
urls = urls.replace('https://github.com/', '')
return urls.split(', ')
def print_auth_error(self):
"""Print a message the authorization has failed."""
click.secho('Authentication error.', fg=self.clr_error)
click.secho(
'Update your credentials in ~/.gitsomeconfig or run:\n gh configure'
, fg=self.clr_message)
def prompt_news_feed(self):
"""Prompt the user to enter a news feed url."""
if click.confirm(
"""No feed url detected.
Calling gh events without an argument
displays the logged in user's news feed.
Do you want gitsome to track your news feed?"""
, default=True):
click.secho(
"""Visit the following url while logged into GitHub:
https://github.com
Enter the url found under "Subscribe to your news feed"."""
, fg=self.clr_message)
self.user_feed = ''
while not self.user_feed:
self.user_feed = input('URL: ')
def request_two_factor_code(self):
"""Request two factor authentication code.
Callback if two factor authentication is requested.
:rtype: str
:return: The user input two factor authentication code.
"""
code = ''
while not code:
code = input('Enter 2FA code: ')
return code
def save_config(self):
"""Saves the config to ~/.gitsomeconfig."""
if self.check_auth():
config = self.get_github_config_path(self.CONFIG)
parser = configparser.RawConfigParser()
parser.add_section(self.CONFIG_SECTION)
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_LOGIN, self.
user_login)
if self.user_token is not None:
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_TOKEN,
self.user_token)
if self.user_feed is not None:
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_FEED, self
.user_feed)
if self.enterprise_url is not None:
parser.set(self.CONFIG_SECTION, self.CONFIG_ENTERPRISE_URL,
self.enterprise_url)
if self.user_pass is not None:
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_PASS,
self.user_pass)
else:
parser.remove_option(self.CONFIG_SECTION, self.CONFIG_USER_PASS
)
parser.set(self.CONFIG_SECTION, self.CONFIG_VERIFY_SSL, self.
verify_ssl)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_PRIMARY, self.
clr_primary)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_SECONDARY, self
.clr_secondary)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TERTIARY, self.
clr_tertiary)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_QUATERNARY,
self.clr_quaternary)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_BOLD, self.clr_bold
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_CODE, self.clr_code
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_ERROR, self.
clr_error)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_HEADER, self.
clr_header)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_LINK, self.clr_link
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_LIST, self.clr_list
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_MESSAGE, self.
clr_message)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_NUM_COMMENTS,
self.clr_num_comments)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_NUM_POINTS,
self.clr_num_points)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TAG, self.clr_tag)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TIME, self.clr_time
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TITLE, self.
clr_title)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TOOLTIP, self.
clr_tooltip)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_USER, self.clr_user
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_VIEW_LINK, self
.clr_view_link)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_VIEW_INDEX,
self.clr_view_index)
with open(config, 'w+') as config_file:
parser.write(config_file)
def save_urls(self):
"""Save the current set of urls to ~/.gitsomeconfigurl."""
config = self.get_github_config_path(self.CONFIG_URL)
parser = configparser.RawConfigParser()
try:
parser.add_section(self.CONFIG_URL_SECTION)
except configparser.DuplicateSectionError:
pass
parser.set(self.CONFIG_URL_SECTION, self.CONFIG_URL_LIST, self.urls)
with open(config, 'w+') as config_file:
parser.write(config_file)
def show_bash_completions_info(self):
"""Show info on how to enable bash completions"""
click.secho(
"""By default, gitsome looks at the following locations to enable bash completions:
https://github.com/donnemartin/gitsome/blob/master/xonsh/environ.py#L123-L130
If bash completions are not working for you, check out the following link:
https://github.com/donnemartin/gitsome#enabling-bash-completions"""
, fg=self.clr_message)
| <mask token>
class Config(object):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self):
self.api = None
self.user_login = None
self.user_pass = None
self.user_token = None
self.user_feed = None
self.enterprise_url = None
self.verify_ssl = True
self.urls = []
self._init_colors()
self.load_configs([self.load_config_colors])
self.login = login
self.authorize = authorize
self.getpass = getpass
def _init_colors(self):
"""Initialize colors to their defaults."""
self.clr_primary = None
self.clr_secondary = 'green'
self.clr_tertiary = 'cyan'
self.clr_quaternary = 'yellow'
self.clr_bold = 'cyan'
self.clr_code = 'cyan'
self.clr_error = 'red'
self.clr_header = 'yellow'
self.clr_link = 'green'
self.clr_list = 'cyan'
self.clr_message = None
self.clr_num_comments = 'green'
self.clr_num_points = 'green'
self.clr_tag = 'cyan'
self.clr_time = 'yellow'
self.clr_title = None
self.clr_tooltip = None
self.clr_user = 'cyan'
self.clr_view_link = 'magenta'
self.clr_view_index = 'magenta'
def authenticate_cached_credentials(self, config, parser,
enterprise_auth=enterprise_login):
"""Authenticate with the user's credentials in ~/.gitsomeconfig.
:type config: str
:param config: The config path.
:type parser: :class:`ConfigParser.RawConfigParser`
:param parser: An instance of `ConfigParser.RawConfigParser`.
"""
with open(config) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
self.user_login = self.load_config(parser=parser, cfg_label=
self.CONFIG_USER_LOGIN)
self.user_pass = self.load_config(parser=parser, cfg_label=self
.CONFIG_USER_PASS)
self.user_token = self.load_config(parser=parser, cfg_label=
self.CONFIG_USER_TOKEN)
self.enterprise_url = self.load_config(parser=parser, cfg_label
=self.CONFIG_ENTERPRISE_URL)
self.verify_ssl = self.load_config(parser=parser, cfg_label=
self.CONFIG_VERIFY_SSL, boolean_config=True)
self.user_feed = self.load_config(parser=parser, cfg_label=self
.CONFIG_USER_FEED)
if not self.verify_ssl:
requests.packages.urllib3.disable_warnings(
InsecureRequestWarning)
login_kwargs = {'username': self.user_login,
'two_factor_callback': self.request_two_factor_code}
if self.enterprise_url is not None:
self.login = enterprise_auth
login_kwargs.update({'url': self.enterprise_url, 'verify':
self.verify_ssl})
if self.user_token is not None:
login_kwargs.update({'token': self.user_token})
elif self.user_pass is not None:
login_kwargs.update({'password': self.user_pass})
else:
self.print_auth_error()
return
else:
login_kwargs.update({'token': self.user_token})
self.api = self.login(**login_kwargs)
def authenticate(self, enterprise=False, enterprise_auth=
enterprise_login, overwrite=False):
"""Log into GitHub.
Adapted from https://github.com/sigmavirus24/github-cli.
:type enterprise: bool
:param enterprise: Determines whether to configure GitHub Enterprise.
Default: False.
:type overwrite: bool
:param overwrite: Indicates whether we want to overwrite the current
set of credentials. Default: False.
"""
if self.api is not None and not overwrite:
return
config = self.get_github_config_path(self.CONFIG)
parser = configparser.RawConfigParser()
if os.path.isfile(config) and os.access(config, os.R_OK | os.W_OK
) and not overwrite:
with open(config) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
self.authenticate_cached_credentials(config, parser)
else:
login_kwargs = {'two_factor_callback': self.request_two_factor_code
}
if enterprise:
self.login = enterprise_auth
while not self.enterprise_url:
self.enterprise_url = input('Enterprise URL: ')
if click.confirm('Do you want to verify SSL certs?',
default=True):
self.verify_ssl = True
else:
self.verify_ssl = False
login_kwargs.update({'url': self.enterprise_url, 'verify':
self.verify_ssl})
while not self.user_login:
self.user_login = input('User Login: ')
login_kwargs.update({'username': self.user_login})
if click.confirm(
'Do you want to log in with a password [Y] or a personal access token [n]?'
, default=True):
user_pass = None
while not user_pass:
user_pass = self.getpass('Password: ')
login_kwargs.update({'password': user_pass})
try:
if not enterprise:
auth = self.authorize(self.user_login, user_pass,
scopes=['user', 'repo'], note='gitsome',
note_url=
'https://github.com/donnemartin/gitsome',
two_factor_callback=self.request_two_factor_code)
self.user_token = auth.token
else:
self.user_pass = user_pass
except (UnprocessableEntity, AuthenticationFailed):
click.secho('Error creating token.', fg=self.clr_error)
click.secho(
"""Visit the following page and verify you do not have an existing token named "gitsome":
https://github.com/settings/tokens
If a token already exists, update your ~/.gitsomeconfig file with your token:
user_token = TOKEN
You can also generate a new token."""
, fg=self.clr_message)
self.print_auth_error()
return
else:
while not self.user_token:
self.user_token = input('Token: ')
login_kwargs.update({'token': self.user_token})
self.api = self.login(**login_kwargs)
if self.user_feed:
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_FEED, self
.user_feed)
def check_auth(self):
"""Check if the current authorization is valid.
This method uses the ratelimit_remaining api to check whether
the currently authenticated user's credentials are valid without
deducting from the rate limit. The ratelimit_remaining api does not
seem to be available for GitHub Enterprise.
github3.py's method check_authorization seems to only work given
an authorization created by a registered application.
TODO: Determine a better way to check the authorization for
GitHub Enterprise.
:type enterprise: bool
:param enterprise: Determines whether we are authenticating with
GitHub Enterprise.
"""
if self.enterprise_url is not None:
return True
try:
if self.api is not None:
self.api.ratelimit_remaining
return True
else:
self.print_auth_error()
except AuthenticationFailed:
self.print_auth_error()
return False
def get_github_config_path(self, config_file_name):
"""Attempt to find the github config file.
Adapted from https://github.com/sigmavirus24/github-cli.
:type config_file_name: str
:param config_file_name: The config file name.
:rtype: str
:return: The github config file path.
"""
home = os.path.abspath(os.environ.get('HOME', ''))
config_file_path = os.path.join(home, config_file_name)
return config_file_path
def load_config(self, parser, cfg_label, default=None, color_config=
False, boolean_config=False):
"""Load the specified config from ~/.gitsomeconfig.
:type parser: :class:`ConfigParser.RawConfigParser`
:param parser: An instance of `ConfigParser.RawConfigParser`.
:type cfg_label: str
:param cfg_label: The config label to load.
:type default: str
:param default: The default color if no color config exists.
Default: None.
:type color_config: bool
:param color_config: Determines whether this is a color config.
Default: False.
:type boolean_config: bool
:param boolean_config: Determines whether to load a boolean config.
Default: False.
"""
try:
if boolean_config:
cfg = parser.getboolean(self.CONFIG_SECTION, cfg_label)
else:
cfg = parser.get(self.CONFIG_SECTION, cfg_label)
if color_config:
if cfg == 'none':
cfg = None
click.style('', fg=cfg)
except (TypeError, configparser.NoOptionError):
return default
return cfg
def load_configs(self, config_funcs):
"""Load the specified config from ~/.gitsomeconfig.
:type config_funcs: list
:param config_funcs: The config methods to run.
"""
config_file_path = self.get_github_config_path(self.CONFIG)
parser = configparser.RawConfigParser()
try:
with open(config_file_path) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
for config_func in config_funcs:
config_func(parser)
except IOError:
return None
def load_config_colors(self, parser):
"""Load the color config from ~/.gitsomeconfig.
:type parser: :class:`ConfigParser.RawConfigParser`
:param parser: An instance of `ConfigParser.RawConfigParser`.
"""
self.load_colors(parser)
def load_colors(self, parser):
"""Load all colors from ~/.gitsomeconfig.
:type parser: :class:`ConfigParser.RawConfigParser`
:param parser: An instance of `ConfigParser.RawConfigParser`.
"""
self.clr_primary = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_PRIMARY, default=self.clr_primary, color_config=True)
self.clr_secondary = self.load_config(parser=parser, cfg_label=self
.CONFIG_CLR_SECONDARY, default=self.clr_secondary, color_config
=True)
self.clr_tertiary = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TERTIARY, default=self.clr_tertiary, color_config=True)
self.clr_quaternary = self.load_config(parser=parser, cfg_label=
self.CONFIG_CLR_QUATERNARY, default=self.clr_quaternary,
color_config=True)
self.clr_bold = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_BOLD, default=self.clr_bold, color_config=True)
self.clr_code = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_CODE, default=self.clr_code, color_config=True)
self.clr_code = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_ERROR, default=self.clr_code, color_config=True)
self.clr_header = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_HEADER, default=self.clr_header, color_config=True)
self.clr_link = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_LINK, default=self.clr_link, color_config=True)
self.clr_list = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_LIST, default=self.clr_list, color_config=True)
self.clr_message = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_MESSAGE, default=self.clr_message, color_config=True)
self.clr_num_comments = self.load_config(parser=parser, cfg_label=
self.CONFIG_CLR_NUM_COMMENTS, default=self.clr_num_comments,
color_config=True)
self.clr_num_points = self.load_config(parser=parser, cfg_label=
self.CONFIG_CLR_NUM_POINTS, default=self.clr_num_points,
color_config=True)
self.clr_tag = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TAG, default=self.clr_tag, color_config=True)
self.clr_time = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TIME, default=self.clr_time, color_config=True)
self.clr_title = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TITLE, default=self.clr_title, color_config=True)
self.clr_tooltip = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_TOOLTIP, default=self.clr_tooltip, color_config=True)
self.clr_user = self.load_config(parser=parser, cfg_label=self.
CONFIG_CLR_USER, default=self.clr_user, color_config=True)
self.clr_view_link = self.load_config(parser=parser, cfg_label=self
.CONFIG_CLR_VIEW_LINK, default=self.clr_view_link, color_config
=True)
self.clr_view_index = self.load_config(parser=parser, cfg_label=
self.CONFIG_CLR_VIEW_INDEX, default=self.clr_view_index,
color_config=True)
def load_urls(self, view_in_browser):
"""Load the current set of urls from ~/.gitsomeconfigurl.
:type view_in_browser: bool
:param view_in_browser: Determines whether to view the urls in a
browser.
:rtype: list
:return: Collection of urls.
"""
config = self.get_github_config_path(self.CONFIG_URL)
parser = configparser.RawConfigParser()
with open(config) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
urls = parser.get(self.CONFIG_URL_SECTION, self.CONFIG_URL_LIST)
urls = urls.strip()
excludes = ['[', ']', "'"]
for exclude in excludes:
urls = urls.replace(exclude, '')
if not view_in_browser:
urls = urls.replace('https://github.com/', '')
return urls.split(', ')
def print_auth_error(self):
"""Print a message the authorization has failed."""
click.secho('Authentication error.', fg=self.clr_error)
click.secho(
'Update your credentials in ~/.gitsomeconfig or run:\n gh configure'
, fg=self.clr_message)
def prompt_news_feed(self):
"""Prompt the user to enter a news feed url."""
if click.confirm(
"""No feed url detected.
Calling gh events without an argument
displays the logged in user's news feed.
Do you want gitsome to track your news feed?"""
, default=True):
click.secho(
"""Visit the following url while logged into GitHub:
https://github.com
Enter the url found under "Subscribe to your news feed"."""
, fg=self.clr_message)
self.user_feed = ''
while not self.user_feed:
self.user_feed = input('URL: ')
def request_two_factor_code(self):
"""Request two factor authentication code.
Callback if two factor authentication is requested.
:rtype: str
:return: The user input two factor authentication code.
"""
code = ''
while not code:
code = input('Enter 2FA code: ')
return code
def save_config(self):
"""Saves the config to ~/.gitsomeconfig."""
if self.check_auth():
config = self.get_github_config_path(self.CONFIG)
parser = configparser.RawConfigParser()
parser.add_section(self.CONFIG_SECTION)
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_LOGIN, self.
user_login)
if self.user_token is not None:
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_TOKEN,
self.user_token)
if self.user_feed is not None:
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_FEED, self
.user_feed)
if self.enterprise_url is not None:
parser.set(self.CONFIG_SECTION, self.CONFIG_ENTERPRISE_URL,
self.enterprise_url)
if self.user_pass is not None:
parser.set(self.CONFIG_SECTION, self.CONFIG_USER_PASS,
self.user_pass)
else:
parser.remove_option(self.CONFIG_SECTION, self.CONFIG_USER_PASS
)
parser.set(self.CONFIG_SECTION, self.CONFIG_VERIFY_SSL, self.
verify_ssl)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_PRIMARY, self.
clr_primary)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_SECONDARY, self
.clr_secondary)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TERTIARY, self.
clr_tertiary)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_QUATERNARY,
self.clr_quaternary)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_BOLD, self.clr_bold
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_CODE, self.clr_code
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_ERROR, self.
clr_error)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_HEADER, self.
clr_header)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_LINK, self.clr_link
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_LIST, self.clr_list
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_MESSAGE, self.
clr_message)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_NUM_COMMENTS,
self.clr_num_comments)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_NUM_POINTS,
self.clr_num_points)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TAG, self.clr_tag)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TIME, self.clr_time
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TITLE, self.
clr_title)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_TOOLTIP, self.
clr_tooltip)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_USER, self.clr_user
)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_VIEW_LINK, self
.clr_view_link)
parser.set(self.CONFIG_SECTION, self.CONFIG_CLR_VIEW_INDEX,
self.clr_view_index)
with open(config, 'w+') as config_file:
parser.write(config_file)
def save_urls(self):
"""Save the current set of urls to ~/.gitsomeconfigurl."""
config = self.get_github_config_path(self.CONFIG_URL)
parser = configparser.RawConfigParser()
try:
parser.add_section(self.CONFIG_URL_SECTION)
except configparser.DuplicateSectionError:
pass
parser.set(self.CONFIG_URL_SECTION, self.CONFIG_URL_LIST, self.urls)
with open(config, 'w+') as config_file:
parser.write(config_file)
def show_bash_completions_info(self):
"""Show info on how to enable bash completions"""
click.secho(
"""By default, gitsome looks at the following locations to enable bash completions:
https://github.com/donnemartin/gitsome/blob/master/xonsh/environ.py#L123-L130
If bash completions are not working for you, check out the following link:
https://github.com/donnemartin/gitsome#enabling-bash-completions"""
, fg=self.clr_message)
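Taken together, authenticate, check_auth and save_config above imply a small driver flow; a hedged sketch of how a caller might wire them up (the driver script itself is illustrative, not part of gitsome):
# Illustrative driver; Config is the class defined above.
config = Config()
config.authenticate()     # reads cached credentials from ~/.gitsomeconfig or prompts the user
if config.check_auth():
    config.save_config()  # persists login, token and color settings back to ~/.gitsomeconfig
else:
    print('GitHub authentication failed.')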
| # -*- coding: utf-8 -*-
# Copyright 2015 Donne Martin. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import unicode_literals
from __future__ import print_function
import click
from getpass import getpass
import os
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from .compat import configparser
from .lib.github3 import authorize, enterprise_login, login
from .lib.github3.exceptions import AuthenticationFailed, UnprocessableEntity
class Config(object):
"""Gitsome config.
:type api: :class:`github3.github.Github`
:param api: An instance of github3.github.Github.
:type clr_x: str
:param clr_x: Various ansi color config colors to use for highlights.
:type CONFIG: str
:param CONFIG: The config file name.
:type CONFIG_SECTION: str
:param CONFIG_SECTION: The main config file section label.
:type CONFIG_CLR_X: str
:param CONFIG_CLR_X: Various ansi color config labels to use for highlights.
:type CONFIG_ENTERPRISE_URL: str
:param CONFIG_ENTERPRISE_URL: The GitHub Enterprise url.
:type CONFIG_USER_LOGIN: str
:param CONFIG_USER_LOGIN: The user login.
:type CONFIG_USER_PASS: str
:param CONFIG_USER_PASS: The user password.
:type CONFIG_USER_TOKEN: str
:param CONFIG_USER_TOKEN: The user token.
:type CONFIG_USER_FEED: str
:param CONFIG_USER_FEED: The user feed config. This is the feed on
https://github.com/ when logged in and requires the basic auth model,
which doesn't work when logging in with tokens or 2FA. This config
listed the pre-signed url to access the feed.
:type CONFIG_URL: str
:param CONFIG_URL: The config file name that contains urls used in the
`gh view` command.
:type CONFIG_URL_SECTION: str
:param CONFIG_URL_SECTION: The config file section that contains urls used
in the `gh view [url_index]` command.
:type CONFIG_URL_LIST: str
:param CONFIG_URL_LIST: The config containing a list of the last set of
urls the user has seen, which allows the user to quickly access a repo
url with the `gh view [url_index]` command.
:type CONFIG_VERIFY_SSL: str
:param CONFIG_VERIFY_SSL: Determines whether to verify SSL certs.
:type enterprise_url: str
:param enterprise_url: The GitHub Enterprise url.
:type urls: list
:param urls: The last set of urls the user has seen, which allows the user
to quickly access a repo url with the gh view [url_index] command.
:type user_login: str
:param user_login: The user's login in ~/.gitsomeconfig.
:type user_pass: str
:param user_pass: The user's pass in ~/.gitsomeconfig.
This is only stored for GitHub Enterprise users since using only a
personal access token does not seem to be supported.
:type user_token: str
:param user_token: The user's token in ~/.gitsomeconfig.
:type verify_ssl: bool
:param verify_ssl: Determines whether to verify SSL certs.
"""
CONFIG = '.gitsomeconfig'
CONFIG_CLR_PRIMARY = 'clr_primary'
CONFIG_CLR_SECONDARY = 'clr_secondary'
CONFIG_CLR_TERTIARY = 'clr_tertiary'
CONFIG_CLR_QUATERNARY = 'clr_quaternary'
CONFIG_CLR_BOLD = 'clr_bold'
CONFIG_CLR_CODE = 'clr_code'
CONFIG_CLR_ERROR = 'clr_error'
CONFIG_CLR_HEADER = 'clr_header'
CONFIG_CLR_LINK = 'clr_link'
CONFIG_CLR_LIST = 'clr_list'
CONFIG_CLR_MESSAGE = 'clr_message'
CONFIG_CLR_NUM_COMMENTS = 'clr_num_comments'
CONFIG_CLR_NUM_POINTS = 'clr_num_points'
CONFIG_CLR_TAG = 'clr_tag'
CONFIG_CLR_TIME = 'clr_time'
CONFIG_CLR_TITLE = 'clr_title'
CONFIG_CLR_TOOLTIP = 'clr_tooltip'
CONFIG_CLR_USER = 'clr_user'
CONFIG_CLR_VIEW_LINK = 'clr_view_link'
CONFIG_CLR_VIEW_INDEX = 'clr_view_index'
CONFIG_SECTION = 'github'
CONFIG_USER_LOGIN = 'user_login'
CONFIG_USER_PASS = 'user_pass'
CONFIG_USER_TOKEN = 'user_token'
CONFIG_USER_FEED = 'user_feed'
CONFIG_ENTERPRISE_URL = 'enterprise_url'
CONFIG_VERIFY_SSL = 'verify_ssl'
CONFIG_URL = '.gitsomeconfigurl'
CONFIG_URL_SECTION = 'url'
CONFIG_URL_LIST = 'url_list'
CONFIG_AVATAR = '.gitsomeconfigavatar.png'
def __init__(self):
self.api = None
self.user_login = None
self.user_pass = None
self.user_token = None
self.user_feed = None
self.enterprise_url = None
self.verify_ssl = True
self.urls = []
self._init_colors()
self.load_configs([
self.load_config_colors,
])
self.login = login
self.authorize = authorize
self.getpass = getpass
def _init_colors(self):
"""Initialize colors to their defaults."""
self.clr_primary = None
self.clr_secondary = 'green'
self.clr_tertiary = 'cyan'
self.clr_quaternary = 'yellow'
self.clr_bold = 'cyan'
self.clr_code = 'cyan'
self.clr_error = 'red'
self.clr_header = 'yellow'
self.clr_link = 'green'
self.clr_list = 'cyan'
self.clr_message = None
self.clr_num_comments = 'green'
self.clr_num_points = 'green'
self.clr_tag = 'cyan'
self.clr_time = 'yellow'
self.clr_title = None
self.clr_tooltip = None
self.clr_user = 'cyan'
self.clr_view_link = 'magenta'
self.clr_view_index = 'magenta'
def authenticate_cached_credentials(self, config, parser,
enterprise_auth=enterprise_login):
"""Authenticate with the user's credentials in ~/.gitsomeconfig.
:type config: str
:param config: The config path.
:type parser: :class:`ConfigParser.RawConfigParser`
:param parser: An instance of `ConfigParser.RawConfigParser`.
"""
with open(config) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
self.user_login = self.load_config(
parser=parser,
cfg_label=self.CONFIG_USER_LOGIN)
self.user_pass = self.load_config(
parser=parser,
cfg_label=self.CONFIG_USER_PASS)
self.user_token = self.load_config(
parser=parser,
cfg_label=self.CONFIG_USER_TOKEN)
self.enterprise_url = self.load_config(
parser=parser,
cfg_label=self.CONFIG_ENTERPRISE_URL)
self.verify_ssl = self.load_config(
parser=parser,
cfg_label=self.CONFIG_VERIFY_SSL,
boolean_config=True)
self.user_feed = self.load_config(
parser=parser,
cfg_label=self.CONFIG_USER_FEED)
if not self.verify_ssl:
# The user has chosen not to verify SSL certs.
# Disable warnings related to this option.
requests.packages.urllib3.disable_warnings(
InsecureRequestWarning)
login_kwargs = {
'username': self.user_login,
'two_factor_callback': self.request_two_factor_code,
}
if self.enterprise_url is not None:
self.login = enterprise_auth
login_kwargs.update({
'url': self.enterprise_url,
'verify': self.verify_ssl,
})
if self.user_token is not None:
login_kwargs.update({'token': self.user_token})
elif self.user_pass is not None:
login_kwargs.update({'password': self.user_pass})
else:
self.print_auth_error()
return
else:
login_kwargs.update({'token': self.user_token})
self.api = self.login(**login_kwargs)
def authenticate(self, enterprise=False,
enterprise_auth=enterprise_login, overwrite=False):
"""Log into GitHub.
Adapted from https://github.com/sigmavirus24/github-cli.
:type enterprise: bool
:param enterprise: Determines whether to configure GitHub Enterprise.
Default: False.
:type overwrite: bool
:param overwrite: Indicates whether we want to overwrite the current
set of credentials. Default: False.
"""
if self.api is not None and not overwrite:
return
# Get the full path to the configuration file.
config = self.get_github_config_path(self.CONFIG)
parser = configparser.RawConfigParser()
# Check to make sure the file exists and we are allowed to read it.
# Skip if we want to overwrite the auth settings.
if os.path.isfile(config) and os.access(config, os.R_OK | os.W_OK) and \
not overwrite:
with open(config) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
self.authenticate_cached_credentials(config, parser)
else:
# The file didn't exist or we don't have the correct permissions.
login_kwargs = {
'two_factor_callback': self.request_two_factor_code,
}
if enterprise:
self.login = enterprise_auth
while not self.enterprise_url:
self.enterprise_url = input('Enterprise URL: ')
if click.confirm('Do you want to verify SSL certs?',
default=True):
self.verify_ssl = True
else:
self.verify_ssl = False
login_kwargs.update({
'url': self.enterprise_url,
'verify': self.verify_ssl,
})
while not self.user_login:
self.user_login = input('User Login: ')
login_kwargs.update({'username': self.user_login})
if click.confirm(('Do you want to log in with a password [Y] or '
'a personal access token [n]?'),
default=True):
user_pass = None
while not user_pass:
user_pass = self.getpass('Password: ')
login_kwargs.update({'password': user_pass})
try:
if not enterprise:
# Trade the user password for a personal access token.
# This does not seem to be available for Enterprise.
auth = self.authorize(
self.user_login,
user_pass,
scopes=['user', 'repo'],
note='gitsome',
note_url='https://github.com/donnemartin/gitsome',
two_factor_callback=self.request_two_factor_code
)
self.user_token = auth.token
else:
self.user_pass = user_pass
except (UnprocessableEntity, AuthenticationFailed):
click.secho('Error creating token.',
fg=self.clr_error)
click.secho(('Visit the following page and verify you do '
'not have an existing token named "gitsome":\n'
' https://github.com/settings/tokens\n'
'If a token already exists, update your '
'~/.gitsomeconfig file with your token:\n'
' user_token = TOKEN\n'
'You can also generate a new token.'),
fg=self.clr_message)
self.print_auth_error()
return
else:
# The user has chosen to authenticate with a token.
while not self.user_token:
self.user_token = input('Token: ')
login_kwargs.update({'token': self.user_token})
self.api = self.login(**login_kwargs)
if self.user_feed:
parser.set(self.CONFIG_SECTION,
self.CONFIG_USER_FEED,
self.user_feed)
def check_auth(self):
"""Check if the current authorization is valid.
This method uses the ratelimit_remaining api to check whether
the currently authenticated user's credentials are valid without
deducting from the rate limit. The ratelimit_remaining api does not
seem to be available for GitHub Enterprise.
github3.py's method check_authorization seems to only work given
an authorization created by a registered application.
TODO: Determine a better way to check the authorization for
GitHub Enterprise.
:type enterprise: bool
:param enterprise: Determines whether we are authenticating with
GitHub Enterprise.
"""
if self.enterprise_url is not None:
return True
try:
if self.api is not None:
# Throws AuthenticationFailed if invalid credentials but
# does not deduct from the rate limit.
self.api.ratelimit_remaining
return True
else:
self.print_auth_error()
except AuthenticationFailed:
self.print_auth_error()
return False
def get_github_config_path(self, config_file_name):
"""Attempt to find the github config file.
Adapted from https://github.com/sigmavirus24/github-cli.
:type config_file_name: str
:param config_file_name: The config file name.
:rtype: str
:return: The github config file path.
"""
home = os.path.abspath(os.environ.get('HOME', ''))
config_file_path = os.path.join(home, config_file_name)
return config_file_path
def load_config(self, parser, cfg_label, default=None,
color_config=False, boolean_config=False):
"""Load the specified config from ~/.gitsomeconfig.
:type parser: :class:`ConfigParser.RawConfigParser`
:param parser: An instance of `ConfigParser.RawConfigParser`.
:type cfg_label: str
:param cfg_label: The config label to load.
:type default: str
:param default: The default color if no color config exists.
Default: None.
:type color_config: bool
:param color_config: Determines whether this is a color config.
Default: False.
:type boolean_config: bool
:param boolean_config: Determines whether to load a boolean config.
Default: False.
"""
try:
if boolean_config:
cfg = parser.getboolean(self.CONFIG_SECTION, cfg_label)
else:
cfg = parser.get(self.CONFIG_SECTION, cfg_label)
if color_config:
if cfg == 'none':
cfg = None
# Check if the user input a valid color.
# If invalid, this will throw a TypeError
click.style('', fg=cfg)
except (TypeError, configparser.NoOptionError):
return default
return cfg
def load_configs(self, config_funcs):
"""Load the specified config from ~/.gitsomeconfig.
:type config_funcs: list
:param config_funcs: The config methods to run.
"""
config_file_path = self.get_github_config_path(self.CONFIG)
parser = configparser.RawConfigParser()
try:
with open(config_file_path) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
for config_func in config_funcs:
config_func(parser)
except IOError:
# There might not be a cache yet, just silently return.
return None
def load_config_colors(self, parser):
"""Load the color config from ~/.gitsomeconfig.
:type parser: :class:`ConfigParser.RawConfigParser`
:param parser: An instance of `ConfigParser.RawConfigParser`.
"""
self.load_colors(parser)
def load_colors(self, parser):
"""Load all colors from ~/.gitsomeconfig.
:type parser: :class:`ConfigParser.RawConfigParser`
:param parser: An instance of `ConfigParser.RawConfigParser`.
"""
self.clr_primary = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_PRIMARY,
default=self.clr_primary,
color_config=True)
self.clr_secondary = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_SECONDARY,
default=self.clr_secondary,
color_config=True)
self.clr_tertiary = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_TERTIARY,
default=self.clr_tertiary,
color_config=True)
self.clr_quaternary = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_QUATERNARY,
default=self.clr_quaternary,
color_config=True)
self.clr_bold = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_BOLD,
default=self.clr_bold,
color_config=True)
self.clr_code = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_CODE,
default=self.clr_code,
color_config=True)
self.clr_code = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_ERROR,
default=self.clr_code,
color_config=True)
self.clr_header = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_HEADER,
default=self.clr_header,
color_config=True)
self.clr_link = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_LINK,
default=self.clr_link,
color_config=True)
self.clr_list = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_LIST,
default=self.clr_list,
color_config=True)
self.clr_message = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_MESSAGE,
default=self.clr_message,
color_config=True)
self.clr_num_comments = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_NUM_COMMENTS,
default=self.clr_num_comments,
color_config=True)
self.clr_num_points = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_NUM_POINTS,
default=self.clr_num_points,
color_config=True)
self.clr_tag = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_TAG,
default=self.clr_tag,
color_config=True)
self.clr_time = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_TIME,
default=self.clr_time,
color_config=True)
self.clr_title = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_TITLE,
default=self.clr_title,
color_config=True)
self.clr_tooltip = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_TOOLTIP,
default=self.clr_tooltip,
color_config=True)
self.clr_user = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_USER,
default=self.clr_user,
color_config=True)
self.clr_view_link = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_VIEW_LINK,
default=self.clr_view_link,
color_config=True)
self.clr_view_index = self.load_config(
parser=parser,
cfg_label=self.CONFIG_CLR_VIEW_INDEX,
default=self.clr_view_index,
color_config=True)
def load_urls(self, view_in_browser):
"""Load the current set of urls from ~/.gitsomeconfigurl.
:type view_in_browser: bool
:param view_in_browser: Determines whether to view the urls in a
browser.
:rtype: list
:return: Collection of urls.
"""
config = self.get_github_config_path(self.CONFIG_URL)
parser = configparser.RawConfigParser()
with open(config) as config_file:
try:
parser.read_file(config_file)
except AttributeError:
parser.readfp(config_file)
urls = parser.get(self.CONFIG_URL_SECTION,
self.CONFIG_URL_LIST)
urls = urls.strip()
excludes = ['[', ']', "'"]
for exclude in excludes:
urls = urls.replace(exclude, '')
if not view_in_browser:
urls = urls.replace('https://github.com/', '')
return urls.split(', ')
def print_auth_error(self):
"""Print a message the authorization has failed."""
click.secho('Authentication error.', fg=self.clr_error)
click.secho(('Update your credentials in ~/.gitsomeconfig '
'or run:\n gh configure'),
fg=self.clr_message)
def prompt_news_feed(self):
"""Prompt the user to enter a news feed url."""
if click.confirm(('No feed url detected.\n Calling gh events without '
"an argument\n displays the logged in user's "
'news feed.\nDo you want gitsome to track your '
'news feed?'),
default=True):
click.secho(('Visit the following url while logged into GitHub:\n'
' https://github.com\n'
'Enter the url found under "Subscribe to your '
'news feed".'),
fg=self.clr_message)
self.user_feed = ''
while not self.user_feed:
self.user_feed = input('URL: ')
def request_two_factor_code(self):
"""Request two factor authentication code.
Callback if two factor authentication is requested.
:rtype: str
:return: The user input two factor authentication code.
"""
code = ''
while not code:
code = input('Enter 2FA code: ')
return code
def save_config(self):
"""Saves the config to ~/.gitsomeconfig."""
if self.check_auth():
config = self.get_github_config_path(self.CONFIG)
parser = configparser.RawConfigParser()
parser.add_section(self.CONFIG_SECTION)
parser.set(self.CONFIG_SECTION,
self.CONFIG_USER_LOGIN,
self.user_login)
if self.user_token is not None:
parser.set(self.CONFIG_SECTION,
self.CONFIG_USER_TOKEN,
self.user_token)
if self.user_feed is not None:
parser.set(self.CONFIG_SECTION,
self.CONFIG_USER_FEED,
self.user_feed)
if self.enterprise_url is not None:
parser.set(self.CONFIG_SECTION,
self.CONFIG_ENTERPRISE_URL,
self.enterprise_url)
if self.user_pass is not None:
parser.set(self.CONFIG_SECTION,
self.CONFIG_USER_PASS,
self.user_pass)
else:
parser.remove_option(self.CONFIG_SECTION,
self.CONFIG_USER_PASS)
parser.set(self.CONFIG_SECTION,
self.CONFIG_VERIFY_SSL,
self.verify_ssl)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_PRIMARY,
self.clr_primary)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_SECONDARY,
self.clr_secondary)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_TERTIARY,
self.clr_tertiary)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_QUATERNARY,
self.clr_quaternary)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_BOLD,
self.clr_bold)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_CODE,
self.clr_code)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_ERROR,
self.clr_error)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_HEADER,
self.clr_header)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_LINK,
self.clr_link)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_LIST,
self.clr_list)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_MESSAGE,
self.clr_message)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_NUM_COMMENTS,
self.clr_num_comments)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_NUM_POINTS,
self.clr_num_points)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_TAG,
self.clr_tag)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_TIME,
self.clr_time)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_TITLE,
self.clr_title)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_TOOLTIP,
self.clr_tooltip)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_USER,
self.clr_user)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_VIEW_LINK,
self.clr_view_link)
parser.set(self.CONFIG_SECTION,
self.CONFIG_CLR_VIEW_INDEX,
self.clr_view_index)
with open(config, 'w+') as config_file:
parser.write(config_file)
def save_urls(self):
"""Save the current set of urls to ~/.gitsomeconfigurl."""
config = self.get_github_config_path(self.CONFIG_URL)
parser = configparser.RawConfigParser()
try:
parser.add_section(self.CONFIG_URL_SECTION)
except configparser.DuplicateSectionError:
pass
parser.set(self.CONFIG_URL_SECTION, self.CONFIG_URL_LIST, self.urls)
with open(config, 'w+') as config_file:
parser.write(config_file)
def show_bash_completions_info(self):
"""Show info on how to enable bash completions"""
click.secho(('By default, gitsome looks at the following locations '
'to enable bash completions:\n'
' https://github.com/donnemartin/gitsome/blob/master/xonsh/environ.py#L123-L130\n' # NOQA
'If bash completions are not working for you, check out '
'the following link:\n'
' https://github.com/donnemartin/gitsome#enabling-bash-completions'), # NOQA
fg=self.clr_message)
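Note how save_urls and load_urls fit together: the stored value ends up as the Python list's string form, so load_urls strips the brackets and quotes back out before splitting on ', '. A small sketch of that round-trip (paths and URLs illustrative; the list is stringified explicitly here so the sketch runs on Python 3's configparser):
import configparser
urls = ['https://github.com/donnemartin/gitsome', 'https://github.com/donnemartin/saws']
writer = configparser.RawConfigParser()
writer.add_section('url')
writer.set('url', 'url_list', str(urls))   # stored as "['https://...', 'https://...']"
with open('/tmp/gitsomeconfigurl-example', 'w') as config_file:
    writer.write(config_file)
reader = configparser.RawConfigParser()
with open('/tmp/gitsomeconfigurl-example') as config_file:
    reader.read_file(config_file)
raw = reader.get('url', 'url_list').strip()
for exclude in ['[', ']', "'"]:            # same cleanup load_urls performs
    raw = raw.replace(exclude, '')
print(raw.split(', '))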
| [
13,
15,
16,
18,
22
] |
859 | 5488b32970a0b734334835457c712768a756de7f | from datetime import datetime
import requests as req
import smtplib
import mysql.connector
#mysql constant
MYSQL_HOST='den1.mysql6.gear.host'
MYSQL_USER='winlabiot'
MYSQL_PW='winlabiot+123'
MYSQL_DB="winlabiot"
Coffee_mailing_list_table='coffee_mailing_list'
#keys in dict receive via socket
TIME='time'
AMBIENT_TEMP='ambient_temp'
OBJECT_TEMP='object_temp'
#preset values for derivative
TIME_INTERVAL=300
CURR_TIME_INTERVAL=0
MAX_DATA_POINTS=100
#openhab port and host
IP_ADDR='localhost'
PORT=8080
CURR_DERIVATIVE_URL='http://{ip}:{port}/rest/items/DataAnalyzer_CurrentDerivative'.format(port=PORT, ip=IP_ADDR)
DERIVATIVE_THRESHOLD_URL='http://{ip}:{port}/rest/items/DataAnalyzer_DerivativeThreshold'.format(port=PORT, ip=IP_ADDR)
CURR_TIME_INTERVAL_URL='http://{ip}:{port}/rest/items/DataAnalyzer_CurrentTimeInterval'.format(port=PORT, ip=IP_ADDR)
#constant to decide whether it is noise or not
#avoid keep sending email when derivative always > threshold
Making_Coffee=False
Not_Making_Coffee_Count=0
#gmail access
USER='[email protected]'
PASSWORD='winlabiot123'
#email info
FROM ='[email protected]'
TO=[]
CONTENT='Coffee will be served soon!'
def update_To_email_addr():
#global cursor
global TO
#connect to GearHost mysql database
GearHostMySQL = mysql.connector.connect(
host=MYSQL_HOST,
user=MYSQL_USER,
passwd=MYSQL_PW,
database=MYSQL_DB
)
cursor = GearHostMySQL.cursor()
cursor.execute("SELECT email FROM coffee_mailing_list;")
TO=cursor.fetchall()
cursor.close()
GearHostMySQL.close()
def send_email(user, password, from_addr, to_addr, content):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
response=server.login(user,password)
print(str(datetime.now())+' Server Response: '+str(response))
for address in to_addr:
server.sendmail(from_addr,address,content)
print(str(datetime.now())+' Email Sent to '+str(address))
class analyzer:
#data type is AMBIENT_TEMP or OBJECT_TEMP
#derivative_threshold is degree/sec
def __init__(self,data_type,derivative_threshold, max_data_points=100,time_interval=300):
#data is array of dict data points
self.data=[]
#start index is the earliest data point
self.start_index=0
self.derivative=0
self.time_interval=time_interval
self.curr_time_interval=0
self.key=data_type
self.max_data_points=max_data_points
self.derivative_threshold=derivative_threshold
def process(self,newPoint, url,update_monitor=True, email=True):
global Making_Coffee
global Not_Making_Coffee_Count
self.add_data_point(newPoint)
self.update_derivative()
#update monitor
if update_monitor:
reponse=req.post(CURR_DERIVATIVE_URL, data=str(self.derivative))
reponse=req.post(DERIVATIVE_THRESHOLD_URL, data=str(self.derivative_threshold))
reponse=req.post(CURR_TIME_INTERVAL_URL, data=str(self.curr_time_interval))
#rv is whether making coffee regardless of noise
rv=False
if(self.derivative>self.derivative_threshold):
if update_monitor:
reponse=req.post(url, data='Making Coffee')
if(Making_Coffee==False and Not_Making_Coffee_Count>10 and email):
#update target email info
update_To_email_addr()
send_email(USER,PASSWORD,FROM,TO,CONTENT)
rv=True
#update constant
Making_Coffee=True
Not_Making_Coffee_Count=0
else:
if update_monitor:
reponse=req.post(url, data='Not Ready')
#update constant
Making_Coffee=False
Not_Making_Coffee_Count+=1
rv= False
return rv
#data --> dict
def add_data_point(self,newPoint):
newPoint[TIME]=self.str2datetime(newPoint[TIME])
self.data.append(newPoint)
self.curr_time_interval=(self.data[len(self.data)-1][TIME]-self.data[self.start_index][TIME]).total_seconds()
#clear expired date if max data points is reached
if(len(self.data)>self.max_data_points):
del self.data[0:self.start_index]
self.start_index=0
'''
if (len(self.data)==5):
#replace expired data point
self.data[self.start_index]=newPoint
#update start index
if self.start_index==4:
self.start_index=0
else:
self.start_index+=1
else:
self.data.append(newPoint)
'''
def str2datetime(self, datetime_string):
return datetime.strptime(datetime_string, '%Y-%m-%d %H:%M:%S.%f')
def update_derivative(self):
if self.curr_time_interval<self.time_interval:
return
else:
self.derivative=1.0*(self.data[len(self.data)-1][self.key]-self.data[self.start_index][self.key])/self.curr_time_interval
#update start_index
self.start_index+=1
#update curr_time_interval
self.curr_time_interval=(self.data[len(self.data)-1][TIME]-self.data[self.start_index][TIME]).total_seconds()
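A hedged sketch of driving the analyzer class above with synthetic readings; the threshold, temperatures and timestamps are invented, and the monitor/email side effects are switched off so nothing external is contacted:
from datetime import datetime, timedelta
# Feed rising object temperatures (about 2 degrees per minute) and watch the derivative estimate.
a = analyzer(OBJECT_TEMP, derivative_threshold=0.01, time_interval=300)
start = datetime.now().replace(microsecond=1)  # keep a '.%f' part so str() matches str2datetime
for i in range(20):
    point = {TIME: str(start + timedelta(seconds=60 * i)), OBJECT_TEMP: 20.0 + 2.0 * i}
    a.process(point, url=None, update_monitor=False, email=False)
    print(i, round(a.derivative, 4))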
| null | null | null | null | [
0
] |
860 | b039ed74e62f3a74e8506d4e14a3422499046c06 | <mask token>
def plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type=
'pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100,
show_colorbar=True):
"""2D plot of depth vs some other variable, stretching first 500m of depth.
Parameters
----------
depth : xarray DataArray or numpy array
depth variable
x : xarray DataArray or numpy array
variable for x-axis. Likely to be time, latitude, or longitude
fld : xarray DataArray or numpy array
2D field with depth + 1 dim
stretch_depth : scalar (int or float), optional
stretch top depth to this limit
"""
if len(x) == fld.shape[0]:
fld = fld.transpose()
cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)
fig = plt.figure(figsize=(12, 6), dpi=dpi)
ax1 = plt.subplot(2, 1, 1)
if plot_type == 'pcolormesh':
p1 = ax1.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)
elif plot_type == 'contourf':
p1 = ax1.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)
plt.ylim([stretch_depth, 0])
ax1.yaxis.axes.set_yticks(np.arange(stretch_depth, 1, 100))
plt.ylabel('Depth [%s]' % depth.attrs['units'])
ax1.xaxis.axes.set_xticklabels([])
ax2 = plt.subplot(2, 1, 2)
if plot_type == 'pcolormesh':
p2 = ax2.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)
elif plot_type == 'contourf':
p2 = ax2.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)
plt.ylim([depth.min(), stretch_depth])
yticks = np.flip(np.arange(2 * stretch_depth, depth.min(), -1000))
ax2.yaxis.axes.set_yticks(yticks)
plt.ylabel('Depth [%s]' % depth.attrs['units'])
fig.subplots_adjust(hspace=0.05)
if title is not None:
fig.suptitle(title, verticalalignment='top', fontsize=24)
if show_colorbar:
fig.subplots_adjust(right=0.83)
cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])
fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar)
plt.show()
return fig, ax1, ax2
<mask token>
| <mask token>
def plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type=
'pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100,
show_colorbar=True):
"""2D plot of depth vs some other variable, stretching first 500m of depth.
Parameters
----------
depth : xarray DataArray or numpy array
depth variable
x : xarray DataArray or numpy array
variable for x-axis. Likely to be time, latitude, or longitude
fld : xarray DataArray or numpy array
2D field with depth + 1 dim
stretch_depth : scalar (int or float), optional
stretch top depth to this limit
"""
if len(x) == fld.shape[0]:
fld = fld.transpose()
cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)
fig = plt.figure(figsize=(12, 6), dpi=dpi)
ax1 = plt.subplot(2, 1, 1)
if plot_type == 'pcolormesh':
p1 = ax1.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)
elif plot_type == 'contourf':
p1 = ax1.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)
plt.ylim([stretch_depth, 0])
ax1.yaxis.axes.set_yticks(np.arange(stretch_depth, 1, 100))
plt.ylabel('Depth [%s]' % depth.attrs['units'])
ax1.xaxis.axes.set_xticklabels([])
ax2 = plt.subplot(2, 1, 2)
if plot_type == 'pcolormesh':
p2 = ax2.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)
elif plot_type == 'contourf':
p2 = ax2.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)
plt.ylim([depth.min(), stretch_depth])
yticks = np.flip(np.arange(2 * stretch_depth, depth.min(), -1000))
ax2.yaxis.axes.set_yticks(yticks)
plt.ylabel('Depth [%s]' % depth.attrs['units'])
fig.subplots_adjust(hspace=0.05)
if title is not None:
fig.suptitle(title, verticalalignment='top', fontsize=24)
if show_colorbar:
fig.subplots_adjust(right=0.83)
cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])
fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar)
plt.show()
return fig, ax1, ax2
def set_colorbar_limits(fld, cmin, cmax):
"""If unset, compute colorbar limits based on field max/min values, sequential/divergent
Determine if colorbar needs to be extended
Parameters
----------
fld : xarray.DataArray
2D field to be plotted
Output
------
cmin : double
colorbar min value
cmax : double
colorbar max value
extend_cbar : string
flag to colorbar extension
"""
if cmin is None and cmax is not None:
raise RuntimeError('Only cmax given, must provide both cmin and cmax')
elif cmin is not None and cmax is None:
raise RuntimeError('Only cmin given, must provide both cmin and cmax')
else:
if type(cmin) is xr.DataArray:
cmin = cmin.values
elif cmin is not None:
raise TypeError('Unsure of cmin type: ', type(cmin))
if type(cmax) is xr.DataArray:
cmax = cmax.values
elif cmax is not None:
raise TypeError('Unsure of cmax type: ', type(cmax))
fld_min = fld.min(skipna=True).values
fld_max = fld.max(skipna=True).values
if cmin is None and cmax is None:
cmin = fld_min
cmax = fld_max
if fld_max * fld_min < 0 and fld.name != 'THETA':
cmax = np.nanmax(np.abs(fld.values))
cmin = -cmax
if cmin > fld_min and cmax < fld_max:
extend_cbar = 'both'
elif cmin > fld_min:
extend_cbar = 'min'
elif cmax < fld_max:
extend_cbar = 'max'
else:
extend_cbar = 'neither'
return cmin, cmax, extend_cbar
| <mask token>
def global_and_stereo_map(lat, lon, fld, plot_type='pcolormesh', cmap=
'YlOrRd', title=None, cmin=None, cmax=None, dpi=100, show_colorbar=True):
"""Generate the Robinson and Arctic/Antarctic plot.
Parameters
----------
lat : xarray.DataArray
lon : xarray.DataArray
fld : xarray.DataArray
plot_type : string, optional
plot type to use, 'pcolormesh', or 'contourf'
cmap : string or colormap object (TBD)
cmin : double, optional
minimum value for colorbar
cmax : double, optional
maximum value for colorbar
dpi : int, optional
plot resolution in dots (pixels) per inch
title : string, optional
figure title text placed above the subplots
show_colorbar : bool, optional
whether to add a single shared colorbar to the figure
Output
------
"""
cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)
plt.figure(figsize=(12, 6), dpi=dpi)
fig, ax1, p1, cb1 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=
cmap, plot_type=plot_type, subplot_grid=[2, 1, 1], projection_type=
'robin', show_colorbar=False, cmin=cmin, cmax=cmax, user_lon_0=0)
fig, ax2, p2, cb2 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=
cmap, plot_type=plot_type, subplot_grid=[2, 2, 3], projection_type=
'stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=50,
user_lon_0=0)
fig, ax3, p3, cb3 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=
cmap, plot_type=plot_type, subplot_grid=[2, 2, 4], projection_type=
'stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=-40,
user_lon_0=180)
ax1.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)
ax2.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)
ax3.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)
if title is not None:
fig.suptitle(title, verticalalignment='top', fontsize=24)
if show_colorbar:
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])
fig.colorbar(p3, cax=cbar_ax, extend=extend_cbar)
return fig, (ax1, ax2, ax3)
def plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type=
'pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100,
show_colorbar=True):
"""2D plot of depth vs some other variable, stretching first 500m of depth.
Parameters
----------
depth : xarray DataArray or numpy array
depth variable
x : xarray DataArray or numpy array
variable for x-axis. Likely to be time, latitude, or longitude
fld : xarray DataArray or numpy array
2D field with depth + 1 dim
stretch_depth : scalar (int or float), optional
stretch top depth to this limit
"""
if len(x) == fld.shape[0]:
fld = fld.transpose()
cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)
fig = plt.figure(figsize=(12, 6), dpi=dpi)
ax1 = plt.subplot(2, 1, 1)
if plot_type == 'pcolormesh':
p1 = ax1.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)
elif plot_type == 'contourf':
p1 = ax1.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)
plt.ylim([stretch_depth, 0])
ax1.yaxis.axes.set_yticks(np.arange(stretch_depth, 1, 100))
plt.ylabel('Depth [%s]' % depth.attrs['units'])
ax1.xaxis.axes.set_xticklabels([])
ax2 = plt.subplot(2, 1, 2)
if plot_type == 'pcolormesh':
p2 = ax2.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)
elif plot_type == 'contourf':
p2 = ax2.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)
plt.ylim([depth.min(), stretch_depth])
yticks = np.flip(np.arange(2 * stretch_depth, depth.min(), -1000))
ax2.yaxis.axes.set_yticks(yticks)
plt.ylabel('Depth [%s]' % depth.attrs['units'])
fig.subplots_adjust(hspace=0.05)
if title is not None:
fig.suptitle(title, verticalalignment='top', fontsize=24)
if show_colorbar:
fig.subplots_adjust(right=0.83)
cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])
fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar)
plt.show()
return fig, ax1, ax2
def set_colorbar_limits(fld, cmin, cmax):
"""If unset, compute colorbar limits based on field max/min values, sequential/divergent
Determine if colorbar needs to be extended
Parameters
----------
fld : xarray.DataArray
2D field to be plotted
Output
------
cmin : double
colorbar min value
cmax : double
colorbar max value
extend_cbar : string
flag to colorbar extension
"""
if cmin is None and cmax is not None:
raise RuntimeError('Only cmax given, must provide both cmin and cmax')
elif cmin is not None and cmax is None:
raise RuntimeError('Only cmin given, must provide both cmin and cmax')
else:
if type(cmin) is xr.DataArray:
cmin = cmin.values
elif cmin is not None:
raise TypeError('Unsure of cmin type: ', type(cmin))
if type(cmax) is xr.DataArray:
cmax = cmax.values
elif cmax is not None:
raise TypeError('Unsure of cmax type: ', type(cmax))
fld_min = fld.min(skipna=True).values
fld_max = fld.max(skipna=True).values
if cmin is None and cmax is None:
cmin = fld_min
cmax = fld_max
if fld_max * fld_min < 0 and fld.name != 'THETA':
cmax = np.nanmax(np.abs(fld.values))
cmin = -cmax
if cmin > fld_min and cmax < fld_max:
extend_cbar = 'both'
elif cmin > fld_min:
extend_cbar = 'min'
elif cmax < fld_max:
extend_cbar = 'max'
else:
extend_cbar = 'neither'
return cmin, cmax, extend_cbar
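A hedged sketch of calling plot_depth_slice above on synthetic data; the coordinates and field are invented, only the 'units' attribute on depth is required by the function, and the module's matplotlib import is assumed:
import numpy as np
import xarray as xr
# Synthetic latitude/depth temperature section: warm near the surface, cold at depth.
lat = xr.DataArray(np.linspace(-80, 80, 90), dims='lat')
depth = xr.DataArray(np.linspace(0, -5000, 50), dims='depth', attrs={'units': 'm'})
temp = xr.DataArray(
    20.0 * np.exp(depth.values[:, None] / 1000.0) * np.cos(np.deg2rad(lat.values))[None, :] ** 2,
    dims=('depth', 'lat'))
fig, ax1, ax2 = plot_depth_slice(lat, depth, temp, stretch_depth=-500,
                                 title='Synthetic temperature section')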
| <mask token>
import numpy as np
import matplotlib.pyplot as plt
import cartopy as cart
import xarray as xr
import ecco_v4_py as ecco
def global_and_stereo_map(lat, lon, fld, plot_type='pcolormesh', cmap=
'YlOrRd', title=None, cmin=None, cmax=None, dpi=100, show_colorbar=True):
"""Generate the Robinson and Arctic/Antarctic plot.
Parameters
----------
lat : xarray.DataArray
lon : xarray.DataArray
fld : xarray.DataArray
plot_type : string, optional
plot type to use, 'pcolormesh', or 'contourf'
cmap : string or colormap object (TBD)
cmin : double, optional
minimum value for colorbar
cmax : double, optional
maximum value for colorbar
    dpi : int, optional
plot resolution in dots (pixels) per inch
title,show_colorbar
figsize?
Output
------
"""
cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)
plt.figure(figsize=(12, 6), dpi=dpi)
fig, ax1, p1, cb1 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=
cmap, plot_type=plot_type, subplot_grid=[2, 1, 1], projection_type=
'robin', show_colorbar=False, cmin=cmin, cmax=cmax, user_lon_0=0)
fig, ax2, p2, cb2 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=
cmap, plot_type=plot_type, subplot_grid=[2, 2, 3], projection_type=
'stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=50,
user_lon_0=0)
fig, ax3, p3, cb3 = ecco.plot_proj_to_latlon_grid(lat, lon, fld, cmap=
cmap, plot_type=plot_type, subplot_grid=[2, 2, 4], projection_type=
'stereo', show_colorbar=False, cmin=cmin, cmax=cmax, lat_lim=-40,
user_lon_0=180)
ax1.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)
ax2.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)
ax3.add_feature(cart.feature.LAND, facecolor='0.7', zorder=2)
if title is not None:
fig.suptitle(title, verticalalignment='top', fontsize=24)
if show_colorbar:
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])
fig.colorbar(p3, cax=cbar_ax, extend=extend_cbar)
return fig, (ax1, ax2, ax3)
def plot_depth_slice(x, depth, fld, stretch_depth=-500, plot_type=
'pcolormesh', cmap='YlOrRd', title=None, cmin=None, cmax=None, dpi=100,
show_colorbar=True):
"""2D plot of depth vs some other variable, stretching first 500m of depth.
Parameters
----------
depth : xarray DataArray or numpy array
depth variable
x : xarray DataArray or numpy array
variable for x-axis. Likely to be time, latitude, or longitude
fld : xarray DataArray or numpy array
2D field with depth + 1 dim
stretch_depth : scalar (int or float), optional
stretch top depth to this limit
"""
if len(x) == fld.shape[0]:
fld = fld.transpose()
cmin, cmax, extend_cbar = set_colorbar_limits(fld, cmin, cmax)
fig = plt.figure(figsize=(12, 6), dpi=dpi)
ax1 = plt.subplot(2, 1, 1)
if plot_type == 'pcolormesh':
p1 = ax1.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)
elif plot_type == 'contourf':
p1 = ax1.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)
plt.ylim([stretch_depth, 0])
ax1.yaxis.axes.set_yticks(np.arange(stretch_depth, 1, 100))
plt.ylabel('Depth [%s]' % depth.attrs['units'])
ax1.xaxis.axes.set_xticklabels([])
ax2 = plt.subplot(2, 1, 2)
if plot_type == 'pcolormesh':
p2 = ax2.pcolormesh(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)
elif plot_type == 'contourf':
p2 = ax2.contourf(x, depth, fld, vmin=cmin, vmax=cmax, cmap=cmap)
plt.ylim([depth.min(), stretch_depth])
yticks = np.flip(np.arange(2 * stretch_depth, depth.min(), -1000))
ax2.yaxis.axes.set_yticks(yticks)
plt.ylabel('Depth [%s]' % depth.attrs['units'])
fig.subplots_adjust(hspace=0.05)
if title is not None:
fig.suptitle(title, verticalalignment='top', fontsize=24)
if show_colorbar:
fig.subplots_adjust(right=0.83)
cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])
fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar)
plt.show()
return fig, ax1, ax2
def set_colorbar_limits(fld, cmin, cmax):
"""If unset, compute colorbar limits based on field max/min values, sequential/divergent
Determine if colorbar needs to be extended
Parameters
----------
fld : xarray.DataArray
2D field to be plotted
Output
------
cmin : double
colorbar min value
cmax : double
colorbar max value
extend_cbar : string
flag to colorbar extension
"""
if cmin is None and cmax is not None:
raise RuntimeError('Only cmax given, must provide both cmin and cmax')
elif cmin is not None and cmax is None:
raise RuntimeError('Only cmin given, must provide both cmin and cmax')
else:
if type(cmin) is xr.DataArray:
            cmin = cmin.values
elif cmin is not None:
raise TypeError('Unsure of cmin type: ', type(cmin))
if type(cmax) is xr.DataArray:
            cmax = cmax.values
elif cmax is not None:
raise TypeError('Unsure of cmax type: ', type(cmax))
fld_min = fld.min(skipna=True).values
fld_max = fld.max(skipna=True).values
if cmin is None and cmax is None:
cmin = fld_min
cmax = fld_max
        if fld_max * fld_min < 0 and fld.name != 'THETA':
cmax = np.nanmax(np.abs(fld.values))
cmin = -cmax
if cmin > fld_min and cmax < fld_max:
extend_cbar = 'both'
elif cmin > fld_min:
extend_cbar = 'min'
elif cmax < fld_max:
extend_cbar = 'max'
else:
extend_cbar = 'neither'
return cmin, cmax, extend_cbar
| """
Module for generic standard analysis plots.
"""
import numpy as np
import matplotlib.pyplot as plt
import cartopy as cart
import xarray as xr
import ecco_v4_py as ecco
def global_and_stereo_map(lat, lon, fld,
plot_type='pcolormesh',
cmap='YlOrRd',
title=None,
cmin=None,
cmax=None,
dpi=100,
show_colorbar=True):
"""Generate the Robinson and Arctic/Antarctic plot.
Parameters
----------
lat : xarray.DataArray
lon : xarray.DataArray
fld : xarray.DataArray
plot_type : string, optional
plot type to use, 'pcolormesh', or 'contourf'
cmap : string or colormap object (TBD)
cmin : double, optional
minimum value for colorbar
cmax : double, optional
maximum value for colorbar
    dpi : int, optional
plot resolution in dots (pixels) per inch
title,show_colorbar
figsize?
Output
------
"""
# to do
# -figsize option?
# -cmin/cmax defaults handling with plot_proj ...
# -colorbar defaults with diverging/sequential
# -number of colors in plot
# -suppress dask warnings
# -get the subplot size "just right" no matter the figsize
# -arrows for when colorbar is exceeded
# handle colorbar limits
cmin, cmax, extend_cbar = set_colorbar_limits(fld,cmin,cmax)
# default figsize which seems to work for a laptop screen
plt.figure(figsize=(12,6),dpi=dpi)
# the big top global plot
fig, ax1, p1, cb1 = ecco.plot_proj_to_latlon_grid(
lat,lon,fld,
cmap=cmap,
plot_type=plot_type,
subplot_grid=[2,1,1],
projection_type='robin',
show_colorbar=False,
cmin=cmin,
cmax=cmax,
user_lon_0=0
)
# Arctic: bottom left
fig, ax2, p2, cb2 = ecco.plot_proj_to_latlon_grid(
lat,lon,fld,
cmap=cmap,
plot_type=plot_type,
subplot_grid=[2,2,3],
projection_type='stereo',
show_colorbar=False,
cmin=cmin,
cmax=cmax,
lat_lim=50,
user_lon_0=0
)
# ACC: bottom right
fig, ax3, p3, cb3 = ecco.plot_proj_to_latlon_grid(
lat,lon,fld,
cmap=cmap,
plot_type=plot_type,
subplot_grid=[2,2,4],
projection_type='stereo',
show_colorbar=False,
cmin=cmin,
cmax=cmax,
lat_lim=-40,
user_lon_0=180
)
# Set land color to gray
ax1.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2)
ax2.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2)
ax3.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2)
# Make a single title
if title is not None:
fig.suptitle(title,verticalalignment='top',fontsize=24)
    # Make an overlying colorbar
if show_colorbar:
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])
fig.colorbar(p3, cax=cbar_ax, extend=extend_cbar)
return fig, (ax1,ax2,ax3)
def plot_depth_slice(x, depth, fld,
stretch_depth=-500,
plot_type='pcolormesh',
cmap='YlOrRd',
title=None,
cmin=None,
cmax=None,
dpi=100,
show_colorbar=True):
"""2D plot of depth vs some other variable, stretching first 500m of depth.
Parameters
----------
depth : xarray DataArray or numpy array
depth variable
x : xarray DataArray or numpy array
variable for x-axis. Likely to be time, latitude, or longitude
fld : xarray DataArray or numpy array
2D field with depth + 1 dim
stretch_depth : scalar (int or float), optional
stretch top depth to this limit
"""
# Ensure negative values
#if (depth>0).any():
# depth = -depth
#if stretch_depth > 0:
# stretch_depth = -stretch_depth
# Handle shape
if len(x) == fld.shape[0]:
fld = fld.transpose()
# handle colorbar limits
cmin, cmax, extend_cbar = set_colorbar_limits(fld,cmin,cmax)
# default figsize which seems to work for a laptop screen
fig = plt.figure(figsize=(12,6),dpi=dpi)
# Could also use plt.subplots here ...
# First top 500m
ax1 = plt.subplot(2,1,1)
if plot_type == 'pcolormesh':
p1 = ax1.pcolormesh(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap)
elif plot_type == 'contourf':
p1 = ax1.contourf(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap)
# Handle y-axis
plt.ylim([stretch_depth, 0])
ax1.yaxis.axes.set_yticks(np.arange(stretch_depth,1,100))
plt.ylabel('Depth [%s]' % depth.attrs['units'])
# Remove top plot xtick label
ax1.xaxis.axes.set_xticklabels([])
# Now the rest ...
ax2 = plt.subplot(2,1,2)
if plot_type == 'pcolormesh':
p2 = ax2.pcolormesh(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap)
elif plot_type == 'contourf':
p2 = ax2.contourf(x,depth,fld,vmin=cmin,vmax=cmax,cmap=cmap)
# Handle y-axis
plt.ylim([depth.min(), stretch_depth])
yticks = np.flip(np.arange(2*stretch_depth,depth.min(),-1000))
ax2.yaxis.axes.set_yticks(yticks)
plt.ylabel('Depth [%s]' % depth.attrs['units'])
# Reduce space between subplots
fig.subplots_adjust(hspace=0.05)
# Make a single title
if title is not None:
fig.suptitle(title,verticalalignment='top',fontsize=24)
    # Make an overlying colorbar
if show_colorbar:
fig.subplots_adjust(right=0.83)
cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])
fig.colorbar(p2, cax=cbar_ax, extend=extend_cbar)
plt.show()
return fig,ax1,ax2
def set_colorbar_limits(fld,cmin,cmax):
"""If unset, compute colorbar limits based on field max/min values, sequential/divergent
Determine if colorbar needs to be extended
Parameters
----------
fld : xarray.DataArray
2D field to be plotted
Output
------
cmin : double
colorbar min value
cmax : double
colorbar max value
extend_cbar : string
flag to colorbar extension
"""
# handle input
if (cmin is None) and (cmax is not None):
raise RuntimeError('Only cmax given, must provide both cmin and cmax')
elif (cmin is not None) and (cmax is None):
raise RuntimeError('Only cmin given, must provide both cmin and cmax')
else:
# handle colorbar limits accidentally passed as with xarray functions
if type(cmin) is xr.DataArray:
            cmin = cmin.values
elif cmin is not None:
raise TypeError('Unsure of cmin type: ',type(cmin))
if type(cmax) is xr.DataArray:
            cmax = cmax.values
elif cmax is not None:
raise TypeError('Unsure of cmax type: ',type(cmax))
# compute fld limits
fld_min = fld.min(skipna=True).values
fld_max = fld.max(skipna=True).values
# if cmin/cmax not set, compute
if (cmin is None) and (cmax is None):
cmin = fld_min
cmax = fld_max
# determine if divergent colorbar
# Note: Not making divergent colorbar for temperature
# in degC because still sequential even though +/-
        if (fld_max*fld_min < 0) and (fld.name != 'THETA'):
cmax = np.nanmax(np.abs(fld.values))
cmin = -cmax
# determine if colorbar needs to be extended
if (cmin > fld_min) and (cmax < fld_max):
extend_cbar = "both"
elif cmin > fld_min:
extend_cbar = "min"
elif cmax < fld_max:
extend_cbar = "max"
else:
extend_cbar = "neither"
return cmin, cmax, extend_cbar
| [
1,
2,
3,
4,
5
] |
861 | bf6d1ddf66bc0d54320c0491e344925a5f507df7 | <mask token>
| <mask token>
sys.path.insert(0, '/path/to/mm-api/python')
sys.path.insert(0, '/path/to/mm-api/distrib/python_osx')
print(sys.path)
<mask token>
remote.connect()
<mask token>
remote.shutdown()
| <mask token>
sys.path.insert(0, '/path/to/mm-api/python')
sys.path.insert(0, '/path/to/mm-api/distrib/python_osx')
print(sys.path)
<mask token>
examples_dir = '/dir/of/models/'
part_filename1 = os.path.join(examples_dir, 'model1.stl')
part_filename2 = os.path.join(examples_dir, 'model2.stl')
remote = mmRemote()
remote.connect()
cmd = mmapi.StoredCommands()
new_obj1 = mm.append_objects_from_file(remote, part_filename1)
new_obj1 = mm.append_objects_from_file(remote, part_filename2)
remote.shutdown()
| import os
import sys
sys.path.insert(0, '/path/to/mm-api/python')
sys.path.insert(0, '/path/to/mm-api/distrib/python_osx')
print(sys.path)
import mmapi
from mmRemote import *
import mm
examples_dir = '/dir/of/models/'
part_filename1 = os.path.join(examples_dir, 'model1.stl')
part_filename2 = os.path.join(examples_dir, 'model2.stl')
remote = mmRemote()
remote.connect()
cmd = mmapi.StoredCommands()
new_obj1 = mm.append_objects_from_file(remote, part_filename1)
new_obj1 = mm.append_objects_from_file(remote, part_filename2)
remote.shutdown()
| import os
import sys
sys.path.insert(0, "/path/to/mm-api/python")
sys.path.insert(0, "/path/to/mm-api/distrib/python_osx")
print(sys.path)
import mmapi
from mmRemote import *
import mm;
# assumption: we are running
examples_dir = "/dir/of/models/"
part_filename1 = os.path.join( examples_dir, "model1.stl" )
part_filename2 = os.path.join( examples_dir, "model2.stl" )
# initialize connection
remote = mmRemote()
remote.connect()
cmd = mmapi.StoredCommands()
new_obj1 = mm.append_objects_from_file(remote, part_filename1);
new_obj1 = mm.append_objects_from_file(remote, part_filename2);
#done!
remote.shutdown()
| [
0,
1,
2,
3,
4
] |
862 | 28854823b1edc7df6cf025175811c1858efd2c42 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='OpenHumansMember', fields=[(
'oh_id', models.CharField(max_length=16, primary_key=True,
serialize=False, unique=True)), ('access_token', models.CharField(
max_length=256)), ('refresh_token', models.CharField(max_length=256
)), ('token_expires', models.DateTimeField()), ('seeq_id', models.
IntegerField(null=True))])]
| from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='OpenHumansMember', fields=[(
'oh_id', models.CharField(max_length=16, primary_key=True,
serialize=False, unique=True)), ('access_token', models.CharField(
max_length=256)), ('refresh_token', models.CharField(max_length=256
)), ('token_expires', models.DateTimeField()), ('seeq_id', models.
IntegerField(null=True))])]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-19 15:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='OpenHumansMember',
fields=[
('oh_id', models.CharField(max_length=16, primary_key=True, serialize=False, unique=True)),
('access_token', models.CharField(max_length=256)),
('refresh_token', models.CharField(max_length=256)),
('token_expires', models.DateTimeField()),
('seeq_id', models.IntegerField(null=True)),
],
),
]
| [
0,
1,
2,
3,
4
] |
863 | e3665141397d52877242463d548c059272d13536 | <mask token>
| <mask token>
def main(input, output):
vocab = OrderedDict({'</s>': 0, '<unk>': 1})
for line in io.open(input, 'r', encoding='utf-8'):
word, count = line.strip().split()
vocab[word] = len(vocab)
with io.open(output, 'w', encoding='utf-8') as out:
json.dump(vocab, out, indent=2, ensure_ascii=False)
<mask token>
| <mask token>
def main(input, output):
vocab = OrderedDict({'</s>': 0, '<unk>': 1})
for line in io.open(input, 'r', encoding='utf-8'):
word, count = line.strip().split()
vocab[word] = len(vocab)
with io.open(output, 'w', encoding='utf-8') as out:
json.dump(vocab, out, indent=2, ensure_ascii=False)
if __name__ == '__main__':
fire.Fire(main)
| import io
import json
import fire
from collections import OrderedDict
def main(input, output):
vocab = OrderedDict({'</s>': 0, '<unk>': 1})
for line in io.open(input, 'r', encoding='utf-8'):
word, count = line.strip().split()
vocab[word] = len(vocab)
with io.open(output, 'w', encoding='utf-8') as out:
json.dump(vocab, out, indent=2, ensure_ascii=False)
if __name__ == '__main__':
fire.Fire(main)
| #!/usr/bin/env python3
import io
import json
import fire
from collections import OrderedDict
def main(input, output):
vocab = OrderedDict({'</s>': 0, '<unk>': 1})
for line in io.open(input, 'r', encoding='utf-8'):
word, count = line.strip().split()
vocab[word] = len(vocab)
with io.open(output, 'w', encoding='utf-8') as out:
json.dump(vocab, out, indent=2, ensure_ascii=False)
if __name__ == '__main__':
fire.Fire(main)
| [
0,
1,
2,
3,
4
] |
864 | 25ff54a969651d365de33f2420c662518dd63738 | <mask token>
def loop(run_state):
error = 1
simulations = 1
while run:
error_margin = str(error / simulations * 100) + '%'
prediction = get_prediction()
print('Prediction: %s' % prediction)
print('Error Margin: %s' % error_margin)
print('Flip the coin and insert your result:\nh = head\nt = tail')
answer = input()
comparator = ''
        if answer == 'h' or answer == 't':
if answer == 't':
write_data(False)
comparator = 'tail'
elif answer == 'h':
write_data(True)
comparator = 'head'
simulations += 1
if comparator != prediction:
error += 1
else:
print('Invalid answer\n')
def get_prediction():
file = read_file()
data = file['coin-result']
true = 0
for i in data:
if i is True:
true += 1
head = true / len(data)
tail = 1 - head
if head + tail == 1:
rand = random.uniform(0.0, 1.0)
if head == 1:
return 'head'
elif tail == 1:
return 'tail'
elif head > tail:
if rand > head:
return 'head'
else:
return 'tail'
elif head < tail:
if rand > tail:
return 'tail'
else:
return 'head'
elif head == tail:
rand = random.randint(0, 1)
if rand == 0:
return 'tail'
else:
return 'head'
def read_file():
file = open(url, 'r')
data = json.loads(file.read())
file.close()
return data
def write_data(value):
data = read_file()
file = open(url, 'w')
data['coin-result'].append(value)
json.dump(data, file)
file.close()
def get_answer(answer):
if answer == 'c':
return 'head'
elif answer == 't':
return 'tail'
else:
print('Invalid answer')
<mask token>
| <mask token>
def loop(run_state):
error = 1
simulations = 1
while run:
error_margin = str(error / simulations * 100) + '%'
prediction = get_prediction()
print('Prediction: %s' % prediction)
print('Error Margin: %s' % error_margin)
print('Flip the coin and insert your result:\nh = head\nt = tail')
answer = input()
comparator = ''
        if answer == 'h' or answer == 't':
if answer == 't':
write_data(False)
comparator = 'tail'
elif answer == 'h':
write_data(True)
comparator = 'head'
simulations += 1
if comparator != prediction:
error += 1
else:
print('Invalid answer\n')
def get_prediction():
file = read_file()
data = file['coin-result']
true = 0
for i in data:
if i is True:
true += 1
head = true / len(data)
tail = 1 - head
if head + tail == 1:
rand = random.uniform(0.0, 1.0)
if head == 1:
return 'head'
elif tail == 1:
return 'tail'
elif head > tail:
if rand > head:
return 'head'
else:
return 'tail'
elif head < tail:
if rand > tail:
return 'tail'
else:
return 'head'
elif head == tail:
rand = random.randint(0, 1)
if rand == 0:
return 'tail'
else:
return 'head'
def read_file():
file = open(url, 'r')
data = json.loads(file.read())
file.close()
return data
def write_data(value):
data = read_file()
file = open(url, 'w')
data['coin-result'].append(value)
json.dump(data, file)
file.close()
def get_answer(answer):
if answer == 'c':
return 'head'
elif answer == 't':
return 'tail'
else:
print('Invalid answer')
<mask token>
print('Welcome to CoinPredictor\n')
loop(run)
<mask token>
| <mask token>
url = 'data/data.json'
def loop(run_state):
error = 1
simulations = 1
while run:
error_margin = str(error / simulations * 100) + '%'
prediction = get_prediction()
print('Prediction: %s' % prediction)
print('Error Margin: %s' % error_margin)
print('Flip the coin and insert your result:\nh = head\nt = tail')
answer = input()
comparator = ''
        if answer == 'h' or answer == 't':
if answer == 't':
write_data(False)
comparator = 'tail'
elif answer == 'h':
write_data(True)
comparator = 'head'
simulations += 1
if comparator != prediction:
error += 1
else:
print('Invalid answer\n')
def get_prediction():
file = read_file()
data = file['coin-result']
true = 0
for i in data:
if i is True:
true += 1
head = true / len(data)
tail = 1 - head
if head + tail == 1:
rand = random.uniform(0.0, 1.0)
if head == 1:
return 'head'
elif tail == 1:
return 'tail'
elif head > tail:
if rand > head:
return 'head'
else:
return 'tail'
elif head < tail:
if rand > tail:
return 'tail'
else:
return 'head'
elif head == tail:
rand = random.randint(0, 1)
if rand == 0:
return 'tail'
else:
return 'head'
def read_file():
file = open(url, 'r')
data = json.loads(file.read())
file.close()
return data
def write_data(value):
data = read_file()
file = open(url, 'w')
data['coin-result'].append(value)
json.dump(data, file)
file.close()
def get_answer(answer):
if answer == 'c':
return 'head'
elif answer == 't':
return 'tail'
else:
print('Invalid answer')
run = True
print('Welcome to CoinPredictor\n')
loop(run)
<mask token>
| import json
import random
from time import sleep
url = 'data/data.json'
def loop(run_state):
error = 1
simulations = 1
while run:
error_margin = str(error / simulations * 100) + '%'
prediction = get_prediction()
print('Prediction: %s' % prediction)
print('Error Margin: %s' % error_margin)
print('Flip the coin and insert your result:\nh = head\nt = tail')
answer = input()
comparator = ''
        if answer == 'h' or answer == 't':
if answer == 't':
write_data(False)
comparator = 'tail'
elif answer == 'h':
write_data(True)
comparator = 'head'
simulations += 1
if comparator != prediction:
error += 1
else:
print('Invalid answer\n')
def get_prediction():
file = read_file()
data = file['coin-result']
true = 0
for i in data:
if i is True:
true += 1
head = true / len(data)
tail = 1 - head
if head + tail == 1:
rand = random.uniform(0.0, 1.0)
if head == 1:
return 'head'
elif tail == 1:
return 'tail'
elif head > tail:
if rand > head:
return 'head'
else:
return 'tail'
elif head < tail:
if rand > tail:
return 'tail'
else:
return 'head'
elif head == tail:
rand = random.randint(0, 1)
if rand == 0:
return 'tail'
else:
return 'head'
def read_file():
file = open(url, 'r')
data = json.loads(file.read())
file.close()
return data
def write_data(value):
data = read_file()
file = open(url, 'w')
data['coin-result'].append(value)
json.dump(data, file)
file.close()
def get_answer(answer):
if answer == 'c':
return 'head'
elif answer == 't':
return 'tail'
else:
print('Invalid answer')
run = True
print('Welcome to CoinPredictor\n')
loop(run)
<mask token>
| import json
import random
from time import sleep
url = "data/data.json"
def loop(run_state):
error = 1
simulations = 1
while run:
error_margin = str((error/simulations) * 100) + "%"
prediction = get_prediction()
print("Prediction: %s" % prediction)
print("Error Margin: %s" % error_margin)
print("Flip the coin and insert your result:\nh = head\nt = tail")
answer = input()
comparator = ""
        if answer == "h" or answer == "t":
if answer == "t":
write_data(False)
comparator = "tail"
elif answer == "h":
write_data(True)
comparator = "head"
simulations += 1
if comparator != prediction:
error += 1
else:
print("Invalid answer\n")
def get_prediction():
file = read_file()
data = file["coin-result"]
true = 0
for i in data:
if i is True:
true += 1
head = true/len(data)
tail = 1-head
if head + tail == 1:
rand = random.uniform(0.0, 1.0)
if head == 1:
return "head"
elif tail == 1:
return "tail"
elif head > tail:
if rand > head:
return "head"
else:
return "tail"
elif head < tail:
if rand > tail:
return "tail"
else:
return "head"
elif head == tail:
rand = random.randint(0, 1)
if rand == 0:
return "tail"
else:
return "head"
def read_file():
file = open(url, "r")
data = json.loads(file.read())
file.close()
return data
def write_data(value):
data = read_file()
file = open(url, "w")
data["coin-result"].append(value)
json.dump(data, file)
file.close()
def get_answer(answer):
if answer == "c":
return "head"
elif answer == "t":
return "tail"
else:
print("Invalid answer")
# OnRun
run = True
print("Welcome to CoinPredictor\n")
loop(run)
'''
file = open("data/data.json", "w")
data['coin-result'].append(False)
data = json.dump(data, file)
print(data)
file.close()'''
| [
5,
6,
7,
8,
9
] |
865 | 73d7b1895282df5b744d8c03ec7e6f8530366b76 | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from sklearn import svm
data=np.loadtxt('yucedata1.txt')
X=data[:,0]
y=data[:,1]
plt.figure(1,figsize=(8,6))
myfont = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=14)
plt.scatter(X,y,color="red",label="ini_data",linewidth=3)
plt.xlabel(u'Exam1 Score',fontproperties=myfont)
plt.ylabel('Exam2 Score')
plt.legend()
# plt.show()
X=X.reshape(-1,1)
print X
clf = svm.SVR(kernel='linear').fit(X, y)
# clf = svm.SVC(kernel='poly',degree=5,gamma=1,coef0=0).fit(X, y)
# clf = svm.SVR(kernel='rbf',C=100,gamma=20).fit(X, y)
'''The larger gamma is, the more polynomial terms are included, which leads to higher variance'''
# print u'Accuracy: %.2f' % clf.score(X, y)
X1=np.linspace(0,25,100).reshape(-1,1)
y1=clf.predict(X1)
plt.plot(X1,y1,color="orange",label="Fitting Line",linewidth=2)
plt.show()
| null | null | null | null | [
0
] |
866 | f2cdee7e5eebaeeb784cb901c3ac6301e90ac7b9 | <mask token>
| <mask token>
def index(request):
if request.method == 'POST':
form = EmailForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.signup_date = timezone.now()
post.email_confirmed = True
post.save()
return redirect('/emailupdate/thanks/')
else:
form_class = EmailForm
return render(request, 'emailupdate/emailupdate.html', {'form':
form_class})
<mask token>
| <mask token>
def index(request):
if request.method == 'POST':
form = EmailForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.signup_date = timezone.now()
post.email_confirmed = True
post.save()
return redirect('/emailupdate/thanks/')
else:
form_class = EmailForm
return render(request, 'emailupdate/emailupdate.html', {'form':
form_class})
def thanks(request):
return render(request, 'emailupdate/emailupdate_thanks.html')
| from django.shortcuts import render, get_object_or_404, redirect
from forms import EmailForm
from django.utils import timezone
def index(request):
if request.method == 'POST':
form = EmailForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.signup_date = timezone.now()
post.email_confirmed = True
post.save()
return redirect('/emailupdate/thanks/')
else:
form_class = EmailForm
return render(request, 'emailupdate/emailupdate.html', {'form':
form_class})
def thanks(request):
return render(request, 'emailupdate/emailupdate_thanks.html')
| from django.shortcuts import render, get_object_or_404, redirect
#from emailupdate.forms import emailupdate_form
from forms import EmailForm
from django.utils import timezone
def index(request):
if request.method == "POST":
form = EmailForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.signup_date = timezone.now()
post.email_confirmed = True
post.save()
return redirect('/emailupdate/thanks/')
else:
form_class = EmailForm
return render(request, 'emailupdate/emailupdate.html', {
'form': form_class,
})
def thanks(request):
return render(request, 'emailupdate/emailupdate_thanks.html') | [
0,
1,
2,
3,
4
] |
867 | 18dae039f6455f944cbaa97bcb9c36ed29ac9a21 | <mask token>
| <mask token>
def read_incremental_factors():
import csv
inc_file = open(incremental_factors_file, 'r')
reader = csv.reader(inc_file)
increment_map = dict()
funding_code_map = dict()
this_trn_code = ''
for row in reader:
if row[0] != '':
this_trn_code = row[0].replace('-', '')
this_trn = increment_map.get(this_trn_code, {})
this_trn[int(row[1])] = float(row[3])
funding_code_map[int(row[1])] = row[2]
increment_map[this_trn_code] = this_trn
return increment_map, funding_code_map
<mask token>
apns.sort()
<mask token>
for apn in apns:
try:
tax_history_index = tax_history_apns.index(apn)
except:
tax_history_index = None
if tax_history_index is None:
print('No Matching APN: ' + apn)
else:
this_tax_history = tax_history[tax_history_index]
total_tax = this_tax_history[3]
tra = this_tax_history[1]
this_tra = increment_map.get(tra, None)
if this_tra is None:
print('TRA is Null for APN: ' + apn)
else:
fraction = this_tra.get(cabrillo_key, None)
if fraction is None:
print('APN: ' + apn + ' is not in district')
else:
tax_distribution += [[this_tax_history[0], this_tax_history
[1], this_tax_history[2], fraction, this_tax_history[3],
[(t * fraction) for t in this_tax_history[3]]]]
<mask token>
print('District Contributions: ')
<mask token>
for ds in district_sum:
print(str(year) + ': ' + str(ds))
year += 1
p.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,
'wb'))
| incremental_factors_file = '../2019_2020_IncrementalFactorsList.csv'
tax_pickle_for_apns = 'kmes_taxes.p'
tax_history_pickle = '../cusd_1percent_tax_history.p'
distribution_pickle_out = 'kmes_distribution.p'
cabrillo_key = 50200
def read_incremental_factors():
import csv
inc_file = open(incremental_factors_file, 'r')
reader = csv.reader(inc_file)
increment_map = dict()
funding_code_map = dict()
this_trn_code = ''
for row in reader:
if row[0] != '':
this_trn_code = row[0].replace('-', '')
this_trn = increment_map.get(this_trn_code, {})
this_trn[int(row[1])] = float(row[3])
funding_code_map[int(row[1])] = row[2]
increment_map[this_trn_code] = this_trn
return increment_map, funding_code_map
increment_map, funding_code_map = read_incremental_factors()
<mask token>
tax_data_apns = p.load(open(tax_pickle_for_apns, 'rb'))
apns = list(set([d[0] for d in tax_data_apns]))
apns.sort()
tax_distribution = list()
tax_history = p.load(open(tax_history_pickle, 'rb'))
tax_history_apns = [d[0] for d in tax_history]
for apn in apns:
try:
tax_history_index = tax_history_apns.index(apn)
except:
tax_history_index = None
if tax_history_index is None:
print('No Matching APN: ' + apn)
else:
this_tax_history = tax_history[tax_history_index]
total_tax = this_tax_history[3]
tra = this_tax_history[1]
this_tra = increment_map.get(tra, None)
if this_tra is None:
print('TRA is Null for APN: ' + apn)
else:
fraction = this_tra.get(cabrillo_key, None)
if fraction is None:
print('APN: ' + apn + ' is not in district')
else:
tax_distribution += [[this_tax_history[0], this_tax_history
[1], this_tax_history[2], fraction, this_tax_history[3],
[(t * fraction) for t in this_tax_history[3]]]]
<mask token>
district_data = np.array(np.array([x[5] for x in tax_distribution]))
print('District Contributions: ')
district_sum = np.sum(district_data, axis=0)
year = 2007
for ds in district_sum:
print(str(year) + ': ' + str(ds))
year += 1
p.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,
'wb'))
| incremental_factors_file = '../2019_2020_IncrementalFactorsList.csv'
tax_pickle_for_apns = 'kmes_taxes.p'
tax_history_pickle = '../cusd_1percent_tax_history.p'
distribution_pickle_out = 'kmes_distribution.p'
cabrillo_key = 50200
def read_incremental_factors():
import csv
inc_file = open(incremental_factors_file, 'r')
reader = csv.reader(inc_file)
increment_map = dict()
funding_code_map = dict()
this_trn_code = ''
for row in reader:
if row[0] != '':
this_trn_code = row[0].replace('-', '')
this_trn = increment_map.get(this_trn_code, {})
this_trn[int(row[1])] = float(row[3])
funding_code_map[int(row[1])] = row[2]
increment_map[this_trn_code] = this_trn
return increment_map, funding_code_map
increment_map, funding_code_map = read_incremental_factors()
import pickle as p
tax_data_apns = p.load(open(tax_pickle_for_apns, 'rb'))
apns = list(set([d[0] for d in tax_data_apns]))
apns.sort()
tax_distribution = list()
tax_history = p.load(open(tax_history_pickle, 'rb'))
tax_history_apns = [d[0] for d in tax_history]
for apn in apns:
try:
tax_history_index = tax_history_apns.index(apn)
except:
tax_history_index = None
if tax_history_index is None:
print('No Matching APN: ' + apn)
else:
this_tax_history = tax_history[tax_history_index]
total_tax = this_tax_history[3]
tra = this_tax_history[1]
this_tra = increment_map.get(tra, None)
if this_tra is None:
print('TRA is Null for APN: ' + apn)
else:
fraction = this_tra.get(cabrillo_key, None)
if fraction is None:
print('APN: ' + apn + ' is not in district')
else:
tax_distribution += [[this_tax_history[0], this_tax_history
[1], this_tax_history[2], fraction, this_tax_history[3],
[(t * fraction) for t in this_tax_history[3]]]]
import numpy as np
district_data = np.array(np.array([x[5] for x in tax_distribution]))
print('District Contributions: ')
district_sum = np.sum(district_data, axis=0)
year = 2007
for ds in district_sum:
print(str(year) + ': ' + str(ds))
year += 1
p.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,
'wb'))
| incremental_factors_file = '../2019_2020_IncrementalFactorsList.csv'
tax_pickle_for_apns = 'kmes_taxes.p'
tax_history_pickle = '../cusd_1percent_tax_history.p'
distribution_pickle_out = 'kmes_distribution.p'
cabrillo_key = 50200
def read_incremental_factors():
import csv
inc_file = open(incremental_factors_file, 'r')
reader = csv.reader(inc_file)
increment_map = dict()
funding_code_map = dict()
this_trn_code = ''
for row in reader:
if row[0] != '':
this_trn_code = row[0].replace('-','')
this_trn = increment_map.get(this_trn_code,{})
this_trn[int(row[1])] = float(row[3])
funding_code_map[int(row[1])] = row[2]
increment_map[this_trn_code] = this_trn
return increment_map, funding_code_map
increment_map, funding_code_map = read_incremental_factors()
import pickle as p
tax_data_apns = p.load(open(tax_pickle_for_apns,'rb'))
apns = list(set([d[0] for d in tax_data_apns]))
apns.sort()
tax_distribution = list()
tax_history = p.load(open(tax_history_pickle,'rb'))
tax_history_apns = [d[0] for d in tax_history]
for apn in apns:
try:
tax_history_index = tax_history_apns.index(apn)
except:
tax_history_index = None
if tax_history_index is None:
print('No Matching APN: ' + apn)
else:
this_tax_history = tax_history[tax_history_index]
total_tax = this_tax_history[3]
tra = this_tax_history[1]
this_tra = increment_map.get(tra, None)
if this_tra is None:
print('TRA is Null for APN: ' + apn)
else:
fraction = this_tra.get(cabrillo_key, None)
if fraction is None:
print('APN: ' + apn + ' is not in district')
else:
tax_distribution += [[this_tax_history[0], this_tax_history[1], this_tax_history[2], fraction, this_tax_history[3], [t*fraction for t in this_tax_history[3]]]]
import numpy as np
district_data = np.array(np.array([x[5] for x in tax_distribution]))
print('District Contributions: ')
district_sum = np.sum(district_data, axis=0)
year = 2007
for ds in district_sum:
print(str(year) + ": " + str(ds))
year += 1
p.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,'wb'))
| [
0,
2,
3,
4,
5
] |
868 | 860f77b031c815df40a16669dae8d32af4afa5bf | <mask token>
@app.route('/', methods=['GET'])
def index():
""" Renders Index.html """
try:
return render_template('index.html')
except Exception as e:
print('Exception Occurred', e)
return jsonify({'status': 'failed', 'message':
'Something Went Wrong !!'})
@app.route('/upload', methods=['POST'])
def file_converter():
"""
Function Processing Steps:
Step-1 : Check uploaded file extension ,if accepted format process further
Step-2 : Save the files into uploads folder
Step-3 : Convert the html,doc and docx files into pdf file and stores into converted_files folder
    Note : If file is already in pdf format then the file will be saved directly in the converted_files
folder without other action.
"""
if request.method == 'POST':
try:
files = request.files.getlist('file')
print('files', files)
if len(files) > 0:
for data in files:
if allowed_file(data.filename):
filename = secure_filename(data.filename)
extension = filename.split('.')
file_path = os.path.join('static/uploads', filename)
if extension[-1] == 'pdf':
pdf_file_path = os.path.join(
'static/converted_files', filename)
data.save(pdf_file_path)
else:
data.save(file_path)
if extension[-1] == 'html':
if convert_html_to_pdf(file_path, extension[0]):
print('File Converted to PDF Successfully !!')
else:
raise Exception('Something Went Wrong !')
elif extension[-1] == 'docx' or extension[-1] == 'doc':
if convert_doc_to_pdf(file_path):
print('File Converted to PDF Successfully !!')
else:
raise Exception('Something Went Wrong !')
return jsonify({'status': 'success', 'message':
'File Uploaded Successfully !!'})
else:
return jsonify({'status': 'failed', 'message':
'Format Not Allowed !!'})
else:
return jsonify({'status': 'failed'})
except Exception as e:
print('Exception Occurred', e)
return jsonify({'status': 'exception', 'message':
'Something Went Wrong !!'})
else:
return jsonify({'status': 'failed', 'message': 'Method Not Allowed !'})
<mask token>
| <mask token>
@app.route('/', methods=['GET'])
def index():
""" Renders Index.html """
try:
return render_template('index.html')
except Exception as e:
print('Exception Occurred', e)
return jsonify({'status': 'failed', 'message':
'Something Went Wrong !!'})
@app.route('/upload', methods=['POST'])
def file_converter():
"""
Function Processing Steps:
Step-1 : Check uploaded file extension ,if accepted format process further
Step-2 : Save the files into uploads folder
Step-3 : Convert the html,doc and docx files into pdf file and stores into converted_files folder
    Note : If file is already in pdf format then the file will be saved directly in the converted_files
folder without other action.
"""
if request.method == 'POST':
try:
files = request.files.getlist('file')
print('files', files)
if len(files) > 0:
for data in files:
if allowed_file(data.filename):
filename = secure_filename(data.filename)
extension = filename.split('.')
file_path = os.path.join('static/uploads', filename)
if extension[-1] == 'pdf':
pdf_file_path = os.path.join(
'static/converted_files', filename)
data.save(pdf_file_path)
else:
data.save(file_path)
if extension[-1] == 'html':
if convert_html_to_pdf(file_path, extension[0]):
print('File Converted to PDF Successfully !!')
else:
raise Exception('Something Went Wrong !')
elif extension[-1] == 'docx' or extension[-1] == 'doc':
if convert_doc_to_pdf(file_path):
print('File Converted to PDF Successfully !!')
else:
raise Exception('Something Went Wrong !')
return jsonify({'status': 'success', 'message':
'File Uploaded Successfully !!'})
else:
return jsonify({'status': 'failed', 'message':
'Format Not Allowed !!'})
else:
return jsonify({'status': 'failed'})
except Exception as e:
print('Exception Occurred', e)
return jsonify({'status': 'exception', 'message':
'Something Went Wrong !!'})
else:
return jsonify({'status': 'failed', 'message': 'Method Not Allowed !'})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9000)
| <mask token>
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
""" Renders Index.html """
try:
return render_template('index.html')
except Exception as e:
print('Exception Occurred', e)
return jsonify({'status': 'failed', 'message':
'Something Went Wrong !!'})
@app.route('/upload', methods=['POST'])
def file_converter():
"""
Function Processing Steps:
Step-1 : Check uploaded file extension ,if accepted format process further
Step-2 : Save the files into uploads folder
Step-3 : Convert the html,doc and docx files into pdf file and stores into converted_files folder
    Note : If file is already in pdf format then the file will be saved directly in the converted_files
folder without other action.
"""
if request.method == 'POST':
try:
files = request.files.getlist('file')
print('files', files)
if len(files) > 0:
for data in files:
if allowed_file(data.filename):
filename = secure_filename(data.filename)
extension = filename.split('.')
file_path = os.path.join('static/uploads', filename)
if extension[-1] == 'pdf':
pdf_file_path = os.path.join(
'static/converted_files', filename)
data.save(pdf_file_path)
else:
data.save(file_path)
if extension[-1] == 'html':
if convert_html_to_pdf(file_path, extension[0]):
print('File Converted to PDF Successfully !!')
else:
raise Exception('Something Went Wrong !')
elif extension[-1] == 'docx' or extension[-1] == 'doc':
if convert_doc_to_pdf(file_path):
print('File Converted to PDF Successfully !!')
else:
raise Exception('Something Went Wrong !')
return jsonify({'status': 'success', 'message':
'File Uploaded Successfully !!'})
else:
return jsonify({'status': 'failed', 'message':
'Format Not Allowed !!'})
else:
return jsonify({'status': 'failed'})
except Exception as e:
print('Exception Occurred', e)
return jsonify({'status': 'exception', 'message':
'Something Went Wrong !!'})
else:
return jsonify({'status': 'failed', 'message': 'Method Not Allowed !'})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9000)
| from flask import Flask, jsonify, request, render_template
from werkzeug import secure_filename
import os
from utils import allowed_file, convert_html_to_pdf, convert_doc_to_pdf
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
""" Renders Index.html """
try:
return render_template('index.html')
except Exception as e:
print('Exception Occurred', e)
return jsonify({'status': 'failed', 'message':
'Something Went Wrong !!'})
@app.route('/upload', methods=['POST'])
def file_converter():
"""
Function Processing Steps:
Step-1 : Check uploaded file extension ,if accepted format process further
Step-2 : Save the files into uploads folder
Step-3 : Convert the html,doc and docx files into pdf file and stores into converted_files folder
    Note : If file is already in pdf format then the file will be saved directly in the converted_files
folder without other action.
"""
if request.method == 'POST':
try:
files = request.files.getlist('file')
print('files', files)
if len(files) > 0:
for data in files:
if allowed_file(data.filename):
filename = secure_filename(data.filename)
extension = filename.split('.')
file_path = os.path.join('static/uploads', filename)
if extension[-1] == 'pdf':
pdf_file_path = os.path.join(
'static/converted_files', filename)
data.save(pdf_file_path)
else:
data.save(file_path)
if extension[-1] == 'html':
if convert_html_to_pdf(file_path, extension[0]):
print('File Converted to PDF Successfully !!')
else:
raise Exception('Something Went Wrong !')
elif extension[-1] == 'docx' or extension[-1] == 'doc':
if convert_doc_to_pdf(file_path):
print('File Converted to PDF Successfully !!')
else:
raise Exception('Something Went Wrong !')
return jsonify({'status': 'success', 'message':
'File Uploaded Successfully !!'})
else:
return jsonify({'status': 'failed', 'message':
'Format Not Allowed !!'})
else:
return jsonify({'status': 'failed'})
except Exception as e:
print('Exception Occurred', e)
return jsonify({'status': 'exception', 'message':
'Something Went Wrong !!'})
else:
return jsonify({'status': 'failed', 'message': 'Method Not Allowed !'})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9000)
| from flask import Flask, jsonify, request, render_template
from werkzeug import secure_filename
import os
from utils import allowed_file, convert_html_to_pdf, convert_doc_to_pdf
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
""" Renders Index.html """
try:
return render_template('index.html')
except Exception as e:
print("Exception Occurred", e)
return jsonify({"status": "failed", "message": "Something Went Wrong !!"})
@app.route('/upload', methods=['POST'])
def file_converter():
"""
Function Processing Steps:
Step-1 : Check uploaded file extension ,if accepted format process further
Step-2 : Save the files into uploads folder
Step-3 : Convert the html,doc and docx files into pdf file and stores into converted_files folder
    Note : If file is already in pdf format then the file will be saved directly in the converted_files
folder without other action.
"""
if request.method == "POST":
try:
files = request.files.getlist('file')
print("files", files)
if len(files) > 0:
for data in files:
if allowed_file(data.filename):
filename = secure_filename(data.filename)
extension = filename.split('.')
file_path = os.path.join('static/uploads', filename)
if extension[-1] == 'pdf':
pdf_file_path = os.path.join('static/converted_files', filename)
data.save(pdf_file_path)
else:
data.save(file_path)
if extension[-1] == 'html':
if convert_html_to_pdf(file_path, extension[0]):
print("File Converted to PDF Successfully !!")
else:
raise Exception('Something Went Wrong !')
elif extension[-1] == "docx" or extension[-1] == "doc":
if convert_doc_to_pdf(file_path):
print("File Converted to PDF Successfully !!")
else:
raise Exception('Something Went Wrong !')
return jsonify({"status": "success", "message": "File Uploaded Successfully !!"})
else:
return jsonify({"status": "failed", "message": "Format Not Allowed !!"})
else:
return jsonify({"status": "failed"})
except Exception as e:
print("Exception Occurred", e)
return jsonify({"status": "exception", "message": "Something Went Wrong !!"})
else:
return jsonify({"status": "failed", "message": "Method Not Allowed !"})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9000)
| [
2,
3,
4,
5,
6
] |
869 | f080191fec4e56adc4013da74c840817e88caf56 | <mask token>
@spotify.route('/callback')
def callback():
auth_code = request.args['code']
code_payload = {'grant_type': 'authorization_code', 'code': str(
auth_code), 'redirect_uri': REDIRECT_URI}
base64encoded = base64.b64encode(bytes('{}:{}'.format(SPOTIFY_CLIENT_ID,
SPOTIFY_CLIENT_SECRET), 'utf-8'))
headers = {'Authorization': 'Basic {}'.format(base64encoded.decode(
'utf-8'))}
post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload,
headers=headers)
response_data = json.loads(post_request.text)
access_token = response_data['access_token']
print(access_token)
refresh_token = response_data['refresh_token']
token_type = response_data['token_type']
expires_in = response_data['expires_in']
redirect_to_index = redirect('http://localhost:3000/')
response = make_response(redirect_to_index)
response.set_cookie('access_token', value=access_token)
response.set_cookie('refresh_token', value=refresh_token)
return response
@spotify.route('/refresh_token', methods=['POST'])
def refresh_token():
r = request.get_json()
refresh_token = r['refresh_token']
code_payload = {'grant_type': 'refresh_token', 'refresh_token':
refresh_token}
base64encoded = base64.b64encode(bytes('{}:{}'.format(SPOTIFY_CLIENT_ID,
SPOTIFY_CLIENT_SECRET), 'utf-8'))
headers = {'Authorization': 'Basic {}'.format(base64encoded.decode(
'utf-8'))}
post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload,
headers=headers)
response_data = json.loads(post_request.text)
return jsonify(response_data)
| <mask token>
@spotify.route('/login')
def login():
url_args = urlencode(auth_query_parameters)
print(url_args)
auth_url = '{}/?{}'.format(SPOTIFY_AUTH_URL, url_args)
return redirect(auth_url)
@spotify.route('/callback')
def callback():
auth_code = request.args['code']
code_payload = {'grant_type': 'authorization_code', 'code': str(
auth_code), 'redirect_uri': REDIRECT_URI}
base64encoded = base64.b64encode(bytes('{}:{}'.format(SPOTIFY_CLIENT_ID,
SPOTIFY_CLIENT_SECRET), 'utf-8'))
headers = {'Authorization': 'Basic {}'.format(base64encoded.decode(
'utf-8'))}
post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload,
headers=headers)
response_data = json.loads(post_request.text)
access_token = response_data['access_token']
print(access_token)
refresh_token = response_data['refresh_token']
token_type = response_data['token_type']
expires_in = response_data['expires_in']
redirect_to_index = redirect('http://localhost:3000/')
response = make_response(redirect_to_index)
response.set_cookie('access_token', value=access_token)
response.set_cookie('refresh_token', value=refresh_token)
return response
@spotify.route('/refresh_token', methods=['POST'])
def refresh_token():
r = request.get_json()
refresh_token = r['refresh_token']
code_payload = {'grant_type': 'refresh_token', 'refresh_token':
refresh_token}
base64encoded = base64.b64encode(bytes('{}:{}'.format(SPOTIFY_CLIENT_ID,
SPOTIFY_CLIENT_SECRET), 'utf-8'))
headers = {'Authorization': 'Basic {}'.format(base64encoded.decode(
'utf-8'))}
post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload,
headers=headers)
response_data = json.loads(post_request.text)
return jsonify(response_data)
| <mask token>
spotify = Blueprint('spotify', __name__)
SPOTIFY_CLIENT_ID = os.environ.get('SPOTIFY_CLIENT_ID')
SPOTIFY_CLIENT_SECRET = os.environ.get('SPOTIFY_CLIENT_SECRET')
SPOTIFY_AUTH_URL = 'https://accounts.spotify.com/authorize'
SPOTIFY_TOKEN_URL = 'https://accounts.spotify.com/api/token'
SPOTIFY_API_BASE_URL = 'https://api.spotify.com'
API_VERSION = 'v1'
SPOTIFY_API_URL = '{}/{}'.format(SPOTIFY_API_BASE_URL, API_VERSION)
CLIENT_SIDE_URL = 'http://localhost'
PORT = 8888
REDIRECT_URI = '{}:{}/callback'.format(CLIENT_SIDE_URL, PORT)
SCOPE = (
'playlist-modify-public playlist-modify-private user-read-currently-playing'
)
STATE = ''
SHOW_DIALOG_BOOL = True
SHOW_DIALOG_STR = str(SHOW_DIALOG_BOOL).lower()
auth_query_parameters = {'response_type': 'code', 'redirect_uri':
REDIRECT_URI, 'scope': SCOPE, 'client_id': SPOTIFY_CLIENT_ID}
@spotify.route('/login')
def login():
url_args = urlencode(auth_query_parameters)
print(url_args)
auth_url = '{}/?{}'.format(SPOTIFY_AUTH_URL, url_args)
return redirect(auth_url)
@spotify.route('/callback')
def callback():
auth_code = request.args['code']
code_payload = {'grant_type': 'authorization_code', 'code': str(
auth_code), 'redirect_uri': REDIRECT_URI}
base64encoded = base64.b64encode(bytes('{}:{}'.format(SPOTIFY_CLIENT_ID,
SPOTIFY_CLIENT_SECRET), 'utf-8'))
headers = {'Authorization': 'Basic {}'.format(base64encoded.decode(
'utf-8'))}
post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload,
headers=headers)
response_data = json.loads(post_request.text)
access_token = response_data['access_token']
print(access_token)
refresh_token = response_data['refresh_token']
token_type = response_data['token_type']
expires_in = response_data['expires_in']
redirect_to_index = redirect('http://localhost:3000/')
response = make_response(redirect_to_index)
response.set_cookie('access_token', value=access_token)
response.set_cookie('refresh_token', value=refresh_token)
return response
@spotify.route('/refresh_token', methods=['POST'])
def refresh_token():
r = request.get_json()
refresh_token = r['refresh_token']
code_payload = {'grant_type': 'refresh_token', 'refresh_token':
refresh_token}
base64encoded = base64.b64encode(bytes('{}:{}'.format(SPOTIFY_CLIENT_ID,
SPOTIFY_CLIENT_SECRET), 'utf-8'))
headers = {'Authorization': 'Basic {}'.format(base64encoded.decode(
'utf-8'))}
post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload,
headers=headers)
response_data = json.loads(post_request.text)
return jsonify(response_data)
| import os
import base64
from urllib.parse import urlencode
import json
from flask import Blueprint, request, redirect, jsonify, make_response
import requests
spotify = Blueprint('spotify', __name__)
SPOTIFY_CLIENT_ID = os.environ.get('SPOTIFY_CLIENT_ID')
SPOTIFY_CLIENT_SECRET = os.environ.get('SPOTIFY_CLIENT_SECRET')
SPOTIFY_AUTH_URL = 'https://accounts.spotify.com/authorize'
SPOTIFY_TOKEN_URL = 'https://accounts.spotify.com/api/token'
SPOTIFY_API_BASE_URL = 'https://api.spotify.com'
API_VERSION = 'v1'
SPOTIFY_API_URL = '{}/{}'.format(SPOTIFY_API_BASE_URL, API_VERSION)
CLIENT_SIDE_URL = 'http://localhost'
PORT = 8888
REDIRECT_URI = '{}:{}/callback'.format(CLIENT_SIDE_URL, PORT)
SCOPE = (
'playlist-modify-public playlist-modify-private user-read-currently-playing'
)
STATE = ''
SHOW_DIALOG_BOOL = True
SHOW_DIALOG_STR = str(SHOW_DIALOG_BOOL).lower()
auth_query_parameters = {'response_type': 'code', 'redirect_uri':
REDIRECT_URI, 'scope': SCOPE, 'client_id': SPOTIFY_CLIENT_ID}
@spotify.route('/login')
def login():
url_args = urlencode(auth_query_parameters)
print(url_args)
auth_url = '{}/?{}'.format(SPOTIFY_AUTH_URL, url_args)
return redirect(auth_url)
@spotify.route('/callback')
def callback():
auth_code = request.args['code']
code_payload = {'grant_type': 'authorization_code', 'code': str(
auth_code), 'redirect_uri': REDIRECT_URI}
base64encoded = base64.b64encode(bytes('{}:{}'.format(SPOTIFY_CLIENT_ID,
SPOTIFY_CLIENT_SECRET), 'utf-8'))
headers = {'Authorization': 'Basic {}'.format(base64encoded.decode(
'utf-8'))}
post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload,
headers=headers)
response_data = json.loads(post_request.text)
access_token = response_data['access_token']
print(access_token)
refresh_token = response_data['refresh_token']
token_type = response_data['token_type']
expires_in = response_data['expires_in']
redirect_to_index = redirect('http://localhost:3000/')
response = make_response(redirect_to_index)
response.set_cookie('access_token', value=access_token)
response.set_cookie('refresh_token', value=refresh_token)
return response
@spotify.route('/refresh_token', methods=['POST'])
def refresh_token():
r = request.get_json()
refresh_token = r['refresh_token']
code_payload = {'grant_type': 'refresh_token', 'refresh_token':
refresh_token}
base64encoded = base64.b64encode(bytes('{}:{}'.format(SPOTIFY_CLIENT_ID,
SPOTIFY_CLIENT_SECRET), 'utf-8'))
headers = {'Authorization': 'Basic {}'.format(base64encoded.decode(
'utf-8'))}
post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload,
headers=headers)
response_data = json.loads(post_request.text)
return jsonify(response_data)
| import os
import base64
from urllib.parse import urlencode
import json
from flask import Blueprint, request, redirect, jsonify, make_response
import requests
spotify = Blueprint('spotify', __name__)
# Client Keys
SPOTIFY_CLIENT_ID = os.environ.get('SPOTIFY_CLIENT_ID')
SPOTIFY_CLIENT_SECRET = os.environ.get('SPOTIFY_CLIENT_SECRET')
# Spotify URLS
SPOTIFY_AUTH_URL = "https://accounts.spotify.com/authorize"
SPOTIFY_TOKEN_URL = "https://accounts.spotify.com/api/token"
SPOTIFY_API_BASE_URL = "https://api.spotify.com"
API_VERSION = "v1"
SPOTIFY_API_URL = "{}/{}".format(SPOTIFY_API_BASE_URL, API_VERSION)
# Server-side Parameters
CLIENT_SIDE_URL = "http://localhost"
PORT = 8888
REDIRECT_URI = "{}:{}/callback".format(CLIENT_SIDE_URL, PORT)
SCOPE = "playlist-modify-public playlist-modify-private user-read-currently-playing"
STATE = ""
SHOW_DIALOG_BOOL = True
SHOW_DIALOG_STR = str(SHOW_DIALOG_BOOL).lower()
auth_query_parameters = {
"response_type": "code",
"redirect_uri": REDIRECT_URI,
"scope": SCOPE,
# "state": STATE,
# "show_dialog": SHOW_DIALOG_str,
"client_id": SPOTIFY_CLIENT_ID
}
@spotify.route("/login")
def login():
# Auth Step 1: Authorization
url_args = urlencode(auth_query_parameters)
print(url_args)
auth_url = "{}/?{}".format(SPOTIFY_AUTH_URL, url_args)
return redirect(auth_url)
@spotify.route("/callback")
def callback():
# Auth Step 4: Requests refresh and access tokens
auth_code = request.args['code']
code_payload = {
"grant_type": "authorization_code",
"code": str(auth_code),
"redirect_uri": REDIRECT_URI
}
base64encoded = base64.b64encode(bytes("{}:{}".format(SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET), 'utf-8'))
headers = {"Authorization": "Basic {}".format(base64encoded.decode('utf-8'))}
post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload, headers=headers)
# Auth Step 5: Tokens are Returned to Application
response_data = json.loads(post_request.text)
access_token = response_data["access_token"]
print(access_token)
refresh_token = response_data["refresh_token"]
token_type = response_data["token_type"]
expires_in = response_data["expires_in"]
redirect_to_index = redirect("http://localhost:3000/")
response = make_response(redirect_to_index)
response.set_cookie('access_token', value=access_token)
response.set_cookie('refresh_token', value=refresh_token)
return response
@spotify.route("/refresh_token", methods=['POST'])
def refresh_token():
# 7. Requesting access token from refresh token
r = request.get_json()
refresh_token = r['refresh_token']
code_payload = {
"grant_type": "refresh_token",
"refresh_token": refresh_token
}
base64encoded = base64.b64encode(bytes("{}:{}".format(SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET), 'utf-8'))
headers = {"Authorization": "Basic {}".format(base64encoded.decode('utf-8'))}
post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload, headers=headers)
response_data = json.loads(post_request.text)
return jsonify(response_data)
| [
2,
3,
4,
5,
6
] |
870 | d4d19411f0c48ffb99bd17e8387f1741144e43b4 | <mask token>
@shared_task(name='clean_tweetdb')
def clean_tweetdb():
tweets = Tweet.objects.all()
    for tweet in tweets:
        if tweet.tweet_date <= datetime.now() - timedelta(days=8):
            tweet.delete()
@shared_task(name='get_tweets')
def get_tweets():
"""Get some tweets from the twitter api and store them to the db."""
clean_tweetdb.delay()
db_tweets = Tweet.objects.all()
max_id = min([tweet.tweet_id for tweet in db_tweets])
tweets = api.search(q='#python', max_id=max_id, count=100)
tweets_id = [tweet.id for tweet in tweets]
tweets_date = [tweet.created_at for tweet in tweets]
tweets_source = [tweet.source for tweet in tweets]
tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]
tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]
tweets_text = [tweet.text for tweet in tweets]
for i, j, k, l, m, n in zip(tweets_id, tweets_date, tweets_source,
tweets_favorite_cnt, tweets_retweet_cnt, tweets_text):
try:
Tweet.objects.create(tweet_id=i, tweet_date=j, tweet_source=k,
tweet_favorite_cnt=l, tweet_retweet_cnt=m, tweet_text=n)
except IntegrityError:
pass
| <mask token>
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
<mask token>
@shared_task(name='clean_tweetdb')
def clean_tweetdb():
tweets = Tweet.objects.all()
    for tweet in tweets:
        if tweet.tweet_date <= datetime.now() - timedelta(days=8):
            tweet.delete()
@shared_task(name='get_tweets')
def get_tweets():
"""Get some tweets from the twitter api and store them to the db."""
clean_tweetdb.delay()
db_tweets = Tweet.objects.all()
max_id = min([tweet.tweet_id for tweet in db_tweets])
tweets = api.search(q='#python', max_id=max_id, count=100)
tweets_id = [tweet.id for tweet in tweets]
tweets_date = [tweet.created_at for tweet in tweets]
tweets_source = [tweet.source for tweet in tweets]
tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]
tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]
tweets_text = [tweet.text for tweet in tweets]
for i, j, k, l, m, n in zip(tweets_id, tweets_date, tweets_source,
tweets_favorite_cnt, tweets_retweet_cnt, tweets_text):
try:
Tweet.objects.create(tweet_id=i, tweet_date=j, tweet_source=k,
tweet_favorite_cnt=l, tweet_retweet_cnt=m, tweet_text=n)
except IntegrityError:
pass
| <mask token>
CONSUMER_KEY = 'Vp7FVQLSwESvE9oTQruw0TnhW'
CONSUMER_SECRET = 'miy6EsGklNYxAaVn37vTjAVGwP0c67IOyuY71AAyL1p2Ba4VPN'
ACCESS_TOKEN = '1952022900-5WAHk6l5d3GllFtqDPaucSpnraIokE6hU7aBxNJ'
ACCESS_TOKEN_SECRET = 'ekONOf6QxJG6Lq3k2kznfQ16x12BGm909wckYFcP8SlYZ'
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
@shared_task(name='clean_tweetdb')
def clean_tweetdb():
tweets = Tweet.objects.all()
    for tweet in tweets:
        if tweet.tweet_date <= datetime.now() - timedelta(days=8):
            tweet.delete()
@shared_task(name='get_tweets')
def get_tweets():
"""Get some tweets from the twitter api and store them to the db."""
clean_tweetdb.delay()
db_tweets = Tweet.objects.all()
max_id = min([tweet.tweet_id for tweet in db_tweets])
tweets = api.search(q='#python', max_id=max_id, count=100)
tweets_id = [tweet.id for tweet in tweets]
tweets_date = [tweet.created_at for tweet in tweets]
tweets_source = [tweet.source for tweet in tweets]
tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]
tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]
tweets_text = [tweet.text for tweet in tweets]
for i, j, k, l, m, n in zip(tweets_id, tweets_date, tweets_source,
tweets_favorite_cnt, tweets_retweet_cnt, tweets_text):
try:
Tweet.objects.create(tweet_id=i, tweet_date=j, tweet_source=k,
tweet_favorite_cnt=l, tweet_retweet_cnt=m, tweet_text=n)
except IntegrityError:
pass
| from celery import shared_task
import tweepy
from datetime import datetime, timedelta
from .models import Tweet
from django.db import IntegrityError
CONSUMER_KEY = 'Vp7FVQLSwESvE9oTQruw0TnhW'
CONSUMER_SECRET = 'miy6EsGklNYxAaVn37vTjAVGwP0c67IOyuY71AAyL1p2Ba4VPN'
ACCESS_TOKEN = '1952022900-5WAHk6l5d3GllFtqDPaucSpnraIokE6hU7aBxNJ'
ACCESS_TOKEN_SECRET = 'ekONOf6QxJG6Lq3k2kznfQ16x12BGm909wckYFcP8SlYZ'
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
@shared_task(name='clean_tweetdb')
def clean_tweetdb():
tweets = Tweet.objects.all()
    for tweet in tweets:
        if tweet.tweet_date <= datetime.now() - timedelta(days=8):
            tweet.delete()
@shared_task(name='get_tweets')
def get_tweets():
"""Get some tweets from the twitter api and store them to the db."""
clean_tweetdb.delay()
db_tweets = Tweet.objects.all()
max_id = min([tweet.tweet_id for tweet in db_tweets])
tweets = api.search(q='#python', max_id=max_id, count=100)
tweets_id = [tweet.id for tweet in tweets]
tweets_date = [tweet.created_at for tweet in tweets]
tweets_source = [tweet.source for tweet in tweets]
tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]
tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]
tweets_text = [tweet.text for tweet in tweets]
for i, j, k, l, m, n in zip(tweets_id, tweets_date, tweets_source,
tweets_favorite_cnt, tweets_retweet_cnt, tweets_text):
try:
Tweet.objects.create(tweet_id=i, tweet_date=j, tweet_source=k,
tweet_favorite_cnt=l, tweet_retweet_cnt=m, tweet_text=n)
except IntegrityError:
pass
| from celery import shared_task
import tweepy
from datetime import datetime, timedelta
from .models import Tweet
from django.db import IntegrityError
CONSUMER_KEY = 'Vp7FVQLSwESvE9oTQruw0TnhW'
CONSUMER_SECRET = 'miy6EsGklNYxAaVn37vTjAVGwP0c67IOyuY71AAyL1p2Ba4VPN'
ACCESS_TOKEN = '1952022900-5WAHk6l5d3GllFtqDPaucSpnraIokE6hU7aBxNJ'
ACCESS_TOKEN_SECRET = 'ekONOf6QxJG6Lq3k2kznfQ16x12BGm909wckYFcP8SlYZ'
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
@shared_task(name='clean_tweetdb')
def clean_tweetdb():
tweets = Tweet.objects.all()
    for tweet in tweets:
        if tweet.tweet_date <= datetime.now() - timedelta(days=8):
            tweet.delete()
@shared_task(name='get_tweets')
def get_tweets():
"""Get some tweets from the twitter api and store them to the db."""
clean_tweetdb.delay()
db_tweets = Tweet.objects.all()
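    # max_id pages backwards: the search returns only tweets no newer than the oldest id already stored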
max_id = min([tweet.tweet_id for tweet in db_tweets])
tweets = api.search(
q='#python',
max_id=max_id,
count=100
)
tweets_id = [tweet.id for tweet in tweets]
tweets_date = [tweet.created_at for tweet in tweets]
tweets_source = [tweet.source for tweet in tweets]
tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]
tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]
tweets_text = [tweet.text for tweet in tweets]
for i, j, k, l, m, n in zip(
tweets_id,
tweets_date,
tweets_source,
tweets_favorite_cnt,
tweets_retweet_cnt,
tweets_text,
):
try:
Tweet.objects.create(
tweet_id=i,
tweet_date=j,
tweet_source=k,
tweet_favorite_cnt=l,
tweet_retweet_cnt=m,
tweet_text=n,
)
except IntegrityError:
pass
| [
2,
3,
4,
5,
6
] |
871 | 879482e4df9c3d7f32d9b2a883201ae043e1189f | <mask token>
def get_similar_words(words):
words = [w.lower() for w in words]
if len(words) > 1:
maxScore = 0
firstWord = ''
secondWord = ''
labelCom = list(combinations(words, 2))
for i in labelCom:
labelMean1 = wn.synsets(i[0])
labelMean2 = wn.synsets(i[1])
for j in labelMean1:
for k in labelMean2:
if j.wup_similarity(k) is not None:
if j.wup_similarity(k) > maxScore:
maxScore = j.wup_similarity(k)
firstWord = j
secondWord = k
print('兩個詞的語意獲得最高分(語意相近)')
print('score : {}'.format(maxScore))
print('firstWord : {}'.format(firstWord))
print('secondWord : {}'.format(secondWord))
print('\n')
if type(firstWord) == type(''):
return get_similar_words(list(words[0]))
else:
print(firstWord, firstWord.definition())
print(secondWord, secondWord.definition())
print('\n')
return [firstWord, secondWord]
else:
synSetList = []
for i in range(len(words)):
labelMean1 = wn.synsets(words[i])
for j in labelMean1:
synSetList.append(j)
return synSetList
<mask token>
| <mask token>
with open(usersDataFile, 'r') as load_f:
usersData = json.load(load_f)
def get_similar_words(words):
words = [w.lower() for w in words]
if len(words) > 1:
maxScore = 0
firstWord = ''
secondWord = ''
labelCom = list(combinations(words, 2))
for i in labelCom:
labelMean1 = wn.synsets(i[0])
labelMean2 = wn.synsets(i[1])
for j in labelMean1:
for k in labelMean2:
if j.wup_similarity(k) is not None:
if j.wup_similarity(k) > maxScore:
maxScore = j.wup_similarity(k)
firstWord = j
secondWord = k
print('兩個詞的語意獲得最高分(語意相近)')
print('score : {}'.format(maxScore))
print('firstWord : {}'.format(firstWord))
print('secondWord : {}'.format(secondWord))
print('\n')
if type(firstWord) == type(''):
return get_similar_words(list(words[0]))
else:
print(firstWord, firstWord.definition())
print(secondWord, secondWord.definition())
print('\n')
return [firstWord, secondWord]
else:
synSetList = []
for i in range(len(words)):
labelMean1 = wn.synsets(words[i])
for j in labelMean1:
synSetList.append(j)
return synSetList
def getWordNetScore(model):
new_dic = {}
scoreFile = '{}\\{}.json'.format(scorePath, model)
print(scoreFile)
if not os.path.exists(scoreFile):
with open(scoreFile, 'w') as dump_f:
new_dic['50'] = list()
new_dic['100'] = list()
new_dic['150'] = list()
new_dic['200'] = list()
new_dic['250'] = list()
new_dic['300'] = list()
json.dump(new_dic, dump_f)
with open(scoreFile, 'r') as load_f:
load_dict = json.load(load_f)
for user in usersData:
print('\n')
print(user)
print('\n')
countPost = 0
countLike = 0
countComment = 0
imageScoreDic = {}
videoScoreDic = {}
countImages = 0
for t in myTypes:
imageScoreDic[t] = 0
countVideos = 0
for t in myTypes:
videoScoreDic[t] = 0
for timestamp in usersData[user]['data']:
countPost += 1
countLike += usersData[user]['data'][timestamp]['likes']
countComment += usersData[user]['data'][timestamp]['comments']
if usersData[user]['data'][timestamp]['is_video']:
countVideos += 1
else:
countImages += 1
if 'labels' not in usersData[user]['data'][timestamp]:
print(user)
print(timestamp)
print(usersData[user]['data'][timestamp])
if len(usersData[user]['data'][timestamp]['labels']) > 0:
synsetWords = get_similar_words(usersData[user]['data'][
timestamp]['labels'])
if len(synsetWords) == 2:
for t in myTypes:
standard = wn.synsets(t)
firstWordMaxWordSimilarity = 0
secondWordMaxWordSimilarity = 0
for k in standard:
if synsetWords[0].wup_similarity(k) is not None:
if synsetWords[0].wup_similarity(k
) > firstWordMaxWordSimilarity:
firstWordMaxWordSimilarity = synsetWords[0
].wup_similarity(k)
print('{} vs {} = {}'.format(
synsetWords[0], k,
firstWordMaxWordSimilarity))
if synsetWords[1].wup_similarity(k) is not None:
if synsetWords[1].wup_similarity(k
) > secondWordMaxWordSimilarity:
secondWordMaxWordSimilarity = synsetWords[1
].wup_similarity(k)
print('{} vs {} = {}'.format(
synsetWords[1], k,
secondWordMaxWordSimilarity))
maxScore = (firstWordMaxWordSimilarity +
secondWordMaxWordSimilarity) / 2
if usersData[user]['data'][timestamp]['is_video']:
videoScoreDic[t] += maxScore - 0.05
else:
imageScoreDic[t] += maxScore - 0.05
else:
for t in myTypes:
maxScore = 0
standard = wn.synsets(t)
for k in standard:
for s in synsetWords:
if s.wup_similarity(k) is not None:
if s.wup_similarity(k) > maxScore:
maxScore = s.wup_similarity(k)
print('{} vs {} = {}'.format(s, k,
maxScore))
if usersData[user]['data'][timestamp]['is_video']:
videoScoreDic[t] += maxScore - 0.05
else:
imageScoreDic[t] += maxScore - 0.05
if countPost != 0 and countPost % 50 == 0:
print(countPost)
users = {load_dict[str(countPost)][i]['name']: i for i in
range(0, len(load_dict[str(countPost)]))}
try:
currentImgScoreDic = {t: round(imageScoreDic[t] /
countImages * 100, 3) for t in myTypes}
except:
currentImgScoreDic = {}
print('目前沒有圖片')
try:
currentVideoScoreDic = {t: round(videoScoreDic[t] /
countVideos * 100, 3) for t in myTypes}
except:
currentVideoScoreDic = {}
print('目前沒有影片')
if user in users:
load_dict[str(countPost)][users[user]]['follower'
] = usersData[user]['followers']
load_dict[str(countPost)][users[user]]['like'] = round(
countLike / countPost, 3)
load_dict[str(countPost)][users[user]]['comment'] = round(
countComment / countPost, 3)
load_dict[str(countPost)][users[user]]['image']['amount'
] = countImages
load_dict[str(countPost)][users[user]]['image']['score'
] = currentImgScoreDic
load_dict[str(countPost)][users[user]]['video']['amount'
] = countVideos
load_dict[str(countPost)][users[user]]['video']['score'
] = currentVideoScoreDic
load_dict[str(countPost)][users[user]]['ERate'] = round(
(countLike / countPost + countComment / countPost) /
usersData[user]['followers'], 5)
else:
new_dic = {}
new_dic['name'] = user
new_dic['follower'] = usersData[user]['followers']
new_dic['like'] = round(countLike / countPost, 3)
new_dic['comment'] = round(countComment / countPost, 3)
new_dic['image'] = {}
new_dic['image']['amount'] = countImages
new_dic['image']['score'] = currentImgScoreDic
new_dic['video'] = {}
new_dic['video']['amount'] = countVideos
new_dic['video']['score'] = currentVideoScoreDic
new_dic['ERate'] = round((countLike / countPost +
countComment / countPost) / usersData[user][
'followers'], 5)
load_dict[str(countPost)].append(new_dic)
if countPost == 300:
break
if countPost < 300:
if countPost > 250:
countPost = 300
elif countPost > 200:
countPost = 250
elif countPost > 150:
countPost = 200
elif countPost > 100:
countPost = 150
elif countPost > 50:
countPost = 100
else:
countPost = 50
users = {load_dict[str(countPost - 50)][i]['name']: i for i in
range(0, len(load_dict[str(countPost - 50)]))}
finalDic = load_dict[str(countPost - 50)][users[user]]
while countPost <= 300:
users = {load_dict[str(countPost)][i]['name']: i for i in
range(0, len(load_dict[str(countPost)]))}
if user in users:
load_dict[str(countPost)][users[user]] = finalDic
else:
load_dict[str(countPost)].append(finalDic)
countPost += 50
with open(scoreFile, 'w') as dump_f:
json.dump(load_dict, dump_f)
if __name__ == '__main__':
getWordNetScore('wordNet')
| <mask token>
myTypes = ['animal', 'vehicle', 'food', 'fashion', 'dog', 'cat', 'car',
'motorcycle']
scorePath = '..\\data\\score'
usersDataFile = '..\\data\\usersData.json'
with open(usersDataFile, 'r') as load_f:
usersData = json.load(load_f)
def get_similar_words(words):
words = [w.lower() for w in words]
if len(words) > 1:
maxScore = 0
firstWord = ''
secondWord = ''
labelCom = list(combinations(words, 2))
for i in labelCom:
labelMean1 = wn.synsets(i[0])
labelMean2 = wn.synsets(i[1])
for j in labelMean1:
for k in labelMean2:
if j.wup_similarity(k) is not None:
if j.wup_similarity(k) > maxScore:
maxScore = j.wup_similarity(k)
firstWord = j
secondWord = k
print('兩個詞的語意獲得最高分(語意相近)')
print('score : {}'.format(maxScore))
print('firstWord : {}'.format(firstWord))
print('secondWord : {}'.format(secondWord))
print('\n')
if type(firstWord) == type(''):
return get_similar_words(list(words[0]))
else:
print(firstWord, firstWord.definition())
print(secondWord, secondWord.definition())
print('\n')
return [firstWord, secondWord]
else:
synSetList = []
for i in range(len(words)):
labelMean1 = wn.synsets(words[i])
for j in labelMean1:
synSetList.append(j)
return synSetList
def getWordNetScore(model):
new_dic = {}
scoreFile = '{}\\{}.json'.format(scorePath, model)
print(scoreFile)
if not os.path.exists(scoreFile):
with open(scoreFile, 'w') as dump_f:
new_dic['50'] = list()
new_dic['100'] = list()
new_dic['150'] = list()
new_dic['200'] = list()
new_dic['250'] = list()
new_dic['300'] = list()
json.dump(new_dic, dump_f)
with open(scoreFile, 'r') as load_f:
load_dict = json.load(load_f)
for user in usersData:
print('\n')
print(user)
print('\n')
countPost = 0
countLike = 0
countComment = 0
imageScoreDic = {}
videoScoreDic = {}
countImages = 0
for t in myTypes:
imageScoreDic[t] = 0
countVideos = 0
for t in myTypes:
videoScoreDic[t] = 0
for timestamp in usersData[user]['data']:
countPost += 1
countLike += usersData[user]['data'][timestamp]['likes']
countComment += usersData[user]['data'][timestamp]['comments']
if usersData[user]['data'][timestamp]['is_video']:
countVideos += 1
else:
countImages += 1
if 'labels' not in usersData[user]['data'][timestamp]:
print(user)
print(timestamp)
print(usersData[user]['data'][timestamp])
if len(usersData[user]['data'][timestamp]['labels']) > 0:
synsetWords = get_similar_words(usersData[user]['data'][
timestamp]['labels'])
if len(synsetWords) == 2:
for t in myTypes:
standard = wn.synsets(t)
firstWordMaxWordSimilarity = 0
secondWordMaxWordSimilarity = 0
for k in standard:
if synsetWords[0].wup_similarity(k) is not None:
if synsetWords[0].wup_similarity(k
) > firstWordMaxWordSimilarity:
firstWordMaxWordSimilarity = synsetWords[0
].wup_similarity(k)
print('{} vs {} = {}'.format(
synsetWords[0], k,
firstWordMaxWordSimilarity))
if synsetWords[1].wup_similarity(k) is not None:
if synsetWords[1].wup_similarity(k
) > secondWordMaxWordSimilarity:
secondWordMaxWordSimilarity = synsetWords[1
].wup_similarity(k)
print('{} vs {} = {}'.format(
synsetWords[1], k,
secondWordMaxWordSimilarity))
maxScore = (firstWordMaxWordSimilarity +
secondWordMaxWordSimilarity) / 2
if usersData[user]['data'][timestamp]['is_video']:
videoScoreDic[t] += maxScore - 0.05
else:
imageScoreDic[t] += maxScore - 0.05
else:
for t in myTypes:
maxScore = 0
standard = wn.synsets(t)
for k in standard:
for s in synsetWords:
if s.wup_similarity(k) is not None:
if s.wup_similarity(k) > maxScore:
maxScore = s.wup_similarity(k)
print('{} vs {} = {}'.format(s, k,
maxScore))
if usersData[user]['data'][timestamp]['is_video']:
videoScoreDic[t] += maxScore - 0.05
else:
imageScoreDic[t] += maxScore - 0.05
if countPost != 0 and countPost % 50 == 0:
print(countPost)
users = {load_dict[str(countPost)][i]['name']: i for i in
range(0, len(load_dict[str(countPost)]))}
try:
currentImgScoreDic = {t: round(imageScoreDic[t] /
countImages * 100, 3) for t in myTypes}
except:
currentImgScoreDic = {}
print('目前沒有圖片')
try:
currentVideoScoreDic = {t: round(videoScoreDic[t] /
countVideos * 100, 3) for t in myTypes}
except:
currentVideoScoreDic = {}
print('目前沒有影片')
if user in users:
load_dict[str(countPost)][users[user]]['follower'
] = usersData[user]['followers']
load_dict[str(countPost)][users[user]]['like'] = round(
countLike / countPost, 3)
load_dict[str(countPost)][users[user]]['comment'] = round(
countComment / countPost, 3)
load_dict[str(countPost)][users[user]]['image']['amount'
] = countImages
load_dict[str(countPost)][users[user]]['image']['score'
] = currentImgScoreDic
load_dict[str(countPost)][users[user]]['video']['amount'
] = countVideos
load_dict[str(countPost)][users[user]]['video']['score'
] = currentVideoScoreDic
load_dict[str(countPost)][users[user]]['ERate'] = round(
(countLike / countPost + countComment / countPost) /
usersData[user]['followers'], 5)
else:
new_dic = {}
new_dic['name'] = user
new_dic['follower'] = usersData[user]['followers']
new_dic['like'] = round(countLike / countPost, 3)
new_dic['comment'] = round(countComment / countPost, 3)
new_dic['image'] = {}
new_dic['image']['amount'] = countImages
new_dic['image']['score'] = currentImgScoreDic
new_dic['video'] = {}
new_dic['video']['amount'] = countVideos
new_dic['video']['score'] = currentVideoScoreDic
new_dic['ERate'] = round((countLike / countPost +
countComment / countPost) / usersData[user][
'followers'], 5)
load_dict[str(countPost)].append(new_dic)
if countPost == 300:
break
if countPost < 300:
if countPost > 250:
countPost = 300
elif countPost > 200:
countPost = 250
elif countPost > 150:
countPost = 200
elif countPost > 100:
countPost = 150
elif countPost > 50:
countPost = 100
else:
countPost = 50
users = {load_dict[str(countPost - 50)][i]['name']: i for i in
range(0, len(load_dict[str(countPost - 50)]))}
finalDic = load_dict[str(countPost - 50)][users[user]]
while countPost <= 300:
users = {load_dict[str(countPost)][i]['name']: i for i in
range(0, len(load_dict[str(countPost)]))}
if user in users:
load_dict[str(countPost)][users[user]] = finalDic
else:
load_dict[str(countPost)].append(finalDic)
countPost += 50
with open(scoreFile, 'w') as dump_f:
json.dump(load_dict, dump_f)
if __name__ == '__main__':
getWordNetScore('wordNet')
| import os
import json
from nltk.corpus import wordnet as wn
from itertools import combinations
myTypes = ['animal', 'vehicle', 'food', 'fashion', 'dog', 'cat', 'car',
'motorcycle']
scorePath = '..\\data\\score'
usersDataFile = '..\\data\\usersData.json'
with open(usersDataFile, 'r') as load_f:
usersData = json.load(load_f)
def get_similar_words(words):
words = [w.lower() for w in words]
if len(words) > 1:
maxScore = 0
firstWord = ''
secondWord = ''
labelCom = list(combinations(words, 2))
for i in labelCom:
labelMean1 = wn.synsets(i[0])
labelMean2 = wn.synsets(i[1])
for j in labelMean1:
for k in labelMean2:
if j.wup_similarity(k) is not None:
if j.wup_similarity(k) > maxScore:
maxScore = j.wup_similarity(k)
firstWord = j
secondWord = k
print('兩個詞的語意獲得最高分(語意相近)')
print('score : {}'.format(maxScore))
print('firstWord : {}'.format(firstWord))
print('secondWord : {}'.format(secondWord))
print('\n')
if type(firstWord) == type(''):
return get_similar_words(list(words[0]))
else:
print(firstWord, firstWord.definition())
print(secondWord, secondWord.definition())
print('\n')
return [firstWord, secondWord]
else:
synSetList = []
for i in range(len(words)):
labelMean1 = wn.synsets(words[i])
for j in labelMean1:
synSetList.append(j)
return synSetList
def getWordNetScore(model):
new_dic = {}
scoreFile = '{}\\{}.json'.format(scorePath, model)
print(scoreFile)
if not os.path.exists(scoreFile):
with open(scoreFile, 'w') as dump_f:
new_dic['50'] = list()
new_dic['100'] = list()
new_dic['150'] = list()
new_dic['200'] = list()
new_dic['250'] = list()
new_dic['300'] = list()
json.dump(new_dic, dump_f)
with open(scoreFile, 'r') as load_f:
load_dict = json.load(load_f)
for user in usersData:
print('\n')
print(user)
print('\n')
countPost = 0
countLike = 0
countComment = 0
imageScoreDic = {}
videoScoreDic = {}
countImages = 0
for t in myTypes:
imageScoreDic[t] = 0
countVideos = 0
for t in myTypes:
videoScoreDic[t] = 0
for timestamp in usersData[user]['data']:
countPost += 1
countLike += usersData[user]['data'][timestamp]['likes']
countComment += usersData[user]['data'][timestamp]['comments']
if usersData[user]['data'][timestamp]['is_video']:
countVideos += 1
else:
countImages += 1
if 'labels' not in usersData[user]['data'][timestamp]:
print(user)
print(timestamp)
print(usersData[user]['data'][timestamp])
if len(usersData[user]['data'][timestamp]['labels']) > 0:
synsetWords = get_similar_words(usersData[user]['data'][
timestamp]['labels'])
if len(synsetWords) == 2:
for t in myTypes:
standard = wn.synsets(t)
firstWordMaxWordSimilarity = 0
secondWordMaxWordSimilarity = 0
for k in standard:
if synsetWords[0].wup_similarity(k) is not None:
if synsetWords[0].wup_similarity(k
) > firstWordMaxWordSimilarity:
firstWordMaxWordSimilarity = synsetWords[0
].wup_similarity(k)
print('{} vs {} = {}'.format(
synsetWords[0], k,
firstWordMaxWordSimilarity))
if synsetWords[1].wup_similarity(k) is not None:
if synsetWords[1].wup_similarity(k
) > secondWordMaxWordSimilarity:
secondWordMaxWordSimilarity = synsetWords[1
].wup_similarity(k)
print('{} vs {} = {}'.format(
synsetWords[1], k,
secondWordMaxWordSimilarity))
maxScore = (firstWordMaxWordSimilarity +
secondWordMaxWordSimilarity) / 2
if usersData[user]['data'][timestamp]['is_video']:
videoScoreDic[t] += maxScore - 0.05
else:
imageScoreDic[t] += maxScore - 0.05
else:
for t in myTypes:
maxScore = 0
standard = wn.synsets(t)
for k in standard:
for s in synsetWords:
if s.wup_similarity(k) is not None:
if s.wup_similarity(k) > maxScore:
maxScore = s.wup_similarity(k)
print('{} vs {} = {}'.format(s, k,
maxScore))
if usersData[user]['data'][timestamp]['is_video']:
videoScoreDic[t] += maxScore - 0.05
else:
imageScoreDic[t] += maxScore - 0.05
if countPost != 0 and countPost % 50 == 0:
print(countPost)
users = {load_dict[str(countPost)][i]['name']: i for i in
range(0, len(load_dict[str(countPost)]))}
try:
currentImgScoreDic = {t: round(imageScoreDic[t] /
countImages * 100, 3) for t in myTypes}
except:
currentImgScoreDic = {}
print('目前沒有圖片')
try:
currentVideoScoreDic = {t: round(videoScoreDic[t] /
countVideos * 100, 3) for t in myTypes}
except:
currentVideoScoreDic = {}
print('目前沒有影片')
if user in users:
load_dict[str(countPost)][users[user]]['follower'
] = usersData[user]['followers']
load_dict[str(countPost)][users[user]]['like'] = round(
countLike / countPost, 3)
load_dict[str(countPost)][users[user]]['comment'] = round(
countComment / countPost, 3)
load_dict[str(countPost)][users[user]]['image']['amount'
] = countImages
load_dict[str(countPost)][users[user]]['image']['score'
] = currentImgScoreDic
load_dict[str(countPost)][users[user]]['video']['amount'
] = countVideos
load_dict[str(countPost)][users[user]]['video']['score'
] = currentVideoScoreDic
load_dict[str(countPost)][users[user]]['ERate'] = round(
(countLike / countPost + countComment / countPost) /
usersData[user]['followers'], 5)
else:
new_dic = {}
new_dic['name'] = user
new_dic['follower'] = usersData[user]['followers']
new_dic['like'] = round(countLike / countPost, 3)
new_dic['comment'] = round(countComment / countPost, 3)
new_dic['image'] = {}
new_dic['image']['amount'] = countImages
new_dic['image']['score'] = currentImgScoreDic
new_dic['video'] = {}
new_dic['video']['amount'] = countVideos
new_dic['video']['score'] = currentVideoScoreDic
new_dic['ERate'] = round((countLike / countPost +
countComment / countPost) / usersData[user][
'followers'], 5)
load_dict[str(countPost)].append(new_dic)
if countPost == 300:
break
if countPost < 300:
if countPost > 250:
countPost = 300
elif countPost > 200:
countPost = 250
elif countPost > 150:
countPost = 200
elif countPost > 100:
countPost = 150
elif countPost > 50:
countPost = 100
else:
countPost = 50
users = {load_dict[str(countPost - 50)][i]['name']: i for i in
range(0, len(load_dict[str(countPost - 50)]))}
finalDic = load_dict[str(countPost - 50)][users[user]]
while countPost <= 300:
users = {load_dict[str(countPost)][i]['name']: i for i in
range(0, len(load_dict[str(countPost)]))}
if user in users:
load_dict[str(countPost)][users[user]] = finalDic
else:
load_dict[str(countPost)].append(finalDic)
countPost += 50
with open(scoreFile, 'w') as dump_f:
json.dump(load_dict, dump_f)
if __name__ == '__main__':
getWordNetScore('wordNet')
| import os
import json
from nltk.corpus import wordnet as wn
from itertools import combinations #計算排列組合
# 需要被計算的分類
myTypes = ['animal', 'vehicle', 'food', 'fashion', 'dog', 'cat', 'car', 'motorcycle']
# 計算完網紅權重存放的位置
scorePath = "..\\data\\score"
# getUsersData.py儲存網紅貼文資料的json檔案,拿來計算分數
usersDataFile = "..\\data\\usersData.json"
with open(usersDataFile, 'r') as load_f:
usersData = json.load(load_f)
def get_similar_words(words):
words = [w.lower() for w in words]
if len(words) > 1:
maxScore = 0
firstWord = ''
secondWord = ''
labelCom = list(combinations(words, 2)) #計算所有label內的排列組合
for i in labelCom: #labelCom 為排列組合的結果
labelMean1 = wn.synsets(i[0])#取出每個計算詞的詞性
labelMean2 = wn.synsets(i[1])
for j in labelMean1:
for k in labelMean2:
if j.wup_similarity(k) is not None:#因有可能出現計算結果為None的狀況 所以需要排除
if j.wup_similarity(k) > maxScore:
maxScore = j.wup_similarity(k)
firstWord = j
secondWord = k
print("兩個詞的語意獲得最高分(語意相近)")
print("score : {}".format(maxScore))
print("firstWord : {}".format(firstWord))
print("secondWord : {}".format(secondWord))
print("\n")
if type(firstWord) == type('') :
return get_similar_words( list(words[0]) )
else:
print(firstWord, firstWord.definition())
print(secondWord, secondWord.definition())
print('\n')
return [firstWord, secondWord]
else:
synSetList = []
for i in range(len(words)):
labelMean1 = wn.synsets(words[i])
for j in labelMean1:
synSetList.append(j)
return synSetList
def getWordNetScore(model):
new_dic = {}
scoreFile = ("{}\\{}.json".format( scorePath, model ) )
print(scoreFile)
if not os.path.exists(scoreFile):
with open(scoreFile,"w") as dump_f:
new_dic['50'] = list()
new_dic['100'] = list()
new_dic['150'] = list()
new_dic['200'] = list()
new_dic['250'] = list()
new_dic['300'] = list()
json.dump(new_dic,dump_f)
with open(scoreFile,'r') as load_f:
load_dict = json.load(load_f)
for user in usersData:
print('\n')
print( user )
print('\n')
countPost = 0
countLike = 0
countComment = 0
imageScoreDic = {}
videoScoreDic = {}
# 換帳號,圖片分類分數初始化
countImages = 0
for t in myTypes:
imageScoreDic[t] = 0
# 換帳號,影片分類分數初始化
countVideos = 0
for t in myTypes:
videoScoreDic[t] = 0
for timestamp in usersData[user]['data']:
countPost += 1
countLike += usersData[user]['data'][timestamp]['likes']
countComment += usersData[user]['data'][timestamp]['comments']
if usersData[user]['data'][timestamp]['is_video']:
countVideos += 1
else:
countImages += 1
if 'labels' not in usersData[user]['data'][timestamp]:
print( user )
print( timestamp )
print( usersData[user]['data'][timestamp] )
if len(usersData[user]['data'][timestamp]['labels']) > 0:
synsetWords = get_similar_words(usersData[user]['data'][timestamp]['labels'])
if len(synsetWords) == 2:
for t in myTypes:
standard = wn.synsets(t)
firstWordMaxWordSimilarity = 0
secondWordMaxWordSimilarity = 0
for k in standard:
if synsetWords[0].wup_similarity(k) is not None:
if synsetWords[0].wup_similarity(k) > firstWordMaxWordSimilarity:
firstWordMaxWordSimilarity = synsetWords[0].wup_similarity(k)
print("{} vs {} = {}".format( synsetWords[0], k, firstWordMaxWordSimilarity ))
if synsetWords[1].wup_similarity(k) is not None:
if synsetWords[1].wup_similarity(k) > secondWordMaxWordSimilarity:
secondWordMaxWordSimilarity = synsetWords[1].wup_similarity(k)
print("{} vs {} = {}".format( synsetWords[1], k, secondWordMaxWordSimilarity ))
maxScore = (firstWordMaxWordSimilarity+secondWordMaxWordSimilarity)/2
if usersData[user]['data'][timestamp]['is_video']:
# print( '這部影片在{}獲得{}分'.format(t, maxScore) )
videoScoreDic[t] += maxScore - 0.05
else:
# print( '這張圖片在{}獲得{}分'.format(t, maxScore) )
imageScoreDic[t] += maxScore - 0.05
else:
for t in myTypes:
maxScore = 0
standard = wn.synsets(t)
for k in standard:
for s in synsetWords:
if s.wup_similarity(k) is not None:
#print('{0}為計算詞性,{1}為目標詞性,分數為:{2}'.format(j,k,j.wup_similarity(k)))
if s.wup_similarity(k) > maxScore:
maxScore = s.wup_similarity(k)
print("{} vs {} = {}".format( s, k, maxScore ))
if usersData[user]['data'][timestamp]['is_video']:
# print( '這部影片在{}獲得{}分'.format(t, maxScore) )
videoScoreDic[t] += maxScore - 0.05
else:
# print( '這張圖片在{}獲得{}分'.format(t, maxScore) )
imageScoreDic[t] += maxScore - 0.05
# print('\n')
# print('\n')
# print("{}目前圖片個數 : {}".format(user, countImages))
# print("{}目前在每個分類的總分:".format(user))
# print(imageScoreDic)
# print('\n')
# print("{}目前影片個數 : {}".format(user, countVideos))
# print("{}目前在每個分類的總分:".format(user))
# print("{}目前在每個分類的總分:".format(user))
# print(videoScoreDic)
# print('\n\n')
if countPost != 0 and countPost % 50 == 0 :
print(countPost)
users = { load_dict[str(countPost)][i]['name']:i for i in range( 0, len(load_dict[str(countPost)]) ) }
try:
currentImgScoreDic = { t:round(imageScoreDic[t]/countImages*100, 3) for t in myTypes }
except :
currentImgScoreDic = {}
print("目前沒有圖片")
try:
currentVideoScoreDic = { t:round(videoScoreDic[t]/countVideos*100, 3) for t in myTypes }
except :
currentVideoScoreDic = {}
print("目前沒有影片")
if user in users:
load_dict[str(countPost)][ users[user] ]['follower'] = usersData[user]['followers']
load_dict[str(countPost)][ users[user] ]['like'] = round( countLike/countPost, 3)
load_dict[str(countPost)][ users[user] ]['comment'] = round(countComment/countPost,3)
load_dict[str(countPost)][ users[user] ]['image']['amount'] = countImages
load_dict[str(countPost)][ users[user] ]['image']['score'] = currentImgScoreDic
load_dict[str(countPost)][ users[user] ]['video']['amount'] = countVideos
load_dict[str(countPost)][ users[user] ]['video']['score'] = currentVideoScoreDic
load_dict[str(countPost)][ users[user] ]['ERate'] = round( ((countLike/countPost)+(countComment/countPost))/usersData[user]['followers'], 5 )
else:
new_dic = {}
new_dic['name'] = user
new_dic['follower'] = usersData[user]['followers']
new_dic['like'] = round( countLike/countPost, 3)
new_dic['comment'] = round(countComment/countPost,3)
new_dic['image'] = {}
new_dic['image']['amount'] = countImages
new_dic['image']['score'] = currentImgScoreDic
new_dic['video'] = {}
new_dic['video']['amount'] = countVideos
new_dic['video']['score'] = currentVideoScoreDic
new_dic['ERate'] = round( ((countLike/countPost)+(countComment/countPost))/usersData[user]['followers'], 5 )
load_dict[str(countPost)].append( new_dic )
if( countPost == 300 ):
break
if countPost < 300:
if countPost > 250:
countPost = 300
elif countPost > 200:
countPost = 250
elif countPost > 150:
countPost = 200
elif countPost > 100:
countPost = 150
elif countPost > 50:
countPost = 100
else:
countPost = 50
users = { load_dict[str(countPost-50)][i]['name']:i for i in range( 0, len(load_dict[str(countPost-50)]) ) }
finalDic = load_dict[str(countPost-50)][ users[user] ]
while countPost <= 300:
users = { load_dict[str(countPost)][i]['name']:i for i in range( 0, len(load_dict[str(countPost)]) ) }
if user in users:
load_dict[str(countPost)][ users[user] ] = finalDic
else:
load_dict[str(countPost)].append( finalDic )
countPost += 50
with open(scoreFile, "w") as dump_f:
json.dump(load_dict, dump_f)
if __name__ == '__main__':
getWordNetScore("wordNet")
# print( usersData )
| [
1,
3,
4,
5,
6
] |
872 | 93d0d73d56b04bba505265958fccff229f5eaf49 | <mask token>
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST' and 'photo' in request.files:
filename = photos.save(request.files['photo'])
file_url = photos.url(filename)
path, label, element = model(file_url)
result = []
for el in path:
img = Image.fromarray((el * 255).astype(np.uint8))
file_object = io.BytesIO()
img.save(file_object, 'jpeg', quality=100)
figdata_jgp = base64.b64encode(file_object.getvalue())
result.append(figdata_jgp.decode('ascii'))
return render_template('display.html', image=file_url, label=
element, results=zip(result, label))
return render_template('index.html')
<mask token>
| <mask token>
sys.path.insert(1, 'script')
<mask token>
configure_uploads(app, photos)
patch_request_class(app)
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST' and 'photo' in request.files:
filename = photos.save(request.files['photo'])
file_url = photos.url(filename)
path, label, element = model(file_url)
result = []
for el in path:
img = Image.fromarray((el * 255).astype(np.uint8))
file_object = io.BytesIO()
img.save(file_object, 'jpeg', quality=100)
figdata_jgp = base64.b64encode(file_object.getvalue())
result.append(figdata_jgp.decode('ascii'))
return render_template('display.html', image=file_url, label=
element, results=zip(result, label))
return render_template('index.html')
if __name__ == '__main__':
    app.run(threaded=False)
| <mask token>
sys.path.insert(1, 'script')
<mask token>
app = Flask(__name__)
app.config['UPLOADED_PHOTOS_DEST'] = os.path.realpath('images')
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
patch_request_class(app)
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST' and 'photo' in request.files:
filename = photos.save(request.files['photo'])
file_url = photos.url(filename)
path, label, element = model(file_url)
result = []
for el in path:
img = Image.fromarray((el * 255).astype(np.uint8))
file_object = io.BytesIO()
img.save(file_object, 'jpeg', quality=100)
figdata_jgp = base64.b64encode(file_object.getvalue())
result.append(figdata_jgp.decode('ascii'))
return render_template('display.html', image=file_url, label=
element, results=zip(result, label))
return render_template('index.html')
if __name__ == '__main__':
    app.run(threaded=False)
| import os
from flask import Flask, request, render_template, url_for
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
import sys
sys.path.insert(1, 'script')
from backend import model
import io
from PIL import Image
import base64
import numpy as np
app = Flask(__name__)
app.config['UPLOADED_PHOTOS_DEST'] = os.path.realpath('images')
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
patch_request_class(app)
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST' and 'photo' in request.files:
filename = photos.save(request.files['photo'])
file_url = photos.url(filename)
path, label, element = model(file_url)
result = []
for el in path:
img = Image.fromarray((el * 255).astype(np.uint8))
file_object = io.BytesIO()
img.save(file_object, 'jpeg', quality=100)
figdata_jgp = base64.b64encode(file_object.getvalue())
result.append(figdata_jgp.decode('ascii'))
return render_template('display.html', image=file_url, label=
element, results=zip(result, label))
return render_template('index.html')
if __name__ == '__main__':
    app.run(threaded=False)
|
# -*- coding: utf-8 -*-
import os
from flask import Flask, request,render_template,url_for
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
import sys
sys.path.insert(1, 'script')
from backend import model
import io
from PIL import Image
import base64
import numpy as np
app = Flask(__name__)
app.config['UPLOADED_PHOTOS_DEST'] = os.path.realpath('images')
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
patch_request_class(app)
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST' and 'photo' in request.files:
filename = photos.save(request.files['photo'])
file_url = photos.url(filename)
path,label,element = model(file_url)
result = []
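        # Encode each output array as a base64 JPEG string so the template can embed it inline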
for el in path :
img = Image.fromarray((el * 255).astype(np.uint8))
file_object = io.BytesIO()
img.save(file_object, 'jpeg',quality=100)
figdata_jgp = base64.b64encode(file_object.getvalue())
result.append(figdata_jgp.decode('ascii'))
return render_template('display.html',image = file_url,label = element, results=zip(result,label))
return render_template('index.html')
if __name__ == '__main__':
    app.run(threaded=False)
| [
1,
2,
3,
4,
5
] |
873 | ce4ecff2012cfda4a458912713b0330a218fa186 | <mask token>
class MoveDigState(State):
<mask token>
<mask token>
<mask token>
| <mask token>
class MoveDigState(State):
def __init__(self):
super().__init__('MoveDig', 'ScanDig')
self.transitionReady = False
self.digSiteDistance = 0
<mask token>
<mask token>
| <mask token>
class MoveDigState(State):
def __init__(self):
super().__init__('MoveDig', 'ScanDig')
self.transitionReady = False
self.digSiteDistance = 0
def run(self, moveInstructions):
print('\n>run() not implemented\n')
self.transitionReady = False
self.transitionReady = True
def transition(self):
return self.transitionReady
| from states.state import State
class MoveDigState(State):
def __init__(self):
super().__init__('MoveDig', 'ScanDig')
self.transitionReady = False
self.digSiteDistance = 0
def run(self, moveInstructions):
print('\n>run() not implemented\n')
self.transitionReady = False
self.transitionReady = True
def transition(self):
return self.transitionReady
| from states.state import State
class MoveDigState(State):
#init attributes of state
def __init__(self):
super().__init__("MoveDig", "ScanDig")
self.transitionReady = False
self.digSiteDistance = 0
#implementation for each state: overridden
def run(self, moveInstructions):
print("\n>run() not implemented\n")
#always begin with no transition
self.transitionReady = False
#track distance
#execute move instructions
#when in dig site then
self.transitionReady = True
#implementation for each state: overridden
def transition(self):
return self.transitionReady | [
1,
2,
4,
5,
6
] |
874 | 612535d95e655f2e2d2c58f41b2aa99afa7fbcbc | <mask token>
@app.route('/')
def index():
return "<h1>Congratulations, it's a web app!</h1>"
<mask token>
| <mask token>
@app.route('/')
def index():
return "<h1>Congratulations, it's a web app!</h1>"
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
| <mask token>
app = Flask(__name__)
@app.route('/')
def index():
return "<h1>Congratulations, it's a web app!</h1>"
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
| from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return "<h1>Congratulations, it's a web app!</h1>"
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
| # from the top
# clean up dependencies
from flask import Flask
app = Flask(__name__)
@app.route("/")
def index():
return "<h1>Congratulations, it's a web app!</h1>"
if __name__ == "__main__":
app.run(host="127.0.0.1", port=8080, debug=True)
| [
1,
2,
3,
4,
5
] |
875 | 2579b0c31c5f7cad361ed317f87cb8b0ffcb0098 | '''
Created on Feb 21, 2013
@author: dharadarji
'''
def get_row(row_index):
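    # Builds row row_index (0-indexed) of Pascal's triangle iteratively, printing debug output as it goes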
entry = [1]
if row_index == 0:
return entry
tmp = []
for i in range(1, row_index + 2):
tmp = entry
print "i: ", i, "tmp: ", tmp
entry = []
entry.append(1)
for j in range(1, i-1):
print "j: ", j, "tmp[j]: ", tmp[0]
entry.append(tmp[j-1] + tmp[j])
entry.append(1)
print "entry: ", entry
print entry
get_row(3) | null | null | null | null | [
0
] |
876 | f135d52e4d5e49f96869c4209b84f30ff72f6780 | import praw
import pickle
import copy
class histogram:
def __init__(self, dictionary=None):
self.frequencies = {}
if dictionary is not None:
self.frequencies = copy.deepcopy(dictionary)
def get_sum(self):
the_sum = 0
for e in self.frequencies:
the_sum += self.frequencies[e]
return the_sum
def add_frequency(self, key, value):
if key in self.frequencies:
self.frequencies[key] += value
else:
self.frequencies[key] = value
def add_by_frequencies(self,frequencies):
for key in frequencies.frequencies:
self.add_frequency(key, frequencies.frequencies[key])
def multiply_frequency(self, key, value):
if key in self.frequencies:
self.frequencies[key] *= value
else:
self.frequencies[key] = 0.0
def multiply_by_frequencies(self, frequencies):
for key in frequencies.frequencies:
self.multiply_frequency(key, frequencies.frequencies[key])
def multiply_by_scalar(self, scalar):
for key in self.frequencies:
self.multiply_frequency(key,scalar)
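    # Note: division maps a zero numerator to 1.0 and division by zero to infinity instead of raising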
def divide_frequency(self, key, value):
if key in self.frequencies:
if value != 0:
if self.frequencies[key] == 0:
self.frequencies[key] = 1.0
else:
self.frequencies[key] /= (0.0 + value)
else:
if self.frequencies[key] == 0:
self.frequencies[key] = 1.0
else:
self.frequencies[key] = float('inf')
else:
if value > 0:
self.frequencies[key] = 0.0
else:
self.frequencies[key] = 1.0
def divide_by_frequencies(self, frequencies):
for key in frequencies.frequencies:
self.divide_frequency(key, frequencies.frequencies[key])
class comment:
def __init__(self, comment):
if comment is not None and hasattr(comment,'author') and comment.author is not None and hasattr(comment.author, 'name'):
self.author_name = comment.author.name
else:
self.author_name = ''
self.subreddit = str(comment.subreddit.display_name.strip(' ').lower())
class user:
@staticmethod
def get_histogram(comments, author_name):
total_comments_by_author = 0
the_histogram = histogram()
for comment in comments:
if comment.author_name == author_name:
total_comments_by_author += 1
the_histogram.add_frequency(comment.subreddit, 1)
the_histogram.multiply_by_scalar(1.0 / total_comments_by_author)
#print author_name, " ", the_histogram.get_sum()
return the_histogram.frequencies
class community:
@staticmethod
def get_histogram(comments, subreddit_name):
total_comments_in_subreddit = 0
the_histogram = histogram()
for comment in comments:
if comment.subreddit == subreddit_name:
total_comments_in_subreddit += 1
the_histogram.add_frequency(comment.author_name, 1)
the_histogram.multiply_by_scalar(1.0 / total_comments_in_subreddit)
return the_histogram.frequencies
class data:
def __init__(self, comments, x_subs):
self.comments = comments
self.x_subs = x_subs
def remove_sub_data(subredditName):
the_data = pickle.load(open('data.pkl', 'rb'))
comments = the_data.comments
x_subs = the_data.x_subs
comments = [x for x in comments if x.subreddit.lower() != subredditName]
x_subs = [x for x in x_subs if x != subredditName]
the_data = data(comments, x_subs )
print x_subs
output = open('data.pkl', 'wb')
pickle.dump(the_data,output)
output.close()
def add_sub_data(subredditName, num_redditors):
user_agent = ("Testing Reddit Functionality by /u/Reddit_Projector https://github.com/joshlemer/RedditProject")
reddit = praw.Reddit(user_agent)
subreddit_object = reddit.get_subreddit(subredditName)
the_data = pickle.load(open('data.pkl', 'rb'))
comments = the_data.comments
x_subs = the_data.x_subs
y_comments = [comment(a) for a in subreddit_object.get_comments(limit=num_redditors)]
z_comments = []
redditors = []
i = 0
for y_com in y_comments:
print y_com.subreddit, " z = ", i
redditor = y_com.author_name
if redditor not in redditors:
try:
z_comments += [comment(a) for a in reddit.get_redditor(y_com.author_name).get_comments(limit=100)]
redditors.append(redditor)
except:
print "oops, that user is weird"
i += 1
comments += list(z_comments)
print "COMMENTS LENGTH: ", len(comments)
the_data = data(comments, x_subs + [subredditName] )
output = open('data.pkl', 'wb')
pickle.dump(the_data,output)
output.close()
if __name__ == "__main__":
user_agent = ("Testing Reddit Functionality by /u/Reddit_Projector https://github.com/joshlemer/RedditProject")
reddit = praw.Reddit(user_agent)
subredditName = 'all'
subreddit_object = reddit.get_subreddit(subredditName)
y = 5 #Comments per subreddit inspected
z = 100 #Comments per user inspected
#List of subreddits to be analyzed
# x_subs = [
# 'hiphopheads',
# 'metal',
# 'postrock',
# 'letstalkmusic' ]
#Commented code below is for pulling our x_subs from the most recent comments in /r/all
# x_comments = [comment(a) for a in subreddit_object.get_comments(limit=x)]
# i = 0
# for c in x_comments:
# print "x = ", i
# if c.subreddit not in x_subs:
# x_subs.append(c.subreddit)
# i += 1
#List of subreddits to be analyzed
x_subs = [
'hiphopheads',
'metal',
'postrock',
'letstalkmusic' ]
y_comments = []
i = 0
print "Getting ", y, " comments from each of the ", len(x_subs), " subreddits"
for x_sub in x_subs:
print "\tRetrieving ", 5, " comments from /r/", x_sub
subreddit_object = reddit.get_subreddit(x_sub)
y_comments += [comment(a) for a in subreddit_object.get_comments(limit=y)]
i += 1
z_comments = []
redditors = []
i = 0
print "Following commenters from original subs to gather their other reddit activity"
for y_com in y_comments:
redditor = y_com.author_name
print "\tAnalyzing user ", redditor, " (user ", i, "/", len(y_comments), ")"
if redditor not in redditors:
try:
z_comments += [comment(a) for a in reddit.get_redditor(y_com.author_name).get_comments(limit=z)]
redditors.append(redditor)
except:
print "\t\toops, that user is weird\n\t\tprobably deleted their comment or profile or something"
else:
print "\t\tAlready looked at this user, no need to make an other call."
i += 1
comments = list(z_comments)
print "COMMENTS LENGTH: ", len(comments)
the_data = data(comments, x_subs)
output = open('data.pkl', 'wb')
pickle.dump(the_data,output)
output.close()
| null | null | null | null | [
0
] |
877 | 1c85ccaacfb47808e9e74f2a18bfe3b309891cf4 | #!/usr/bin/python
import pymysql
dbServerName = "127.0.0.1"
dbUser = "root"
dbPassword = "1448"
dbName = "TestDataBase2"
charSet = "utf8mb4"
cusrorType = pymysql.cursors.DictCursor
connectionObject = pymysql.connect(host=dbServerName, user=dbUser, password=dbPassword,
db=dbName, charset=charSet,cursorclass=cusrorType)
try:
# Create a cursor object
cursorObject = connectionObject.cursor()
# SQL query string
sqlQuery = "CREATE TABLE Liceu(id int, Nume varchar(32), Prenume varchar(32), Legitimatie int)"
# Execute the sqlQuery
cursorObject.execute(sqlQuery)
# SQL query string
sqlQuery = "show tables"
# Execute the sqlQuery
cursorObject.execute(sqlQuery)
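    # pymysql uses %s as the parameter placeholder for every column type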
    sql = "INSERT INTO Liceu(id, Nume, Prenume, Legitimatie) VALUES (%s, %s, %s, %s)"
    val = (5, 'Highway 21', 'sfsdfs', 53)
    cursorObject.execute(sql, val)
    # Commit on the connection; pymysql cursors have no commit() method
    connectionObject.commit()
    print(cursorObject.rowcount, "record inserted.")
#Fetch all the rows
rows = cursorObject.fetchall()
for row in rows:
print(row)
except Exception as e:
print("Exeception occured:{}".format(e))
finally:
connectionObject.close()
| null | null | null | null | [
0
] |
878 | 33b8baf2ca819315eaa5f16c7986390acb4d6efd | <mask token>
def normalize_mac_address(address):
return address.lower().replace('-', ':')
<mask token>
| <mask token>
def normalize_mac_address(address):
return address.lower().replace('-', ':')
def urlencode(s):
return urllib.quote(s.encode('utf-8'), '')
<mask token>
| <mask token>
def normalize_mac_address(address):
return address.lower().replace('-', ':')
def urlencode(s):
return urllib.quote(s.encode('utf-8'), '')
def urlencode_plus(s):
return urllib.quote_plus(s.encode('utf-8'), '')
| from __future__ import absolute_import, division, unicode_literals
import urllib
def normalize_mac_address(address):
return address.lower().replace('-', ':')
def urlencode(s):
return urllib.quote(s.encode('utf-8'), '')
def urlencode_plus(s):
return urllib.quote_plus(s.encode('utf-8'), '')
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
import urllib
def normalize_mac_address(address):
return address.lower().replace("-", ":")
def urlencode(s):
return urllib.quote(s.encode("utf-8"), "")
def urlencode_plus(s):
return urllib.quote_plus(s.encode("utf-8"), "")
| [
1,
2,
3,
4,
5
] |
879 | bf8ffe603b7c1e90deed6a69500ea5b7671e7270 | <mask token>
def visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):
"""
When cnn_model is specified it'll show what the cnn_model predicts (red)
as opposed to what inputs it actually received (green)
"""
data = pd.DataFrame.from_csv(filename)
for i in range(30):
cur_img = data['image'][i]
cur_steer = int(data['servo'][i])
cur_throttle = int(data['motor'][i])
cur_img_array = deserialize_image(cur_img)
image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)
print(i)
cv2.imwrite('test' + str(i) + '.jpg', image)
<mask token>
| <mask token>
def visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):
"""
When cnn_model is specified it'll show what the cnn_model predicts (red)
as opposed to what inputs it actually received (green)
"""
data = pd.DataFrame.from_csv(filename)
for i in range(30):
cur_img = data['image'][i]
cur_steer = int(data['servo'][i])
cur_throttle = int(data['motor'][i])
cur_img_array = deserialize_image(cur_img)
image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)
print(i)
cv2.imwrite('test' + str(i) + '.jpg', image)
<mask token>
with open('settings.json') as d:
SETTINGS = json.load(d)
<mask token>
if len(sys.argv) > 1:
filename = sys.argv[1]
visualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'],
depth=SETTINGS['depth'])
| <mask token>
def visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):
"""
When cnn_model is specified it'll show what the cnn_model predicts (red)
as opposed to what inputs it actually received (green)
"""
data = pd.DataFrame.from_csv(filename)
for i in range(30):
cur_img = data['image'][i]
cur_steer = int(data['servo'][i])
cur_throttle = int(data['motor'][i])
cur_img_array = deserialize_image(cur_img)
image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)
print(i)
cv2.imwrite('test' + str(i) + '.jpg', image)
<mask token>
with open('settings.json') as d:
SETTINGS = json.load(d)
filename = get_latest_filename()
if len(sys.argv) > 1:
filename = sys.argv[1]
visualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'],
depth=SETTINGS['depth'])
| import numpy as np
import cv2
import pandas as pd
from suiron.utils.functions import raw_to_cnn, cnn_to_raw, raw_motor_to_rgb
from suiron.utils.img_serializer import deserialize_image
def visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):
"""
When cnn_model is specified it'll show what the cnn_model predicts (red)
as opposed to what inputs it actually received (green)
"""
data = pd.DataFrame.from_csv(filename)
for i in range(30):
cur_img = data['image'][i]
cur_steer = int(data['servo'][i])
cur_throttle = int(data['motor'][i])
cur_img_array = deserialize_image(cur_img)
image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)
print(i)
cv2.imwrite('test' + str(i) + '.jpg', image)
import sys
import json
from suiron.utils.file_finder import get_latest_filename
with open('settings.json') as d:
SETTINGS = json.load(d)
filename = get_latest_filename()
if len(sys.argv) > 1:
filename = sys.argv[1]
visualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'],
depth=SETTINGS['depth'])
| # from suiron.core.SuironIO import SuironIO
# import cv2
# import os
# import time
# import json
# import numpy as np
# suironio = SuironIO(serial_location='/dev/ttyUSB0', baudrate=57600, port=5050)
# if __name__ == "__main__":
# while True:
# # suironio.record_inputs()
# print('turn90')
# suironio.servo_test(90)
# print('turn0')
# suironio.servo_test(0)
# print('turn-90')
# suironio.servo_test(-90)
# import socket
# import struct
# import pandas as pd
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# host = raw_input("Server hostname or ip? ")
# port = input("Server port? ")
# # sock.connect((host,port))
# sock.connect(('192.168.0.164',5051))
# while True:
# data = raw_input("message: ")
# # sock.send(data)
# raw_data = {
# 'image': [2,4,2,5,6,3,2,3],
# 'servo': [22,42,5,45,34,534,2,3],
# 'motor': [23423,324,32,324,324,2,4,2]
# }
# df = pd.DataFrame(raw_data, columns=['image', 'servo', 'motor'])
# df = df.to_csv()
# sock.sendall(struct.pack('>i', len(df))+df)
# # sock.sendall(struct.pack('>i', len(data))+data)
# print("response: ", sock.recv(1024))
import numpy as np
import cv2
import pandas as pd
from suiron.utils.functions import raw_to_cnn, cnn_to_raw, raw_motor_to_rgb
from suiron.utils.img_serializer import deserialize_image
# Visualize images
# With and without any predictions
def visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):
"""
When cnn_model is specified it'll show what the cnn_model predicts (red)
as opposed to what inputs it actually received (green)
"""
data = pd.DataFrame.from_csv(filename)
for i in range(30):
cur_img = data['image'][i]
cur_steer = int(data['servo'][i])
cur_throttle = int(data['motor'][i])
# [1:-1] is used to remove '[' and ']' from string
cur_img_array = deserialize_image(cur_img)
# cur_img_array = cv2.resize(cur_img_array, (480, 320), interpolation=cv2.INTER_CUBIC)
image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)
print(i)
cv2.imwrite('test'+str(i)+'.jpg', image)
import sys
import json
# from suiron.core.SuironVZ import visualize_data
from suiron.utils.file_finder import get_latest_filename
# Load image settings
with open('settings.json') as d:
SETTINGS = json.load(d)
# Visualize latest filename
filename = get_latest_filename()
# If we specified which file
if len(sys.argv) > 1:
filename = sys.argv[1]
visualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'], depth=SETTINGS['depth']) | [
1,
2,
3,
4,
5
] |
880 | 71ebc6e9218085e887eda7843b5489837ed45c97 | <mask token>
class Zouts:
<mask token>
def search(self, StN, ZN, Motion):
for elem in self.elements:
print('elem:')
print(str(type(elem.StN)) + str(type(StN)))
print(elem.StN + '->' + StN + ':' + str(elem.StN == StN))
print(elem.Motion + '->' + ':' + str(elem.Motion == Motion))
if elem.StN == StN and elem.ZN == ZN and elem.Motion == Motion:
print('match')
return elem
print('not match')
return None
def add(self, zout):
self.elements.append(zout)
<mask token>
| <mask token>
class Zout:
<mask token>
<mask token>
<mask token>
class Zouts:
def __init__(self):
self.elements = []
def search(self, StN, ZN, Motion):
for elem in self.elements:
print('elem:')
print(str(type(elem.StN)) + str(type(StN)))
print(elem.StN + '->' + StN + ':' + str(elem.StN == StN))
print(elem.Motion + '->' + ':' + str(elem.Motion == Motion))
if elem.StN == StN and elem.ZN == ZN and elem.Motion == Motion:
print('match')
return elem
print('not match')
return None
def add(self, zout):
self.elements.append(zout)
def display(self):
for elem in self.elements:
print(elem.Var)
| <mask token>
class Zout:
<mask token>
def tozout(self, aline):
"""transform station statement to Cylinder Outputs struct"""
pattern = re.compile(
'.*(?P<Var>A.*[sS]t(?P<StN>\\d+)_Y(?P<ZN>\\d+)_[24]_(?P<ZName>\\w+)_(?P<Motion>open|close|forward|backward|up|upward|down|downward|left|leftward|right|rightward))\\s*\\).*'
)
match = pattern.match(aline)
if match:
self.Var = match.group('Var')
self.StN = match.group('StN')
self.ZN = match.group('ZN')
self.ZName = match.group('ZName')
self.Motion = match.group('Motion')
self.Motion = re.sub('^(up|down|left|right)$', '\\1ward', self.
Motion)
isgrippermatch = re.compile('.*(open|close).*').match(aline)
if isgrippermatch:
self.Ztype = 'gripper'
else:
self.Ztype = 'not gripper'
<mask token>
class Zouts:
def __init__(self):
self.elements = []
def search(self, StN, ZN, Motion):
for elem in self.elements:
print('elem:')
print(str(type(elem.StN)) + str(type(StN)))
print(elem.StN + '->' + StN + ':' + str(elem.StN == StN))
print(elem.Motion + '->' + ':' + str(elem.Motion == Motion))
if elem.StN == StN and elem.ZN == ZN and elem.Motion == Motion:
print('match')
return elem
print('not match')
return None
def add(self, zout):
self.elements.append(zout)
def display(self):
for elem in self.elements:
print(elem.Var)
| import re
class Zout:
def __init__(self, aline):
self.Str = aline
self.Var = ''
self.StN = ''
self.ZN = ''
self.ZName = ''
self.Motion = ''
self.Ztype = ''
self.tozout(aline)
def tozout(self, aline):
"""transform station statement to Cylinder Outputs struct"""
pattern = re.compile(
'.*(?P<Var>A.*[sS]t(?P<StN>\\d+)_Y(?P<ZN>\\d+)_[24]_(?P<ZName>\\w+)_(?P<Motion>open|close|forward|backward|up|upward|down|downward|left|leftward|right|rightward))\\s*\\).*'
)
match = pattern.match(aline)
if match:
self.Var = match.group('Var')
self.StN = match.group('StN')
self.ZN = match.group('ZN')
self.ZName = match.group('ZName')
self.Motion = match.group('Motion')
self.Motion = re.sub('^(up|down|left|right)$', '\\1ward', self.
Motion)
isgrippermatch = re.compile('.*(open|close).*').match(aline)
if isgrippermatch:
self.Ztype = 'gripper'
else:
self.Ztype = 'not gripper'
def display(self):
print(self.Var)
class Zouts:
def __init__(self):
self.elements = []
def search(self, StN, ZN, Motion):
for elem in self.elements:
print('elem:')
print(str(type(elem.StN)) + str(type(StN)))
print(elem.StN + '->' + StN + ':' + str(elem.StN == StN))
print(elem.Motion + '->' + ':' + str(elem.Motion == Motion))
if elem.StN == StN and elem.ZN == ZN and elem.Motion == Motion:
print('match')
return elem
print('not match')
return None
def add(self, zout):
self.elements.append(zout)
def display(self):
for elem in self.elements:
print(elem.Var)
| import re
class Zout:
def __init__(self, aline):
self.Str = aline
self.Var = ''
self.StN = ''
self.ZN = ''
self.ZName = ''
self.Motion = ''
self.Ztype = ''
self.tozout(aline)
def tozout(self, aline):
"""transform station statement to Cylinder Outputs struct"""
# SetAusg(A120,5,A.St201_Y1_2_SwivelUnit_backward);
#front|back|up|down|left|right
pattern = re.compile(r'.*(?P<Var>A.*[sS]t(?P<StN>\d+)_Y(?P<ZN>\d+)_[24]_(?P<ZName>\w+)_'
r'(?P<Motion>open|close|forward|backward|up|upward|down|downward|left|leftward|right|rightward))\s*\).*')
match = pattern.match(aline)
if match:
#print('match')
self.Var = match.group('Var')
self.StN = match.group('StN')
self.ZN = match.group('ZN')
self.ZName = match.group('ZName')
self.Motion = match.group('Motion')
# if re.compile(r'^up|down|left|right$').match(self.Motion):
# self.Motion = self.Motion+'ward'
# obj = re.compile(r'up|down|left|right')
# if obj.match(self.Motion):
# print('match')
# self.Motion = obj.subn('ward',self.Motion)[0]
self.Motion = re.sub(r'^(up|down|left|right)$',r'\1ward', self.Motion)
isgrippermatch = re.compile(r'.*(open|close).*').match(aline)
if isgrippermatch:
self.Ztype = 'gripper'
else:
self.Ztype = 'not gripper'
def display(self):
print(self.Var)
class Zouts:
def __init__(self):
self.elements = []
def search(self, StN, ZN, Motion):
for elem in self.elements:
print('elem:')
print(str(type(elem.StN)) + str(type(StN)))
print(elem.StN + '->' + StN + ':' + str(elem.StN == StN))
print(elem.Motion + '->' + ':' + str(elem.Motion == Motion))
if elem.StN == StN and elem.ZN == ZN and elem.Motion == Motion:
print('match')
return elem
print('not match')
return None
def add(self, zout):
self.elements.append(zout)
def display(self):
for elem in self.elements:
print(elem.Var) | [
3,
6,
7,
10,
11
] |
881 | 58f7810e2731721562e3459f92684589dc66862c | <mask token>
| <mask token>
print('')
print('Lesson #2')
print('Program start:')
for i in a:
if i < 9:
print(i)
print('End')
| a = [3, 4, 2, 3, 5, 8, 23, 32, 35, 34, 4, 6, 9]
print('')
print('Lesson #2')
print('Program start:')
for i in a:
if i < 9:
print(i)
print('End')
| a = [3, 4, 2, 3, 5, 8, 23, 32, 35, 34, 4, 6, 9]
print("")
print("Lesson #2")
print("Program start:")
for i in a:
if i < 9:
print(i)
print("End") | null | [
0,
1,
2,
3
] |
882 | aaa0ac5e31e2c10b5baba6077e952fff1a92ef82 | <mask token>
| <mask token>
print('cross-vali score is: {}'.format(score.mean()))
<mask token>
for train_index, test_index in kfold.split(iris.data, iris.target):
print(train_index, test_index)
<mask token>
def simple_grid(iris, kfold):
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.
target, test_size=0.3, random_state=0)
best_score = 0
para_list = [0.001, 0.01, 0.1, 1, 10]
for gamma in para_list:
for C in para_list:
svm = SVC(gamma=gamma, C=C)
scores = cross_val_score(svm, iris.data, iris.target, cv=kfold)
score = scores.mean()
if score > best_score:
best_score = score
best_para = {'C': C, 'gamma': gamma}
print('best score is {:.2f}'.format(best_score))
print('best parameters is {}'.format(best_para))
score = cross_val_score(svm, iris.data, iris.target, cv=kfold)
print('CV-score is {}'.format(score.mean(0)))
return best_para
<mask token>
grid_search.fit(X_train, y_train)
print('best grid score is {:.2f}'.format(grid_search.score(X_test, y_test)))
<mask token>
display(results.head())
print(cross_val_score(GridSearchCV(SVC(), para_grid, cv=kfold), X_train,
y_train, cv=kfold).mean())
<mask token>
print(classification_report(y_test, y_pred))
| <mask token>
iris = load_iris()
log_reg = LogisticRegression()
score = cross_val_score(log_reg, iris.data, iris.target, cv=10)
print('cross-vali score is: {}'.format(score.mean()))
<mask token>
kfold = StratifiedKFold(n_splits=5, shuffle=True)
for train_index, test_index in kfold.split(iris.data, iris.target):
print(train_index, test_index)
<mask token>
def simple_grid(iris, kfold):
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.
target, test_size=0.3, random_state=0)
best_score = 0
para_list = [0.001, 0.01, 0.1, 1, 10]
for gamma in para_list:
for C in para_list:
svm = SVC(gamma=gamma, C=C)
scores = cross_val_score(svm, iris.data, iris.target, cv=kfold)
score = scores.mean()
if score > best_score:
best_score = score
best_para = {'C': C, 'gamma': gamma}
print('best score is {:.2f}'.format(best_score))
print('best parameters is {}'.format(best_para))
score = cross_val_score(svm, iris.data, iris.target, cv=kfold)
print('CV-score is {}'.format(score.mean(0)))
return best_para
para = simple_grid(iris, kfold)
para_grid = {'C': [0.001, 0.01, 0.1, 1, 10], 'gamma': [0.001, 0.01, 0.1, 1, 10]
}
grid_search = GridSearchCV(SVC(), para_grid, cv=kfold)
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target,
test_size=0.3, random_state=0)
grid_search.fit(X_train, y_train)
print('best grid score is {:.2f}'.format(grid_search.score(X_test, y_test)))
<mask token>
results = pd.DataFrame(grid_search.cv_results_)
display(results.head())
print(cross_val_score(GridSearchCV(SVC(), para_grid, cv=kfold), X_train,
y_train, cv=kfold).mean())
y_pred = grid_search.predict(X_test, y_test)
<mask token>
print(classification_report(y_test, y_pred))
| <mask token>
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
iris = load_iris()
log_reg = LogisticRegression()
score = cross_val_score(log_reg, iris.data, iris.target, cv=10)
print('cross-vali score is: {}'.format(score.mean()))
import mglearn
kfold = StratifiedKFold(n_splits=5, shuffle=True)
for train_index, test_index in kfold.split(iris.data, iris.target):
print(train_index, test_index)
from sklearn.svm import SVC
def simple_grid(iris, kfold):
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.
target, test_size=0.3, random_state=0)
best_score = 0
para_list = [0.001, 0.01, 0.1, 1, 10]
for gamma in para_list:
for C in para_list:
svm = SVC(gamma=gamma, C=C)
scores = cross_val_score(svm, iris.data, iris.target, cv=kfold)
score = scores.mean()
if score > best_score:
best_score = score
best_para = {'C': C, 'gamma': gamma}
print('best score is {:.2f}'.format(best_score))
print('best parameters is {}'.format(best_para))
score = cross_val_score(svm, iris.data, iris.target, cv=kfold)
print('CV-score is {}'.format(score.mean(0)))
return best_para
para = simple_grid(iris, kfold)
para_grid = {'C': [0.001, 0.01, 0.1, 1, 10], 'gamma': [0.001, 0.01, 0.1, 1, 10]
}
grid_search = GridSearchCV(SVC(), para_grid, cv=kfold)
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target,
test_size=0.3, random_state=0)
grid_search.fit(X_train, y_train)
print('best grid score is {:.2f}'.format(grid_search.score(X_test, y_test)))
import pandas as pd
results = pd.DataFrame(grid_search.cv_results_)
display(results.head())
print(cross_val_score(GridSearchCV(SVC(), para_grid, cv=kfold), X_train,
y_train, cv=kfold).mean())
y_pred = grid_search.predict(X_test, y_test)
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
| # -*- coding: utf-8 -*-
"""
Created on Wed Aug 22 18:05:44 2018
@author: Administrator
"""
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
iris = load_iris()
log_reg = LogisticRegression()
score = cross_val_score(log_reg, iris.data, iris.target,cv=10)
print("cross-vali score is: {}".format(score.mean()))
import mglearn
#mglearn.plots.plot_stratified_cross_validation()
kfold = StratifiedKFold(n_splits=5, shuffle=True)
for train_index, test_index in kfold.split(iris.data, iris.target):
print(train_index, test_index)
from sklearn.svm import SVC
def simple_grid(iris, kfold):
X_train,X_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.3,random_state = 0)
best_score = 0
para_list = [0.001, 0.01, 0.1, 1, 10]
for gamma in para_list:
for C in para_list:
svm = SVC(gamma=gamma, C=C)
#svm.fit(X_train, y_train)
scores = cross_val_score(svm, iris.data, iris.target,cv=kfold)
score = scores.mean()
if score > best_score:
best_score = score
best_para = {'C':C, 'gamma':gamma}
print("best score is {:.2f}".format(best_score))
print("best parameters is {}".format(best_para))
score = cross_val_score(svm, iris.data, iris.target,cv=kfold)
print("CV-score is {}".format(score.mean(0)))
return best_para
para = simple_grid(iris, kfold)
para_grid = {"C":[0.001, 0.01, 0.1, 1, 10],
'gamma':[0.001, 0.01, 0.1, 1, 10]}
grid_search = GridSearchCV(SVC(), para_grid, cv = kfold)
X_train,X_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.3,random_state = 0)
grid_search.fit(X_train, y_train)
print("best grid score is {:.2f}".format(grid_search.score(X_test,
y_test)))
import pandas as pd
results = pd.DataFrame(grid_search.cv_results_)
display(results.head())
print(cross_val_score(GridSearchCV(SVC(), para_grid, cv = kfold),
X_train,y_train, cv = kfold).mean())
y_pred = grid_search.predict(X_test,y_test)
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred)) | [
0,
2,
3,
4,
5
] |
883 | ad3c5ed3d6a9aa83e69f53d3fec845e8e2b1c9c6 | <mask token>
def avg(x):
return [(sum(x[i]) / row) for i in range(col)]
<mask token>
def cov(x, md_x):
cov_xy = [[(0) for r in range(col)] for c in range(col)]
for i in range(col):
for j in range(col):
for k in range(row):
cov_xy[i][j] += (data[i][k] - md_x[i]) * (data[j][k] - md_x[j]
) / row
return cov_xy
def cor(cov, sd_x):
cor_xy = [[(0) for r in range(col)] for c in range(col)]
for i in range(col):
for j in range(col):
cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])
print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],
'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))
return cor_xy
<mask token>
| <mask token>
def avg(x):
return [(sum(x[i]) / row) for i in range(col)]
def sd(x):
return [np.std(x[i]) for i in range(col)]
def cov(x, md_x):
cov_xy = [[(0) for r in range(col)] for c in range(col)]
for i in range(col):
for j in range(col):
for k in range(row):
cov_xy[i][j] += (data[i][k] - md_x[i]) * (data[j][k] - md_x[j]
) / row
return cov_xy
def cor(cov, sd_x):
cor_xy = [[(0) for r in range(col)] for c in range(col)]
for i in range(col):
for j in range(col):
cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])
print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],
'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))
return cor_xy
<mask token>
| <mask token>
def avg(x):
return [(sum(x[i]) / row) for i in range(col)]
def sd(x):
return [np.std(x[i]) for i in range(col)]
def cov(x, md_x):
cov_xy = [[(0) for r in range(col)] for c in range(col)]
for i in range(col):
for j in range(col):
for k in range(row):
cov_xy[i][j] += (data[i][k] - md_x[i]) * (data[j][k] - md_x[j]
) / row
return cov_xy
def cor(cov, sd_x):
cor_xy = [[(0) for r in range(col)] for c in range(col)]
for i in range(col):
for j in range(col):
cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])
print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],
'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))
return cor_xy
if __name__ == '__main__':
argv = sys.argv[:]
if len(argv) < 2:
print('1 argument required. Provide data file name')
sys.exit(0)
data = pd.read_csv(argv[1], header=None)
row = data.shape[0]
col = data.shape[1]
print('** dataset dimensions **')
print(row)
print(col)
mean = avg(data)
stdev = sd(data)
print(stdev)
covar = cov(data, mean)
correl = cor(covar, stdev)
print('---------CORRELATION MATRIX---------')
print(correl)
| import pandas as pd
import numpy as np
import sys
def avg(x):
return [(sum(x[i]) / row) for i in range(col)]
def sd(x):
return [np.std(x[i]) for i in range(col)]
def cov(x, md_x):
cov_xy = [[(0) for r in range(col)] for c in range(col)]
for i in range(col):
for j in range(col):
for k in range(row):
cov_xy[i][j] += (data[i][k] - md_x[i]) * (data[j][k] - md_x[j]
) / row
return cov_xy
def cor(cov, sd_x):
cor_xy = [[(0) for r in range(col)] for c in range(col)]
for i in range(col):
for j in range(col):
cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])
print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],
'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))
return cor_xy
if __name__ == '__main__':
argv = sys.argv[:]
if len(argv) < 2:
print('1 argument required. Provide data file name')
sys.exit(0)
data = pd.read_csv(argv[1], header=None)
row = data.shape[0]
col = data.shape[1]
print('** dataset dimensions **')
print(row)
print(col)
mean = avg(data)
stdev = sd(data)
print(stdev)
covar = cov(data, mean)
correl = cor(covar, stdev)
print('---------CORRELATION MATRIX---------')
print(correl)
| import pandas as pd
import numpy as np
import sys
def avg (x):
return [sum(x[i])/row for i in range(col)]
def sd (x):
return [np.std(x[i]) for i in range(col)]
def cov (x, md_x):
cov_xy=[[0 for r in range(col)] for c in range(col)]
for i in range(col):
for j in range (col):
for k in range (row):
cov_xy[i][j]+=((data[i][k]-md_x[i])*(data[j][k]-md_x[j]))/(row)
return(cov_xy)
def cor (cov, sd_x):
cor_xy=[[0 for r in range(col)] for c in range(col)]
for i in range(col):
for j in range (col):
cor_xy[i][j] = cov[i][j]/(sd_x[i]*sd_x[j])
print("cov= ",cov[i][j],"sd i", sd_x[i], " sd k", sd_x[j],"cov/sd", cov[i][j]/(sd_x[i]*sd_x[j]))
return(cor_xy)
if __name__ == "__main__":
argv=sys.argv[:]
if len(argv)<2:
print("1 argument required. Provide data file name")
sys.exit(0)
data=pd.read_csv(argv[1],header= None)
row=data.shape[0]
col=data.shape[1]
print("** dataset dimensions **")
print(row)
print(col)
mean=avg(data)
stdev=sd(data)
print(stdev)
covar=cov(data, mean)
correl=cor(covar, stdev)
print("---------CORRELATION MATRIX---------")
print(correl)
| [
3,
4,
5,
6,
7
] |
884 | d267c8cbe51fb1bacc9404a1385f1daa4a0db7f2 | <mask token>
class KMeans:
def __init__(self, k=5, max_iters=100, random_seed=42):
self.k = k
self.max_iters = max_iters
np.random.seed(random_seed)
def _initialise_centroids(self, X):
random_indices = np.random.permutation(X.shape[0])
random_indices = random_indices[:self.k]
self.centroids = X[random_indices]
def _euclidien_distance(self, x):
return np.sum((x - self.centroids) ** 2, axis=1)
<mask token>
def _update_centroids(self, X, cluster_labels):
for cluster in range(self.k):
X_cluster = X[cluster_labels == cluster]
cluster_mean = np.mean(X_cluster, axis=0)
self.centroids[cluster] = cluster_mean
<mask token>
def predict(self, X):
return self._assign_clusters(X)
<mask token>
| <mask token>
class KMeans:
def __init__(self, k=5, max_iters=100, random_seed=42):
self.k = k
self.max_iters = max_iters
np.random.seed(random_seed)
def _initialise_centroids(self, X):
random_indices = np.random.permutation(X.shape[0])
random_indices = random_indices[:self.k]
self.centroids = X[random_indices]
def _euclidien_distance(self, x):
return np.sum((x - self.centroids) ** 2, axis=1)
<mask token>
def _update_centroids(self, X, cluster_labels):
for cluster in range(self.k):
X_cluster = X[cluster_labels == cluster]
cluster_mean = np.mean(X_cluster, axis=0)
self.centroids[cluster] = cluster_mean
def fit(self, X):
self._initialise_centroids(X)
iterations = 0
while iterations <= self.max_iters:
iterations += 1
cluster_labels = self._assign_clusters(X)
self._update_centroids(X, cluster_labels)
def predict(self, X):
return self._assign_clusters(X)
<mask token>
| <mask token>
class KMeans:
def __init__(self, k=5, max_iters=100, random_seed=42):
self.k = k
self.max_iters = max_iters
np.random.seed(random_seed)
def _initialise_centroids(self, X):
random_indices = np.random.permutation(X.shape[0])
random_indices = random_indices[:self.k]
self.centroids = X[random_indices]
def _euclidien_distance(self, x):
return np.sum((x - self.centroids) ** 2, axis=1)
def _assign_clusters(self, X):
cluster_distances = pairwise_distances(X, self.centroids, metric=
'euclidean')
cluster_labels = np.argmin(cluster_distances, axis=1)
return cluster_labels
def _update_centroids(self, X, cluster_labels):
for cluster in range(self.k):
X_cluster = X[cluster_labels == cluster]
cluster_mean = np.mean(X_cluster, axis=0)
self.centroids[cluster] = cluster_mean
def fit(self, X):
self._initialise_centroids(X)
iterations = 0
while iterations <= self.max_iters:
iterations += 1
cluster_labels = self._assign_clusters(X)
self._update_centroids(X, cluster_labels)
def predict(self, X):
return self._assign_clusters(X)
<mask token>
| <mask token>
class KMeans:
def __init__(self, k=5, max_iters=100, random_seed=42):
self.k = k
self.max_iters = max_iters
np.random.seed(random_seed)
def _initialise_centroids(self, X):
random_indices = np.random.permutation(X.shape[0])
random_indices = random_indices[:self.k]
self.centroids = X[random_indices]
def _euclidien_distance(self, x):
return np.sum((x - self.centroids) ** 2, axis=1)
def _assign_clusters(self, X):
cluster_distances = pairwise_distances(X, self.centroids, metric=
'euclidean')
cluster_labels = np.argmin(cluster_distances, axis=1)
return cluster_labels
def _update_centroids(self, X, cluster_labels):
for cluster in range(self.k):
X_cluster = X[cluster_labels == cluster]
cluster_mean = np.mean(X_cluster, axis=0)
self.centroids[cluster] = cluster_mean
def fit(self, X):
self._initialise_centroids(X)
iterations = 0
while iterations <= self.max_iters:
iterations += 1
cluster_labels = self._assign_clusters(X)
self._update_centroids(X, cluster_labels)
def predict(self, X):
return self._assign_clusters(X)
data = load_breast_cancer()
X, y = data.data, data.target
X_train, X_test = train_test_split(X, test_size=0.1)
model = KMeans(k=5)
model.fit(X_train)
y_pred = model.predict(X_test)
print(y_pred)
| import pandas as pd
import numpy as np
import math
from sklearn.datasets import load_digits, load_iris, load_boston, load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import pairwise_distances
class KMeans():
def __init__(self, k = 5, max_iters = 100, random_seed = 42):
self.k = k
self.max_iters = max_iters
# Set random seed
np.random.seed(random_seed)
def _initialise_centroids(self, X):
random_indices = np.random.permutation(X.shape[0])
random_indices = random_indices[:self.k]
self.centroids = X[random_indices]
def _euclidien_distance(self, x):
return np.sum((x - self.centroids)**2, axis = 1)
def _assign_clusters(self, X):
cluster_distances = pairwise_distances(X, self.centroids, metric = 'euclidean')
cluster_labels = np.argmin(cluster_distances, axis = 1)
return cluster_labels
def _update_centroids(self, X, cluster_labels):
for cluster in range(self.k):
# Get all data points of a cluster
X_cluster = X[cluster_labels == cluster]
# Update the cluster's centroid
cluster_mean = np.mean(X_cluster, axis = 0)
self.centroids[cluster] = cluster_mean
def fit(self, X):
# Initialise random centroids
self._initialise_centroids(X)
iterations = 0
while iterations <= self.max_iters:
iterations += 1
# Assign clusters to data
cluster_labels = self._assign_clusters(X)
# Update centroids
self._update_centroids(X, cluster_labels)
def predict(self, X):
return self._assign_clusters(X)
# Load data
data = load_breast_cancer()
X, y = data.data, data.target
X_train, X_test = train_test_split(X, test_size = 0.1)
# Fit model
model = KMeans(k = 5)
model.fit(X_train)
# Predict
y_pred = model.predict(X_test)
print(y_pred)
| [
6,
7,
8,
10,
12
] |
885 | 9189c1dd21b0858df3138bcf4fc7568b378e6271 | <mask token>
class NotSetTestCase(unittest.TestCase):
<mask token>
class _CachedPropertyHelper(object):
def __init__(self, value):
self.value = value
@cached_property('_cached_value')
def cached_value(self):
return self.value
class CachedPropertyTestCase(unittest.TestCase):
def test_cached_property(self):
o = _CachedPropertyHelper(0)
self.assertFalse(hasattr(o, '_cached_value'))
o.value = 123
self.assertEqual(o.cached_value, 123)
self.assertTrue(hasattr(o, '_cached_value'))
self.assertEqual(o._cached_value, 123)
o.value = 456
self.assertEqual(o.cached_value, 123)
self.assertEqual(o._cached_value, 123)
def test_clear_cached_property(self):
o = _CachedPropertyHelper(123)
_ = o.cached_value
clear_cached_property(o, '_cached_value')
o.value = 456
self.assertFalse(hasattr(o, '_cached_value'))
self.assertEqual(o.cached_value, 456)
self.assertEqual(o._cached_value, 456)
class MaybeCloseTestCase(unittest.TestCase):
def test_maybe_close(self):
f = Mock(close=Mock(return_value=None))
with maybe_close(f):
self.assertFalse(f.close.called)
self.assertTrue(f.close.called)
with maybe_close(1):
pass
class IterFilesTestCase(unittest.TestCase):
def test_iter_files(self):
names = ['a/1.txt', 'a/2.txt', 'a/b/1.txt', 'a/b/2.txt', 'b/1.txt',
'b/2.txt', 'c.txt']
with TemporaryDirectory() as tempdir:
for name in names:
f_path = os.path.join(tempdir, name)
f_dir = os.path.split(f_path)[0]
makedirs(f_dir, exist_ok=True)
with open(f_path, 'wb') as f:
f.write(b'')
self.assertListEqual(names, sorted(iter_files(tempdir)))
self.assertListEqual(names, sorted(iter_files(tempdir + '/a/../')))
<mask token>
| <mask token>
class NotSetTestCase(unittest.TestCase):
def test_repr(self):
self.assertEqual(repr(NOT_SET), 'NOT_SET')
class _CachedPropertyHelper(object):
def __init__(self, value):
self.value = value
@cached_property('_cached_value')
def cached_value(self):
return self.value
class CachedPropertyTestCase(unittest.TestCase):
def test_cached_property(self):
o = _CachedPropertyHelper(0)
self.assertFalse(hasattr(o, '_cached_value'))
o.value = 123
self.assertEqual(o.cached_value, 123)
self.assertTrue(hasattr(o, '_cached_value'))
self.assertEqual(o._cached_value, 123)
o.value = 456
self.assertEqual(o.cached_value, 123)
self.assertEqual(o._cached_value, 123)
def test_clear_cached_property(self):
o = _CachedPropertyHelper(123)
_ = o.cached_value
clear_cached_property(o, '_cached_value')
o.value = 456
self.assertFalse(hasattr(o, '_cached_value'))
self.assertEqual(o.cached_value, 456)
self.assertEqual(o._cached_value, 456)
class MaybeCloseTestCase(unittest.TestCase):
def test_maybe_close(self):
f = Mock(close=Mock(return_value=None))
with maybe_close(f):
self.assertFalse(f.close.called)
self.assertTrue(f.close.called)
with maybe_close(1):
pass
class IterFilesTestCase(unittest.TestCase):
def test_iter_files(self):
names = ['a/1.txt', 'a/2.txt', 'a/b/1.txt', 'a/b/2.txt', 'b/1.txt',
'b/2.txt', 'c.txt']
with TemporaryDirectory() as tempdir:
for name in names:
f_path = os.path.join(tempdir, name)
f_dir = os.path.split(f_path)[0]
makedirs(f_dir, exist_ok=True)
with open(f_path, 'wb') as f:
f.write(b'')
self.assertListEqual(names, sorted(iter_files(tempdir)))
self.assertListEqual(names, sorted(iter_files(tempdir + '/a/../')))
<mask token>
| <mask token>
class CamelToUnderscoreTestCase(unittest.TestCase):
<mask token>
<mask token>
class NotSetTestCase(unittest.TestCase):
def test_repr(self):
self.assertEqual(repr(NOT_SET), 'NOT_SET')
class _CachedPropertyHelper(object):
def __init__(self, value):
self.value = value
@cached_property('_cached_value')
def cached_value(self):
return self.value
class CachedPropertyTestCase(unittest.TestCase):
def test_cached_property(self):
o = _CachedPropertyHelper(0)
self.assertFalse(hasattr(o, '_cached_value'))
o.value = 123
self.assertEqual(o.cached_value, 123)
self.assertTrue(hasattr(o, '_cached_value'))
self.assertEqual(o._cached_value, 123)
o.value = 456
self.assertEqual(o.cached_value, 123)
self.assertEqual(o._cached_value, 123)
def test_clear_cached_property(self):
o = _CachedPropertyHelper(123)
_ = o.cached_value
clear_cached_property(o, '_cached_value')
o.value = 456
self.assertFalse(hasattr(o, '_cached_value'))
self.assertEqual(o.cached_value, 456)
self.assertEqual(o._cached_value, 456)
class MaybeCloseTestCase(unittest.TestCase):
def test_maybe_close(self):
f = Mock(close=Mock(return_value=None))
with maybe_close(f):
self.assertFalse(f.close.called)
self.assertTrue(f.close.called)
with maybe_close(1):
pass
class IterFilesTestCase(unittest.TestCase):
def test_iter_files(self):
names = ['a/1.txt', 'a/2.txt', 'a/b/1.txt', 'a/b/2.txt', 'b/1.txt',
'b/2.txt', 'c.txt']
with TemporaryDirectory() as tempdir:
for name in names:
f_path = os.path.join(tempdir, name)
f_dir = os.path.split(f_path)[0]
makedirs(f_dir, exist_ok=True)
with open(f_path, 'wb') as f:
f.write(b'')
self.assertListEqual(names, sorted(iter_files(tempdir)))
self.assertListEqual(names, sorted(iter_files(tempdir + '/a/../')))
<mask token>
| <mask token>
class HumanizeDurationTestCase(unittest.TestCase):
<mask token>
def test_positive(self):
for seconds, answer in self.cases:
result = humanize_duration(seconds)
self.assertEqual(result, answer, msg=
'humanize_duraion({!r}) is expected to be {!r}, but got {!r}.'
.format(seconds, answer, result))
def test_negative(self):
for seconds, answer in self.cases[1:]:
seconds = -seconds
answer = answer + ' ago'
result = humanize_duration(seconds)
self.assertEqual(result, answer, msg=
'humanize_duraion({!r}) is expected to be {!r}, but got {!r}.'
.format(seconds, answer, result))
class CamelToUnderscoreTestCase(unittest.TestCase):
def assert_convert(self, camel, underscore):
self.assertEqual(camel_to_underscore(camel), underscore, msg=
'{!r} should be converted to {!r}'.format(camel, underscore))
def test_camel_to_underscore(self):
examples = [('simpleTest', 'simple_test'), ('easy', 'easy'), (
'HTML', 'html'), ('simpleXML', 'simple_xml'), ('PDFLoad',
'pdf_load'), ('startMIDDLELast', 'start_middle_last'), (
'AString', 'a_string'), ('Some4Numbers234', 'some4_numbers234'),
('TEST123String', 'test123_string')]
for camel, underscore in examples:
self.assert_convert(camel, underscore)
self.assert_convert(underscore, underscore)
self.assert_convert('_{}_'.format(camel), '_{}_'.format(underscore)
)
self.assert_convert('_{}_'.format(underscore), '_{}_'.format(
underscore))
self.assert_convert('__{}__'.format(camel), '__{}__'.format(
underscore))
self.assert_convert('__{}__'.format(underscore), '__{}__'.
format(underscore))
self.assert_convert('_'.join([s.capitalize() for s in
underscore.split('_')]), underscore)
self.assert_convert('_'.join([s.upper() for s in underscore.
split('_')]), underscore)
class NotSetTestCase(unittest.TestCase):
def test_repr(self):
self.assertEqual(repr(NOT_SET), 'NOT_SET')
class _CachedPropertyHelper(object):
def __init__(self, value):
self.value = value
@cached_property('_cached_value')
def cached_value(self):
return self.value
class CachedPropertyTestCase(unittest.TestCase):
def test_cached_property(self):
o = _CachedPropertyHelper(0)
self.assertFalse(hasattr(o, '_cached_value'))
o.value = 123
self.assertEqual(o.cached_value, 123)
self.assertTrue(hasattr(o, '_cached_value'))
self.assertEqual(o._cached_value, 123)
o.value = 456
self.assertEqual(o.cached_value, 123)
self.assertEqual(o._cached_value, 123)
def test_clear_cached_property(self):
o = _CachedPropertyHelper(123)
_ = o.cached_value
clear_cached_property(o, '_cached_value')
o.value = 456
self.assertFalse(hasattr(o, '_cached_value'))
self.assertEqual(o.cached_value, 456)
self.assertEqual(o._cached_value, 456)
class MaybeCloseTestCase(unittest.TestCase):
def test_maybe_close(self):
f = Mock(close=Mock(return_value=None))
with maybe_close(f):
self.assertFalse(f.close.called)
self.assertTrue(f.close.called)
with maybe_close(1):
pass
class IterFilesTestCase(unittest.TestCase):
def test_iter_files(self):
names = ['a/1.txt', 'a/2.txt', 'a/b/1.txt', 'a/b/2.txt', 'b/1.txt',
'b/2.txt', 'c.txt']
with TemporaryDirectory() as tempdir:
for name in names:
f_path = os.path.join(tempdir, name)
f_dir = os.path.split(f_path)[0]
makedirs(f_dir, exist_ok=True)
with open(f_path, 'wb') as f:
f.write(b'')
self.assertListEqual(names, sorted(iter_files(tempdir)))
self.assertListEqual(names, sorted(iter_files(tempdir + '/a/../')))
<mask token>
| import os
import unittest
from mock import Mock
from tfsnippet.utils import *
class HumanizeDurationTestCase(unittest.TestCase):
cases = [
(0.0, '0 sec'),
(1e-8, '1e-08 sec'),
(0.1, '0.1 sec'),
(1.0, '1 sec'),
(1, '1 sec'),
(1.1, '1.1 secs'),
(59, '59 secs'),
(59.9, '59.9 secs'),
(60, '1 min'),
(61, '1 min 1 sec'),
(62, '1 min 2 secs'),
(119, '1 min 59 secs'),
(120, '2 mins'),
(121, '2 mins 1 sec'),
(122, '2 mins 2 secs'),
(3599, '59 mins 59 secs'),
(3600, '1 hr'),
(3601, '1 hr 1 sec'),
(3661, '1 hr 1 min 1 sec'),
(86399, '23 hrs 59 mins 59 secs'),
(86400, '1 day'),
(86401, '1 day 1 sec'),
(172799, '1 day 23 hrs 59 mins 59 secs'),
(259199, '2 days 23 hrs 59 mins 59 secs'),
]
def test_positive(self):
for seconds, answer in self.cases:
result = humanize_duration(seconds)
self.assertEqual(
result, answer,
msg='humanize_duraion({!r}) is expected to be {!r}, '
'but got {!r}.'.format(seconds, answer, result)
)
def test_negative(self):
for seconds, answer in self.cases[1:]:
seconds = -seconds
answer = answer + ' ago'
result = humanize_duration(seconds)
self.assertEqual(
result, answer,
msg='humanize_duraion({!r}) is expected to be {!r}, '
'but got {!r}.'.format(seconds, answer, result)
)
class CamelToUnderscoreTestCase(unittest.TestCase):
def assert_convert(self, camel, underscore):
self.assertEqual(
camel_to_underscore(camel),
underscore,
msg='{!r} should be converted to {!r}'.format(camel, underscore)
)
def test_camel_to_underscore(self):
examples = [
('simpleTest', 'simple_test'),
('easy', 'easy'),
('HTML', 'html'),
('simpleXML', 'simple_xml'),
('PDFLoad', 'pdf_load'),
('startMIDDLELast', 'start_middle_last'),
('AString', 'a_string'),
('Some4Numbers234', 'some4_numbers234'),
('TEST123String', 'test123_string'),
]
for camel, underscore in examples:
self.assert_convert(camel, underscore)
self.assert_convert(underscore, underscore)
self.assert_convert('_{}_'.format(camel),
'_{}_'.format(underscore))
self.assert_convert('_{}_'.format(underscore),
'_{}_'.format(underscore))
self.assert_convert('__{}__'.format(camel),
'__{}__'.format(underscore))
self.assert_convert('__{}__'.format(underscore),
'__{}__'.format(underscore))
self.assert_convert(
'_'.join([s.capitalize() for s in underscore.split('_')]),
underscore
)
self.assert_convert(
'_'.join([s.upper() for s in underscore.split('_')]),
underscore
)
class NotSetTestCase(unittest.TestCase):
def test_repr(self):
self.assertEqual(repr(NOT_SET), 'NOT_SET')
class _CachedPropertyHelper(object):
def __init__(self, value):
self.value = value
@cached_property('_cached_value')
def cached_value(self):
return self.value
class CachedPropertyTestCase(unittest.TestCase):
def test_cached_property(self):
o = _CachedPropertyHelper(0)
self.assertFalse(hasattr(o, '_cached_value'))
o.value = 123
self.assertEqual(o.cached_value, 123)
self.assertTrue(hasattr(o, '_cached_value'))
self.assertEqual(o._cached_value, 123)
o.value = 456
self.assertEqual(o.cached_value, 123)
self.assertEqual(o._cached_value, 123)
def test_clear_cached_property(self):
o = _CachedPropertyHelper(123)
_ = o.cached_value
clear_cached_property(o, '_cached_value')
o.value = 456
self.assertFalse(hasattr(o, '_cached_value'))
self.assertEqual(o.cached_value, 456)
self.assertEqual(o._cached_value, 456)
class MaybeCloseTestCase(unittest.TestCase):
def test_maybe_close(self):
# test having `close()`
f = Mock(close=Mock(return_value=None))
with maybe_close(f):
self.assertFalse(f.close.called)
self.assertTrue(f.close.called)
# test having not `close()`
with maybe_close(1):
pass
class IterFilesTestCase(unittest.TestCase):
def test_iter_files(self):
names = ['a/1.txt', 'a/2.txt', 'a/b/1.txt', 'a/b/2.txt',
'b/1.txt', 'b/2.txt', 'c.txt']
with TemporaryDirectory() as tempdir:
for name in names:
f_path = os.path.join(tempdir, name)
f_dir = os.path.split(f_path)[0]
makedirs(f_dir, exist_ok=True)
with open(f_path, 'wb') as f:
f.write(b'')
self.assertListEqual(names, sorted(iter_files(tempdir)))
self.assertListEqual(names, sorted(iter_files(tempdir + '/a/../')))
if __name__ == '__main__':
unittest.main()
| [
11,
12,
13,
18,
22
] |
886 | feed412278d9e711e49ef209ece0876c1de4a873 | <mask token>
def workingDate(start, end):
cal = UnitedKingdom()
res = []
delta = end - start
for i in range(delta.days + 1):
day = start + timedelta(days=i)
if cal.is_working_day(day) or day.weekday() < 5:
res.append(day)
else:
pass
return res
<mask token>
| <mask token>
print(cal.holidays(2020))
def workingDate(start, end):
cal = UnitedKingdom()
res = []
delta = end - start
for i in range(delta.days + 1):
day = start + timedelta(days=i)
if cal.is_working_day(day) or day.weekday() < 5:
res.append(day)
else:
pass
return res
<mask token>
for d in r:
print(d.strftime('%d-%B-%Y'))
print('\n' * 3)
| <mask token>
cal = UnitedKingdom()
print(cal.holidays(2020))
def workingDate(start, end):
cal = UnitedKingdom()
res = []
delta = end - start
for i in range(delta.days + 1):
day = start + timedelta(days=i)
if cal.is_working_day(day) or day.weekday() < 5:
res.append(day)
else:
pass
return res
start = datetime.today()
end = datetime(2020, 12, 23)
r = workingDate(start, end)
for d in r:
print(d.strftime('%d-%B-%Y'))
print('\n' * 3)
| from datetime import date, timedelta, datetime
from workalendar.europe import UnitedKingdom
cal = UnitedKingdom()
print(cal.holidays(2020))
def workingDate(start, end):
cal = UnitedKingdom()
res = []
delta = end - start
for i in range(delta.days + 1):
day = start + timedelta(days=i)
if cal.is_working_day(day) or day.weekday() < 5:
res.append(day)
else:
pass
return res
start = datetime.today()
end = datetime(2020, 12, 23)
r = workingDate(start, end)
for d in r:
print(d.strftime('%d-%B-%Y'))
print('\n' * 3)
| # -*- coding: UTF-8 -*-
# File name: ukWorkingDays
# Created by JKChang
# 29/07/2020, 11:20
# Tag:
# Description:
from datetime import date,timedelta,datetime
from workalendar.europe import UnitedKingdom
cal = UnitedKingdom()
print(cal.holidays(2020))
def workingDate(start,end):
cal = UnitedKingdom()
res = []
delta = end - start
for i in range(delta.days +1):
day = start + timedelta(days=i)
if cal.is_working_day(day) or day.weekday() < 5:
res.append(day)
else:
pass
return res
start = datetime.today()
end = datetime(2020, 12, 23)
r = workingDate(start,end)
for d in r:
print(d.strftime('%d-%B-%Y'))
print('\n'*3)
| [
1,
2,
3,
4,
5
] |
887 | 662fc9d64b9046180cf70ce4b26ac2b9665dba0e | # -*- coding=UTF-8 -*-
'''
Created on 20180127
@author: Harry
'''
import datetime
# today = datetime.date.today()
# weekday = today.weekday()
#
# if weekday == 0:
# print "周一"
# else:
# print "other days"
nowtime=datetime.datetime.now()
detaday = datetime.timedelta(days=-1)
da_days= nowtime + detaday
print da_days.strftime('%Y-%m-%d')
| null | null | null | null | [
0
] |
888 | ccee0e3c47fd3809e0670be24aaa6fd0a9bad3bc | class Library(object):
<mask token>
<mask token>
def cache_key(self, key):
return self._backend.cache_key(key)
<mask token>
| class Library(object):
<mask token>
<mask token>
def cache_key(self, key):
return self._backend.cache_key(key)
def get_url(self, track):
raise NotImplementedError()
| class Library(object):
def __init__(self, backend):
self._backend = backend
<mask token>
def cache_key(self, key):
return self._backend.cache_key(key)
def get_url(self, track):
raise NotImplementedError()
| class Library(object):
def __init__(self, backend):
self._backend = backend
@property
def cache(self):
return self._backend.cache
def cache_key(self, key):
return self._backend.cache_key(key)
def get_url(self, track):
raise NotImplementedError()
| # -*- coding: utf-8 -*-
class Library(object):
def __init__(self, backend):
self._backend = backend
@property
def cache(self):
return self._backend.cache
def cache_key(self, key):
return self._backend.cache_key(key)
def get_url(self, track):
raise NotImplementedError()
| [
2,
3,
4,
5,
6
] |
889 | f799fdfde537bbe8f6c49a5e1a15cf6f910a0d45 | <mask token>
class TestMaxInteger(unittest.TestCase):
<mask token>
def test_max(self):
"""Tests max_integer"""
self.assertEqual(max_integer([1, 2, 3]), 3)
self.assertEqual(max_integer([6, 2, 6]), 6)
self.assertEqual(max_integer([0, 0, 0]), 0)
self.assertEqual(max_integer([1, 5, 3]), 5)
self.assertEqual(max_integer([1, 2, -3]), 2)
self.assertEqual(max_integer([-1, -2, -3]), -1)
self.assertEqual(max_integer([2]), 2)
self.assertEqual(max_integer([]), None)
<mask token>
| <mask token>
class TestMaxInteger(unittest.TestCase):
""" Interactive tests """
def test_max(self):
"""Tests max_integer"""
self.assertEqual(max_integer([1, 2, 3]), 3)
self.assertEqual(max_integer([6, 2, 6]), 6)
self.assertEqual(max_integer([0, 0, 0]), 0)
self.assertEqual(max_integer([1, 5, 3]), 5)
self.assertEqual(max_integer([1, 2, -3]), 2)
self.assertEqual(max_integer([-1, -2, -3]), -1)
self.assertEqual(max_integer([2]), 2)
self.assertEqual(max_integer([]), None)
<mask token>
| <mask token>
max_integer = __import__('6-max_integer').max_integer
class TestMaxInteger(unittest.TestCase):
""" Interactive tests """
def test_max(self):
"""Tests max_integer"""
self.assertEqual(max_integer([1, 2, 3]), 3)
self.assertEqual(max_integer([6, 2, 6]), 6)
self.assertEqual(max_integer([0, 0, 0]), 0)
self.assertEqual(max_integer([1, 5, 3]), 5)
self.assertEqual(max_integer([1, 2, -3]), 2)
self.assertEqual(max_integer([-1, -2, -3]), -1)
self.assertEqual(max_integer([2]), 2)
self.assertEqual(max_integer([]), None)
if __name__ == '__main__':
unittest.main()
| <mask token>
import unittest
max_integer = __import__('6-max_integer').max_integer
class TestMaxInteger(unittest.TestCase):
""" Interactive tests """
def test_max(self):
"""Tests max_integer"""
self.assertEqual(max_integer([1, 2, 3]), 3)
self.assertEqual(max_integer([6, 2, 6]), 6)
self.assertEqual(max_integer([0, 0, 0]), 0)
self.assertEqual(max_integer([1, 5, 3]), 5)
self.assertEqual(max_integer([1, 2, -3]), 2)
self.assertEqual(max_integer([-1, -2, -3]), -1)
self.assertEqual(max_integer([2]), 2)
self.assertEqual(max_integer([]), None)
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/python3
"""Unittest for max_integer([..])
"""
import unittest
max_integer = __import__('6-max_integer').max_integer
class TestMaxInteger(unittest.TestCase):
""" Interactive tests """
def test_max(self):
"""Tests max_integer"""
self.assertEqual(max_integer([1, 2, 3]), 3)
self.assertEqual(max_integer([6, 2, 6]), 6)
self.assertEqual(max_integer([0, 0, 0]), 0)
self.assertEqual(max_integer([1, 5, 3]), 5)
self.assertEqual(max_integer([1, 2, -3]), 2)
self.assertEqual(max_integer([-1, -2, -3]), -1)
self.assertEqual(max_integer([2]), 2)
self.assertEqual(max_integer([]), None)
if __name__ == '__main__':
unittest.main()
| [
2,
3,
5,
6,
7
] |
890 | edd70f55e76418911d304d6eb41a6d2a93005a58 | <mask token>
class Player:
def __init__(self):
data = self._get_status()
time.sleep(data['cooldown'])
self.name = data['name']
self.cooldown = data['cooldown']
self.encumbrance = data['encumbrance']
self.strength = data['strength']
self.speed = data['speed']
self.gold = data['gold']
self.bodywear = data['bodywear']
self.footwear = data['footwear']
self.inventory = data['inventory']
self.abilities = data['abilities']
self.status = data['status']
self.has_mined = data['has_mined']
self.errors = data['errors']
self.messages = data['messages']
self.snitches = data['snitches'] if data['snitches'] else 0
self.current_room = self.check_room()
self.world = 'dark' if self.current_room['room_id'] > 499 else 'light'
self.map = self._read_file('map.txt')
self.graph = self._read_file('graph.txt')
<mask token>
def _read_file(self, filepath):
if self.world == 'dark':
filepath = 'dark_' + filepath
if not os.path.exists(filepath):
f = open(filepath, 'w+')
room = self.current_room
if 'graph' in filepath:
room = {room['room_id']: {d: '?' for d in room['exits']}}
self._write_file(filepath, {self.current_room['room_id']: room})
with open(filepath, 'r') as f:
data = json.load(f)
return data
<mask token>
<mask token>
<mask token>
def dash(self, direction, num_rooms, room_ids):
if 'dash' not in self.abilities:
print("Error! You can't dash yet!")
return
time.sleep(self.cooldown)
curr_id = self.current_room['room_id']
print('\n======================================')
print(f'Dashing {direction} from room {curr_id}...')
json = {'direction': direction, 'num_rooms': num_rooms,
'next_room_ids': room_ids}
r = requests.post(f'{url}/api/adv/dash/', headers={'Authorization':
f'Token {key}', 'Content-Type': 'application/json'}, json=json)
next_room = r.json()
if 'players' in next_room:
del next_room['players']
next_id = next_room['room_id']
self.map[next_id] = next_room
self._write_file('map.txt', self.map)
self.current_room = next_room
self.cooldown = self.current_room['cooldown']
if self.world == 'dark' and 'golden snitch' in next_room['items']:
try:
self.pick_up_loot('golden snitch')
except:
print('Somebody already got that snitch!')
elif self.world == 'light' and len(next_room['items']):
for item in next_room['items']:
self.pick_up_loot(item)
for message in next_room['messages']:
print(f'{message}')
print(f"Now the player is in {self.current_room['room_id']}")
print(f'Cooldown before next action: {self.cooldown} seconds')
print('======================================\n')
def travel(self, direction, method='move'):
time.sleep(self.cooldown)
curr_id = self.current_room['room_id']
print('\n======================================')
if 'fly' in self.abilities and self.map[str(curr_id)]['terrain'] in [
'MOUNTAIN', 'NORMAL']:
method = 'fly'
print(f'Flying {direction} from room {curr_id}...')
else:
print(f'Walking {direction} from room {curr_id}...')
if direction not in self.graph[str(curr_id)]:
print('Error! Not a valid direction from the current room')
else:
json = {'direction': direction}
if self.graph[str(curr_id)][direction] != '?':
json['next_room_id'] = str(self.graph[str(curr_id)][direction])
next_room = requests.post(f'{url}/api/adv/{method}/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
self.current_room = next_room
self.cooldown = self.current_room['cooldown']
if self.world != 'dark':
if len(next_room['items']
) > 0 and self.encumbrance < self.strength:
for item in next_room['items']:
time.sleep(next_room['cooldown'])
self.pick_up_loot(item)
elif 'golden snitch' in next_room['items']:
self.pick_up_loot('golden snitch')
if 'players' in next_room:
del next_room['players']
next_id = next_room['room_id']
if str(next_id) not in self.graph:
print(f'New room! # {next_id}')
self.graph[str(next_id)] = {e: '?' for e in next_room['exits']}
self.graph[str(curr_id)][direction] = next_id
self.graph[str(next_id)][opposite[direction]] = curr_id
self._write_file('graph.txt', self.graph)
self.map[next_id] = next_room
self._write_file('map.txt', self.map)
for message in next_room['messages']:
print(f'{message}')
print(f"Now the player is in {self.current_room['room_id']}")
print(f'Cooldown before next action: {self.cooldown} seconds')
if len(self.graph) < 500:
print(
f'Total number of rooms explored so far: {len(self.graph)}'
)
print('======================================\n')
def get_coin(self):
time.sleep(self.cooldown)
data = mine()
self.cooldown = data['cooldown']
if len(data['errors']) > 0:
self.get_coin()
<mask token>
<mask token>
<mask token>
def examine(self, item):
time.sleep(self.cooldown)
json = {'name': item}
req = requests.post(f'{url}/api/adv/examine/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
self.cooldown = req['cooldown']
if item == 'WELL':
if os.path.exists('hint.txt'):
os.remove('hint.txt')
desc = req['description']
instructions = desc.split('\n')
for line in instructions[2:]:
with open('hint.txt', 'a') as f:
f.write(f'{line}\n')
cpu = CPU()
cpu.load('hint.txt')
cpu.run()
if os.path.exists('hint.txt'):
os.remove('hint.txt')
limiter = 23 if self.world == 'light' else 24
return cpu.hint[limiter:]
else:
print(req['description'])
def pray(self):
time.sleep(self.cooldown)
req = requests.post(f'{url}/api/adv/pray/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}).json()
print(req)
time.sleep(req['cooldown'])
self.check_self()
<mask token>
def check_balance(self):
time.sleep(self.cooldown)
req = requests.get(f'{url}/api/bc/get_balance/', headers={
'Authorization': f'Token {key}'}).json()
self.coins = float(req['messages'][0].split(' ')[5])
self.cooldown = req['cooldown']
print(f"\n{req['messages'][0]}\n")
def transform_coin(self, item):
time.sleep(self.cooldown)
self.check_balance()
json = {'name': item}
if self.coins > 0 and item in self.inventory:
time.sleep(self.cooldown)
req = requests.post(f'{url}/api/adv/transmogrify/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
print(req)
self.cooldown = req['cooldown']
for item in req['items']:
self.pick_up_loot(item)
def warp(self):
if 'warp' in self.abilities:
time.sleep(self.cooldown)
req = requests.post(f'{url}/api/adv/warp/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}).json()
print(req['messages'][0])
self.cooldown = req['cooldown']
if self.world == 'light':
self.world = 'dark'
else:
self.world = 'light'
self.current_room = req
time.sleep(self.cooldown)
self.check_self()
if req['room_id'] not in self.graph:
g = self.graph
g[req['room_id']] = {d: '?' for d in req['exits']}
self._write_file('graph.txt', g)
m = self.map
m[req['room_id']] = req
self._write_file('map.txt', m)
else:
print('You do not have the warp ability yet!')
| <mask token>
class Player:
def __init__(self):
data = self._get_status()
time.sleep(data['cooldown'])
self.name = data['name']
self.cooldown = data['cooldown']
self.encumbrance = data['encumbrance']
self.strength = data['strength']
self.speed = data['speed']
self.gold = data['gold']
self.bodywear = data['bodywear']
self.footwear = data['footwear']
self.inventory = data['inventory']
self.abilities = data['abilities']
self.status = data['status']
self.has_mined = data['has_mined']
self.errors = data['errors']
self.messages = data['messages']
self.snitches = data['snitches'] if data['snitches'] else 0
self.current_room = self.check_room()
self.world = 'dark' if self.current_room['room_id'] > 499 else 'light'
self.map = self._read_file('map.txt')
self.graph = self._read_file('graph.txt')
def _get_status(self):
r = requests.post(f'{url}/api/adv/status/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'})
return r.json()
def _read_file(self, filepath):
if self.world == 'dark':
filepath = 'dark_' + filepath
if not os.path.exists(filepath):
f = open(filepath, 'w+')
room = self.current_room
if 'graph' in filepath:
room = {room['room_id']: {d: '?' for d in room['exits']}}
self._write_file(filepath, {self.current_room['room_id']: room})
with open(filepath, 'r') as f:
data = json.load(f)
return data
def _write_file(self, filepath, data):
if self.world == 'dark' and 'dark' not in filepath:
filepath = 'dark_' + filepath
with open(filepath, 'w+') as outfile:
json.dump(data, outfile)
<mask token>
<mask token>
def dash(self, direction, num_rooms, room_ids):
if 'dash' not in self.abilities:
print("Error! You can't dash yet!")
return
time.sleep(self.cooldown)
curr_id = self.current_room['room_id']
print('\n======================================')
print(f'Dashing {direction} from room {curr_id}...')
json = {'direction': direction, 'num_rooms': num_rooms,
'next_room_ids': room_ids}
r = requests.post(f'{url}/api/adv/dash/', headers={'Authorization':
f'Token {key}', 'Content-Type': 'application/json'}, json=json)
next_room = r.json()
if 'players' in next_room:
del next_room['players']
next_id = next_room['room_id']
self.map[next_id] = next_room
self._write_file('map.txt', self.map)
self.current_room = next_room
self.cooldown = self.current_room['cooldown']
if self.world == 'dark' and 'golden snitch' in next_room['items']:
try:
self.pick_up_loot('golden snitch')
except:
print('Somebody already got that snitch!')
elif self.world == 'light' and len(next_room['items']):
for item in next_room['items']:
self.pick_up_loot(item)
for message in next_room['messages']:
print(f'{message}')
print(f"Now the player is in {self.current_room['room_id']}")
print(f'Cooldown before next action: {self.cooldown} seconds')
print('======================================\n')
def travel(self, direction, method='move'):
time.sleep(self.cooldown)
curr_id = self.current_room['room_id']
print('\n======================================')
if 'fly' in self.abilities and self.map[str(curr_id)]['terrain'] in [
'MOUNTAIN', 'NORMAL']:
method = 'fly'
print(f'Flying {direction} from room {curr_id}...')
else:
print(f'Walking {direction} from room {curr_id}...')
if direction not in self.graph[str(curr_id)]:
print('Error! Not a valid direction from the current room')
else:
json = {'direction': direction}
if self.graph[str(curr_id)][direction] != '?':
json['next_room_id'] = str(self.graph[str(curr_id)][direction])
next_room = requests.post(f'{url}/api/adv/{method}/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
self.current_room = next_room
self.cooldown = self.current_room['cooldown']
if self.world != 'dark':
if len(next_room['items']
) > 0 and self.encumbrance < self.strength:
for item in next_room['items']:
time.sleep(next_room['cooldown'])
self.pick_up_loot(item)
elif 'golden snitch' in next_room['items']:
self.pick_up_loot('golden snitch')
if 'players' in next_room:
del next_room['players']
next_id = next_room['room_id']
if str(next_id) not in self.graph:
print(f'New room! # {next_id}')
self.graph[str(next_id)] = {e: '?' for e in next_room['exits']}
self.graph[str(curr_id)][direction] = next_id
self.graph[str(next_id)][opposite[direction]] = curr_id
self._write_file('graph.txt', self.graph)
self.map[next_id] = next_room
self._write_file('map.txt', self.map)
for message in next_room['messages']:
print(f'{message}')
print(f"Now the player is in {self.current_room['room_id']}")
print(f'Cooldown before next action: {self.cooldown} seconds')
if len(self.graph) < 500:
print(
f'Total number of rooms explored so far: {len(self.graph)}'
)
print('======================================\n')
def get_coin(self):
time.sleep(self.cooldown)
data = mine()
self.cooldown = data['cooldown']
if len(data['errors']) > 0:
self.get_coin()
def pick_up_loot(self, item):
print(f'Looting {item}')
json = {'name': item}
if self.encumbrance < self.strength:
time.sleep(self.cooldown)
req = requests.post(f'{url}/api/adv/take/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
self.cooldown = req['cooldown']
time.sleep(self.cooldown)
self.check_self('item pick up'
) if self.world == 'light' else print(' Success!\n ' +
req['messages'][0] if len(req['messages']) > 0 else print(
""" Oh NO!
just as quickly as you arrived, the Golden Snitch disappeared to the next room and out of grasp!"""
))
elif 'carry' in self.abilities:
if len(self.status) != 0:
print(
'It seems your Bag is full and Glasowyn is already carring something!'
)
else:
req = requests.post(f'{url}/api/adv/carry/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
self.cooldown = req['cooldown']
print(req)
else:
print('Your Bag is full!')
<mask token>
def buy_name(self, name):
time.sleep(self.cooldown)
json = {'name': name}
req = requests.post(f'{url}/api/adv/change_name/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
print(req)
time.sleep(req['cooldown'])
json['confirm'] = 'aye'
r1_conf = requests.post(f'{url}/api/adv/change_name/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
print(r1_conf)
time.sleep(r1_conf['cooldown'])
self.check_self()
def examine(self, item):
time.sleep(self.cooldown)
json = {'name': item}
req = requests.post(f'{url}/api/adv/examine/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
self.cooldown = req['cooldown']
if item == 'WELL':
if os.path.exists('hint.txt'):
os.remove('hint.txt')
desc = req['description']
instructions = desc.split('\n')
for line in instructions[2:]:
with open('hint.txt', 'a') as f:
f.write(f'{line}\n')
cpu = CPU()
cpu.load('hint.txt')
cpu.run()
if os.path.exists('hint.txt'):
os.remove('hint.txt')
limiter = 23 if self.world == 'light' else 24
return cpu.hint[limiter:]
else:
print(req['description'])
def pray(self):
time.sleep(self.cooldown)
req = requests.post(f'{url}/api/adv/pray/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}).json()
print(req)
time.sleep(req['cooldown'])
self.check_self()
<mask token>
def check_balance(self):
time.sleep(self.cooldown)
req = requests.get(f'{url}/api/bc/get_balance/', headers={
'Authorization': f'Token {key}'}).json()
self.coins = float(req['messages'][0].split(' ')[5])
self.cooldown = req['cooldown']
print(f"\n{req['messages'][0]}\n")
def transform_coin(self, item):
time.sleep(self.cooldown)
self.check_balance()
json = {'name': item}
if self.coins > 0 and item in self.inventory:
time.sleep(self.cooldown)
req = requests.post(f'{url}/api/adv/transmogrify/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
print(req)
self.cooldown = req['cooldown']
for item in req['items']:
self.pick_up_loot(item)
def warp(self):
if 'warp' in self.abilities:
time.sleep(self.cooldown)
req = requests.post(f'{url}/api/adv/warp/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}).json()
print(req['messages'][0])
self.cooldown = req['cooldown']
if self.world == 'light':
self.world = 'dark'
else:
self.world = 'light'
self.current_room = req
time.sleep(self.cooldown)
self.check_self()
if req['room_id'] not in self.graph:
g = self.graph
g[req['room_id']] = {d: '?' for d in req['exits']}
self._write_file('graph.txt', g)
m = self.map
m[req['room_id']] = req
self._write_file('map.txt', m)
else:
print('You do not have the warp ability yet!')
| <mask token>
class Player:
def __init__(self):
data = self._get_status()
time.sleep(data['cooldown'])
self.name = data['name']
self.cooldown = data['cooldown']
self.encumbrance = data['encumbrance']
self.strength = data['strength']
self.speed = data['speed']
self.gold = data['gold']
self.bodywear = data['bodywear']
self.footwear = data['footwear']
self.inventory = data['inventory']
self.abilities = data['abilities']
self.status = data['status']
self.has_mined = data['has_mined']
self.errors = data['errors']
self.messages = data['messages']
self.snitches = data['snitches'] if data['snitches'] else 0
self.current_room = self.check_room()
self.world = 'dark' if self.current_room['room_id'] > 499 else 'light'
self.map = self._read_file('map.txt')
self.graph = self._read_file('graph.txt')
def _get_status(self):
r = requests.post(f'{url}/api/adv/status/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'})
return r.json()
def _read_file(self, filepath):
if self.world == 'dark':
filepath = 'dark_' + filepath
if not os.path.exists(filepath):
f = open(filepath, 'w+')
room = self.current_room
if 'graph' in filepath:
room = {room['room_id']: {d: '?' for d in room['exits']}}
self._write_file(filepath, {self.current_room['room_id']: room})
with open(filepath, 'r') as f:
data = json.load(f)
return data
def _write_file(self, filepath, data):
if self.world == 'dark' and 'dark' not in filepath:
filepath = 'dark_' + filepath
with open(filepath, 'w+') as outfile:
json.dump(data, outfile)
def check_room(self):
r = requests.get(f'{url}/api/adv/init/', headers={'Authorization':
f'Token {key}'})
data = r.json()
if 'players' in data:
del data['players']
return data
def check_self(self, cause=None):
data = self._get_status()
cleaned = {**data}
cleaned['status'].append(
"Glasowyn's hands stand Empty and Effervescent, see them filled."
) if len(cleaned['status']) < 1 else None
cleaned['world'] = self.world
cut = ['has_mined', 'errors']
for k in cut:
del cleaned[k]
if cause == 'item pick up':
ret = f""" You are now held down by the weight of {cleaned['encumbrance']} Stones.
Your Experience and equipment Grant you the ability to
carry {cleaned['strength']} stones before you need to take longer rests.
Your bag now carries {cleaned['inventory']}"""
print(ret +
f"""
Your ghost seems to have the space to carry an additional item if you would like"""
if 'carry' in cleaned['abilities'] and len(cleaned[
'status']) else ret)
else:
print('\n' + '*' * 22 + ' ' + 'Your Current State' + ' ' + '*' * 22
)
for item in cleaned.items():
print(f'{item[0]}: {item[1]}')
print('*' * 64 + '\n')
self.name = data['name']
self.cooldown = data['cooldown']
self.encumbrance = data['encumbrance']
self.strength = data['strength']
self.speed = data['speed']
self.gold = data['gold']
self.bodywear = data['bodywear']
self.footwear = data['footwear']
self.inventory = data['inventory']
self.abilities = data['abilities']
self.status = data['status']
self.has_mined = data['has_mined']
self.errors = data['errors']
self.messages = data['messages']
self.snitches = data['snitches'] if data['snitches'] else 0
self.map = self._read_file('map.txt')
self.graph = self._read_file('graph.txt')
def dash(self, direction, num_rooms, room_ids):
if 'dash' not in self.abilities:
print("Error! You can't dash yet!")
return
time.sleep(self.cooldown)
curr_id = self.current_room['room_id']
print('\n======================================')
print(f'Dashing {direction} from room {curr_id}...')
json = {'direction': direction, 'num_rooms': num_rooms,
'next_room_ids': room_ids}
r = requests.post(f'{url}/api/adv/dash/', headers={'Authorization':
f'Token {key}', 'Content-Type': 'application/json'}, json=json)
next_room = r.json()
if 'players' in next_room:
del next_room['players']
next_id = next_room['room_id']
self.map[next_id] = next_room
self._write_file('map.txt', self.map)
self.current_room = next_room
self.cooldown = self.current_room['cooldown']
if self.world == 'dark' and 'golden snitch' in next_room['items']:
try:
self.pick_up_loot('golden snitch')
except:
print('Somebody already got that snitch!')
elif self.world == 'light' and len(next_room['items']):
for item in next_room['items']:
self.pick_up_loot(item)
for message in next_room['messages']:
print(f'{message}')
print(f"Now the player is in {self.current_room['room_id']}")
print(f'Cooldown before next action: {self.cooldown} seconds')
print('======================================\n')
def travel(self, direction, method='move'):
time.sleep(self.cooldown)
curr_id = self.current_room['room_id']
print('\n======================================')
if 'fly' in self.abilities and self.map[str(curr_id)]['terrain'] in [
'MOUNTAIN', 'NORMAL']:
method = 'fly'
print(f'Flying {direction} from room {curr_id}...')
else:
print(f'Walking {direction} from room {curr_id}...')
if direction not in self.graph[str(curr_id)]:
print('Error! Not a valid direction from the current room')
else:
json = {'direction': direction}
if self.graph[str(curr_id)][direction] != '?':
json['next_room_id'] = str(self.graph[str(curr_id)][direction])
next_room = requests.post(f'{url}/api/adv/{method}/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
self.current_room = next_room
self.cooldown = self.current_room['cooldown']
if self.world != 'dark':
if len(next_room['items']
) > 0 and self.encumbrance < self.strength:
for item in next_room['items']:
time.sleep(next_room['cooldown'])
self.pick_up_loot(item)
elif 'golden snitch' in next_room['items']:
self.pick_up_loot('golden snitch')
if 'players' in next_room:
del next_room['players']
next_id = next_room['room_id']
if str(next_id) not in self.graph:
print(f'New room! # {next_id}')
self.graph[str(next_id)] = {e: '?' for e in next_room['exits']}
self.graph[str(curr_id)][direction] = next_id
self.graph[str(next_id)][opposite[direction]] = curr_id
self._write_file('graph.txt', self.graph)
self.map[next_id] = next_room
self._write_file('map.txt', self.map)
for message in next_room['messages']:
print(f'{message}')
print(f"Now the player is in {self.current_room['room_id']}")
print(f'Cooldown before next action: {self.cooldown} seconds')
if len(self.graph) < 500:
print(
f'Total number of rooms explored so far: {len(self.graph)}'
)
print('======================================\n')
def get_coin(self):
time.sleep(self.cooldown)
data = mine()
self.cooldown = data['cooldown']
if len(data['errors']) > 0:
self.get_coin()
def pick_up_loot(self, item):
print(f'Looting {item}')
json = {'name': item}
if self.encumbrance < self.strength:
time.sleep(self.cooldown)
req = requests.post(f'{url}/api/adv/take/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
self.cooldown = req['cooldown']
time.sleep(self.cooldown)
self.check_self('item pick up'
) if self.world == 'light' else print(' Success!\n ' +
req['messages'][0] if len(req['messages']) > 0 else print(
""" Oh NO!
just as quickly as you arrived, the Golden Snitch disappeared to the next room and out of grasp!"""
))
elif 'carry' in self.abilities:
if len(self.status) != 0:
print(
                    'It seems your Bag is full and Glasowyn is already carrying something!'
)
else:
req = requests.post(f'{url}/api/adv/carry/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
self.cooldown = req['cooldown']
print(req)
else:
print('Your Bag is full!')
<mask token>
def buy_name(self, name):
time.sleep(self.cooldown)
json = {'name': name}
req = requests.post(f'{url}/api/adv/change_name/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
print(req)
time.sleep(req['cooldown'])
json['confirm'] = 'aye'
r1_conf = requests.post(f'{url}/api/adv/change_name/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
print(r1_conf)
time.sleep(r1_conf['cooldown'])
self.check_self()
def examine(self, item):
time.sleep(self.cooldown)
json = {'name': item}
req = requests.post(f'{url}/api/adv/examine/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
self.cooldown = req['cooldown']
if item == 'WELL':
if os.path.exists('hint.txt'):
os.remove('hint.txt')
desc = req['description']
instructions = desc.split('\n')
for line in instructions[2:]:
with open('hint.txt', 'a') as f:
f.write(f'{line}\n')
cpu = CPU()
cpu.load('hint.txt')
cpu.run()
if os.path.exists('hint.txt'):
os.remove('hint.txt')
limiter = 23 if self.world == 'light' else 24
return cpu.hint[limiter:]
else:
print(req['description'])
def pray(self):
time.sleep(self.cooldown)
req = requests.post(f'{url}/api/adv/pray/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}).json()
print(req)
time.sleep(req['cooldown'])
self.check_self()
<mask token>
def check_balance(self):
time.sleep(self.cooldown)
req = requests.get(f'{url}/api/bc/get_balance/', headers={
'Authorization': f'Token {key}'}).json()
self.coins = float(req['messages'][0].split(' ')[5])
self.cooldown = req['cooldown']
print(f"\n{req['messages'][0]}\n")
def transform_coin(self, item):
time.sleep(self.cooldown)
self.check_balance()
json = {'name': item}
if self.coins > 0 and item in self.inventory:
time.sleep(self.cooldown)
req = requests.post(f'{url}/api/adv/transmogrify/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
print(req)
self.cooldown = req['cooldown']
for item in req['items']:
self.pick_up_loot(item)
def warp(self):
if 'warp' in self.abilities:
time.sleep(self.cooldown)
req = requests.post(f'{url}/api/adv/warp/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}).json()
print(req['messages'][0])
self.cooldown = req['cooldown']
if self.world == 'light':
self.world = 'dark'
else:
self.world = 'light'
self.current_room = req
time.sleep(self.cooldown)
self.check_self()
if req['room_id'] not in self.graph:
g = self.graph
g[req['room_id']] = {d: '?' for d in req['exits']}
self._write_file('graph.txt', g)
m = self.map
m[req['room_id']] = req
self._write_file('map.txt', m)
else:
print('You do not have the warp ability yet!')
| <mask token>
class Player:
def __init__(self):
data = self._get_status()
time.sleep(data['cooldown'])
self.name = data['name']
self.cooldown = data['cooldown']
self.encumbrance = data['encumbrance']
self.strength = data['strength']
self.speed = data['speed']
self.gold = data['gold']
self.bodywear = data['bodywear']
self.footwear = data['footwear']
self.inventory = data['inventory']
self.abilities = data['abilities']
self.status = data['status']
self.has_mined = data['has_mined']
self.errors = data['errors']
self.messages = data['messages']
self.snitches = data['snitches'] if data['snitches'] else 0
self.current_room = self.check_room()
self.world = 'dark' if self.current_room['room_id'] > 499 else 'light'
self.map = self._read_file('map.txt')
self.graph = self._read_file('graph.txt')
def _get_status(self):
r = requests.post(f'{url}/api/adv/status/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'})
return r.json()
def _read_file(self, filepath):
if self.world == 'dark':
filepath = 'dark_' + filepath
if not os.path.exists(filepath):
f = open(filepath, 'w+')
room = self.current_room
if 'graph' in filepath:
room = {room['room_id']: {d: '?' for d in room['exits']}}
self._write_file(filepath, {self.current_room['room_id']: room})
with open(filepath, 'r') as f:
data = json.load(f)
return data
def _write_file(self, filepath, data):
if self.world == 'dark' and 'dark' not in filepath:
filepath = 'dark_' + filepath
with open(filepath, 'w+') as outfile:
json.dump(data, outfile)
def check_room(self):
r = requests.get(f'{url}/api/adv/init/', headers={'Authorization':
f'Token {key}'})
data = r.json()
if 'players' in data:
del data['players']
return data
def check_self(self, cause=None):
data = self._get_status()
cleaned = {**data}
cleaned['status'].append(
"Glasowyn's hands stand Empty and Effervescent, see them filled."
) if len(cleaned['status']) < 1 else None
cleaned['world'] = self.world
cut = ['has_mined', 'errors']
for k in cut:
del cleaned[k]
if cause == 'item pick up':
ret = f""" You are now held down by the weight of {cleaned['encumbrance']} Stones.
Your Experience and equipment Grant you the ability to
carry {cleaned['strength']} stones before you need to take longer rests.
Your bag now carries {cleaned['inventory']}"""
print(ret +
f"""
Your ghost seems to have the space to carry an additional item if you would like"""
if 'carry' in cleaned['abilities'] and len(cleaned[
'status']) else ret)
else:
print('\n' + '*' * 22 + ' ' + 'Your Current State' + ' ' + '*' * 22
)
for item in cleaned.items():
print(f'{item[0]}: {item[1]}')
print('*' * 64 + '\n')
self.name = data['name']
self.cooldown = data['cooldown']
self.encumbrance = data['encumbrance']
self.strength = data['strength']
self.speed = data['speed']
self.gold = data['gold']
self.bodywear = data['bodywear']
self.footwear = data['footwear']
self.inventory = data['inventory']
self.abilities = data['abilities']
self.status = data['status']
self.has_mined = data['has_mined']
self.errors = data['errors']
self.messages = data['messages']
self.snitches = data['snitches'] if data['snitches'] else 0
self.map = self._read_file('map.txt')
self.graph = self._read_file('graph.txt')
def dash(self, direction, num_rooms, room_ids):
if 'dash' not in self.abilities:
print("Error! You can't dash yet!")
return
time.sleep(self.cooldown)
curr_id = self.current_room['room_id']
print('\n======================================')
print(f'Dashing {direction} from room {curr_id}...')
json = {'direction': direction, 'num_rooms': num_rooms,
'next_room_ids': room_ids}
r = requests.post(f'{url}/api/adv/dash/', headers={'Authorization':
f'Token {key}', 'Content-Type': 'application/json'}, json=json)
next_room = r.json()
if 'players' in next_room:
del next_room['players']
next_id = next_room['room_id']
self.map[next_id] = next_room
self._write_file('map.txt', self.map)
self.current_room = next_room
self.cooldown = self.current_room['cooldown']
if self.world == 'dark' and 'golden snitch' in next_room['items']:
try:
self.pick_up_loot('golden snitch')
except:
print('Somebody already got that snitch!')
elif self.world == 'light' and len(next_room['items']):
for item in next_room['items']:
self.pick_up_loot(item)
for message in next_room['messages']:
print(f'{message}')
print(f"Now the player is in {self.current_room['room_id']}")
print(f'Cooldown before next action: {self.cooldown} seconds')
print('======================================\n')
def travel(self, direction, method='move'):
time.sleep(self.cooldown)
curr_id = self.current_room['room_id']
print('\n======================================')
if 'fly' in self.abilities and self.map[str(curr_id)]['terrain'] in [
'MOUNTAIN', 'NORMAL']:
method = 'fly'
print(f'Flying {direction} from room {curr_id}...')
else:
print(f'Walking {direction} from room {curr_id}...')
if direction not in self.graph[str(curr_id)]:
print('Error! Not a valid direction from the current room')
else:
json = {'direction': direction}
if self.graph[str(curr_id)][direction] != '?':
json['next_room_id'] = str(self.graph[str(curr_id)][direction])
next_room = requests.post(f'{url}/api/adv/{method}/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
self.current_room = next_room
self.cooldown = self.current_room['cooldown']
if self.world != 'dark':
if len(next_room['items']
) > 0 and self.encumbrance < self.strength:
for item in next_room['items']:
time.sleep(next_room['cooldown'])
self.pick_up_loot(item)
elif 'golden snitch' in next_room['items']:
self.pick_up_loot('golden snitch')
if 'players' in next_room:
del next_room['players']
next_id = next_room['room_id']
if str(next_id) not in self.graph:
print(f'New room! # {next_id}')
self.graph[str(next_id)] = {e: '?' for e in next_room['exits']}
self.graph[str(curr_id)][direction] = next_id
self.graph[str(next_id)][opposite[direction]] = curr_id
self._write_file('graph.txt', self.graph)
self.map[next_id] = next_room
self._write_file('map.txt', self.map)
for message in next_room['messages']:
print(f'{message}')
print(f"Now the player is in {self.current_room['room_id']}")
print(f'Cooldown before next action: {self.cooldown} seconds')
if len(self.graph) < 500:
print(
f'Total number of rooms explored so far: {len(self.graph)}'
)
print('======================================\n')
def get_coin(self):
time.sleep(self.cooldown)
data = mine()
self.cooldown = data['cooldown']
if len(data['errors']) > 0:
self.get_coin()
def pick_up_loot(self, item):
print(f'Looting {item}')
json = {'name': item}
if self.encumbrance < self.strength:
time.sleep(self.cooldown)
req = requests.post(f'{url}/api/adv/take/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
self.cooldown = req['cooldown']
time.sleep(self.cooldown)
self.check_self('item pick up'
) if self.world == 'light' else print(' Success!\n ' +
req['messages'][0] if len(req['messages']) > 0 else print(
""" Oh NO!
just as quickly as you arrived, the Golden Snitch disappeared to the next room and out of grasp!"""
))
elif 'carry' in self.abilities:
if len(self.status) != 0:
print(
                    'It seems your Bag is full and Glasowyn is already carrying something!'
)
else:
req = requests.post(f'{url}/api/adv/carry/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
self.cooldown = req['cooldown']
print(req)
else:
print('Your Bag is full!')
def drop_loot(self, item):
time.sleep(self.cooldown)
json = {'name': item}
req = requests.post(f'{url}/api/adv/drop/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
time.sleep(req['cooldown'])
self.check_self()
def buy_name(self, name):
time.sleep(self.cooldown)
json = {'name': name}
req = requests.post(f'{url}/api/adv/change_name/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
print(req)
time.sleep(req['cooldown'])
json['confirm'] = 'aye'
r1_conf = requests.post(f'{url}/api/adv/change_name/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
print(r1_conf)
time.sleep(r1_conf['cooldown'])
self.check_self()
def examine(self, item):
time.sleep(self.cooldown)
json = {'name': item}
req = requests.post(f'{url}/api/adv/examine/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
self.cooldown = req['cooldown']
if item == 'WELL':
if os.path.exists('hint.txt'):
os.remove('hint.txt')
desc = req['description']
instructions = desc.split('\n')
for line in instructions[2:]:
with open('hint.txt', 'a') as f:
f.write(f'{line}\n')
cpu = CPU()
cpu.load('hint.txt')
cpu.run()
if os.path.exists('hint.txt'):
os.remove('hint.txt')
limiter = 23 if self.world == 'light' else 24
return cpu.hint[limiter:]
else:
print(req['description'])
def pray(self):
time.sleep(self.cooldown)
req = requests.post(f'{url}/api/adv/pray/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}).json()
print(req)
time.sleep(req['cooldown'])
self.check_self()
def wear(self, item):
time.sleep(self.cooldown)
json = {'name': item}
req = requests.post(f'{url}/api/adv/wear/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
self.cooldown = req['cooldown']
time.sleep(self.cooldown)
self.check_self()
def check_balance(self):
time.sleep(self.cooldown)
req = requests.get(f'{url}/api/bc/get_balance/', headers={
'Authorization': f'Token {key}'}).json()
self.coins = float(req['messages'][0].split(' ')[5])
self.cooldown = req['cooldown']
print(f"\n{req['messages'][0]}\n")
def transform_coin(self, item):
time.sleep(self.cooldown)
self.check_balance()
json = {'name': item}
if self.coins > 0 and item in self.inventory:
time.sleep(self.cooldown)
req = requests.post(f'{url}/api/adv/transmogrify/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}, json=json).json()
print(req)
self.cooldown = req['cooldown']
for item in req['items']:
self.pick_up_loot(item)
def warp(self):
if 'warp' in self.abilities:
time.sleep(self.cooldown)
req = requests.post(f'{url}/api/adv/warp/', headers={
'Authorization': f'Token {key}', 'Content-Type':
'application/json'}).json()
print(req['messages'][0])
self.cooldown = req['cooldown']
if self.world == 'light':
self.world = 'dark'
else:
self.world = 'light'
self.current_room = req
time.sleep(self.cooldown)
self.check_self()
if req['room_id'] not in self.graph:
g = self.graph
g[req['room_id']] = {d: '?' for d in req['exits']}
self._write_file('graph.txt', g)
m = self.map
m[req['room_id']] = req
self._write_file('map.txt', m)
else:
print('You do not have the warp ability yet!')
| from api import url, key, opposite
import requests
import json
import time
import os
from miner import mine
from cpu import *
class Player:
def __init__(self):
data = self._get_status()
time.sleep(data['cooldown'])
self.name = data['name']
self.cooldown = data['cooldown']
self.encumbrance = data['encumbrance']
self.strength = data['strength']
self.speed = data['speed']
self.gold = data['gold']
self.bodywear = data['bodywear']
self.footwear = data['footwear']
self.inventory = data['inventory']
self.abilities = data['abilities']
self.status = data['status']
self.has_mined = data['has_mined']
self.errors = data['errors']
self.messages = data['messages']
self.snitches = data['snitches'] if data['snitches'] else 0
self.current_room = self.check_room()
self.world = "dark" if self.current_room['room_id'] > 499 else "light"
self.map = self._read_file('map.txt')
self.graph = self._read_file('graph.txt')
def _get_status(self):
r = requests.post(f"{url}/api/adv/status/",
headers={'Authorization': f"Token {key}", "Content-Type": "application/json"})
return r.json()
def _read_file(self, filepath):
if self.world == 'dark':
filepath = 'dark_' + filepath
if not os.path.exists(filepath):
f = open(filepath, 'w+')
room = self.current_room
if 'graph' in filepath:
room = {room['room_id']: {d: '?' for d in room['exits']}}
self._write_file(filepath, {self.current_room['room_id']: room})
with open(filepath, 'r') as f:
data = json.load(f)
return data
def _write_file(self, filepath, data):
if self.world == 'dark' and 'dark' not in filepath:
filepath = 'dark_' + filepath
with open(filepath, 'w+') as outfile:
json.dump(data, outfile)
def check_room(self):
r = requests.get(f"{url}/api/adv/init/",
headers={'Authorization': f"Token {key}"})
data = r.json()
if 'players' in data:
del data['players']
return data
def check_self(self, cause=None):
data = self._get_status()
cleaned = {**data} # How cool is the spread operator!
cleaned['status'].append("Glasowyn's hands stand Empty and Effervescent, see them filled.") if len(
cleaned['status']) < 1 else None
cleaned["world"] = self.world
cut = ['has_mined', 'errors', ]
for k in cut:
del cleaned[k]
if cause == "item pick up":
ret = f" You are now held down by the weight of {cleaned['encumbrance']} Stones.\n Your Experience and equipment Grant you the ability to\n carry {cleaned['strength']} stones before you need to take longer rests.\n Your bag now carries {cleaned['inventory']}"
print(ret + f"\n Your ghost seems to have the space to carry an additional item if you would like" if "carry" in cleaned['abilities'] and len(
cleaned['status']) else ret)
else:
print('\n'+"*"*22+' '+"Your Current State"+' '+"*"*22)
for item in cleaned.items():
print(f"{item[0]}: {item[1]}")
print("*"*64+'\n')
self.name = data['name']
self.cooldown = data['cooldown']
self.encumbrance = data['encumbrance']
self.strength = data['strength']
self.speed = data['speed']
self.gold = data['gold']
self.bodywear = data['bodywear']
self.footwear = data['footwear']
self.inventory = data['inventory']
self.abilities = data['abilities']
self.status = data['status']
self.has_mined = data['has_mined']
self.errors = data['errors']
self.messages = data['messages']
self.snitches = data['snitches'] if data['snitches'] else 0
self.map = self._read_file('map.txt')
self.graph = self._read_file('graph.txt')
def dash(self, direction, num_rooms, room_ids):
if "dash" not in self.abilities:
print("Error! You can't dash yet!")
return
time.sleep(self.cooldown)
curr_id = self.current_room['room_id']
print("\n======================================")
print(f"Dashing {direction} from room {curr_id}...")
json = {"direction": direction,
"num_rooms": num_rooms, "next_room_ids": room_ids}
r = requests.post(f"{url}/api/adv/dash/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json)
next_room = r.json()
if 'players' in next_room:
del next_room['players']
next_id = next_room['room_id']
# update map with room info
self.map[next_id] = next_room
self._write_file('map.txt', self.map)
# change current room and update cooldown
self.current_room = next_room
self.cooldown = self.current_room['cooldown']
if self.world == 'dark' and 'golden snitch' in next_room['items']:
try:
self.pick_up_loot('golden snitch')
except:
print("Somebody already got that snitch!")
elif self.world == 'light' and len(next_room['items']):
for item in next_room['items']:
self.pick_up_loot(item)
for message in next_room['messages']:
print(f"{message}")
print(f"Now the player is in {self.current_room['room_id']}")
print(f"Cooldown before next action: {self.cooldown} seconds")
print("======================================\n")
def travel(self, direction, method="move"):
time.sleep(self.cooldown)
curr_id = self.current_room['room_id']
print("\n======================================")
if "fly" in self.abilities and self.map[str(curr_id)]['terrain'] in ['MOUNTAIN', 'NORMAL']:
method = "fly"
print(f"Flying {direction} from room {curr_id}...")
else:
print(f"Walking {direction} from room {curr_id}...")
if direction not in self.graph[str(curr_id)]:
print("Error! Not a valid direction from the current room")
else:
json = {"direction": direction}
if self.graph[str(curr_id)][direction] != "?":
json['next_room_id'] = str(self.graph[str(curr_id)][direction])
next_room = requests.post(f"{url}/api/adv/{method}/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
# change current room and update cooldown
self.current_room = next_room
self.cooldown = self.current_room['cooldown']
if self.world != 'dark':
# Code for looting any items in the room if the space is available
if len(next_room['items']) > 0 and self.encumbrance < self.strength:
for item in next_room['items']:
time.sleep(next_room['cooldown'])
self.pick_up_loot(item)
else:
if 'golden snitch' in next_room['items']:
self.pick_up_loot('golden snitch')
if 'players' in next_room:
del next_room['players']
next_id = next_room['room_id']
# add to graph and map, in addition to making graph connections
if str(next_id) not in self.graph:
print(f"New room! # {next_id}")
self.graph[str(next_id)] = {
e: '?' for e in next_room['exits']}
# make graph connections and update graph
self.graph[str(curr_id)][direction] = next_id
self.graph[str(next_id)][opposite[direction]] = curr_id
self._write_file('graph.txt', self.graph)
# update map with room info
self.map[next_id] = next_room
self._write_file('map.txt', self.map)
for message in next_room['messages']:
print(f"{message}")
print(f"Now the player is in {self.current_room['room_id']}")
print(f"Cooldown before next action: {self.cooldown} seconds")
if len(self.graph) < 500:
print(
f"Total number of rooms explored so far: {len(self.graph)}")
print("======================================\n")
def get_coin(self):
time.sleep(self.cooldown)
data = mine()
self.cooldown = data['cooldown']
if len(data['errors']) > 0:
self.get_coin()
def pick_up_loot(self, item):
print(f"Looting {item}")
json = {"name": item}
if self.encumbrance < self.strength:
time.sleep(self.cooldown)
req = requests.post(f"{url}/api/adv/take/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
self.cooldown = req['cooldown']
time.sleep(self.cooldown)
self.check_self("item pick up") if self.world == 'light' else print(' Success!\n '+req['messages'][0] if len(req['messages']) > 0 else print(
" Oh NO!\n just as quickly as you arrived, the Golden Snitch disappeared to the next room and out of grasp!"))
else:
if "carry" in self.abilities:
if len(self.status) != 0:
                    print(
                        "It seems your Bag is full and Glasowyn is already carrying something!")
else:
req = requests.post(f"{url}/api/adv/carry/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
self.cooldown = req['cooldown']
print(req)
else:
print("Your Bag is full!")
def drop_loot(self, item):
time.sleep(self.cooldown)
json = {"name": item}
req = requests.post(f"{url}/api/adv/drop/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
time.sleep(req['cooldown'])
self.check_self()
def buy_name(self, name):
time.sleep(self.cooldown)
json = {"name": name}
req = requests.post(f"{url}/api/adv/change_name/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
print(req)
time.sleep(req['cooldown'])
json['confirm'] = "aye"
r1_conf = requests.post(f"{url}/api/adv/change_name/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
print(r1_conf)
time.sleep(r1_conf['cooldown'])
self.check_self()
def examine(self, item):
time.sleep(self.cooldown)
json = {"name": item}
req = requests.post(f"{url}/api/adv/examine/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
self.cooldown = req['cooldown']
if item == "WELL": # Examining well gives binary code to be deciphered for next coin location
if os.path.exists("hint.txt"):
os.remove("hint.txt")
desc = req['description']
instructions = desc.split('\n')
for line in instructions[2:]:
with open("hint.txt", "a") as f:
f.write(f"{line}\n")
cpu = CPU()
cpu.load('hint.txt')
cpu.run()
# clean up after itself and remove the hint file after used (new one will be made for future hints anyway)
if os.path.exists("hint.txt"):
os.remove("hint.txt")
# full message for light is "Mine your coin in room ###"
# but message for dark well is "Find your snitch in room ###"
limiter = 23 if self.world == 'light' else 24
return cpu.hint[limiter:]
else:
print(req['description'])
def pray(self):
time.sleep(self.cooldown)
req = requests.post(f"{url}/api/adv/pray/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}).json()
print(req)
time.sleep(req['cooldown'])
self.check_self()
def wear(self, item):
time.sleep(self.cooldown)
json = {"name": item}
req = requests.post(f"{url}/api/adv/wear/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
self.cooldown = req['cooldown']
time.sleep(self.cooldown)
self.check_self()
def check_balance(self):
time.sleep(self.cooldown)
req = requests.get(f"{url}/api/bc/get_balance/", headers={
'Authorization': f"Token {key}"}).json()
self.coins = float(req['messages'][0].split(' ')[5])
self.cooldown = req['cooldown']
print(f"\n{req['messages'][0]}\n")
def transform_coin(self, item):
time.sleep(self.cooldown)
self.check_balance()
json = {"name": item}
if self.coins > 0 and item in self.inventory:
time.sleep(self.cooldown)
req = requests.post(f"{url}/api/adv/transmogrify/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
print(req)
self.cooldown = req['cooldown']
for item in req['items']:
self.pick_up_loot(item)
def warp(self):
if "warp" in self.abilities:
time.sleep(self.cooldown)
req = requests.post(f"{url}/api/adv/warp/", headers={
'Authorization': f"Token {key}", "Content-Type": "application/json"}).json()
print(req['messages'][0])
self.cooldown = req['cooldown']
if self.world == 'light':
self.world = 'dark'
else:
self.world = 'light'
self.current_room = req
time.sleep(self.cooldown)
self.check_self()
if req['room_id'] not in self.graph:
# Just warped to a previously unknown room, add it to graph and map
g = self.graph
g[req['room_id']] = {d: '?' for d in req['exits']}
self._write_file('graph.txt', g)
m = self.map
m[req['room_id']] = req
self._write_file('map.txt', m)
else:
print("You do not have the warp ability yet!")
| [
11,
15,
17,
19,
21
] |
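Illustrative sketch (not part of the original blob): one way to drive the Player
class above, assuming the listing is saved as player.py and that api.py provides
the `url`, `key` and `opposite` values it imports; the module name and the
stopping rule are assumptions, not taken from the original project.

from player import Player  # hypothetical module name for the listing above

p = Player()
p.check_self()
room = str(p.current_room['room_id'])
# walk through the first unexplored exit recorded in the persisted graph
for direction, neighbor in p.graph[room].items():
    if neighbor == '?':
        p.travel(direction)
        break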
891 | e14bea6376c8649bf9c9c5759d530af773664cd4 | <mask token>
| <mask token>
def get_apriori_input(input_file, output_file, sample_col='Sample',
gene_id_col='Gene_ID'):
df = pd.read_csv(input_file, sep='\t')
sample_names = df[sample_col].unique()
with open(output_file, 'w') as out:
csv_writer = csv.writer(out, delimiter='\t')
for sample_name in sample_names:
bool = df[sample_col] == sample_name
df_sample = df[bool]
gene_ids = df_sample[gene_id_col]
gene_string = ','.join(gene_ids)
csv_writer.writerow([sample_name, gene_string])
<mask token>
| <mask token>
def get_apriori_input(input_file, output_file, sample_col='Sample',
gene_id_col='Gene_ID'):
df = pd.read_csv(input_file, sep='\t')
sample_names = df[sample_col].unique()
with open(output_file, 'w') as out:
csv_writer = csv.writer(out, delimiter='\t')
for sample_name in sample_names:
bool = df[sample_col] == sample_name
df_sample = df[bool]
gene_ids = df_sample[gene_id_col]
gene_string = ','.join(gene_ids)
csv_writer.writerow([sample_name, gene_string])
if __name__ == '__main__':
import sys
program, input_file, output_file, sample_col, gene_id_col = sys.argv
get_apriori_input(input_file, output_file, sample_col, gene_id_col)
| import pandas as pd
import csv
def get_apriori_input(input_file, output_file, sample_col='Sample',
gene_id_col='Gene_ID'):
df = pd.read_csv(input_file, sep='\t')
sample_names = df[sample_col].unique()
with open(output_file, 'w') as out:
csv_writer = csv.writer(out, delimiter='\t')
for sample_name in sample_names:
bool = df[sample_col] == sample_name
df_sample = df[bool]
gene_ids = df_sample[gene_id_col]
gene_string = ','.join(gene_ids)
csv_writer.writerow([sample_name, gene_string])
if __name__ == '__main__':
import sys
program, input_file, output_file, sample_col, gene_id_col = sys.argv
get_apriori_input(input_file, output_file, sample_col, gene_id_col)
| #!/usr/bin/env python3
import pandas as pd
import csv
def get_apriori_input(input_file,output_file,sample_col="Sample",gene_id_col="Gene_ID"):
df=pd.read_csv(input_file,sep="\t")
sample_names=df[sample_col].unique()
with open(output_file,"w") as out:
csv_writer=csv.writer(out,delimiter="\t")
for sample_name in sample_names:
bool=df[sample_col]==sample_name
df_sample=df[bool]
gene_ids=df_sample[gene_id_col]
gene_string=",".join(gene_ids)
csv_writer.writerow([sample_name,gene_string])
if __name__ == "__main__":
import sys
program,input_file,output_file,sample_col,gene_id_col=sys.argv
get_apriori_input(input_file,output_file,sample_col,gene_id_col)
| [
0,
1,
2,
3,
4
] |
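Illustrative usage sketch for get_apriori_input above (file and column names are
made up for the example, not taken from the original repository):

# python get_apriori_input.py variants.tsv baskets.tsv Sample Gene_ID
#
# input (tab-separated)          output (one comma-joined basket per sample)
#   Sample  Gene_ID                S1  TP53,KRAS
#   S1      TP53                   S2  TP53
#   S1      KRAS
#   S2      TP53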
892 | 462d73195680118d19a3d4e8a855e65aaeecb3c6 | <mask token>
class DISTRICT:
def __init__(self, cdcode, county, district, street, city, zipcode,
state, mailstreet, mailcity, mailzip, mailstate, phone, extphone,
faxnumber, email, admfname, admlname, admemail, lat, long,
distrownercode, doctype, statustype, lastupdate):
self.cdcode = cdcode
self.county = county
self.district = district
self.street = street
self.city = city
self.zipcode = zipcode
self.state = state
self.mailstreet = mailstreet
self.mailcity = mailcity
self.mailzip = mailzip
self.mailstate = mailstate
self.phone = phone
self.extphone = extphone
self.faxnumber = faxnumber
self.email = email
self.admfname = admfname
self.admlname = admlname
self.admemail = admemail
self.lat = lat
self.long = long
self.distrownercode = distrownercode
self.doctype = doctype
self.statustype = statustype
self.lastupdate = lastupdate
def get_district_name(self):
print(self.district)
def get_district_cdcode(self):
print(self.cdcode)
def get_district_statustype(self):
print(self.statustype)
<mask token>
| <mask token>
class DISTRICT:
def __init__(self, cdcode, county, district, street, city, zipcode,
state, mailstreet, mailcity, mailzip, mailstate, phone, extphone,
faxnumber, email, admfname, admlname, admemail, lat, long,
distrownercode, doctype, statustype, lastupdate):
self.cdcode = cdcode
self.county = county
self.district = district
self.street = street
self.city = city
self.zipcode = zipcode
self.state = state
self.mailstreet = mailstreet
self.mailcity = mailcity
self.mailzip = mailzip
self.mailstate = mailstate
self.phone = phone
self.extphone = extphone
self.faxnumber = faxnumber
self.email = email
self.admfname = admfname
self.admlname = admlname
self.admemail = admemail
self.lat = lat
self.long = long
self.distrownercode = distrownercode
self.doctype = doctype
self.statustype = statustype
self.lastupdate = lastupdate
def get_district_name(self):
print(self.district)
def get_district_cdcode(self):
print(self.cdcode)
def get_district_statustype(self):
print(self.statustype)
def start_end_timer():
print(time.perf_counter())
def read_text_file(strfile):
f = open(strfile, 'r')
f.read()
def print_text_file(strfile):
f = open(strfile, 'r')
print(f.read(3))
def load_text_file_to_class(strfile):
t = open('/home/student/Desktop/schooldata/copiedfile.txt', 'w')
f = open(strfile, 'r')
next(f)
for line in f:
d = []
d = line.split('\t')
district = DISTRICT(d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7],
d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15], d[16], d[
17], d[18], d[19], d[20], d[21], d[22], d[23])
district.get_district_name()
district.get_district_cdcode()
district.get_district_statustype()
f.close()
t.close()
<mask token>
| <mask token>
class DISTRICT:
def __init__(self, cdcode, county, district, street, city, zipcode,
state, mailstreet, mailcity, mailzip, mailstate, phone, extphone,
faxnumber, email, admfname, admlname, admemail, lat, long,
distrownercode, doctype, statustype, lastupdate):
self.cdcode = cdcode
self.county = county
self.district = district
self.street = street
self.city = city
self.zipcode = zipcode
self.state = state
self.mailstreet = mailstreet
self.mailcity = mailcity
self.mailzip = mailzip
self.mailstate = mailstate
self.phone = phone
self.extphone = extphone
self.faxnumber = faxnumber
self.email = email
self.admfname = admfname
self.admlname = admlname
self.admemail = admemail
self.lat = lat
self.long = long
self.distrownercode = distrownercode
self.doctype = doctype
self.statustype = statustype
self.lastupdate = lastupdate
def get_district_name(self):
print(self.district)
def get_district_cdcode(self):
print(self.cdcode)
def get_district_statustype(self):
print(self.statustype)
def start_end_timer():
print(time.perf_counter())
def read_text_file(strfile):
f = open(strfile, 'r')
f.read()
def print_text_file(strfile):
f = open(strfile, 'r')
print(f.read(3))
def load_text_file_to_class(strfile):
t = open('/home/student/Desktop/schooldata/copiedfile.txt', 'w')
f = open(strfile, 'r')
next(f)
for line in f:
d = []
d = line.split('\t')
district = DISTRICT(d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7],
d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15], d[16], d[
17], d[18], d[19], d[20], d[21], d[22], d[23])
district.get_district_name()
district.get_district_cdcode()
district.get_district_statustype()
f.close()
t.close()
start_end_timer()
<mask token>
load_text_file_to_class(strfile)
start_end_timer()
| import time
class DISTRICT:
def __init__(self, cdcode, county, district, street, city, zipcode,
state, mailstreet, mailcity, mailzip, mailstate, phone, extphone,
faxnumber, email, admfname, admlname, admemail, lat, long,
distrownercode, doctype, statustype, lastupdate):
self.cdcode = cdcode
self.county = county
self.district = district
self.street = street
self.city = city
self.zipcode = zipcode
self.state = state
self.mailstreet = mailstreet
self.mailcity = mailcity
self.mailzip = mailzip
self.mailstate = mailstate
self.phone = phone
self.extphone = extphone
self.faxnumber = faxnumber
self.email = email
self.admfname = admfname
self.admlname = admlname
self.admemail = admemail
self.lat = lat
self.long = long
self.distrownercode = distrownercode
self.doctype = doctype
self.statustype = statustype
self.lastupdate = lastupdate
def get_district_name(self):
print(self.district)
def get_district_cdcode(self):
print(self.cdcode)
def get_district_statustype(self):
print(self.statustype)
def start_end_timer():
print(time.perf_counter())
def read_text_file(strfile):
f = open(strfile, 'r')
f.read()
def print_text_file(strfile):
f = open(strfile, 'r')
print(f.read(3))
def load_text_file_to_class(strfile):
t = open('/home/student/Desktop/schooldata/copiedfile.txt', 'w')
f = open(strfile, 'r')
next(f)
for line in f:
d = []
d = line.split('\t')
district = DISTRICT(d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7],
d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15], d[16], d[
17], d[18], d[19], d[20], d[21], d[22], d[23])
district.get_district_name()
district.get_district_cdcode()
district.get_district_statustype()
f.close()
t.close()
start_end_timer()
strfile = '/home/student/Desktop/schooldata/pubdistricts.txt'
load_text_file_to_class(strfile)
start_end_timer()
| import time
class DISTRICT:
def __init__(
self, cdcode, county, district, street, city, zipcode,
state, mailstreet, mailcity, mailzip, mailstate, phone, extphone,
faxnumber, email, admfname, admlname, admemail, lat, long,
distrownercode, doctype, statustype, lastupdate):
self.cdcode = cdcode
self.county = county
self.district = district
self.street = street
self.city = city
self.zipcode = zipcode
self.state = state
self.mailstreet = mailstreet
self.mailcity = mailcity
self.mailzip = mailzip
self.mailstate = mailstate
self.phone = phone
self.extphone = extphone
self.faxnumber = faxnumber
self.email = email
self.admfname = admfname
self.admlname = admlname
self.admemail = admemail
self.lat = lat
self.long = long
self.distrownercode = distrownercode
self.doctype = doctype
self.statustype = statustype
self.lastupdate = lastupdate
def get_district_name(self):
print(self.district)
def get_district_cdcode(self):
print(self.cdcode)
def get_district_statustype(self):
print(self.statustype)
def start_end_timer():
print(time.perf_counter())
def read_text_file(strfile):
f = open(strfile, "r")
f.read()
def print_text_file(strfile):
f = open(strfile, "r")
print(f.read(3))
def load_text_file_to_class(strfile):
t = open("/home/student/Desktop/schooldata/copiedfile.txt", "w")
f = open(strfile, "r")
next(f)
for line in f:
d = []
d = line.split("\t")
# print(d)
# t.write(d)
district = DISTRICT(d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], d[11],
d[12], d[13], d[14], d[15], d[16], d[17], d[18], d[19], d[20], d[21], d[22], d[23])
district.get_district_name()
district.get_district_cdcode()
district.get_district_statustype()
f.close()
t.close()
start_end_timer()
strfile = "/home/student/Desktop/schooldata/pubdistricts.txt"
load_text_file_to_class(strfile)
start_end_timer()
| [
5,
9,
10,
12,
13
] |
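Note (editorial, hedged): the loader above expects a tab-separated export with a
header row and at least 24 columns per district record; it prints the district
name, CD code and status type for every data row. Example call, using the path
already hard-coded in the script:

# load_text_file_to_class('/home/student/Desktop/schooldata/pubdistricts.txt')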
893 | e884825325ceb401142cab0618d9d4e70e475cf5 | <mask token>
| <mask token>
for line in sys.stdin:
line = line.strip()
twits = line.split()
i = 0
while i < len(twits):
j = 0
while j < len(twits):
if i != j:
print('%s%s\t%d' % (twits[i] + ' ', twits[j], 1))
j += 1
i += 1
| <mask token>
window = 2
for line in sys.stdin:
line = line.strip()
twits = line.split()
i = 0
while i < len(twits):
j = 0
while j < len(twits):
if i != j:
print('%s%s\t%d' % (twits[i] + ' ', twits[j], 1))
j += 1
i += 1
| import sys, re
window = 2
for line in sys.stdin:
line = line.strip()
twits = line.split()
i = 0
while i < len(twits):
j = 0
while j < len(twits):
if i != j:
print('%s%s\t%d' % (twits[i] + ' ', twits[j], 1))
j += 1
i += 1
| #!/usr/bin/env python
import sys, re
window = 2
for line in sys.stdin:
line = line.strip()
twits = line.split()
i = 0
while i <len(twits):
j = 0
while j <len(twits):
if i!= j:
print("%s%s\t%d" % (twits[i]+' ', twits[j], 1))
j+=1
i+=1 | [
0,
1,
2,
3,
4
] |
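Note (editorial, hedged): the mapper above emits every ordered pair of tokens on
a line with a count of 1; the `window` variable and the `re` import are unused
as written. Illustrative run, assuming the script is saved as mapper.py:

# echo "a b c" | python mapper.py
# a b     1
# a c     1
# b a     1
# b c     1
# c a     1
# c b     1
# (the separator between the pair and the count is a tab)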
894 | 9d6b5baa8462b2996e4518dd39b5bb1efde1fd9d | <mask token>
def quartiles(values):
n = len(values)
values.sort()
Q2 = median(values)
Q1 = median(values[:int(n / 2)])
if n % 2 == 0:
Q3 = median(values[int(n / 2):])
else:
Q3 = median(values[int(n / 2 + 1):])
return Q1, Q2, Q3
<mask token>
| <mask token>
def median(values):
n = len(values)
values = sorted(values)
if n % 2 == 1:
return values[(n + 1) // 2 - 1]
else:
return int(sum(values[int(n / 2 - 1):int(n / 2 + 1)]) / 2)
def quartiles(values):
n = len(values)
values.sort()
Q2 = median(values)
Q1 = median(values[:int(n / 2)])
if n % 2 == 0:
Q3 = median(values[int(n / 2):])
else:
Q3 = median(values[int(n / 2 + 1):])
return Q1, Q2, Q3
<mask token>
| <mask token>
def median(values):
n = len(values)
values = sorted(values)
if n % 2 == 1:
return values[(n + 1) // 2 - 1]
else:
return int(sum(values[int(n / 2 - 1):int(n / 2 + 1)]) / 2)
def quartiles(values):
n = len(values)
values.sort()
Q2 = median(values)
Q1 = median(values[:int(n / 2)])
if n % 2 == 0:
Q3 = median(values[int(n / 2):])
else:
Q3 = median(values[int(n / 2 + 1):])
return Q1, Q2, Q3
<mask token>
print(Q1)
print(Q2)
print(Q3)
| n = input()
vals = list(map(int, input().split()))
def median(values):
n = len(values)
values = sorted(values)
if n % 2 == 1:
return values[(n + 1) // 2 - 1]
else:
return int(sum(values[int(n / 2 - 1):int(n / 2 + 1)]) / 2)
def quartiles(values):
n = len(values)
values.sort()
Q2 = median(values)
Q1 = median(values[:int(n / 2)])
if n % 2 == 0:
Q3 = median(values[int(n / 2):])
else:
Q3 = median(values[int(n / 2 + 1):])
return Q1, Q2, Q3
Q1, Q2, Q3 = quartiles(vals)
print(Q1)
print(Q2)
print(Q3)
| # -*- coding: utf-8 -*-
# Enter your code here. Read input from STDIN. Print output to STDOUT
n= input()
vals= list(map(int,input().split()))
def median(values):
n=len(values)
values = sorted(values)
if n%2==1:
return values[(n+1)//2 - 1]
else:
return int(sum(values[int((n/2)-1):int((n/2)+1)])/2)
def quartiles(values):
n=len(values)
values.sort()
Q2=median(values)
Q1=median(values[:int(n/2)])
#print ("values=",values)
if n%2==0:
Q3=median(values[int(n/2):])
else:
Q3=median(values[int(n/2+1):])
return Q1,Q2,Q3
Q1,Q2,Q3=quartiles(vals)
print(Q1)
print(Q2)
print(Q3)
| [
1,
2,
3,
4,
5
] |
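Worked example (editorial, not part of the original submission): for the stdin
input "9" followed by "3 7 8 5 12 14 21 13 18", the code above sorts the values
to [3, 5, 7, 8, 12, 13, 14, 18, 21] and prints 6, 12 and 16: the lower-half
median, the overall median and the upper-half median, with even-length halves
averaged and truncated to int.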
895 | 624b34d160ea6db4f5249544f1614a20f506ca9e | <mask token>
class TelaLisatrClientes:
<mask token>
def init_components(self, lista_clientes):
layout = [[sg.Text('Dados do cliente')], [sg.Listbox(values=
lista_clientes, size=(60, 10))], [sg.Submit()]]
self.__window = sg.Window('Lista de clientes').Layout(layout)
<mask token>
| <mask token>
class TelaLisatrClientes:
def __init__(self):
self.__window = None
def init_components(self, lista_clientes):
layout = [[sg.Text('Dados do cliente')], [sg.Listbox(values=
lista_clientes, size=(60, 10))], [sg.Submit()]]
self.__window = sg.Window('Lista de clientes').Layout(layout)
<mask token>
| <mask token>
class TelaLisatrClientes:
def __init__(self):
self.__window = None
def init_components(self, lista_clientes):
layout = [[sg.Text('Dados do cliente')], [sg.Listbox(values=
lista_clientes, size=(60, 10))], [sg.Submit()]]
self.__window = sg.Window('Lista de clientes').Layout(layout)
def lista_clientes(self, lista_clientes):
self.init_components(lista_clientes)
button, values = self.__window.Read()
self.__window.Close()
return button, values
| import PySimpleGUI as sg
class TelaLisatrClientes:
def __init__(self):
self.__window = None
def init_components(self, lista_clientes):
layout = [[sg.Text('Dados do cliente')], [sg.Listbox(values=
lista_clientes, size=(60, 10))], [sg.Submit()]]
self.__window = sg.Window('Lista de clientes').Layout(layout)
def lista_clientes(self, lista_clientes):
self.init_components(lista_clientes)
button, values = self.__window.Read()
self.__window.Close()
return button, values
| import PySimpleGUI as sg
class TelaLisatrClientes():
def __init__(self):
self.__window = None
def init_components(self, lista_clientes):
layout = [
[sg.Text('Dados do cliente')],
[sg.Listbox(values=lista_clientes, size=(60, 10))],
[sg.Submit()]
]
self.__window = sg.Window('Lista de clientes').Layout(layout)
def lista_clientes(self, lista_clientes):
self.init_components(lista_clientes)
button, values = self.__window.Read()
self.__window.Close()
return button, values
| [
2,
3,
4,
5,
6
] |
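Illustrative usage sketch for the listing window above (client names are made up):

# tela = TelaLisatrClientes()
# button, values = tela.lista_clientes(['Ana Souza', 'Bruno Lima'])
# `button` holds the clicked button text and `values` the listbox selection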
896 | d9156c20e046f608563bc6779575e14cc60f4c25 | <mask token>
class AccessDenied(Exception):
pass
class FileNotFound(Exception):
pass
class NotDirectory(Exception):
pass
<mask token>
def get_parent(path):
    """Returns the parent directory of the given item"""
parent = os.path.dirname(path)
try:
get_abspath(parent)
except:
parent = ''
return parent
def get_abspath(path):
    """Raises an AccessDenied exception if someone tries to cheat"""
abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT, path))
if not abspath.startswith(settings.KEPTAR_ROOT):
raise AccessDenied('%s < %s' % (abspath, settings.KEPTAR_ROOT))
return abspath
<mask token>
| <mask token>
class AccessDenied(Exception):
pass
class FileNotFound(Exception):
pass
class NotDirectory(Exception):
pass
<mask token>
def get_parent(path):
    """Returns the parent directory of the given item"""
parent = os.path.dirname(path)
try:
get_abspath(parent)
except:
parent = ''
return parent
def get_abspath(path):
    """Raises an AccessDenied exception if someone tries to cheat"""
abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT, path))
if not abspath.startswith(settings.KEPTAR_ROOT):
raise AccessDenied('%s < %s' % (abspath, settings.KEPTAR_ROOT))
return abspath
<mask token>
def get_thumbnail(file, type='', regenerate=False):
    """Returns the thumbnail belonging to ``file``, generating it first
    if it does not exist yet.
    ``type`` selects which kind of thumbnail is needed; the parameters of
    the thumbnail types can be configured in ``settings.py``.
    If ``regenerate`` is ``True``, the thumbnail is regenerated.
    """
ext = file[file.rfind('.') + 1:]
if not os.path.isfile(file) or ext.lower(
) not in settings.KEPTAR_EXTENSIONS:
raise FileNotFound(file)
basename = os.path.basename(file)
dirname = os.path.dirname(file)
thumbname = os.path.join(dirname, settings.KEPTAR_THUMBS[type]['dir'],
basename)
if regenerate or not os.path.isfile(thumbname):
if not os.path.isdir(os.path.dirname(thumbname)):
os.mkdir(os.path.dirname(thumbname))
generate_thumbnail(file, thumbname, settings.KEPTAR_THUMBS[type][
'size'])
thumburl = getattr(settings, 'KEPTAR_URL', '/media') + thumbname[len(
settings.KEPTAR_ROOT):]
return thumburl
<mask token>
| <mask token>
try:
from collections import OrderedDict
except ImportError:
from keptar.odict import OrderedDict
class AccessDenied(Exception):
pass
class FileNotFound(Exception):
pass
class NotDirectory(Exception):
pass
def enrich(filelist, relpath='', thumbnails=True):
    """Adds the necessary extra data to the image names"""
files = OrderedDict()
for f in filelist:
abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT,
relpath, f))
if os.path.isdir(abspath):
thumb = settings.KEPTAR_ICONS.get('dir', None)
url = reverse('keptar.views.listdir', args=[os.path.join(
relpath, f)])
direct_url = None
type = 'dir'
else:
if thumbnails:
try:
thumb = get_thumbnail(abspath)
except:
thumb = None
else:
thumb = settings.KEPTAR_ICONS.get('file', None)
url = reverse('keptar.views.showfile', args=[os.path.join(
relpath, f)])
direct_url = getattr(settings, 'KEPTAR_URL', '/media/'
) + relpath + f
type = 'file'
files[f] = {'relpath': relpath, 'url': url, 'abspath': abspath,
'thumb': thumb, 'type': type, 'direct_url': direct_url}
return files
def get_parent(path):
    """Returns the parent directory of the given item"""
parent = os.path.dirname(path)
try:
get_abspath(parent)
except:
parent = ''
return parent
def get_abspath(path):
    """Raises an AccessDenied exception if someone tries to cheat"""
abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT, path))
if not abspath.startswith(settings.KEPTAR_ROOT):
raise AccessDenied('%s < %s' % (abspath, settings.KEPTAR_ROOT))
return abspath
def get_filelist(path, show_hidden=getattr(settings, 'KEPTAR_SHOW_HIDDEN',
    False), thumbnails=True):
    """Returns the list of directories and files in the ``path`` directory.
    ``path`` is relative to ``settings.KEPTAR_ROOT``.
    The directories and the files are returned in two separate dicts,
    together with all sorts of extra parameters.
    The supported extensions can be configured via
    ``settings.KEPTAR_EXTENSIONS``.
    """
abspath = get_abspath(path)
if not os.path.isdir(abspath):
raise NotDirectory(abspath)
dirs = []
pictures = []
for fname in os.listdir(abspath):
file = os.path.join(abspath, fname)
if os.path.isdir(file) and (show_hidden or not fname.startswith('.')):
dirs.append(fname)
if os.path.isfile(file):
ext = file[file.rfind('.') + 1:]
if ext.lower() in settings.KEPTAR_EXTENSIONS and (show_hidden or
not fname.startswith('.')):
pictures.append(fname)
dirs.sort()
pictures.sort()
return enrich(dirs + pictures, relpath=path)
def get_thumbnail(file, type='', regenerate=False):
    """Returns the thumbnail belonging to ``file``, generating it first
    if it does not exist yet.
    ``type`` selects which kind of thumbnail is needed; the parameters of
    the thumbnail types can be configured in ``settings.py``.
    If ``regenerate`` is ``True``, the thumbnail is regenerated.
    """
ext = file[file.rfind('.') + 1:]
if not os.path.isfile(file) or ext.lower(
) not in settings.KEPTAR_EXTENSIONS:
raise FileNotFound(file)
basename = os.path.basename(file)
dirname = os.path.dirname(file)
thumbname = os.path.join(dirname, settings.KEPTAR_THUMBS[type]['dir'],
basename)
if regenerate or not os.path.isfile(thumbname):
if not os.path.isdir(os.path.dirname(thumbname)):
os.mkdir(os.path.dirname(thumbname))
generate_thumbnail(file, thumbname, settings.KEPTAR_THUMBS[type][
'size'])
thumburl = getattr(settings, 'KEPTAR_URL', '/media') + thumbname[len(
settings.KEPTAR_ROOT):]
return thumburl
def generate_thumbnail(file, thumbname, size):
image = Image.open(file)
image.thumbnail(size)
image.save(thumbname, image.format)
| from django.core.urlresolvers import reverse
from keptar import settings
import os, os.path
import Image
try:
from collections import OrderedDict
except ImportError:
from keptar.odict import OrderedDict
class AccessDenied(Exception):
pass
class FileNotFound(Exception):
pass
class NotDirectory(Exception):
pass
def enrich(filelist, relpath='', thumbnails=True):
    """Adds the necessary extra data to the image names"""
files = OrderedDict()
for f in filelist:
abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT,
relpath, f))
if os.path.isdir(abspath):
thumb = settings.KEPTAR_ICONS.get('dir', None)
url = reverse('keptar.views.listdir', args=[os.path.join(
relpath, f)])
direct_url = None
type = 'dir'
else:
if thumbnails:
try:
thumb = get_thumbnail(abspath)
except:
thumb = None
else:
thumb = settings.KEPTAR_ICONS.get('file', None)
url = reverse('keptar.views.showfile', args=[os.path.join(
relpath, f)])
direct_url = getattr(settings, 'KEPTAR_URL', '/media/'
) + relpath + f
type = 'file'
files[f] = {'relpath': relpath, 'url': url, 'abspath': abspath,
'thumb': thumb, 'type': type, 'direct_url': direct_url}
return files
def get_parent(path):
    """Returns the parent directory of the given item"""
parent = os.path.dirname(path)
try:
get_abspath(parent)
except:
parent = ''
return parent
def get_abspath(path):
    """Raises an AccessDenied exception if someone tries to cheat"""
abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT, path))
if not abspath.startswith(settings.KEPTAR_ROOT):
raise AccessDenied('%s < %s' % (abspath, settings.KEPTAR_ROOT))
return abspath
def get_filelist(path, show_hidden=getattr(settings, 'KEPTAR_SHOW_HIDDEN',
    False), thumbnails=True):
    """Returns the list of directories and files in the ``path`` directory.
    ``path`` is relative to ``settings.KEPTAR_ROOT``.
    The directories and the files are returned in two separate dicts,
    together with all sorts of extra parameters.
    The supported extensions can be configured via
    ``settings.KEPTAR_EXTENSIONS``.
    """
abspath = get_abspath(path)
if not os.path.isdir(abspath):
raise NotDirectory(abspath)
dirs = []
pictures = []
for fname in os.listdir(abspath):
file = os.path.join(abspath, fname)
if os.path.isdir(file) and (show_hidden or not fname.startswith('.')):
dirs.append(fname)
if os.path.isfile(file):
ext = file[file.rfind('.') + 1:]
if ext.lower() in settings.KEPTAR_EXTENSIONS and (show_hidden or
not fname.startswith('.')):
pictures.append(fname)
dirs.sort()
pictures.sort()
return enrich(dirs + pictures, relpath=path)
def get_thumbnail(file, type='', regenerate=False):
    """Returns the thumbnail belonging to ``file``, generating it first
    if it does not exist yet.
    ``type`` selects which kind of thumbnail is needed; the parameters of
    the thumbnail types can be configured in ``settings.py``.
    If ``regenerate`` is ``True``, the thumbnail is regenerated.
    """
ext = file[file.rfind('.') + 1:]
if not os.path.isfile(file) or ext.lower(
) not in settings.KEPTAR_EXTENSIONS:
raise FileNotFound(file)
basename = os.path.basename(file)
dirname = os.path.dirname(file)
thumbname = os.path.join(dirname, settings.KEPTAR_THUMBS[type]['dir'],
basename)
if regenerate or not os.path.isfile(thumbname):
if not os.path.isdir(os.path.dirname(thumbname)):
os.mkdir(os.path.dirname(thumbname))
generate_thumbnail(file, thumbname, settings.KEPTAR_THUMBS[type][
'size'])
thumburl = getattr(settings, 'KEPTAR_URL', '/media') + thumbname[len(
settings.KEPTAR_ROOT):]
return thumburl
def generate_thumbnail(file, thumbname, size):
image = Image.open(file)
image.thumbnail(size)
image.save(thumbname, image.format)
| from django.core.urlresolvers import reverse
from keptar import settings
import os, os.path
import Image
try:
from collections import OrderedDict
except ImportError:
from keptar.odict import OrderedDict
class AccessDenied(Exception):
pass
class FileNotFound(Exception):
pass
class NotDirectory(Exception):
pass
def enrich(filelist, relpath='', thumbnails=True):
    """Adds the necessary extra data to the image names"""
files = OrderedDict()
for f in filelist:
abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT, relpath, f))
if os.path.isdir(abspath):
thumb = settings.KEPTAR_ICONS.get('dir', None)
url = reverse('keptar.views.listdir', args=[os.path.join(relpath, f)])
direct_url = None
type = 'dir'
else:
if thumbnails:
try:
thumb = get_thumbnail(abspath)
except:
thumb = None
else:
thumb = settings.KEPTAR_ICONS.get('file', None)
url = reverse('keptar.views.showfile', args=[os.path.join(relpath, f)])
direct_url = getattr(settings, 'KEPTAR_URL', '/media/')+relpath+f
type = 'file'
# TODO: egyeb adatok
files[f] = {
'relpath': relpath,
'url': url,
'abspath': abspath,
'thumb': thumb,
'type': type,
'direct_url': direct_url,
}
return files
def get_parent(path):
"""A megadott elem szulokonyvtarat adja meg"""
# security check
parent = os.path.dirname(path)
try:
get_abspath(parent)
except:
parent = ''
return parent
def get_abspath(path):
"""AccessDenied exceptiont dob, ha valaki cselezni akar"""
abspath = os.path.abspath(os.path.join(settings.KEPTAR_ROOT, path))
# vajon a celkonyvtar valoban a root-on belul talalhato? - /../... miatt
if not abspath.startswith(settings.KEPTAR_ROOT):
raise AccessDenied("%s < %s" % (abspath, settings.KEPTAR_ROOT))
return abspath
def get_filelist(path, show_hidden=getattr(settings, 'KEPTAR_SHOW_HIDDEN', False), thumbnails=True):
"""Visszaadja a ``path`` konyvtarban levo konyvtarak es fileok listajat.
A ``path`` a ``settings.KEPTAR_ROOT``-hoz relativ.
A konyvtarak es a fileok listajat ket kulon dict-ben adja vissza,
mindenfele extra parameterrel.
A ``settings.KEPTAR_EXTENSIONS``-nel allithatoak a tamogatott
kiterjesztesek.
"""
abspath = get_abspath(path)
if not os.path.isdir(abspath):
raise NotDirectory(abspath)
dirs = []
pictures = []
for fname in os.listdir(abspath):
file = os.path.join(abspath, fname)
if os.path.isdir(file) and (show_hidden or not fname.startswith('.')):
dirs.append(fname)
if os.path.isfile(file):
# a kiterjesztes tamogatott-e
ext = file[file.rfind('.')+1:]
if ext.lower() in settings.KEPTAR_EXTENSIONS and (show_hidden or not fname.startswith('.')):
pictures.append(fname)
dirs.sort()
pictures.sort()
return enrich(dirs+pictures, relpath=path)
def get_thumbnail(file, type='', regenerate=False):
"""Visszaadja, illetve ha nem letezik, akkor legeneralja a ``file``-hoz
tartozo thumbnailt.
A ``type``-on keresztul mondhatjuk meg, hogy milyen tipusu thumbnailre
van szuksegunk, a tipusok parametereit a ``settings.py``-ben allithatjuk.
Ha a ``regenerate`` ``True``, akkor ujrageneralja a thumbnailt.
"""
ext = file[file.rfind('.')+1:]
if not os.path.isfile(file) or ext.lower() not in settings.KEPTAR_EXTENSIONS:
raise FileNotFound(file)
basename = os.path.basename(file)
dirname = os.path.dirname(file)
thumbname = os.path.join(dirname, settings.KEPTAR_THUMBS[type]['dir'], basename)
if regenerate or not os.path.isfile(thumbname):
if not os.path.isdir(os.path.dirname(thumbname)):
os.mkdir(os.path.dirname(thumbname))
generate_thumbnail(file, thumbname, settings.KEPTAR_THUMBS[type]['size'])
thumburl = getattr(settings, 'KEPTAR_URL', '/media') + thumbname[len(settings.KEPTAR_ROOT):]
return thumburl
def generate_thumbnail(file, thumbname, size):
image = Image.open(file)
image.thumbnail(size)
image.save(thumbname, image.format)
| [
5,
6,
10,
11,
12
] |
897 | 5193de15052f81460a23d993cfa039fa90c9de5e | <mask token>
@app.route('/hello/')
def hello():
return render_template('index.html', greeting='here we are')
<mask token>
@app.route('/api/1.0/create_playlists', methods=['POST'])
def do_create_playlists():
create_playlists(ALL_DBS)
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_playlists', methods=['POST'])
def get_playlists():
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
<mask token>
@app.route('/api/1.0/get_all_topics', methods=['POST'])
def get_topics():
retval = get_all_topics(ALL_DBS)
return jsonify({'all_topics': retval})
<mask token>
@app.route('/api/1.0/add_topic', methods=['POST'])
def add_topic():
retval = add_new_topic(request.json, ALL_DBS)
return jsonify({'return_code': retval})
@app.route('/api/1.0/<string:api_call>', methods=['POST'])
def generic_api_call(api_call):
if not request.json:
abort(400)
param1 = request.json.get('param1', 'no param 1')
param2 = request.json.get('param2', 'no param 2')
retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}
return jsonify(retval)
<mask token>
| <mask token>
@app.route('/')
def index():
return 'index'
@app.route('/hello/')
def hello():
return render_template('index.html', greeting='here we are')
<mask token>
@app.route('/api/1.0/create_playlists', methods=['POST'])
def do_create_playlists():
create_playlists(ALL_DBS)
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_playlists', methods=['POST'])
def get_playlists():
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_all_categories', methods=['POST'])
def get_categories():
retval = get_all_categories(ALL_DBS)
return jsonify({'all_categories': retval})
@app.route('/api/1.0/get_all_topics', methods=['POST'])
def get_topics():
retval = get_all_topics(ALL_DBS)
return jsonify({'all_topics': retval})
@app.route('/api/1.0/add_category', methods=['POST'])
def add_category():
retval = add_new_category(request.json, ALL_DBS)
return retval
@app.route('/api/1.0/add_topic', methods=['POST'])
def add_topic():
retval = add_new_topic(request.json, ALL_DBS)
return jsonify({'return_code': retval})
@app.route('/api/1.0/<string:api_call>', methods=['POST'])
def generic_api_call(api_call):
if not request.json:
abort(400)
param1 = request.json.get('param1', 'no param 1')
param2 = request.json.get('param2', 'no param 2')
retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}
return jsonify(retval)
<mask token>
| <mask token>
@app.route('/')
def index():
return 'index'
@app.route('/hello/')
def hello():
return render_template('index.html', greeting='here we are')
@app.route('/tools/')
def tools():
return render_template('tools.html')
@app.route('/api/1.0/create_playlists', methods=['POST'])
def do_create_playlists():
create_playlists(ALL_DBS)
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_playlists', methods=['POST'])
def get_playlists():
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_all_categories', methods=['POST'])
def get_categories():
retval = get_all_categories(ALL_DBS)
return jsonify({'all_categories': retval})
@app.route('/api/1.0/get_all_topics', methods=['POST'])
def get_topics():
retval = get_all_topics(ALL_DBS)
return jsonify({'all_topics': retval})
@app.route('/api/1.0/add_category', methods=['POST'])
def add_category():
retval = add_new_category(request.json, ALL_DBS)
return retval
@app.route('/api/1.0/add_topic', methods=['POST'])
def add_topic():
retval = add_new_topic(request.json, ALL_DBS)
return jsonify({'return_code': retval})
@app.route('/api/1.0/<string:api_call>', methods=['POST'])
def generic_api_call(api_call):
if not request.json:
abort(400)
param1 = request.json.get('param1', 'no param 1')
param2 = request.json.get('param2', 'no param 2')
retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}
return jsonify(retval)
if __name__ == '__main__':
ALL_DBS = connect_to_db()
app.run(debug=True)
| <mask token>
from flask import Flask
from flask import render_template
from flask import jsonify
from flask import request
from playlists import get_all_playlists, create_playlists, get_all_categories, add_new_category, add_new_topic, get_all_topics
from db import connect_to_db
ALL_DBS = None
app = Flask(__name__)
@app.route('/')
def index():
return 'index'
@app.route('/hello/')
def hello():
return render_template('index.html', greeting='here we are')
@app.route('/tools/')
def tools():
return render_template('tools.html')
@app.route('/api/1.0/create_playlists', methods=['POST'])
def do_create_playlists():
create_playlists(ALL_DBS)
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_playlists', methods=['POST'])
def get_playlists():
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_all_categories', methods=['POST'])
def get_categories():
retval = get_all_categories(ALL_DBS)
return jsonify({'all_categories': retval})
@app.route('/api/1.0/get_all_topics', methods=['POST'])
def get_topics():
retval = get_all_topics(ALL_DBS)
return jsonify({'all_topics': retval})
@app.route('/api/1.0/add_category', methods=['POST'])
def add_category():
retval = add_new_category(request.json, ALL_DBS)
return retval
@app.route('/api/1.0/add_topic', methods=['POST'])
def add_topic():
retval = add_new_topic(request.json, ALL_DBS)
return jsonify({'return_code': retval})
@app.route('/api/1.0/<string:api_call>', methods=['POST'])
def generic_api_call(api_call):
if not request.json:
abort(400)
param1 = request.json.get('param1', 'no param 1')
param2 = request.json.get('param2', 'no param 2')
retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}
return jsonify(retval)
if __name__ == '__main__':
ALL_DBS = connect_to_db()
app.run(debug=True)
| """
Copyright (C) 2014, Jill Huchital
"""
# test comment
from flask import Flask
from flask import render_template
from flask import jsonify
from flask import request
from playlists import get_all_playlists, create_playlists, get_all_categories, add_new_category, add_new_topic, get_all_topics
from db import connect_to_db
ALL_DBS = None
app = Flask(__name__)
@app.route('/')
def index():
# return render_template('index.html', greeting='here we are then')
return "index"
@app.route('/hello/')
def hello():
return render_template('index.html', greeting='here we are')
@app.route('/tools/')
def tools():
return render_template('tools.html')
@app.route('/api/1.0/create_playlists', methods = ['POST'])
def do_create_playlists():
create_playlists(ALL_DBS)
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_playlists', methods = ['POST'])
def get_playlists():
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_all_categories', methods = ['POST'])
def get_categories():
retval = get_all_categories(ALL_DBS)
return jsonify({'all_categories': retval})
@app.route('/api/1.0/get_all_topics', methods = ['POST'])
def get_topics():
retval = get_all_topics(ALL_DBS)
return jsonify({'all_topics': retval})
@app.route('/api/1.0/add_category', methods = ['POST'])
def add_category():
retval = add_new_category(request.json, ALL_DBS)
return retval
@app.route('/api/1.0/add_topic', methods = ['POST'])
def add_topic():
retval = add_new_topic(request.json, ALL_DBS)
return jsonify({'return_code': retval})
@app.route('/api/1.0/<string:api_call>', methods = ['POST'])
def generic_api_call(api_call):
if not request.json:
abort(400)
param1 = request.json.get('param1', 'no param 1')
param2 = request.json.get('param2', 'no param 2')
retval = {'param_1': param1,
'api_call': api_call,
'param_2': param2}
return jsonify(retval)
if __name__ == '__main__':
# debug = True makes the server restart when the Python files change. TODO: make it
# depend on whether we're running locally or in production.
ALL_DBS = connect_to_db()
# create_playlists(ALL_DBS)
app.run(debug = True)
| [
6,
9,
11,
13,
14
] |
898 | acbe9a9501c6a8532249496f327c2470c1d2f8e0 | <mask token>
class Volume(bt.Strategy):
<mask token>
<mask token>
<mask token>
| <mask token>
class Volume(bt.Strategy):
<mask token>
def __init__(self):
self.mysignal = self.data.volume / bt.ind.Average(self.data.volume,
period=self.params.avg_volume_period) >= self.params.ratio
<mask token>
| <mask token>
class Volume(bt.Strategy):
params = ('avg_volume_period', 10), ('ticker', 'hpg'), ('ratio', 1.25)
def __init__(self):
self.mysignal = self.data.volume / bt.ind.Average(self.data.volume,
period=self.params.avg_volume_period) >= self.params.ratio
def next(self):
self.step_date = self.data.datetime.date().strftime('%Y-%m-%d')
self.today = datetime.now().strftime('%Y-%m-%d')
if self.mysignal and self.step_date == self.today:
TelegramBot.send(
'{} - KLGD lớn hơn KLGD trung bình {} ngày gần nhất.'.
format(self.params.ticker, self.params.avg_volume_period))
| import math
import backtrader as bt
from datetime import datetime
from bots.TelegramBot import TelegramBot
import logging
class Volume(bt.Strategy):
params = ('avg_volume_period', 10), ('ticker', 'hpg'), ('ratio', 1.25)
def __init__(self):
self.mysignal = self.data.volume / bt.ind.Average(self.data.volume,
period=self.params.avg_volume_period) >= self.params.ratio
def next(self):
self.step_date = self.data.datetime.date().strftime('%Y-%m-%d')
self.today = datetime.now().strftime('%Y-%m-%d')
if self.mysignal and self.step_date == self.today:
TelegramBot.send(
'{} - KLGD lớn hơn KLGD trung bình {} ngày gần nhất.'.
format(self.params.ticker, self.params.avg_volume_period))
| import math
import backtrader as bt
from datetime import datetime
from bots.TelegramBot import TelegramBot
import logging
class Volume(bt.Strategy):
params = (('avg_volume_period', 10), ('ticker', 'hpg'), ('ratio', 1.25))
def __init__(self):
self.mysignal = (self.data.volume / bt.ind.Average(self.data.volume, period=self.params.avg_volume_period)) >= self.params.ratio
def next(self):
self.step_date = self.data.datetime.date().strftime("%Y-%m-%d")
self.today = datetime.now().strftime("%Y-%m-%d")
if self.mysignal and self.step_date == self.today:
TelegramBot.send("{} - KLGD lớn hơn KLGD trung bình {} ngày gần nhất.".format(self.params.ticker, self.params.avg_volume_period))
| [
1,
2,
4,
5,
6
] |
899 | e37f4422c1063df50453f7abf72a0a9a31156d8b | <mask token>
| <mask token>
class ScooterHutAUSpider(StockInStoreSpider):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class ScooterHutAUSpider(StockInStoreSpider):
name = 'scooter_hut_au'
item_attributes = {'brand': 'Scooter Hut', 'brand_wikidata': 'Q117747623'}
api_site_id = '10112'
api_widget_id = '119'
api_widget_type = 'product'
api_origin = 'https://scooterhut.com.au'
| from locations.storefinders.stockinstore import StockInStoreSpider
class ScooterHutAUSpider(StockInStoreSpider):
name = 'scooter_hut_au'
item_attributes = {'brand': 'Scooter Hut', 'brand_wikidata': 'Q117747623'}
api_site_id = '10112'
api_widget_id = '119'
api_widget_type = 'product'
api_origin = 'https://scooterhut.com.au'
| from locations.storefinders.stockinstore import StockInStoreSpider
class ScooterHutAUSpider(StockInStoreSpider):
name = "scooter_hut_au"
item_attributes = {"brand": "Scooter Hut", "brand_wikidata": "Q117747623"}
api_site_id = "10112"
api_widget_id = "119"
api_widget_type = "product"
api_origin = "https://scooterhut.com.au"
| [
0,
1,
2,
3,
4
] |