Commit ac48ca6
Parent(s): b8a2c58

Create app.py and requirements

- app.py +114 -0
- requirements.txt +2 -0
app.py (ADDED, @@ -0,0 +1,114 @@)
```python
import gradio as gr
import matplotlib.pyplot as plt

from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier


def do_train(n_samples, random_state, min_estimators, max_estimators):
    # NOTE: the parameter order matches the `inputs` lists wired up in the
    # Blocks below, since Gradio passes the slider values positionally.
    RANDOM_STATE = random_state

    # Generate a binary classification dataset.
    X, y = make_classification(
        n_samples=n_samples,
        n_features=25,
        n_clusters_per_class=1,
        n_informative=15,
        random_state=RANDOM_STATE,
    )

    # NOTE: Setting the `warm_start` construction parameter to `True` disables
    # support for parallelized ensembles but is necessary for tracking the OOB
    # error trajectory during training.
    ensemble_clfs = [
        (
            "RandomForestClassifier, max_features='sqrt'",
            RandomForestClassifier(
                warm_start=True,
                oob_score=True,
                max_features="sqrt",
                random_state=RANDOM_STATE,
            ),
        ),
        (
            "RandomForestClassifier, max_features='log2'",
            RandomForestClassifier(
                warm_start=True,
                max_features="log2",
                oob_score=True,
                random_state=RANDOM_STATE,
            ),
        ),
        (
            "RandomForestClassifier, max_features=None",
            RandomForestClassifier(
                warm_start=True,
                max_features=None,
                oob_score=True,
                random_state=RANDOM_STATE,
            ),
        ),
    ]

    # Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
    error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)

    # The range of `n_estimators` values to explore comes from the sliders;
    # hard-coding it here would silently ignore their values.
    for label, clf in ensemble_clfs:
        for i in range(min_estimators, max_estimators + 1, 5):
            clf.set_params(n_estimators=i)
            clf.fit(X, y)

            # Record the OOB error for each `n_estimators=i` setting.
            oob_error = 1 - clf.oob_score_
            error_rate[label].append((i, oob_error))

    # Generate the "OOB error rate" vs. "n_estimators" plot.
    fig, ax = plt.subplots()
    for label, clf_err in error_rate.items():
        xs, ys = zip(*clf_err)
        ax.plot(xs, ys, label=label)

    ax.set_xlim(min_estimators, max_estimators)
    ax.set_xlabel("n_estimators")
    ax.set_ylabel("OOB error rate")
    ax.legend(loc="upper right")
    return fig


model_card = f"""
## Description

The ``RandomForestClassifier`` is trained using bootstrap aggregation, where each new tree is fit from a bootstrap sample of the training observations $z_i = (x_i, y_i)$.
The out-of-bag (OOB) error for each $z_i$ is the average error computed using predictions from only those trees that do not contain $z_i$ in their bootstrap sample.
This allows the ``RandomForestClassifier`` to be fit and validated while it is being trained.
You can play around with the ``number of samples``, the ``random seed``, and the ``min estimators`` and ``max estimators`` sliders controlling the number of trees.
The example demonstrates how the OOB error can be measured as new trees are added during training,
and the resulting plot allows a practitioner to approximate a suitable value of ``n_estimators`` at which the error stabilizes.

## Dataset

Simulated data
"""

with gr.Blocks() as demo:
    gr.Markdown('''
        <div>
            <h1 style='text-align: center'>Out-of-Bag (OOB) Errors for Random Forests</h1>
        </div>
    ''')
    gr.Markdown(model_card)
    gr.Markdown("Author: <a href=\"https://huggingface.co/vumichien\">Vu Minh Chien</a>. Based on the example from <a href=\"https://scikit-learn.org/stable/auto_examples/ensemble/plot_ensemble_oob.html#sphx-glr-auto-examples-ensemble-plot-ensemble-oob-py\">scikit-learn</a>")
    n_samples = gr.Slider(minimum=500, maximum=5000, step=500, value=500, label="Number of samples")
    random_state = gr.Slider(minimum=0, maximum=2000, step=1, value=0, label="Random seed")
    min_estimators = gr.Slider(minimum=5, maximum=300, step=5, value=15, label="Minimum number of trees")
    # `minimum` must be a plain number (the original passed the `min_estimators`
    # Slider object here); keep this slider at or above the minimum-trees value.
    max_estimators = gr.Slider(minimum=5, maximum=300, step=5, value=150, label="Maximum number of trees")

    with gr.Row():
        with gr.Column():
            plot = gr.Plot()

    n_samples.change(fn=do_train, inputs=[n_samples, random_state, min_estimators, max_estimators], outputs=[plot])
    random_state.change(fn=do_train, inputs=[n_samples, random_state, min_estimators, max_estimators], outputs=[plot])
    min_estimators.change(fn=do_train, inputs=[n_samples, random_state, min_estimators, max_estimators], outputs=[plot])
    max_estimators.change(fn=do_train, inputs=[n_samples, random_state, min_estimators, max_estimators], outputs=[plot])

demo.launch()
```
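The quantity `do_train` plots is the OOB misclassification rate $1 - \texttt{oob\_score\_}$: with $\hat{y}_i^{\text{oob}}$ denoting the prediction for $z_i$ aggregated over only those trees whose bootstrap sample excludes $z_i$, it equals $\frac{1}{n}\sum_{i=1}^{n}\mathbf{1}[\hat{y}_i^{\text{oob}} \neq y_i]$. A minimal sketch of the same bookkeeping outside Gradio, assuming only scikit-learn is installed and mirroring the demo's default slider values:

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

# Same synthetic dataset as the demo's defaults (n_samples=500, seed 0).
X, y = make_classification(
    n_samples=500, n_features=25, n_informative=15,
    n_clusters_per_class=1, random_state=0,
)

# `oob_score=True` scores each sample using only the trees that never saw
# it in their bootstrap sample, giving a "free" validation estimate.
clf = RandomForestClassifier(n_estimators=150, oob_score=True, random_state=0)
clf.fit(X, y)
print(f"OOB error rate: {1 - clf.oob_score_:.4f}")
```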
requirements.txt (ADDED, @@ -0,0 +1,2 @@)
```
scikit-learn
matplotlib
```
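Note that `gradio` itself is not listed, presumably because the Space's Gradio SDK supplies it; for a local run it must be installed alongside these packages (for example `pip install gradio scikit-learn matplotlib`) before launching with `python app.py`.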