| file_name (string, length 4–140) | prefix (string, length 0–12.1k) | suffix (string, length 0–12k) | middle (string, length 0–7.51k) | fim_type (string, 4 classes) |
|---|---|---|---|---|
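The rows below are raw preview cells and are easiest to read once reassembled. A minimal sketch of loading a table like this with the Hugging Face `datasets` library; the path `user/fim-corpus` is a placeholder, not the real dataset identifier.

```python
# Sketch: load a fill-in-the-middle (FIM) dataset and reassemble one example.
# "user/fim-corpus" is a placeholder path, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("user/fim-corpus", split="train")
row = ds[0]
# The original source is prefix + middle + suffix; `fim_type` records what
# kind of span was masked out of the file.
source = row["prefix"] + row["middle"] + row["suffix"]
print(row["file_name"], row["fim_type"])
```

The four `fim_type` values visible in this preview are `identifier_name`, `conditional_block`, `identifier_body`, and `random_line_split`.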
Payment-temp.js | return new Promise((resolve) => {
const script = document.createElement("script");
script.src = src;
script.onload = () => {
resolve(true);
};
script.onerror = () => {
resolve(false);
};
document.body.appendChild(script);
});
};
const _DEV_ = document.domain === "localhost";
export default function HorizontalLabelPositionBelowStepper() {
const navigate = useNavigate();
const buyerId = useParams().buyerId;
const [buyerData, setBuyerData] = useState();
const { token } = useAuth();
const [finalMessage, setFinalMessage] = useState(false);
const classes = useStyles();
const [activeStep, setActiveStep] = React.useState(0);
const steps = getSteps();
const handleNext = () => {
setActiveStep((prevActiveStep) => prevActiveStep + 1);
};
const handleBack = () => {
setActiveStep((prevActiveStep) => prevActiveStep - 1);
};
const handleReset = () => {
setActiveStep(0);
};
useEffect(() => {
const getbuyerData = async () => {
const response = await axios.get(`http://localhost:5000/seller/buyer/${buyerId}`, {
headers: { "x-access-token": token },
});
console.log(response);
const data = await response.data;
console.log(data);
setBuyerData(response.data);
};
getbuyerData();
}, [token, buyerId]);
//////////////////////////////////////////////////////////////////////////
const displayRazorPay = async () => {
console.log(token);
const res = await loadScript("https://checkout.razorpay.com/v1/checkout.js");
if (!res) {
alert("razorpay sdk failed to load. are u online");
return;
}
// const data = await fetch("http://localhost:5000/buyer/checkout", {
// method: "POST",
// }).then((t) => t.json());
// const data = await axios.post(`http://localhost:5000/buyer/checkout`, {
// headers: { "x-access-token": token },
// });
const data = await fetch(`http://localhost:5000/buyer/checkout`, {
method: "POST",
headers: {
"x-access-token": token,
},
}).then((t) => t.json());
console.log(data);
var options = {
key: _DEV_ ? "rzp_test_5AmHwMVymTPMzT" : "PRODUCTION_KEY", // Enter the Key ID generated from the Dashboard
amount: data.amount.toString(), // Amount is in currency subunits. Default currency is INR. Hence, 50000 refers to 50000 paise
currency: data.currency,
name: "Payment",
description: "Test Transaction",
image: "https://example.com/your_logo",
order_id: data.id, //This is a sample Order ID. Pass the `id` obtained in the response of Step 1
handler: async function (response) {
alert(response.razorpay_payment_id);
alert(response.razorpay_order_id);
alert(response.razorpay_signature);
const sendVerify = async (response) => {
console.log(response);
const details = {
razorpay_order_id: response.razorpay_order_id,
razorpay_payment_id: response.razorpay_payment_id,
razorpay_signature: response.razorpay_signature,
};
const res = await axios
.post(`http://localhost:5000/buyer/payment/verify`, details, {
headers: {
"x-access-token": token,
},
})
.then(() => setFinalMessage(true));
console.log(res);
};
sendVerify(response);
},
prefill: {
name: "Ankur",
email: "[email protected]",
contact: "9999999999",
},
};
var paymentObject = new window.Razorpay(options);
// document.getElementById("rzp-button1").onclick = function (e) {
// rzp1.open();
// e.preventDefault();
// };
paymentObject.open();
paymentObject.on("payment.failed", function (response) {
alert(response.error.code);
alert(response.error.description);
alert(response.error.source);
alert(response.error.step);
alert(response.error.reason);
alert(response.error.metadata.order_id);
alert(response.error.metadata.payment_id);
});
};
/////////////////////////////////////////////////////////////////////
const CODhandler = async () => {
const response = await axios.get(`http://localhost:5000/buyer/COD`, {
headers: {
"x-access-token": token,
},
});
// .then(alert("checkout complete please close this window"));
setFinalMessage(true);
console.log(response);
};
return (
<div className={classes.root}>
{/* <button onClick={()=>{console.log(buyerData);}} >vlivk</button> */}
<Navigation />
<Stepper activeStep={activeStep} alternativeLabel>
{steps.map((label) => (
<Step key={label}>
<StepLabel>{label}</StepLabel>
</Step>
))}
</Stepper>
<div>
{activeStep === steps.length ? (
<div>
<Typography className={classes.instructions}>All steps completed</Typography>
<Button onClick={handleReset}>Reset</Button>
</div>
) : (
<div>
<Typography className={classes.instructions}>
{getStepContent(activeStep, buyerData)}
</Typography>
<div className="containerOverride">
{/* <Button
disabled={activeStep === 0}
onClick={handleBack}
className={classes.backButton}
>
Back
</Button> */}
{/* <Button
style={{
position: "fixed",
width: "45%",
padding: "1rem",
marginTop: "1rem",
bottom: 0,
left: 0,
display: "flex",
alignItems: "center",
justifyContent: "center",
marginLeft: 370,
height: 50,
color: "white",
background: "aqua",
}}
variant="contained"
color=""
onClick={handleNext}
>
{activeStep === steps.length - 1 ? "Finish" : "Deliver Here"}
</Button> */}
<button
// style={{
// // position: "fixed",
// border: "none",
// width: "20%",
// padding: "1rem",
// marginTop: "1rem",
// display: "flex",
// alignItems: "center",
// justifyContent: "center",
// marginLeft: 370,
// height: 50,
// color: "grey",
// background: "aqua",
// }}
onClick={displayRazorPay}
>
click for online payment
</button>
<button
// style={{
// border: "none",
// width: "20%",
// padding: "1rem",
// marginTop: "1rem",
// display: "flex",
// alignItems: "center",
// justifyContent: "center",
// marginLeft: 370,
// height: 50,
// color: "grey",
// background: "aqua",
// }}
onClick={CODhandler}
>
click for cash on delivery payment
</button>
</div>
</div>
)}
</div>
{finalMessage && (
<Link to={`/home`}>Payment Successful, click here to continue shopping </Link>
)}
</div>
);
}
function | (stepIndex, buyerData) {
switch (stepIndex) {
case 0:
return (
<div>
{buyerData && (
<div style={{ background: "#ecf0f1", margin: "auto", width: 630 }}>
<font color="red" style={{ color: "red", fontWieght: "bold" }}>
<b>Delivery Address</b>
</font>
{"\n\n"}
<br></br>
<div></div>
<div></div>
Name:{buyerData.fullname}
<br></br>
MobileNo:{buyerData.phone}
<br></br>
EmailID:{buyerData.email}
<br></br>
Address:{buyerData.shopAddress}
</div>
)}
</div>
);
case 1:
return (
<div style={{ marginLeft: 150, marginTop: 20, width: 630 }}>
<h3>Select Payment Method</h3>
<div style={{ background: "#ecf0f1", marginTop: 20, width: 630 }}>Cart</div>
<div style={{ marginTop: 20 }}>Credit Debit & ATM Cards</div>
<div style={{ marginTop: 20 }}>Sodexco Meal Pass</div>
<div style={{ background: "#ecf0f1", marginTop: | getStepContent | identifier_name |
Payment-temp.js | return new Promise((resolve) => {
const script = document.createElement("script");
script.src = src;
script.onload = () => {
resolve(true);
};
script.onerror = () => {
resolve(false);
};
document.body.appendChild(script);
});
};
const _DEV_ = document.domain === "localhost";
export default function HorizontalLabelPositionBelowStepper() {
const navigate = useNavigate();
const buyerId = useParams().buyerId;
const [buyerData, setBuyerData] = useState();
const { token } = useAuth();
const [finalMessage, setFinalMessage] = useState(false);
const classes = useStyles();
const [activeStep, setActiveStep] = React.useState(0);
const steps = getSteps();
const handleNext = () => {
setActiveStep((prevActiveStep) => prevActiveStep + 1);
};
const handleBack = () => {
setActiveStep((prevActiveStep) => prevActiveStep - 1);
};
const handleReset = () => {
setActiveStep(0);
};
useEffect(() => {
const getbuyerData = async () => {
const response = await axios.get(`http://localhost:5000/seller/buyer/${buyerId}`, {
headers: { "x-access-token": token },
});
console.log(response);
const data = await response.data;
console.log(data);
setBuyerData(response.data);
};
getbuyerData();
}, [token, buyerId]);
//////////////////////////////////////////////////////////////////////////
const displayRazorPay = async () => {
console.log(token);
const res = await loadScript("https://checkout.razorpay.com/v1/checkout.js");
if (!res) |
// const data = await fetch("http://localhost:5000/buyer/checkout", {
// method: "POST",
// }).then((t) => t.json());
// const data = await axios.post(`http://localhost:5000/buyer/checkout`, {
// headers: { "x-access-token": token },
// });
const data = await fetch(`http://localhost:5000/buyer/checkout`, {
method: "POST",
headers: {
"x-access-token": token,
},
}).then((t) => t.json());
console.log(data);
var options = {
key: _DEV_ ? "rzp_test_5AmHwMVymTPMzT" : "PRODUCTION_KEY", // Enter the Key ID generated from the Dashboard
amount: data.amount.toString(), // Amount is in currency subunits. Default currency is INR. Hence, 50000 refers to 50000 paise
currency: data.currency,
name: "Payment",
description: "Test Transaction",
image: "https://example.com/your_logo",
order_id: data.id, //This is a sample Order ID. Pass the `id` obtained in the response of Step 1
handler: async function (response) {
alert(response.razorpay_payment_id);
alert(response.razorpay_order_id);
alert(response.razorpay_signature);
const sendVerify = async (response) => {
console.log(response);
const details = {
razorpay_order_id: response.razorpay_order_id,
razorpay_payment_id: response.razorpay_payment_id,
razorpay_signature: response.razorpay_signature,
};
const res = await axios
.post(`http://localhost:5000/buyer/payment/verify`, details, {
headers: {
"x-access-token": token,
},
})
.then(() => setFinalMessage(true));
console.log(res);
};
sendVerify(response);
},
prefill: {
name: "Ankur",
email: "[email protected]",
contact: "9999999999",
},
};
var paymentObject = new window.Razorpay(options);
// document.getElementById("rzp-button1").onclick = function (e) {
// rzp1.open();
// e.preventDefault();
// };
paymentObject.open();
paymentObject.on("payment.failed", function (response) {
alert(response.error.code);
alert(response.error.description);
alert(response.error.source);
alert(response.error.step);
alert(response.error.reason);
alert(response.error.metadata.order_id);
alert(response.error.metadata.payment_id);
});
};
/////////////////////////////////////////////////////////////////////
const CODhandler = async () => {
const response = await axios.get(`http://localhost:5000/buyer/COD`, {
headers: {
"x-access-token": token,
},
});
// .then(alert("checkout complete please close this window"));
setFinalMessage(true);
console.log(response);
};
return (
<div className={classes.root}>
{/* <button onClick={()=>{console.log(buyerData);}} >vlivk</button> */}
<Navigation />
<Stepper activeStep={activeStep} alternativeLabel>
{steps.map((label) => (
<Step key={label}>
<StepLabel>{label}</StepLabel>
</Step>
))}
</Stepper>
<div>
{activeStep === steps.length ? (
<div>
<Typography className={classes.instructions}>All steps completed</Typography>
<Button onClick={handleReset}>Reset</Button>
</div>
) : (
<div>
<Typography className={classes.instructions}>
{getStepContent(activeStep, buyerData)}
</Typography>
<div className="containerOverride">
{/* <Button
disabled={activeStep === 0}
onClick={handleBack}
className={classes.backButton}
>
Back
</Button> */}
{/* <Button
style={{
position: "fixed",
width: "45%",
padding: "1rem",
marginTop: "1rem",
bottom: 0,
left: 0,
display: "flex",
alignItems: "center",
justifyContent: "center",
marginLeft: 370,
height: 50,
color: "white",
background: "aqua",
}}
variant="contained"
color=""
onClick={handleNext}
>
{activeStep === steps.length - 1 ? "Finish" : "Deliver Here"}
</Button> */}
<button
// style={{
// // position: "fixed",
// border: "none",
// width: "20%",
// padding: "1rem",
// marginTop: "1rem",
// display: "flex",
// alignItems: "center",
// justifyContent: "center",
// marginLeft: 370,
// height: 50,
// color: "grey",
// background: "aqua",
// }}
onClick={displayRazorPay}
>
click for online payment
</button>
<button
// style={{
// border: "none",
// width: "20%",
// padding: "1rem",
// marginTop: "1rem",
// display: "flex",
// alignItems: "center",
// justifyContent: "center",
// marginLeft: 370,
// height: 50,
// color: "grey",
// background: "aqua",
// }}
onClick={CODhandler}
>
click for cash on delivery payment
</button>
</div>
</div>
)}
</div>
{finalMessage && (
<Link to={`/home`}>Payment Successful, click here to continue shopping </Link>
)}
</div>
);
}
function getStepContent(stepIndex, buyerData) {
switch (stepIndex) {
case 0:
return (
<div>
{buyerData && (
<div style={{ background: "#ecf0f1", margin: "auto", width: 630 }}>
<font color="red" style={{ color: "red", fontWieght: "bold" }}>
<b>Delivery Address</b>
</font>
{"\n\n"}
<br></br>
<div></div>
<div></div>
Name:{buyerData.fullname}
<br></br>
MobileNo:{buyerData.phone}
<br></br>
EmailID:{buyerData.email}
<br></br>
Address:{buyerData.shopAddress}
</div>
)}
</div>
);
case 1:
return (
<div style={{ marginLeft: 150, marginTop: 20, width: 630 }}>
<h3>Select Payment Method</h3>
<div style={{ background: "#ecf0f1", marginTop: 20, width: 630 }}>Cart</div>
<div style={{ marginTop: 20 }}>Credit Debit & ATM Cards</div>
<div style={{ marginTop: 20 }}>Sodexco Meal Pass</div>
<div style={{ background: "#ecf0f1", marginTop | {
alert("razorpay sdk failed to load. are u online");
return;
} | conditional_block |
Payment-temp.js | return new Promise((resolve) => {
const script = document.createElement("script");
script.src = src;
script.onload = () => {
resolve(true);
};
script.onerror = () => {
resolve(false);
};
document.body.appendChild(script);
});
};
const _DEV_ = document.domain === "localhost";
export default function HorizontalLabelPositionBelowStepper() | setActiveStep(0);
};
useEffect(() => {
const getbuyerData = async () => {
const response = await axios.get(`http://localhost:5000/seller/buyer/${buyerId}`, {
headers: { "x-access-token": token },
});
console.log(response);
const data = await response.data;
console.log(data);
setBuyerData(response.data);
};
getbuyerData();
}, [token, buyerId]);
//////////////////////////////////////////////////////////////////////////
const displayRazorPay = async () => {
console.log(token);
const res = await loadScript("https://checkout.razorpay.com/v1/checkout.js");
if (!res) {
alert("razorpay sdk failed to load. are u online");
return;
}
// const data = await fetch("http://localhost:5000/buyer/checkout", {
// method: "POST",
// }).then((t) => t.json());
// const data = await axios.post(`http://localhost:5000/buyer/checkout`, {
// headers: { "x-access-token": token },
// });
const data = await fetch(`http://localhost:5000/buyer/checkout`, {
method: "POST",
headers: {
"x-access-token": token,
},
}).then((t) => t.json());
console.log(data);
var options = {
key: _DEV_ ? "rzp_test_5AmHwMVymTPMzT" : "PRODUCTION_KEY", // Enter the Key ID generated from the Dashboard
amount: data.amount.toString(), // Amount is in currency subunits. Default currency is INR. Hence, 50000 refers to 50000 paise
currency: data.currency,
name: "Payment",
description: "Test Transaction",
image: "https://example.com/your_logo",
order_id: data.id, //This is a sample Order ID. Pass the `id` obtained in the response of Step 1
handler: async function (response) {
alert(response.razorpay_payment_id);
alert(response.razorpay_order_id);
alert(response.razorpay_signature);
const sendVerify = async (response) => {
console.log(response);
const details = {
razorpay_order_id: response.razorpay_order_id,
razorpay_payment_id: response.razorpay_payment_id,
razorpay_signature: response.razorpay_signature,
};
const res = await axios
.post(`http://localhost:5000/buyer/payment/verify`, details, {
headers: {
"x-access-token": token,
},
})
.then(() => setFinalMessage(true));
console.log(res);
};
sendVerify(response);
},
prefill: {
name: "Ankur",
email: "[email protected]",
contact: "9999999999",
},
};
var paymentObject = new window.Razorpay(options);
// document.getElementById("rzp-button1").onclick = function (e) {
// rzp1.open();
// e.preventDefault();
// };
paymentObject.open();
paymentObject.on("payment.failed", function (response) {
alert(response.error.code);
alert(response.error.description);
alert(response.error.source);
alert(response.error.step);
alert(response.error.reason);
alert(response.error.metadata.order_id);
alert(response.error.metadata.payment_id);
});
};
/////////////////////////////////////////////////////////////////////
const CODhandler = async () => {
const response = await axios.get(`http://localhost:5000/buyer/COD`, {
headers: {
"x-access-token": token,
},
});
// .then(alert("checkout complete please close this window"));
setFinalMessage(true);
console.log(response);
};
return (
<div className={classes.root}>
{/* <button onClick={()=>{console.log(buyerData);}} >vlivk</button> */}
<Navigation />
<Stepper activeStep={activeStep} alternativeLabel>
{steps.map((label) => (
<Step key={label}>
<StepLabel>{label}</StepLabel>
</Step>
))}
</Stepper>
<div>
{activeStep === steps.length ? (
<div>
<Typography className={classes.instructions}>All steps completed</Typography>
<Button onClick={handleReset}>Reset</Button>
</div>
) : (
<div>
<Typography className={classes.instructions}>
{getStepContent(activeStep, buyerData)}
</Typography>
<div className="containerOverride">
{/* <Button
disabled={activeStep === 0}
onClick={handleBack}
className={classes.backButton}
>
Back
</Button> */}
{/* <Button
style={{
position: "fixed",
width: "45%",
padding: "1rem",
marginTop: "1rem",
bottom: 0,
left: 0,
display: "flex",
alignItems: "center",
justifyContent: "center",
marginLeft: 370,
height: 50,
color: "white",
background: "aqua",
}}
variant="contained"
color=""
onClick={handleNext}
>
{activeStep === steps.length - 1 ? "Finish" : "Deliver Here"}
</Button> */}
<button
// style={{
// // position: "fixed",
// border: "none",
// width: "20%",
// padding: "1rem",
// marginTop: "1rem",
// display: "flex",
// alignItems: "center",
// justifyContent: "center",
// marginLeft: 370,
// height: 50,
// color: "grey",
// background: "aqua",
// }}
onClick={displayRazorPay}
>
click for online payment
</button>
<button
// style={{
// border: "none",
// width: "20%",
// padding: "1rem",
// marginTop: "1rem",
// display: "flex",
// alignItems: "center",
// justifyContent: "center",
// marginLeft: 370,
// height: 50,
// color: "grey",
// background: "aqua",
// }}
onClick={CODhandler}
>
click for cash on delivery payment
</button>
</div>
</div>
)}
</div>
{finalMessage && (
<Link to={`/home`}>Payment Successful, click here to continue shopping </Link>
)}
</div>
);
}
function getStepContent(stepIndex, buyerData) {
switch (stepIndex) {
case 0:
return (
<div>
{buyerData && (
<div style={{ background: "#ecf0f1", margin: "auto", width: 630 }}>
<font color="red" style={{ color: "red", fontWieght: "bold" }}>
<b>Delivery Address</b>
</font>
{"\n\n"}
<br></br>
<div></div>
<div></div>
Name:{buyerData.fullname}
<br></br>
MobileNo:{buyerData.phone}
<br></br>
EmailID:{buyerData.email}
<br></br>
Address:{buyerData.shopAddress}
</div>
)}
</div>
);
case 1:
return (
<div style={{ marginLeft: 150, marginTop: 20, width: 630 }}>
<h3>Select Payment Method</h3>
<div style={{ background: "#ecf0f1", marginTop: 20, width: 630 }}>Cart</div>
<div style={{ marginTop: 20 }}>Credit Debit & ATM Cards</div>
<div style={{ marginTop: 20 }}>Sodexco Meal Pass</div>
<div style={{ background: "#ecf0f1", marginTop: | {
const navigate = useNavigate();
const buyerId = useParams().buyerId;
const [buyerData, setBuyerData] = useState();
const { token } = useAuth();
const [finalMessage, setFinalMessage] = useState(false);
const classes = useStyles();
const [activeStep, setActiveStep] = React.useState(0);
const steps = getSteps();
const handleNext = () => {
setActiveStep((prevActiveStep) => prevActiveStep + 1);
};
const handleBack = () => {
setActiveStep((prevActiveStep) => prevActiveStep - 1);
};
const handleReset = () => { | identifier_body |
ldacgsmulti.py |
word_top = (np.frombuffer(_word_top, dtype=np.float64)
+ np.sum(word_top_ls, axis=0))
top_norms = 1. / (word_top.reshape(_m_words.value, _K.value).sum(axis=0))
_word_top[:] = word_top
_top_norms[:] = top_norms
del word_top, top_norms
_train.value = 1
lp = np.sum(logp_ls)
self.log_prob.append((self.iteration, lp))
if verbose:
stdout.write('\rIteration %d: log_prob=' % self.iteration)
stdout.flush()
print '%f' % lp
self.iteration += 1
p.close()
# Final reduction includes assembling the Z and the context posteriors
self._Z = np.hstack(Z_ls)
self.top_ctx = np.hstack(top_ctx_ls)
self.word_top = np.frombuffer(_word_top, dtype=np.float64)
self.word_top = self.word_top.reshape(_m_words.value,_K.value)
@property
def W(self):
# For viewer until it gets updated
# This method is very slow for corpora with many documents
return [np.array(_corpus[ctx], dtype=np.int) for ctx in self.contexts]
@property
def Z(self):
# For viewer until it gets updated
return [self._Z[ctx] for ctx in self.contexts]
@property
def doc_top(self):
# For viewer until it gets updated
return self.top_ctx.T
@property
def top_word(self):
# For viewer until it gets updated
return self.word_top.T
@staticmethod
def load(filename):
from vsm.corpus import BaseCorpus
print 'Loading LdaCgsMulti data from', filename
arrays_in = np.load(filename)
context_type = arrays_in['context_type'][()]
K = arrays_in['K'][()]
ctx_prior = arrays_in['ctx_prior']
top_prior = arrays_in['top_prior']
c = BaseCorpus(arrays_in['corpus'],
context_types=[context_type],
context_data=[np.array([], dtype=[('idx', np.int)])],
remove_empty=False)
m = LdaCgsMulti(c, context_type, K=K,
ctx_prior=ctx_prior, top_prior=top_prior)
m.contexts = arrays_in['contexts']
m.iteration = arrays_in['iteration'][()]
m.log_prob = arrays_in['log_prob'].tolist()
m._Z = arrays_in['Z']
m.top_ctx = arrays_in['top_ctx']
m.word_top = arrays_in['word_top']
LdaCgsMulti._init_word_top(m.word_top.reshape(-1,))
LdaCgsMulti._init_top_norms(arrays_in['top_norms'])
return m
def save(self, filename):
arrays_out = dict()
arrays_out['corpus'] = np.frombuffer(_corpus, np.int32)
arrays_out['iteration'] = self.iteration
dt = dtype=[('i', np.int), ('v', np.float)]
arrays_out['log_prob'] = np.array(self.log_prob, dtype=dt)
arrays_out['Z'] = self._Z
arrays_out['top_ctx'] = self.top_ctx
arrays_out['word_top'] = self.word_top
arrays_out['context_type'] = self.context_type
arrays_out['contexts'] = np.array(self.contexts)
arrays_out['K'] = _K.value
arrays_out['m_words'] = _m_words.value
arrays_out['ctx_prior'] = self.ctx_prior
arrays_out['top_prior'] = self.top_prior
arrays_out['top_norms'] = np.frombuffer(_top_norms, np.float64)
print 'Saving LdaCgsMulti model to', filename
np.savez(filename, **arrays_out)
def update((ctx_sbls, Z, top_ctx)):
"""
For LdaCgsMulti
"""
np.random.seed()
gbl_word_top = np.frombuffer(_word_top, dtype=np.float64)
gbl_word_top = gbl_word_top.reshape(_m_words.value, _K.value)
loc_word_top = gbl_word_top.copy()
top_norms = np.frombuffer(_top_norms, dtype=np.float64).copy()
log_p = 0
log_wk = np.log(gbl_word_top * top_norms[np.newaxis, :])
log_kc = np.log(top_ctx / top_ctx.sum(0)[np.newaxis, :])
for i in xrange(len(ctx_sbls)):
c = _corpus[ctx_sbls[i]]
offset = ctx_sbls[i].start - ctx_sbls[0].start
for j in xrange(len(c)):
w,k = c[j],Z[offset+j]
log_p += log_wk[w, k] + log_kc[k, i]
if _train.value:
loc_word_top[w, k] -= 1
top_norms[k] *= 1. / (1 - top_norms[k])
top_ctx[k, i] -= 1
dist = top_norms * loc_word_top[w,:] * top_ctx[:,i]
dist_cum = np.cumsum(dist)
r = np.random.random() * dist_cum[-1]
k = np.searchsorted(dist_cum, r)
loc_word_top[w, k] += 1
top_norms[k] *= 1. / (1 + top_norms[k])
top_ctx[k, i] += 1
Z[offset+j] = k
loc_word_top -= gbl_word_top
return (ctx_sbls, Z, top_ctx, loc_word_top.reshape(-1,), log_p)
#################################################################
# Tests
#################################################################
def test_LdaCgsMulti():
from vsm.util.corpustools import random_corpus
c = random_corpus(100, 5, 4, 20)
m = LdaCgsMulti(c, 'random', K=3)
m.train(itr=5, n_proc=2)
return m
def test_LdaCgsMulti_IO():
from vsm.util.corpustools import random_corpus
from tempfile import NamedTemporaryFile
import os
c = random_corpus(1000, 50, 6, 100)
tmp = NamedTemporaryFile(delete=False, suffix='.npz')
try:
m0 = LdaCgsMulti(c, 'random', K=10)
m0.train(itr=20)
c0 = np.frombuffer(_corpus, np.int32).copy()
K0 = _K.value
m_words0 = _m_words.value
word_top0 = np.frombuffer(_word_top, np.float64).copy()
top_norms0 = np.frombuffer(_top_norms, np.float64).copy()
m0.save(tmp.name)
m1 = LdaCgsMulti.load(tmp.name)
c1 = np.frombuffer(_corpus, np.int32).copy()
K1 = _K.value
m_words1 = _m_words.value
word_top1 = np.frombuffer(_word_top, np.float64).copy()
top_norms1 = np.frombuffer(_top_norms, np.float64).copy()
assert m0.context_type == m1.context_type
assert (m0.ctx_prior == m1.ctx_prior).all()
assert (m0.top_prior == m1.top_prior).all()
assert m0.log_prob == m1.log_prob
for i in xrange(max(len(m0.W), len(m1.W))):
assert m0.W[i].all() == m1.W[i].all()
assert m0.iteration == m1.iteration
assert (m0._Z == m1._Z).all()
assert m0.top_ctx.all() == m1.top_ctx.all()
assert m0.word_top.all() == m1.word_top.all()
assert (c0==c1).all()
assert K0==K1
assert m_words0==m_words1
assert (word_top0==word_top1).all()
assert (top_norms0==top_norms1).all(), (top_norms0, top_norms1)
finally:
os.remove(tmp.name)
def test_continuation():
| """
Note
----
Disable reseeding in `update` before running this test and use
sequential mapping
"""
from vsm.util.corpustools import random_corpus
c = random_corpus(100, 5, 4, 20)
m0 = LdaCgsMulti(c, 'random', K=3)
np.random.seed(0)
m0.train(itr=5, n_proc=2)
m0.train(itr=5, n_proc=2)
m1 = LdaCgsMulti(c, 'random', K=3)
np.random.seed(0)
m1.train(itr=10, n_proc=2)
assert (m0.word_top==m1.word_top).all()
assert (m0._Z==m1._Z).all() | identifier_body |
|
ldacgsmulti.py | .ctx_prior = np.array(ctx_prior,
dtype=np.float64).reshape(_K.value,1)
else:
# Default is a flat prior of .01
self.ctx_prior = np.ones((_K.value,1), dtype=np.float64) * .01
# Topic posterior stored in shared array, initialized to zero
LdaCgsMulti._init_word_top((np.zeros((_m_words.value, _K.value),
dtype=np.float64)
+ self.top_prior).reshape(-1,))
# Topic norms stored in a shared array, initialized to the
# sums over the topic priors
LdaCgsMulti._init_top_norms(1. / (np.ones(_K.value, dtype=np.float64)
* self.top_prior.sum()))
self.iteration = 0
# The 0-th iteration is an initialization step, not a training step
global _train |
# Store log probability computations
self.log_prob = []
@staticmethod
def _init_word_top(a):
global _word_top
_word_top = mp.Array('d', _m_words.value*_K.value, lock=False)
_word_top[:] = a
@staticmethod
def _init_top_norms(a):
global _top_norms
_top_norms = mp.Array('d', _K.value, lock=False)
_top_norms[:] = a
def train(self, itr=500, verbose=True, n_proc=2):
"""
Note
----
Training sessions can be continued only if the previous
training session has completed.
"""
# Split contexts into an `n_proc`-length list of lists of
# contexts
if n_proc == 1:
ctx_ls = [self.contexts]
else:
ctx_ls = np.array_split(self.contexts, n_proc-1)
if len(ctx_ls) != n_proc:
ctx_ls = np.array_split(self.contexts, n_proc)
# Initialize arrays for storing Z and context posteriors for
# each process
if self.iteration == 0:
self._Z = np.zeros(len(_corpus), dtype=np.int)
self.top_ctx = (np.zeros((_K.value, len(self.contexts)),
dtype=np.float64)
+ self.ctx_prior)
ctx_ls_flat = [slice(c[0].start, c[-1].stop) for c in ctx_ls]
Z_ls = [self._Z[s] for s in ctx_ls_flat]
ctx_sbls_spans = np.cumsum([len(ctx_sbls) for ctx_sbls in ctx_ls][:-1])
top_ctx_ls = np.split(self.top_ctx, ctx_sbls_spans, axis=1)
# Clean
del self._Z, self.top_ctx
if hasattr(self, 'word_top'):
del self.word_top
p=mp.Pool(n_proc)
itr += self.iteration
while self.iteration < itr:
if verbose:
stdout.write('\rIteration %d: mapping ' % self.iteration)
stdout.flush()
data = zip(ctx_ls, Z_ls, top_ctx_ls)
# For debugging
# results = map(update, data)
results = p.map(update, data)
if verbose:
stdout.write('\rIteration %d: reducing ' % self.iteration)
stdout.flush()
# Unzip results
ctx_ls, Z_ls, top_ctx_ls, word_top_ls, logp_ls = zip(*results)
# Reduce word by topic matrices and store in global shared array
word_top = (np.frombuffer(_word_top, dtype=np.float64)
+ np.sum(word_top_ls, axis=0))
top_norms = 1. / (word_top.reshape(_m_words.value, _K.value).sum(axis=0))
_word_top[:] = word_top
_top_norms[:] = top_norms
del word_top, top_norms
_train.value = 1
lp = np.sum(logp_ls)
self.log_prob.append((self.iteration, lp))
if verbose:
stdout.write('\rIteration %d: log_prob=' % self.iteration)
stdout.flush()
print '%f' % lp
self.iteration += 1
p.close()
# Final reduction includes assembling the Z and the context posteriors
self._Z = np.hstack(Z_ls)
self.top_ctx = np.hstack(top_ctx_ls)
self.word_top = np.frombuffer(_word_top, dtype=np.float64)
self.word_top = self.word_top.reshape(_m_words.value,_K.value)
@property
def W(self):
# For viewer until it gets updated
# This method is very slow for corpora with many documents
return [np.array(_corpus[ctx], dtype=np.int) for ctx in self.contexts]
@property
def Z(self):
# For viewer until it gets updated
return [self._Z[ctx] for ctx in self.contexts]
@property
def doc_top(self):
# For viewer until it gets updated
return self.top_ctx.T
@property
def top_word(self):
# For viewer until it gets updated
return self.word_top.T
@staticmethod
def load(filename):
from vsm.corpus import BaseCorpus
print 'Loading LdaCgsMulti data from', filename
arrays_in = np.load(filename)
context_type = arrays_in['context_type'][()]
K = arrays_in['K'][()]
ctx_prior = arrays_in['ctx_prior']
top_prior = arrays_in['top_prior']
c = BaseCorpus(arrays_in['corpus'],
context_types=[context_type],
context_data=[np.array([], dtype=[('idx', np.int)])],
remove_empty=False)
m = LdaCgsMulti(c, context_type, K=K,
ctx_prior=ctx_prior, top_prior=top_prior)
m.contexts = arrays_in['contexts']
m.iteration = arrays_in['iteration'][()]
m.log_prob = arrays_in['log_prob'].tolist()
m._Z = arrays_in['Z']
m.top_ctx = arrays_in['top_ctx']
m.word_top = arrays_in['word_top']
LdaCgsMulti._init_word_top(m.word_top.reshape(-1,))
LdaCgsMulti._init_top_norms(arrays_in['top_norms'])
return m
def save(self, filename):
arrays_out = dict()
arrays_out['corpus'] = np.frombuffer(_corpus, np.int32)
arrays_out['iteration'] = self.iteration
dt = dtype=[('i', np.int), ('v', np.float)]
arrays_out['log_prob'] = np.array(self.log_prob, dtype=dt)
arrays_out['Z'] = self._Z
arrays_out['top_ctx'] = self.top_ctx
arrays_out['word_top'] = self.word_top
arrays_out['context_type'] = self.context_type
arrays_out['contexts'] = np.array(self.contexts)
arrays_out['K'] = _K.value
arrays_out['m_words'] = _m_words.value
arrays_out['ctx_prior'] = self.ctx_prior
arrays_out['top_prior'] = self.top_prior
arrays_out['top_norms'] = np.frombuffer(_top_norms, np.float64)
print 'Saving LdaCgsMulti model to', filename
np.savez(filename, **arrays_out)
def update((ctx_sbls, Z, top_ctx)):
"""
For LdaCgsMulti
"""
np.random.seed()
gbl_word_top = np.frombuffer(_word_top, dtype=np.float64)
gbl_word_top = gbl_word_top.reshape(_m_words.value, _K.value)
loc_word_top = gbl_word_top.copy()
top_norms = np.frombuffer(_top_norms, dtype=np.float64).copy()
log_p = 0
log_wk = np.log(gbl_word_top * top_norms[np.newaxis, :])
log_kc = np.log(top_ctx / top_ctx.sum(0)[np.newaxis, :])
for i in xrange(len(ctx_sbls)):
c = _corpus[ctx_sbls[i]]
offset = ctx_sbls[i].start - ctx_sbls[0].start
for j in xrange(len(c)):
w,k = c[j],Z[offset+j]
log_p += log_wk[w, k] + log_kc[k, i]
if _train.value:
loc_word_top[w, k] -= 1
top_norms[k] *= 1. / (1 - top_norms[k])
top_ctx[k, i] -= 1
dist = top_norms * loc_word_top[w,:] * top_ctx[:,i]
dist_cum = np.cumsum(dist)
r = np.random.random() * dist_cum[-1]
k = np.searchsorted(dist_cum, r)
loc_word_top[w, k] += 1
top_norms[k] *= 1. / (1 + top_norms[k])
top_ctx[k, i] += 1
Z[offset+j] = k
loc | _train = mp.Value('b', 0, lock=False) | random_line_split |
ldacgsmulti.py | .ctx_prior = np.array(ctx_prior,
dtype=np.float64).reshape(_K.value,1)
else:
# Default is a flat prior of .01
self.ctx_prior = np.ones((_K.value,1), dtype=np.float64) * .01
# Topic posterior stored in shared array, initialized to zero
LdaCgsMulti._init_word_top((np.zeros((_m_words.value, _K.value),
dtype=np.float64)
+ self.top_prior).reshape(-1,))
# Topic norms stored in a shared array, initialized to the
# sums over the topic priors
LdaCgsMulti._init_top_norms(1. / (np.ones(_K.value, dtype=np.float64)
* self.top_prior.sum()))
self.iteration = 0
# The 0-th iteration is an initialization step, not a training step
global _train
_train = mp.Value('b', 0, lock=False)
# Store log probability computations
self.log_prob = []
@staticmethod
def _init_word_top(a):
global _word_top
_word_top = mp.Array('d', _m_words.value*_K.value, lock=False)
_word_top[:] = a
@staticmethod
def _init_top_norms(a):
global _top_norms
_top_norms = mp.Array('d', _K.value, lock=False)
_top_norms[:] = a
def train(self, itr=500, verbose=True, n_proc=2):
"""
Note
----
Training sessions can be continued only if the previous
training session has completed.
"""
# Split contexts into an `n_proc`-length list of lists of
# contexts
if n_proc == 1:
ctx_ls = [self.contexts]
else:
ctx_ls = np.array_split(self.contexts, n_proc-1)
if len(ctx_ls) != n_proc:
ctx_ls = np.array_split(self.contexts, n_proc)
# Initialize arrays for storing Z and context posteriors for
# each process
if self.iteration == 0:
self._Z = np.zeros(len(_corpus), dtype=np.int)
self.top_ctx = (np.zeros((_K.value, len(self.contexts)),
dtype=np.float64)
+ self.ctx_prior)
ctx_ls_flat = [slice(c[0].start, c[-1].stop) for c in ctx_ls]
Z_ls = [self._Z[s] for s in ctx_ls_flat]
ctx_sbls_spans = np.cumsum([len(ctx_sbls) for ctx_sbls in ctx_ls][:-1])
top_ctx_ls = np.split(self.top_ctx, ctx_sbls_spans, axis=1)
# Clean
del self._Z, self.top_ctx
if hasattr(self, 'word_top'):
del self.word_top
p=mp.Pool(n_proc)
itr += self.iteration
while self.iteration < itr:
if verbose:
|
data = zip(ctx_ls, Z_ls, top_ctx_ls)
# For debugging
# results = map(update, data)
results = p.map(update, data)
if verbose:
stdout.write('\rIteration %d: reducing ' % self.iteration)
stdout.flush()
# Unzip results
ctx_ls, Z_ls, top_ctx_ls, word_top_ls, logp_ls = zip(*results)
# Reduce word by topic matrices and store in global shared array
word_top = (np.frombuffer(_word_top, dtype=np.float64)
+ np.sum(word_top_ls, axis=0))
top_norms = 1. / (word_top.reshape(_m_words.value, _K.value).sum(axis=0))
_word_top[:] = word_top
_top_norms[:] = top_norms
del word_top, top_norms
_train.value = 1
lp = np.sum(logp_ls)
self.log_prob.append((self.iteration, lp))
if verbose:
stdout.write('\rIteration %d: log_prob=' % self.iteration)
stdout.flush()
print '%f' % lp
self.iteration += 1
p.close()
# Final reduction includes assembling the Z and the context posteriors
self._Z = np.hstack(Z_ls)
self.top_ctx = np.hstack(top_ctx_ls)
self.word_top = np.frombuffer(_word_top, dtype=np.float64)
self.word_top = self.word_top.reshape(_m_words.value,_K.value)
@property
def W(self):
# For viewer until it gets updated
# This method is very slow for corpora with many documents
return [np.array(_corpus[ctx], dtype=np.int) for ctx in self.contexts]
@property
def Z(self):
# For viewer until it gets updated
return [self._Z[ctx] for ctx in self.contexts]
@property
def doc_top(self):
# For viewer until it gets updated
return self.top_ctx.T
@property
def top_word(self):
# For viewer until it gets updated
return self.word_top.T
@staticmethod
def load(filename):
from vsm.corpus import BaseCorpus
print 'Loading LdaCgsMulti data from', filename
arrays_in = np.load(filename)
context_type = arrays_in['context_type'][()]
K = arrays_in['K'][()]
ctx_prior = arrays_in['ctx_prior']
top_prior = arrays_in['top_prior']
c = BaseCorpus(arrays_in['corpus'],
context_types=[context_type],
context_data=[np.array([], dtype=[('idx', np.int)])],
remove_empty=False)
m = LdaCgsMulti(c, context_type, K=K,
ctx_prior=ctx_prior, top_prior=top_prior)
m.contexts = arrays_in['contexts']
m.iteration = arrays_in['iteration'][()]
m.log_prob = arrays_in['log_prob'].tolist()
m._Z = arrays_in['Z']
m.top_ctx = arrays_in['top_ctx']
m.word_top = arrays_in['word_top']
LdaCgsMulti._init_word_top(m.word_top.reshape(-1,))
LdaCgsMulti._init_top_norms(arrays_in['top_norms'])
return m
def save(self, filename):
arrays_out = dict()
arrays_out['corpus'] = np.frombuffer(_corpus, np.int32)
arrays_out['iteration'] = self.iteration
dt = dtype=[('i', np.int), ('v', np.float)]
arrays_out['log_prob'] = np.array(self.log_prob, dtype=dt)
arrays_out['Z'] = self._Z
arrays_out['top_ctx'] = self.top_ctx
arrays_out['word_top'] = self.word_top
arrays_out['context_type'] = self.context_type
arrays_out['contexts'] = np.array(self.contexts)
arrays_out['K'] = _K.value
arrays_out['m_words'] = _m_words.value
arrays_out['ctx_prior'] = self.ctx_prior
arrays_out['top_prior'] = self.top_prior
arrays_out['top_norms'] = np.frombuffer(_top_norms, np.float64)
print 'Saving LdaCgsMulti model to', filename
np.savez(filename, **arrays_out)
def update((ctx_sbls, Z, top_ctx)):
"""
For LdaCgsMulti
"""
np.random.seed()
gbl_word_top = np.frombuffer(_word_top, dtype=np.float64)
gbl_word_top = gbl_word_top.reshape(_m_words.value, _K.value)
loc_word_top = gbl_word_top.copy()
top_norms = np.frombuffer(_top_norms, dtype=np.float64).copy()
log_p = 0
log_wk = np.log(gbl_word_top * top_norms[np.newaxis, :])
log_kc = np.log(top_ctx / top_ctx.sum(0)[np.newaxis, :])
for i in xrange(len(ctx_sbls)):
c = _corpus[ctx_sbls[i]]
offset = ctx_sbls[i].start - ctx_sbls[0].start
for j in xrange(len(c)):
w,k = c[j],Z[offset+j]
log_p += log_wk[w, k] + log_kc[k, i]
if _train.value:
loc_word_top[w, k] -= 1
top_norms[k] *= 1. / (1 - top_norms[k])
top_ctx[k, i] -= 1
dist = top_norms * loc_word_top[w,:] * top_ctx[:,i]
dist_cum = np.cumsum(dist)
r = np.random.random() * dist_cum[-1]
k = np.searchsorted(dist_cum, r)
loc_word_top[w, k] += 1
top_norms[k] *= 1. / (1 + top_norms[k])
top_ctx[k, i] += 1
Z[offset+j] = k
| stdout.write('\rIteration %d: mapping ' % self.iteration)
stdout.flush() | conditional_block |
ldacgsmulti.py | array, initialized to the
# sums over the topic priors
LdaCgsMulti._init_top_norms(1. / (np.ones(_K.value, dtype=np.float64)
* self.top_prior.sum()))
self.iteration = 0
# The 0-th iteration is an initialization step, not a training step
global _train
_train = mp.Value('b', 0, lock=False)
# Store log probability computations
self.log_prob = []
@staticmethod
def _init_word_top(a):
global _word_top
_word_top = mp.Array('d', _m_words.value*_K.value, lock=False)
_word_top[:] = a
@staticmethod
def _init_top_norms(a):
global _top_norms
_top_norms = mp.Array('d', _K.value, lock=False)
_top_norms[:] = a
def train(self, itr=500, verbose=True, n_proc=2):
"""
Note
----
Training sessions can be continued only if the previous
training session has completed.
"""
# Split contexts into an `n_proc`-length list of lists of
# contexts
if n_proc == 1:
ctx_ls = [self.contexts]
else:
ctx_ls = np.array_split(self.contexts, n_proc-1)
if len(ctx_ls) != n_proc:
ctx_ls = np.array_split(self.contexts, n_proc)
# Initialize arrays for storing Z and context posteriors for
# each process
if self.iteration == 0:
self._Z = np.zeros(len(_corpus), dtype=np.int)
self.top_ctx = (np.zeros((_K.value, len(self.contexts)),
dtype=np.float64)
+ self.ctx_prior)
ctx_ls_flat = [slice(c[0].start, c[-1].stop) for c in ctx_ls]
Z_ls = [self._Z[s] for s in ctx_ls_flat]
ctx_sbls_spans = np.cumsum([len(ctx_sbls) for ctx_sbls in ctx_ls][:-1])
top_ctx_ls = np.split(self.top_ctx, ctx_sbls_spans, axis=1)
# Clean
del self._Z, self.top_ctx
if hasattr(self, 'word_top'):
del self.word_top
p=mp.Pool(n_proc)
itr += self.iteration
while self.iteration < itr:
if verbose:
stdout.write('\rIteration %d: mapping ' % self.iteration)
stdout.flush()
data = zip(ctx_ls, Z_ls, top_ctx_ls)
# For debugging
# results = map(update, data)
results = p.map(update, data)
if verbose:
stdout.write('\rIteration %d: reducing ' % self.iteration)
stdout.flush()
# Unzip results
ctx_ls, Z_ls, top_ctx_ls, word_top_ls, logp_ls = zip(*results)
# Reduce word by topic matrices and store in global shared array
word_top = (np.frombuffer(_word_top, dtype=np.float64)
+ np.sum(word_top_ls, axis=0))
top_norms = 1. / (word_top.reshape(_m_words.value, _K.value).sum(axis=0))
_word_top[:] = word_top
_top_norms[:] = top_norms
del word_top, top_norms
_train.value = 1
lp = np.sum(logp_ls)
self.log_prob.append((self.iteration, lp))
if verbose:
stdout.write('\rIteration %d: log_prob=' % self.iteration)
stdout.flush()
print '%f' % lp
self.iteration += 1
p.close()
# Final reduction includes assembling the Z and the context posteriors
self._Z = np.hstack(Z_ls)
self.top_ctx = np.hstack(top_ctx_ls)
self.word_top = np.frombuffer(_word_top, dtype=np.float64)
self.word_top = self.word_top.reshape(_m_words.value,_K.value)
@property
def W(self):
# For viewer until it gets updated
# This method is very slow for corpora with many documents
return [np.array(_corpus[ctx], dtype=np.int) for ctx in self.contexts]
@property
def Z(self):
# For viewer until it gets updated
return [self._Z[ctx] for ctx in self.contexts]
@property
def doc_top(self):
# For viewer until it gets updated
return self.top_ctx.T
@property
def top_word(self):
# For viewer until it gets updated
return self.word_top.T
@staticmethod
def load(filename):
from vsm.corpus import BaseCorpus
print 'Loading LdaCgsMulti data from', filename
arrays_in = np.load(filename)
context_type = arrays_in['context_type'][()]
K = arrays_in['K'][()]
ctx_prior = arrays_in['ctx_prior']
top_prior = arrays_in['top_prior']
c = BaseCorpus(arrays_in['corpus'],
context_types=[context_type],
context_data=[np.array([], dtype=[('idx', np.int)])],
remove_empty=False)
m = LdaCgsMulti(c, context_type, K=K,
ctx_prior=ctx_prior, top_prior=top_prior)
m.contexts = arrays_in['contexts']
m.iteration = arrays_in['iteration'][()]
m.log_prob = arrays_in['log_prob'].tolist()
m._Z = arrays_in['Z']
m.top_ctx = arrays_in['top_ctx']
m.word_top = arrays_in['word_top']
LdaCgsMulti._init_word_top(m.word_top.reshape(-1,))
LdaCgsMulti._init_top_norms(arrays_in['top_norms'])
return m
def save(self, filename):
arrays_out = dict()
arrays_out['corpus'] = np.frombuffer(_corpus, np.int32)
arrays_out['iteration'] = self.iteration
dt = dtype=[('i', np.int), ('v', np.float)]
arrays_out['log_prob'] = np.array(self.log_prob, dtype=dt)
arrays_out['Z'] = self._Z
arrays_out['top_ctx'] = self.top_ctx
arrays_out['word_top'] = self.word_top
arrays_out['context_type'] = self.context_type
arrays_out['contexts'] = np.array(self.contexts)
arrays_out['K'] = _K.value
arrays_out['m_words'] = _m_words.value
arrays_out['ctx_prior'] = self.ctx_prior
arrays_out['top_prior'] = self.top_prior
arrays_out['top_norms'] = np.frombuffer(_top_norms, np.float64)
print 'Saving LdaCgsMulti model to', filename
np.savez(filename, **arrays_out)
def update((ctx_sbls, Z, top_ctx)):
"""
For LdaCgsMulti
"""
np.random.seed()
gbl_word_top = np.frombuffer(_word_top, dtype=np.float64)
gbl_word_top = gbl_word_top.reshape(_m_words.value, _K.value)
loc_word_top = gbl_word_top.copy()
top_norms = np.frombuffer(_top_norms, dtype=np.float64).copy()
log_p = 0
log_wk = np.log(gbl_word_top * top_norms[np.newaxis, :])
log_kc = np.log(top_ctx / top_ctx.sum(0)[np.newaxis, :])
for i in xrange(len(ctx_sbls)):
c = _corpus[ctx_sbls[i]]
offset = ctx_sbls[i].start - ctx_sbls[0].start
for j in xrange(len(c)):
w,k = c[j],Z[offset+j]
log_p += log_wk[w, k] + log_kc[k, i]
if _train.value:
loc_word_top[w, k] -= 1
top_norms[k] *= 1. / (1 - top_norms[k])
top_ctx[k, i] -= 1
dist = top_norms * loc_word_top[w,:] * top_ctx[:,i]
dist_cum = np.cumsum(dist)
r = np.random.random() * dist_cum[-1]
k = np.searchsorted(dist_cum, r)
loc_word_top[w, k] += 1
top_norms[k] *= 1. / (1 + top_norms[k])
top_ctx[k, i] += 1
Z[offset+j] = k
loc_word_top -= gbl_word_top
return (ctx_sbls, Z, top_ctx, loc_word_top.reshape(-1,), log_p)
#################################################################
# Tests
#################################################################
def test_LdaCgsMulti():
from vsm.util.corpustools import random_corpus
c = random_corpus(100, 5, 4, 20)
m = LdaCgsMulti(c, 'random', K=3)
m.train(itr=5, n_proc=2)
return m
def | test_LdaCgsMulti_IO | identifier_name |
|
app.py | if st.checkbox("Visualize actual vs predicted conflict"):
if st.checkbox("2019: 12 months"):
fig, axes = plt.subplots(ncols=2)
ax = plt.subplots()
ax = cc_actual.plot(column='2019')
axes[0].set_title("Actual")
axes[1].set_title("Predicted")
axes[1].legend(title="Months in conflict", loc="upper right")
cc_actual.plot(column='2019', cmap='OrRd', ax=axes[0])
cc_prediction.plot(column='2019', cmap='OrRd',
legend=True, ax=axes[1])
st.pyplot(fig)
if st.checkbox("2019-01"):
fig, axes = plt.subplots(ncols=2)
ax = plt.subplots()
ax = cc_actual.plot(column='2019-01')
axes[0].set_title("Actual")
axes[1].set_title("Predicted")
axes[1].legend(title="Months in conflict", loc="upper right")
cc_actual.plot(column='2019-01', cmap='OrRd', ax=axes[0])
cc_prediction.plot(column='2019-01', cmap='OrRd',
legend=True, ax=axes[1])
st.pyplot(fig)
if st.checkbox("2019-02"):
fig, axes = plt.subplots(ncols=2)
ax = plt.subplots()
ax = cc_actual.plot(column='2019-02')
axes[0].set_title("Actual")
axes[1].set_title("Predicted")
axes[1].legend(title="Months in conflict", loc="upper right")
cc_actual.plot(column='2019-02', cmap='OrRd', ax=axes[0])
cc_prediction.plot(column='2019-02', cmap='OrRd',
legend=True, ax=axes[1])
st.pyplot(fig)
columns = X_train.shape[1]
def new_data_downloader(df):
st.write("")
st.subheader("Want to new data to perform forecasting?")
if st.checkbox("New data"):
csv = current.to_csv(index=False)
# some strings <-> bytes conversions necessary here
b64 = base64.b64encode(csv.encode()).decode()
href = f'<a href="data:file/csv;base64,{b64}">Download CSV File</a> (right-click and save as <some_name>.csv)'
st.markdown(href, unsafe_allow_html=True)
st.write("")
st.subheader(
"Want to download the new dataset to perform forecasting?")
csv = current.to_csv(index=False)
# some strings <-> bytes conversions necessary here
b64 = base64.b64encode(csv.encode()).decode()
href = f'<a href="data:file/csv;base64,{b64}">Download CSV File</a> (right-click and save as <some_name>.csv)'
st.markdown(href, unsafe_allow_html=True)
def file_uploader(uploaded_file):
st.file_uploader("Choose a CSV file", type="csv")
uploaded_file = pd.read_csv(uploaded_file, low_memory=False)
st.text("This process probably takes few seconds...")
return uploaded_file
def logistic_predictor():
st.title("Kimetrica Conflict Forecasting Model: Myanmar Analytical Activity (MAA)")
st.subheader("FORECAST")
st.write("This page enables you to make forecasting by uploading system generated or user defined dataset.")
st.write(
" Please check the following box to perform forecasting and view the data")
if st.checkbox("Do you want to upload your own data?"):
st.write(
f"Note: Currently, the file to be uploaded should have **exactly the same** format with **training dataset** which is **{current.shape[1]}** columns in the following format.",
current.head(2),
)
uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
if st.checkbox("Preview uploaded data"):
uploaded_file = pd.read_csv(
uploaded_file, low_memory=False, index_col=0).drop_duplicates(subset=['admin1', 'admin2', 'geometry',
'month_year'])
st.write("Uploaded data:", uploaded_file.head())
st.write("-" * 80)
st.text(
f"Uploaded data includes {uploaded_file.shape[1]} columns"
)
st.write("-" * 80)
start_time = datetime.datetime.now()
if st.checkbox("Forecast and preview the results with the available data"):
if st.checkbox("Preveiw the data with forecasted values"):
y_forecast_binary = model_reg.predict(X_current)
current["conflict_forecast_binary"] = [
"No conflict" if i == 0 else "Conflict" for i in y_forecast_binary
]
y_forecast_proba = model_reg.predict_proba(X_current)[:, 1]
current["conflict_forecast_probability"] = y_forecast_proba.tolist(
)
st.write(current.head(10))
if st.checkbox("Visualize conflict forecast in a binary format"):
df_evl1_b = current[[
'admin1', 'admin2', 'geometry', 'month_year', 'conflict_forecast_binary']]
cc_onset_actual = df_evl1_b.pivot(
index=['admin1', 'admin2', 'geometry'], columns='month_year', values='conflict_forecast_binary').reset_index()
cc_onset_actual.columns = cc_onset_actual.columns.get_level_values(
'month_year')
cc_onset_actual.columns = [''.join(col).strip()
for col in cc_onset_actual.columns.values]
cc_actual = cc_onset_actual.reset_index()
cc_actual['2021'] = cc_actual.iloc[:, 3:].sum(axis=1)
cc_actual['geometry'] = cc_actual['geometry'].apply(
wkt.loads)
cc_forecast = gpd.GeoDataFrame(
cc_actual, geometry='geometry')
if st.checkbox("2021: First Quarter-binary"):
fig, axes = plt.subplots(ncols=4)
ax = plt.subplots()
axes[0].set_title("2021-01")
axes[1].set_title("2021-02")
axes[2].set_title("2021-03")
axes[3].set_title("2021-04")
axes[3].legend(
title="Probability of conflict", loc="upper right")
cc_forecast.plot(
column='2021-01', cmap='OrRd', ax=axes[0], legend=True)
cc_forecast.plot(column='2021-02', cmap='OrRd',
ax=axes[1], legend=True)
cc_forecast.plot(column='2021-03', cmap='OrRd',
ax=axes[2], legend=True)
cc_forecast.plot(column='2021-04', cmap='OrRd',
ax=axes[3], legend=True)
st.pyplot(fig)
if st.checkbox("Visualize conflict forecast in a probability format"):
df_evl1_p = current[['admin1', 'admin2', 'geometry',
'month_year', 'conflict_forecast_probability']]
cc_onset_p = df_evl1_p.pivot(
index=['admin1', 'admin2', 'geometry'], columns='month_year', values='conflict_forecast_probability').reset_index()
cc_onset_p.columns = cc_onset_p.columns.get_level_values(
'month_year')
cc_onset_p.columns = [''.join(col).strip()
for col in cc_onset_p.columns.values]
cc_forecast_p = cc_onset_p.reset_index()
cc_forecast_p['geometry'] = cc_forecast_p['geometry'].apply(
wkt.loads)
cc_forecast_p = gpd.GeoDataFrame(
cc_forecast_p, geometry='geometry')
if st.checkbox("2021: First Quarter-probability"):
fig, axes = plt.subplots(ncols=4)
ax = plt.subplots()
ax = cc_forecast_p.plot(column='2021-01')
axes[0].set_title("2021-01")
axes[1].set_title("2021-02")
axes[2].set_title("2021-03")
axes[3].set_title("2021-04")
axes[3].legend(
title="Probability of conflict", loc="upper right")
cc_forecast_p.plot(
column='2021-01', cmap='OrRd', ax=axes[0], legend=True)
cc_forecast_p.plot(column='2021-02', cmap='OrRd',
ax=axes[1], legend=True)
cc_forecast_p.plot(column='2021-03', cmap='OrRd',
ax=axes[2], legend=True)
cc_forecast_p.plot(column='2021-04', cmap='OrRd',
ax=axes[3], legend=True)
st.pyplot(fig)
def | main | identifier_name |
|
app.py | = test1.reset_index()
df_evl = df_test.join(y_pred)
df_evl1 = df_evl[['admin1', 'admin2', 'geometry',
'month_year', 'cc_onset_y', 'cc_onset_prediction']]
df_evl1.cc_onset_y = df_evl1.cc_onset_y.astype(int)
cc_onset_actual = df_evl1.pivot(
index=['admin1', 'admin2', 'geometry'], columns='month_year', values='cc_onset_y')
cc_onset_actual.columns = cc_onset_actual.columns.get_level_values(
'month_year')
cc_onset_actual.columns = [''.join(col).strip()
for col in cc_onset_actual.columns.values]
cc_actual = cc_onset_actual.reset_index()
cc_actual['2019'] = cc_actual.iloc[:, 3:].sum(axis=1)
cc_actual = cc_actual[['admin1', 'admin2', 'geometry',
'2019-01', '2019-02', '2019-03', '2019-04', '2019-05', '2019-06',
'2019-07', '2019-08', '2019-09', '2019-10', '2019-11', '2019-12', '2019']]
cc_actual['geometry'] = cc_actual['geometry'].apply(wkt.loads)
cc_actual = gpd.GeoDataFrame(cc_actual, geometry='geometry')
cc_onset_prediction = df_evl1.pivot(
index=['admin1', 'admin2', 'geometry'], columns='month_year', values='cc_onset_prediction').reset_index()
cc_onset_prediction.columns = cc_onset_prediction.columns.get_level_values(
'month_year')
cc_onset_prediction.columns = [
''.join(col).strip() for col in cc_onset_prediction.columns.values]
cc_prediction = cc_onset_prediction.reset_index()
cc_prediction['2019'] = cc_onset_prediction.iloc[:, 3:].sum(axis=1)
cc_prediction = cc_prediction[['admin1', 'admin2', 'geometry', '2019-01', '2019-02', '2019-03', '2019-04', '2019-05', '2019-06',
'2019-07', '2019-08', '2019-09', '2019-10', '2019-11', '2019-12', '2019']]
cc_prediction['geometry'] = cc_prediction['geometry'].apply(wkt.loads)
cc_prediction = gpd.GeoDataFrame(cc_prediction, geometry='geometry')
def logistic_page_builder(model_reg, X_test):
st.title("Kimetrica Conflict Forecasting Model: Myanmar Analytical Activity (MAA)")
st.subheader("TRAIN AND TEST")
start_time = datetime.datetime.now()
# model_reg = logistic_train_metrics(data)
st.write("In this page, you will be able to view model performance results(error matrix and classification report). You can also visualize actual vs predicted conflict on annual and monthly basis.")
st.write(
f"The model took a total running time of {(datetime.datetime.now() - start_time).seconds} s.")
if st.checkbox("Show model error matrix"):
conf_ee = confusion_matrix(y_test, y_pred)
group_names = ["True Neg", "False Pos", "False Neg", "True Pos"]
group_counts = ["{0:0.0f}".format(value)
for value in conf_ee.flatten()]
group_percentages = [
"{0:.2%}".format(value) for value in conf_ee.flatten() / np.sum(conf_ee)
]
labels = [
f"{v1}\n{v2}\n{v3}"
for v1, v2, v3 in zip(group_names, group_counts, group_percentages)
]
labels = np.asarray(labels).reshape(2, 2)
fig, ax = plt.subplots()
ax = plt.axes()
st.write(
sns.heatmap(
conf_ee,
annot=labels,
fmt="",
cmap="Blues",
xticklabels=["No Conflict", "Conflict"],
yticklabels=["No Conflict", "Conflict"],
ax=ax,
)
)
ax.set_title("Final Model Error Matrix")
sns.set(font_scale=0.5)
st.pyplot(fig)
if st.checkbox("Show classification report"):
st.subheader('Classification Report')
report = classification_report(
y_test, y_pred)
st.write(report)
if st.checkbox("Visualize actual vs predicted conflict"):
if st.checkbox("2019: 12 months"):
fig, axes = plt.subplots(ncols=2)
ax = plt.subplots()
ax = cc_actual.plot(column='2019')
axes[0].set_title("Actual")
axes[1].set_title("Predicted")
axes[1].legend(title="Months in conflict", loc="upper right")
cc_actual.plot(column='2019', cmap='OrRd', ax=axes[0])
cc_prediction.plot(column='2019', cmap='OrRd',
legend=True, ax=axes[1])
st.pyplot(fig)
if st.checkbox("2019-01"):
fig, axes = plt.subplots(ncols=2)
ax = plt.subplots()
ax = cc_actual.plot(column='2019-01')
axes[0].set_title("Actual")
axes[1].set_title("Predicted")
axes[1].legend(title="Months in conflict", loc="upper right")
cc_actual.plot(column='2019-01', cmap='OrRd', ax=axes[0])
cc_prediction.plot(column='2019-01', cmap='OrRd',
legend=True, ax=axes[1])
st.pyplot(fig)
if st.checkbox("2019-02"):
fig, axes = plt.subplots(ncols=2)
ax = plt.subplots()
ax = cc_actual.plot(column='2019-02')
axes[0].set_title("Actual")
axes[1].set_title("Predicted")
axes[1].legend(title="Months in conflict", loc="upper right")
cc_actual.plot(column='2019-02', cmap='OrRd', ax=axes[0])
cc_prediction.plot(column='2019-02', cmap='OrRd',
legend=True, ax=axes[1])
st.pyplot(fig)
columns = X_train.shape[1]
def new_data_downloader(df):
st.write("")
st.subheader("Want to new data to perform forecasting?")
if st.checkbox("New data"):
csv = current.to_csv(index=False)
# some strings <-> bytes conversions necessary here
b64 = base64.b64encode(csv.encode()).decode()
href = f'<a href="data:file/csv;base64,{b64}">Download CSV File</a> (right-click and save as <some_name>.csv)'
st.markdown(href, unsafe_allow_html=True)
st.write("")
st.subheader(
"Want to download the new dataset to perform forecasting?")
csv = current.to_csv(index=False)
# some strings <-> bytes conversions necessary here
b64 = base64.b64encode(csv.encode()).decode()
href = f'<a href="data:file/csv;base64,{b64}">Download CSV File</a> (right-click and save as <some_name>.csv)'
st.markdown(href, unsafe_allow_html=True)
def file_uploader(uploaded_file):
st.file_uploader("Choose a CSV file", type="csv")
uploaded_file = pd.read_csv(uploaded_file, low_memory=False)
st.text("This process probably takes few seconds...")
return uploaded_file
def logistic_predictor():
st.title("Kimetrica Conflict Forecasting Model: Myanmar Analytical Activity (MAA)")
st.subheader("FORECAST")
st.write("This page enables you to make forecasting by uploading system generated or user defined dataset.")
st.write(
" Please check the following box to perform forecasting and view the data")
if st.checkbox("Do you want to upload your own data?"):
| st.write(
f"Note: Currently, the file to be uploaded should have **exactly the same** format with **training dataset** which is **{current.shape[1]}** columns in the following format.",
current.head(2),
)
uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
if st.checkbox("Preview uploaded data"):
uploaded_file = pd.read_csv(
uploaded_file, low_memory=False, index_col=0).drop_duplicates(subset=['admin1', 'admin2', 'geometry',
'month_year'])
st.write("Uploaded data:", uploaded_file.head())
st.write("-" * 80)
st.text(
f"Uploaded data includes {uploaded_file.shape[1]} columns"
)
st.write("-" * 80)
start_time = datetime.datetime.now() | conditional_block |
|
app.py |
df2 = df.drop(['Unnamed: 0',
'Unnamed: 0.1',
'admin1',
'admin2',
'geometry',
'location',
'year'], axis=1)
end_date = "2021-01"
mask = (df2['month_year'] < end_date)
df2 = df2.loc[mask]
df3 = df2.drop(['month_year'], axis=1)
X = df3[df3.columns[:-1]]
y = df3[df3.columns[-1]]
model = Pipeline([("StandardScaller", StandardScaler()),
("RF", ExtraTreesClassifier())])
model.fit(X, y)
feat_importances = model.named_steps['RF'].feature_importances_
most_important = dict(sorted(dict(
zip(X.columns, feat_importances)).items(), key=lambda x: x[1], reverse=True))
fp = pd.DataFrame(list(most_important.items()))
vip = dict(sorted(most_important.items(), key=lambda x: x[1], reverse=True))
def model_description_page_builder():
st.title("Kimetrica Conflict Forecasting Model: Myanmar Analytical Activity (MAA)")
st.write("")
st.write("")
st.subheader("MODEL DESCRIPTION")
st.write("")
st.write("The conflict data has two distinct features that require special care compared to conventional machine learning problems. These are class imbalance and recurrence.")
st.write("")
st.subheader("Class imbalance")
st.write("")
st.write("In reality, conflict occurs in a rare situation resulting in a significant class imbalance in the output data between conflict and non-conflict events. As can be seen from the following chart, overall, the percent of positive records for conflict ranges between 20 and 40 percent for most of the years. This requires a mechanism that can take into account for the less number of positive(conflict) records in the dataset.")
st.write("")
if st.checkbox("Show class imbalance"):
source = df.groupby(["year", "cc_onset_y"])[
"admin1"].count().reset_index()
c_onset_chart = (
alt.Chart(source, title="Number of conflict records by year")
.mark_bar(size=20)
.encode(
alt.X("year:O", title="year"),
alt.Y("admin1", title="percent of records"),
alt.Color("cc_onset_y:O", legend=alt.Legend(
title="conflict Status")),
)
.properties(width=500)
)
st.altair_chart(c_onset_chart)
st.write("")
st.subheader("Recurrance")
st.write("")
st.write("The second aspect of the conflict event dataset is that, once conflict occurs, it has a tendency to last for an extended number of months and years. As such, the model needs to have the capacity to trace recurrence. CFM handles this issue by incorporating a threshold of probability of confidence in claiming the events. In this case, the model takes the current situation if the confidence level drops less than the average mean difference.")
st.write("")
st.subheader("EasyEnsemble classifier")
st.write("")
st.write("Undersampling is among the popular methods of handling class-imbalance. This method entails taking a subset of the major class to train the classifier. However, this method has a main deficiency as it ignores portions of the dataset in an attempt to balance the number of positive records.")
st.write("")
st.write("Xu-Ying, Jianxin, and Zhi-Hua (2080), proposed EasyEnsemble classifier to overcome the above problem of under sampling. EasyEnsemble forecast samples several subsets from the majority class and combines for a final decision. These independent samples ultimately take into account the different aspects of the entire dataset.")
st.write("")
st.subheader("Output data")
if st.checkbox('View output variables'):
st.write("* `cc_onset_y`: is our target variable representing conflict in a binary (0, no conflict; 1, conflict) and probability format.")
st.subheader("Input data")
if st.checkbox('View input variables'):
st.write("* `cc_onset_x`: current and previous conflict at admin2 level. Data comes from ACLED compiled on a monthly.")
st.write("")
st.write("* `cellphone`: household access to cell phones")
st.write("")
st.write("* `electricity`: household access to electricity")
st.write("")
st.write("* `ethnicty_count`: number of ethnic groups")
st.write("")
st.write("* `fatalities`: number of fatalities due to conflict")
st.write("")
st.write("* `gender_index`: gender index")
st.write("")
st.write("* `infant_mortality`: infant mortality rate ")
st.write("")
st.write("* `lc`: landuse change index")
st.write("")
st.write("* `mean_rf`: average monthly rainfall")
st.write("")
st.write("* `patrilocal_index`: patriolocal index")
st.write("")
st.write("* `pop_density`: number of people per KM2")
st.write("")
st.write("* `poverty`: percent of poor households")
st.write("")
st.write("* `rice_price`: monthly rice price")
st.write("")
st.write("* `stunting`: percentage of stunted children ")
st.write("")
st.write("* `tv`: household access to tv ")
st.write("")
st.write("* `urban_pop`: percent of population in urban areas")
st.write("")
st.write("* wasting`: percentage of wasted children")
st.write("")
st.write("* `pulses_price`: monthly pulses price")
st.write("")
st.write("* `years_schooling`: mean years of schooling ")
st.write("")
st.write(
"* `youth_buldge`: proportion of working age group to the active population")
st.write("")
st.write("* `drought_risk`: evaporative stress index (4 week)")
st.subheader("Feature Importances")
if st.checkbox("View feature importances"):
source = pd.DataFrame({
'Feature': list(vip.keys())[:20],
'Importance': list(vip.values())[:20]
})
feature_importance_chart = alt.Chart(source, title="Twenty most important predictors of conflict").mark_bar().encode(
x='Importance:Q',
y=alt.Y('Feature:N', sort='-x'),
color='Feature',
tooltip=['Feature', 'Importance']
).properties(
width=500)
st.altair_chart(feature_importance_chart)
def logistic_train_metrics(df):
"""Return metrics and model for Logistic Regression."""
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
model_reg = dill.load(open('maa_conflict_model.dill', 'rb'))
return model_reg
model_reg = logistic_train_metrics(df)
y_pred = model_reg.predict(X_test)
y_pred = pd.DataFrame(y_pred.astype(int))
y_pred.rename(columns={0: 'cc_onset_prediction'}, inplace=True)
df_test = test1.reset_index()
df_evl = df_test.join(y_pred)
df_evl1 = df_evl[['admin1', 'admin2', 'geometry',
'month_year', 'cc_onset_y', 'cc_onset_prediction']]
df_evl1.cc_onset_y = df_evl1.cc_onset_y.astype(int)
cc_onset_actual = df_evl1.pivot(
index=['admin1', 'admin2', 'geometry'], columns='month_year', values='cc_onset_y')
| st.title("Kimetrica Conflict Forecasting Model: Myanmar Analytical Activity (MAA)")
st.write("")
st.write("")
st.subheader("INTRODUCTION")
st.write("")
st.write(
"An early-warning system that can meaningfully forecast conflict in its various forms is necessary to respond to crises ahead of time. The ability to predict where and when conflict is more likely to occur will have a significant impact on reducing the devastating consequences of conflict. The goal of this conflict model is to forecast armed conflict over time and space in Myanmar at the second administrative level and on a monthly basis. This document will outline the model construction methodology and the model output.")
st.write("")
st.write("Most predictive models for conflict use country-level data in yearly time increments (Aas Rustad et al., 2011). One problem with this type of analysis is that it assumes that conflict is distributed uniformly throughout the country and uniformly throughout the year. This situation is rarely the case as conflict usually takes place on the borders of countries. For a model to be maximally useful, it must predict where in the country the conflict is likely to occur. Likewise, for a model to be useful for decision-makers, it must be able to predict when the conflict will occur (Brandt et al., 2011).")
st.write("")
st.write("To satisfy the requirements of the MAA project, we have built a model to predict conflict at the county (admin2) level at monthly time intervals one year into the future. This application presents the steps taken to build the model, visualize the data and result , run the model and model performance. ")
st.write("")
st.write("")
st.subheader("INSTRUCTION")
st.write("")
st.write(
"This website runs the conflict model and the associated pages that are useful for the users to understand the model outputs. The navigation buttons are provided in the drop down list under the main menu. The Home button represents the current page. You can navigate between pages by clicking a list of buttons including the page to run the model."
)
st.write("")
st.write("") | identifier_body |
|
app.py | ")
st.write("")
st.write("* `ethnicty_count`: number of ethnic groups")
st.write("")
st.write("* `fatalities`: number of fatalities due to conflict")
st.write("")
st.write("* `gender_index`: gender index")
st.write("")
st.write("* `infant_mortality`: infant mortality rate ")
st.write("")
st.write("* `lc`: landuse change index")
st.write("")
st.write("* `mean_rf`: average monthly rainfall")
st.write("")
st.write("* `patrilocal_index`: patriolocal index")
st.write("")
st.write("* `pop_density`: number of people per KM2")
st.write("")
st.write("* `poverty`: percent of poor households")
st.write("")
st.write("* `rice_price`: monthly rice price")
st.write("")
st.write("* `stunting`: percentage of stunted children ")
st.write("")
st.write("* `tv`: household access to tv ")
st.write("")
st.write("* `urban_pop`: percent of population in urban areas")
st.write("")
st.write("* wasting`: percentage of wasted children")
st.write("")
st.write("* `pulses_price`: monthly pulses price")
st.write("")
st.write("* `years_schooling`: mean years of schooling ")
st.write("")
st.write(
"* `youth_buldge`: proportion of working age group to the active population")
st.write("")
st.write("* `drought_risk`: evaporative stress index (4 week)")
st.subheader("Feature Importances")
if st.checkbox("View feature importances"):
source = pd.DataFrame({
'Feature': list(vip.keys())[:20],
'Importance': list(vip.values())[:20]
})
feature_importance_chart = alt.Chart(source, title="Twenty most important predictors of conflict").mark_bar().encode(
x='Importance:Q',
y=alt.Y('Feature:N', sort='-x'),
color='Feature',
tooltip=['Feature', 'Importance']
).properties(
width=500)
st.altair_chart(feature_importance_chart)
def logistic_train_metrics(df):
"""Return metrics and model for Logistic Regression."""
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
model_reg = dill.load(open('maa_conflict_model.dill', 'rb'))
return model_reg
model_reg = logistic_train_metrics(df)
y_pred = model_reg.predict(X_test)
y_pred = pd.DataFrame(y_pred.astype(int))
y_pred.rename(columns={0: 'cc_onset_prediction'}, inplace=True)
df_test = test1.reset_index()
df_evl = df_test.join(y_pred)
df_evl1 = df_evl[['admin1', 'admin2', 'geometry',
'month_year', 'cc_onset_y', 'cc_onset_prediction']]
df_evl1.cc_onset_y = df_evl1.cc_onset_y.astype(int)
cc_onset_actual = df_evl1.pivot(
index=['admin1', 'admin2', 'geometry'], columns='month_year', values='cc_onset_y')
cc_onset_actual.columns = cc_onset_actual.columns.get_level_values(
'month_year')
cc_onset_actual.columns = [''.join(col).strip()
for col in cc_onset_actual.columns.values]
cc_actual = cc_onset_actual.reset_index()
cc_actual['2019'] = cc_actual.iloc[:, 3:].sum(axis=1)
cc_actual = cc_actual[['admin1', 'admin2', 'geometry',
'2019-01', '2019-02', '2019-03', '2019-04', '2019-05', '2019-06',
'2019-07', '2019-08', '2019-09', '2019-10', '2019-11', '2019-12', '2019']]
cc_actual['geometry'] = cc_actual['geometry'].apply(wkt.loads)
cc_actual = gpd.GeoDataFrame(cc_actual, geometry='geometry')
cc_onset_prediction = df_evl1.pivot(
index=['admin1', 'admin2', 'geometry'], columns='month_year', values='cc_onset_prediction').reset_index()
cc_onset_prediction.columns = cc_onset_prediction.columns.get_level_values(
'month_year')
cc_onset_prediction.columns = [
''.join(col).strip() for col in cc_onset_prediction.columns.values]
cc_prediction = cc_onset_prediction.reset_index()
cc_prediction['2019'] = cc_onset_prediction.iloc[:, 3:].sum(axis=1)
cc_prediction = cc_prediction[['admin1', 'admin2', 'geometry', '2019-01', '2019-02', '2019-03', '2019-04', '2019-05', '2019-06',
'2019-07', '2019-08', '2019-09', '2019-10', '2019-11', '2019-12', '2019']]
cc_prediction['geometry'] = cc_prediction['geometry'].apply(wkt.loads)
cc_prediction = gpd.GeoDataFrame(cc_prediction, geometry='geometry')
def logistic_page_builder(model_reg, X_test):
st.title("Kimetrica Conflict Forecasting Model: Myanmar Analytical Activity (MAA)")
st.subheader("TRAIN AND TEST")
start_time = datetime.datetime.now()
# model_reg = logistic_train_metrics(data)
st.write("In this page, you will be able to view model performance results(error matrix and classification report). You can also visualize actual vs predicted conflict on annual and monthly basis.")
st.write(
f"The model took a total running time of {(datetime.datetime.now() - start_time).seconds} s.")
if st.checkbox("Show model error matrix"):
conf_ee = confusion_matrix(y_test, y_pred)
group_names = ["True Neg", "False Pos", "False Neg", "True Pos"]
group_counts = ["{0:0.0f}".format(value)
for value in conf_ee.flatten()]
group_percentages = [
"{0:.2%}".format(value) for value in conf_ee.flatten() / np.sum(conf_ee)
]
labels = [
f"{v1}\n{v2}\n{v3}"
for v1, v2, v3 in zip(group_names, group_counts, group_percentages)
]
labels = np.asarray(labels).reshape(2, 2)
fig, ax = plt.subplots()
ax = plt.axes()
st.write(
sns.heatmap(
conf_ee,
annot=labels,
fmt="",
cmap="Blues",
xticklabels=["No Conflict", "Conflict"],
yticklabels=["No Conflict", "Conflict"],
ax=ax,
)
)
ax.set_title("Final Model Error Matrix")
sns.set(font_scale=0.5)
st.pyplot(fig)
if st.checkbox("Show classification report"):
st.subheader('Classification Report')
report = classification_report(
y_test, y_pred)
st.write(report)
if st.checkbox("Visualize actual vs predicted conflict"):
if st.checkbox("2019: 12 months"):
fig, axes = plt.subplots(ncols=2)
ax = plt.subplots()
ax = cc_actual.plot(column='2019')
axes[0].set_title("Actual")
axes[1].set_title("Predicted")
axes[1].legend(title="Months in conflict", loc="upper right")
cc_actual.plot(column='2019', cmap='OrRd', ax=axes[0])
cc_prediction.plot(column='2019', cmap='OrRd',
legend=True, ax=axes[1])
st.pyplot(fig)
if st.checkbox("2019-01"):
fig, axes = plt.subplots(ncols=2)
ax = plt.subplots()
ax = cc_actual.plot(column='2019-01')
axes[0].set_title("Actual")
axes[1].set_title("Predicted")
axes[1].legend(title="Months in conflict", loc="upper right")
cc_actual.plot(column='2019-01', cmap='OrRd', ax=axes[0])
cc_prediction.plot(column='2019-01', cmap='OrRd',
legend=True, ax=axes[1])
st.pyplot(fig)
if st.checkbox("2019-02"):
fig, axes = plt.subplots(ncols=2)
ax = plt.subplots()
ax = cc_actual.plot(column='2019-02')
axes[0].set_title("Actual")
axes[1].set_title("Predicted")
axes[1].legend(title="Months in conflict", loc="upper right")
cc_actual.plot(column='2019-01', cmap='OrRd', ax=axes[0]) | cc_prediction.plot(column='2019-01', cmap='OrRd',
legend=True, ax=axes[1])
st.pyplot(fig)
| random_line_split |
|
emacs.js | dir < 0 ? -1 : 0));
if (!next) { // End/beginning of line reached
if (line == (dir < 0 ? cm.firstLine() : cm.lastLine())) return Pos(line, ch);
text = cm.getLine(line + dir);
if (!/\S/.test(text)) return Pos(line, ch);
line += dir;
ch = dir < 0 ? text.length : 0;
continue;
}
if (sawWord && /[!?.]/.test(next)) return Pos(line, ch + (dir > 0 ? 1 : 0));
if (!sawWord) sawWord = /\w/.test(next);
ch += dir;
}
}
function byExpr(cm, pos, dir) {
var wrap;
if (cm.findMatchingBracket && (wrap = cm.findMatchingBracket(pos, {strict: true}))
&& wrap.match && (wrap.forward ? 1 : -1) == dir)
return dir > 0 ? Pos(wrap.to.line, wrap.to.ch + 1) : wrap.to;
for (var first = true;; first = false) {
var token = cm.getTokenAt(pos);
var after = Pos(pos.line, dir < 0 ? token.start : token.end);
if (first && dir > 0 && token.end == pos.ch || !/\w/.test(token.string)) {
var newPos = cm.findPosH(after, dir, "char");
if (posEq(after, newPos)) return pos;
else pos = newPos;
} else {
return after;
}
}
}
// Prefixes (only crudely supported)
function getPrefix(cm, precise) {
var digits = cm.state.emacsPrefix;
if (!digits) return precise ? null : 1;
clearPrefix(cm);
return digits == "-" ? -1 : Number(digits);
}
function repeated(cmd) {
var f = typeof cmd == "string" ? function(cm) { cm.execCommand(cmd); } : cmd;
return function(cm) {
var prefix = getPrefix(cm);
f(cm);
for (var i = 1; i < prefix; ++i) f(cm);
};
}
function findEnd(cm, pos, by, dir) {
var prefix = getPrefix(cm);
if (prefix < 0) { dir = -dir; prefix = -prefix; }
for (var i = 0; i < prefix; ++i) {
var newPos = by(cm, pos, dir);
if (posEq(newPos, pos)) break;
pos = newPos;
}
return pos;
}
function move(by, dir) {
var f = function(cm) {
cm.extendSelection(findEnd(cm, cm.getCursor(), by, dir));
};
f.motion = true;
return f;
}
function killTo(cm, by, dir, ring) {
var selections = cm.listSelections(), cursor;
var i = selections.length;
while (i--) {
cursor = selections[i].head;
_kill(cm, cursor, findEnd(cm, cursor, by, dir), ring);
}
}
function _killRegion(cm, ring) {
if (cm.somethingSelected()) {
var selections = cm.listSelections(), selection;
var i = selections.length;
while (i--) {
selection = selections[i];
_kill(cm, selection.anchor, selection.head, ring);
}
return true;
}
}
function addPrefix(cm, digit) {
if (cm.state.emacsPrefix) {
if (digit != "-") cm.state.emacsPrefix += digit;
return;
}
// Not active yet
cm.state.emacsPrefix = digit;
cm.on("keyHandled", maybeClearPrefix);
cm.on("inputRead", maybeDuplicateInput);
}
var prefixPreservingKeys = {"Alt-G": true, "Ctrl-X": true, "Ctrl-Q": true, "Ctrl-U": true};
function maybeClearPrefix(cm, arg) {
if (!cm.state.emacsPrefixMap && !prefixPreservingKeys.hasOwnProperty(arg))
clearPrefix(cm);
}
function clearPrefix(cm) {
cm.state.emacsPrefix = null;
cm.off("keyHandled", maybeClearPrefix);
cm.off("inputRead", maybeDuplicateInput);
}
function maybeDuplicateInput(cm, event) {
var dup = getPrefix(cm);
if (dup > 1 && event.origin == "+input") {
var one = event.text.join("\n"), txt = "";
for (var i = 1; i < dup; ++i) txt += one;
cm.replaceSelection(txt);
}
}
function maybeRemovePrefixMap(cm, arg) {
if (typeof arg == "string" && (/^\d$/.test(arg) || arg == "Ctrl-U")) return;
cm.removeKeyMap(prefixMap);
cm.state.emacsPrefixMap = false;
cm.off("keyHandled", maybeRemovePrefixMap);
cm.off("inputRead", maybeRemovePrefixMap);
}
// Utilities
cmds.setMark = function (cm) {
cm.setCursor(cm.getCursor());
cm.setExtending(!cm.getExtending());
cm.on("change", function() { cm.setExtending(false); });
}
function | (cm) {
cm.setExtending(false);
cm.setCursor(cm.getCursor());
}
function makePrompt(msg) {
var fragment = document.createDocumentFragment();
var input = document.createElement("input");
input.setAttribute("type", "text");
input.style.width = "10em";
fragment.appendChild(document.createTextNode(msg + ": "));
fragment.appendChild(input);
return fragment;
}
function getInput(cm, msg, f) {
if (cm.openDialog)
cm.openDialog(makePrompt(msg), f, {bottom: true});
else
f(prompt(msg, ""));
}
function operateOnWord(cm, op) {
var start = cm.getCursor(), end = cm.findPosH(start, 1, "word");
cm.replaceRange(op(cm.getRange(start, end)), start, end);
cm.setCursor(end);
}
function toEnclosingExpr(cm) {
var pos = cm.getCursor(), line = pos.line, ch = pos.ch;
var stack = [];
while (line >= cm.firstLine()) {
var text = cm.getLine(line);
for (var i = ch == null ? text.length : ch; i > 0;) {
var ch = text.charAt(--i);
if (ch == ")")
stack.push("(");
else if (ch == "]")
stack.push("[");
else if (ch == "}")
stack.push("{");
else if (/[\(\{\[]/.test(ch) && (!stack.length || stack.pop() != ch))
return cm.extendSelection(Pos(line, i));
}
--line; ch = null;
}
}
// Commands. Names should match emacs function names (albeit in camelCase)
// except where emacs function names collide with code mirror core commands.
cmds.killRegion = function(cm) {
_kill(cm, cm.getCursor("start"), cm.getCursor("end"), true);
};
// Maps to emacs kill-line
cmds.killLineEmacs = repeated(function(cm) {
var start = cm.getCursor(), end = cm.clipPos(Pos(start.line));
var text = cm.getRange(start, end);
if (!/\S/.test(text)) {
text += "\n";
end = Pos(start.line + 1, 0);
}
_kill(cm, start, end, "grow", text);
});
cmds.killRingSave = function(cm) {
addToRing(cm.getSelection());
clearMark(cm);
};
cmds.yank = function(cm) {
var start = cm.getCursor();
cm.replaceRange(getFromRing(getPrefix(cm)), start, start, "paste");
cm.setSelection(start, cm.getCursor());
};
cmds.yankPop = function(cm) {
cm.replaceSelection(popFromRing(), "around", "paste");
};
cmds.forwardChar = move(byChar, 1);
cmds.backwardChar = move(byChar, -1)
cmds.deleteChar = function(cm) { killTo(cm, byChar, 1, false); };
cmds.deleteForwardChar = function(cm) {
_killRegion(cm, false) || killTo(cm, byChar, 1, false);
};
cmds.deleteBackwardChar = function(cm) {
_killRegion(cm, false) || killTo(cm, byChar, -1, false);
};
cmds.forwardWord = move(byWord, 1);
cmds.backwardWord = move(byWord, -1);
cmds.killWord = function(cm) { killTo(cm, byWord, 1, "grow"); };
cmds.backwardKillWord = function(cm) { killTo(cm, byWord, -1, "grow"); };
cmds.nextLine = move(byLine, 1);
cmds.previousLine = move(byLine, -1);
cmds.scrollDownCommand = move(byPage, -1);
cmds.scrollUpCommand = move(byPage, 1);
cmds.backwardParagraph = move(byParagraph, -1);
cmds.forwardParagraph = move(byParagraph, 1);
cmds.backwardSentence = move(bySentence, -1);
cmds.forwardSentence = move(bySentence, 1);
cmds.killSentence = function(cm) { killTo | clearMark | identifier_name |
emacs.js | dir < 0 ? -1 : 0));
if (!next) { // End/beginning of line reached
if (line == (dir < 0 ? cm.firstLine() : cm.lastLine())) return Pos(line, ch);
text = cm.getLine(line + dir);
if (!/\S/.test(text)) return Pos(line, ch);
line += dir;
ch = dir < 0 ? text.length : 0;
continue;
}
if (sawWord && /[!?.]/.test(next)) return Pos(line, ch + (dir > 0 ? 1 : 0));
if (!sawWord) sawWord = /\w/.test(next);
ch += dir;
}
}
function byExpr(cm, pos, dir) {
var wrap;
if (cm.findMatchingBracket && (wrap = cm.findMatchingBracket(pos, {strict: true}))
&& wrap.match && (wrap.forward ? 1 : -1) == dir)
return dir > 0 ? Pos(wrap.to.line, wrap.to.ch + 1) : wrap.to;
for (var first = true;; first = false) {
var token = cm.getTokenAt(pos);
var after = Pos(pos.line, dir < 0 ? token.start : token.end);
if (first && dir > 0 && token.end == pos.ch || !/\w/.test(token.string)) {
var newPos = cm.findPosH(after, dir, "char");
if (posEq(after, newPos)) return pos;
else pos = newPos;
} else {
return after;
}
}
}
// Prefixes (only crudely supported)
function getPrefix(cm, precise) {
var digits = cm.state.emacsPrefix;
if (!digits) return precise ? null : 1;
clearPrefix(cm);
return digits == "-" ? -1 : Number(digits);
}
function repeated(cmd) {
var f = typeof cmd == "string" ? function(cm) { cm.execCommand(cmd); } : cmd;
return function(cm) {
var prefix = getPrefix(cm);
f(cm);
for (var i = 1; i < prefix; ++i) f(cm);
};
}
function findEnd(cm, pos, by, dir) {
var prefix = getPrefix(cm);
if (prefix < 0) { dir = -dir; prefix = -prefix; }
for (var i = 0; i < prefix; ++i) {
var newPos = by(cm, pos, dir);
if (posEq(newPos, pos)) break;
pos = newPos;
}
return pos;
}
function move(by, dir) {
var f = function(cm) {
cm.extendSelection(findEnd(cm, cm.getCursor(), by, dir));
};
f.motion = true;
return f;
}
function killTo(cm, by, dir, ring) {
var selections = cm.listSelections(), cursor;
var i = selections.length;
while (i--) {
cursor = selections[i].head;
_kill(cm, cursor, findEnd(cm, cursor, by, dir), ring);
}
}
function _killRegion(cm, ring) {
if (cm.somethingSelected()) {
var selections = cm.listSelections(), selection;
var i = selections.length;
while (i--) {
selection = selections[i];
_kill(cm, selection.anchor, selection.head, ring);
}
return true;
}
}
function addPrefix(cm, digit) {
if (cm.state.emacsPrefix) {
if (digit != "-") cm.state.emacsPrefix += digit;
return;
}
// Not active yet
cm.state.emacsPrefix = digit;
cm.on("keyHandled", maybeClearPrefix);
cm.on("inputRead", maybeDuplicateInput);
}
var prefixPreservingKeys = {"Alt-G": true, "Ctrl-X": true, "Ctrl-Q": true, "Ctrl-U": true};
function maybeClearPrefix(cm, arg) {
if (!cm.state.emacsPrefixMap && !prefixPreservingKeys.hasOwnProperty(arg))
clearPrefix(cm);
}
function clearPrefix(cm) {
cm.state.emacsPrefix = null;
cm.off("keyHandled", maybeClearPrefix);
cm.off("inputRead", maybeDuplicateInput);
}
function maybeDuplicateInput(cm, event) |
function maybeRemovePrefixMap(cm, arg) {
if (typeof arg == "string" && (/^\d$/.test(arg) || arg == "Ctrl-U")) return;
cm.removeKeyMap(prefixMap);
cm.state.emacsPrefixMap = false;
cm.off("keyHandled", maybeRemovePrefixMap);
cm.off("inputRead", maybeRemovePrefixMap);
}
// Utilities
cmds.setMark = function (cm) {
cm.setCursor(cm.getCursor());
cm.setExtending(!cm.getExtending());
cm.on("change", function() { cm.setExtending(false); });
}
function clearMark(cm) {
cm.setExtending(false);
cm.setCursor(cm.getCursor());
}
function makePrompt(msg) {
var fragment = document.createDocumentFragment();
var input = document.createElement("input");
input.setAttribute("type", "text");
input.style.width = "10em";
fragment.appendChild(document.createTextNode(msg + ": "));
fragment.appendChild(input);
return fragment;
}
function getInput(cm, msg, f) {
if (cm.openDialog)
cm.openDialog(makePrompt(msg), f, {bottom: true});
else
f(prompt(msg, ""));
}
function operateOnWord(cm, op) {
var start = cm.getCursor(), end = cm.findPosH(start, 1, "word");
cm.replaceRange(op(cm.getRange(start, end)), start, end);
cm.setCursor(end);
}
function toEnclosingExpr(cm) {
var pos = cm.getCursor(), line = pos.line, ch = pos.ch;
var stack = [];
while (line >= cm.firstLine()) {
var text = cm.getLine(line);
for (var i = ch == null ? text.length : ch; i > 0;) {
var ch = text.charAt(--i);
if (ch == ")")
stack.push("(");
else if (ch == "]")
stack.push("[");
else if (ch == "}")
stack.push("{");
else if (/[\(\{\[]/.test(ch) && (!stack.length || stack.pop() != ch))
return cm.extendSelection(Pos(line, i));
}
--line; ch = null;
}
}
// Commands. Names should match emacs function names (albeit in camelCase)
// except where emacs function names collide with code mirror core commands.
cmds.killRegion = function(cm) {
_kill(cm, cm.getCursor("start"), cm.getCursor("end"), true);
};
// Maps to emacs kill-line
cmds.killLineEmacs = repeated(function(cm) {
var start = cm.getCursor(), end = cm.clipPos(Pos(start.line));
var text = cm.getRange(start, end);
if (!/\S/.test(text)) {
text += "\n";
end = Pos(start.line + 1, 0);
}
_kill(cm, start, end, "grow", text);
});
cmds.killRingSave = function(cm) {
addToRing(cm.getSelection());
clearMark(cm);
};
cmds.yank = function(cm) {
var start = cm.getCursor();
cm.replaceRange(getFromRing(getPrefix(cm)), start, start, "paste");
cm.setSelection(start, cm.getCursor());
};
cmds.yankPop = function(cm) {
cm.replaceSelection(popFromRing(), "around", "paste");
};
cmds.forwardChar = move(byChar, 1);
cmds.backwardChar = move(byChar, -1)
cmds.deleteChar = function(cm) { killTo(cm, byChar, 1, false); };
cmds.deleteForwardChar = function(cm) {
_killRegion(cm, false) || killTo(cm, byChar, 1, false);
};
cmds.deleteBackwardChar = function(cm) {
_killRegion(cm, false) || killTo(cm, byChar, -1, false);
};
cmds.forwardWord = move(byWord, 1);
cmds.backwardWord = move(byWord, -1);
cmds.killWord = function(cm) { killTo(cm, byWord, 1, "grow"); };
cmds.backwardKillWord = function(cm) { killTo(cm, byWord, -1, "grow"); };
cmds.nextLine = move(byLine, 1);
cmds.previousLine = move(byLine, -1);
cmds.scrollDownCommand = move(byPage, -1);
cmds.scrollUpCommand = move(byPage, 1);
cmds.backwardParagraph = move(byParagraph, -1);
cmds.forwardParagraph = move(byParagraph, 1);
cmds.backwardSentence = move(bySentence, -1);
cmds.forwardSentence = move(bySentence, 1);
cmds.killSentence = function(cm) { kill | {
var dup = getPrefix(cm);
if (dup > 1 && event.origin == "+input") {
var one = event.text.join("\n"), txt = "";
for (var i = 1; i < dup; ++i) txt += one;
cm.replaceSelection(txt);
}
} | identifier_body |
emacs.js | dir < 0 ? -1 : 0));
if (!next) { // End/beginning of line reached
if (line == (dir < 0 ? cm.firstLine() : cm.lastLine())) return Pos(line, ch);
text = cm.getLine(line + dir);
if (!/\S/.test(text)) return Pos(line, ch);
line += dir;
ch = dir < 0 ? text.length : 0;
continue;
}
if (sawWord && /[!?.]/.test(next)) return Pos(line, ch + (dir > 0 ? 1 : 0));
if (!sawWord) sawWord = /\w/.test(next);
ch += dir;
}
}
function byExpr(cm, pos, dir) {
var wrap;
if (cm.findMatchingBracket && (wrap = cm.findMatchingBracket(pos, {strict: true}))
&& wrap.match && (wrap.forward ? 1 : -1) == dir)
return dir > 0 ? Pos(wrap.to.line, wrap.to.ch + 1) : wrap.to;
for (var first = true;; first = false) {
var token = cm.getTokenAt(pos);
var after = Pos(pos.line, dir < 0 ? token.start : token.end);
if (first && dir > 0 && token.end == pos.ch || !/\w/.test(token.string)) {
var newPos = cm.findPosH(after, dir, "char");
if (posEq(after, newPos)) return pos;
else pos = newPos;
} else {
return after;
}
}
}
// Prefixes (only crudely supported)
function getPrefix(cm, precise) {
var digits = cm.state.emacsPrefix;
if (!digits) return precise ? null : 1;
clearPrefix(cm);
return digits == "-" ? -1 : Number(digits);
}
function repeated(cmd) {
var f = typeof cmd == "string" ? function(cm) { cm.execCommand(cmd); } : cmd;
return function(cm) {
var prefix = getPrefix(cm);
f(cm);
for (var i = 1; i < prefix; ++i) f(cm);
};
}
function findEnd(cm, pos, by, dir) {
var prefix = getPrefix(cm);
if (prefix < 0) { dir = -dir; prefix = -prefix; }
for (var i = 0; i < prefix; ++i) {
var newPos = by(cm, pos, dir);
if (posEq(newPos, pos)) break;
pos = newPos;
}
return pos;
}
function move(by, dir) {
var f = function(cm) {
cm.extendSelection(findEnd(cm, cm.getCursor(), by, dir));
};
f.motion = true;
return f;
}
function killTo(cm, by, dir, ring) {
var selections = cm.listSelections(), cursor;
var i = selections.length;
while (i--) {
cursor = selections[i].head;
_kill(cm, cursor, findEnd(cm, cursor, by, dir), ring);
}
}
function _killRegion(cm, ring) {
if (cm.somethingSelected()) {
var selections = cm.listSelections(), selection;
var i = selections.length;
while (i--) {
selection = selections[i];
_kill(cm, selection.anchor, selection.head, ring);
}
return true;
}
}
function addPrefix(cm, digit) {
if (cm.state.emacsPrefix) {
if (digit != "-") cm.state.emacsPrefix += digit;
return;
}
// Not active yet
cm.state.emacsPrefix = digit;
cm.on("keyHandled", maybeClearPrefix);
cm.on("inputRead", maybeDuplicateInput);
}
var prefixPreservingKeys = {"Alt-G": true, "Ctrl-X": true, "Ctrl-Q": true, "Ctrl-U": true}; |
function maybeClearPrefix(cm, arg) {
if (!cm.state.emacsPrefixMap && !prefixPreservingKeys.hasOwnProperty(arg))
clearPrefix(cm);
}
function clearPrefix(cm) {
cm.state.emacsPrefix = null;
cm.off("keyHandled", maybeClearPrefix);
cm.off("inputRead", maybeDuplicateInput);
}
function maybeDuplicateInput(cm, event) {
var dup = getPrefix(cm);
if (dup > 1 && event.origin == "+input") {
var one = event.text.join("\n"), txt = "";
for (var i = 1; i < dup; ++i) txt += one;
cm.replaceSelection(txt);
}
}
function maybeRemovePrefixMap(cm, arg) {
if (typeof arg == "string" && (/^\d$/.test(arg) || arg == "Ctrl-U")) return;
cm.removeKeyMap(prefixMap);
cm.state.emacsPrefixMap = false;
cm.off("keyHandled", maybeRemovePrefixMap);
cm.off("inputRead", maybeRemovePrefixMap);
}
// Utilities
cmds.setMark = function (cm) {
cm.setCursor(cm.getCursor());
cm.setExtending(!cm.getExtending());
cm.on("change", function() { cm.setExtending(false); });
}
function clearMark(cm) {
cm.setExtending(false);
cm.setCursor(cm.getCursor());
}
function makePrompt(msg) {
var fragment = document.createDocumentFragment();
var input = document.createElement("input");
input.setAttribute("type", "text");
input.style.width = "10em";
fragment.appendChild(document.createTextNode(msg + ": "));
fragment.appendChild(input);
return fragment;
}
function getInput(cm, msg, f) {
if (cm.openDialog)
cm.openDialog(makePrompt(msg), f, {bottom: true});
else
f(prompt(msg, ""));
}
function operateOnWord(cm, op) {
var start = cm.getCursor(), end = cm.findPosH(start, 1, "word");
cm.replaceRange(op(cm.getRange(start, end)), start, end);
cm.setCursor(end);
}
function toEnclosingExpr(cm) {
var pos = cm.getCursor(), line = pos.line, ch = pos.ch;
var stack = [];
while (line >= cm.firstLine()) {
var text = cm.getLine(line);
for (var i = ch == null ? text.length : ch; i > 0;) {
var ch = text.charAt(--i);
if (ch == ")")
stack.push("(");
else if (ch == "]")
stack.push("[");
else if (ch == "}")
stack.push("{");
else if (/[\(\{\[]/.test(ch) && (!stack.length || stack.pop() != ch))
return cm.extendSelection(Pos(line, i));
}
--line; ch = null;
}
}
// Commands. Names should match emacs function names (albeit in camelCase)
// except where emacs function names collide with code mirror core commands.
cmds.killRegion = function(cm) {
_kill(cm, cm.getCursor("start"), cm.getCursor("end"), true);
};
// Maps to emacs kill-line
cmds.killLineEmacs = repeated(function(cm) {
var start = cm.getCursor(), end = cm.clipPos(Pos(start.line));
var text = cm.getRange(start, end);
if (!/\S/.test(text)) {
text += "\n";
end = Pos(start.line + 1, 0);
}
_kill(cm, start, end, "grow", text);
});
cmds.killRingSave = function(cm) {
addToRing(cm.getSelection());
clearMark(cm);
};
cmds.yank = function(cm) {
var start = cm.getCursor();
cm.replaceRange(getFromRing(getPrefix(cm)), start, start, "paste");
cm.setSelection(start, cm.getCursor());
};
cmds.yankPop = function(cm) {
cm.replaceSelection(popFromRing(), "around", "paste");
};
cmds.forwardChar = move(byChar, 1);
cmds.backwardChar = move(byChar, -1)
cmds.deleteChar = function(cm) { killTo(cm, byChar, 1, false); };
cmds.deleteForwardChar = function(cm) {
_killRegion(cm, false) || killTo(cm, byChar, 1, false);
};
cmds.deleteBackwardChar = function(cm) {
_killRegion(cm, false) || killTo(cm, byChar, -1, false);
};
cmds.forwardWord = move(byWord, 1);
cmds.backwardWord = move(byWord, -1);
cmds.killWord = function(cm) { killTo(cm, byWord, 1, "grow"); };
cmds.backwardKillWord = function(cm) { killTo(cm, byWord, -1, "grow"); };
cmds.nextLine = move(byLine, 1);
cmds.previousLine = move(byLine, -1);
cmds.scrollDownCommand = move(byPage, -1);
cmds.scrollUpCommand = move(byPage, 1);
cmds.backwardParagraph = move(byParagraph, -1);
cmds.forwardParagraph = move(byParagraph, 1);
cmds.backwardSentence = move(bySentence, -1);
cmds.forwardSentence = move(bySentence, 1);
cmds.killSentence = function(cm) { killTo | random_line_split |
|
parse.rs | ;
} else {
return None;
}
}
}
}
fn scan_path(lines: &Lines, p: Point, d: Direction) -> Vec<Point> {
if !lines.at(p).map(|c| can_go(c, d)).unwrap_or(false) {
return vec![];
}
let mut ret = vec![];
let mut it = PathIter::new(&lines, p, d);
while let Some(next) = it.next() {
if ret.contains(&next) {
return ret;
}
ret.push(next);
}
ret
}
pub fn boxes(lines: &Lines) -> Vec<TBox> {
top_lefts(lines)
.into_iter()
.filter_map(|tl| {
let tr = scan_dir(lines, tl.0, Direction::Rt)?;
let bl = scan_dir(lines, tl.0, Direction::Dn)?;
let br = scan_dir(lines, bl.0, Direction::Rt)?;
let br2 = scan_dir(lines, tr.0, Direction::Dn)?;
if br2 != br {
return None;
}
Some(TBox(tl.0, br.0))
})
.collect()
}
pub fn border_in_dir(b: TBox, d: Direction) -> TBox {
use Direction::*;
match d {
Up => TBox::from((b.0, Point::from((b.0.row, b.1.col)))),
Dn => TBox::from((Point::from((b.1.row, b.0.col)), b.1)),
Lt => TBox::from((b.0, Point::from((b.1.row, b.0.col)))),
Rt => TBox::from((Point::from((b.0.row, b.1.col)), b.1)),
}
}
fn border(b: TBox) -> Vec<(Point, Direction)> {
Direction::VALUES
.into_iter()
// get eg top border
.map(|d| (border_in_dir(b, d), d))
// push top border up to get just outside top border
.filter_map(|(b, d)| b.in_dir(d).map(|b| (b, d)))
// get points of just-outside-top-border
.flat_map(|(b, d)| b.points().map(move |p| (p, d)))
.collect()
}
pub fn path_contains(pth: &Vec<Point>, p: Point) -> bool {
let mut it = pth.iter();
let fst = it.next();
if !fst.is_some() {
return false;
}
let mut last = fst.unwrap();
if *last == p {
return true;
}
while let Some(next) = it.next() {
if TBox::from((*last, *next)).contains(p) {
return true;
}
last = next;
}
false
}
pub fn edges(lines: &Lines, boxes: &Vec<TBox>) -> HashSet<Vec<Point>> {
// ###
// ,---. ##
// #| |,--. find all possible starts for edges between boxes
// '---''--'
// ### ##
boxes
.iter()
.map(|b| border(*b))
.flat_map(|v| v.into_iter())
.filter(|(p, d)| lines.at(*p).map(|c| can_go(c, d.rev())).unwrap_or(false))
.map(|(p, d)| scan_path(lines, p, d))
.filter(|pth| pth.len() > 0)
.fold(HashSet::new(), |mut map, mut pth| {
// checking the forward path then inserting
// the reverse means we don't double-count paths
if !map.contains(&pth) {
pth.reverse();
map.insert(pth);
}
map
})
}
#[cfg(test)]
mod test {
use super::*;
fn lines() -> Lines {
let lines: Vec<Vec<char>> = r#"
,---.,-----------.
| |',-. |
| | | | ,-----'
'---' | | |
| |--'
'-'
"#
.lines()
.map(|l| l.chars().collect())
.collect();
Lines(lines)
}
#[test]
fn test_top_lefts() {
let lines = lines();
assert_eq!(
vec![
(Point { row: 1, col: 1 }, ','),
(Point { row: 1, col: 6 }, ','),
(Point { row: 2, col: 7 }, ','),
(Point { row: 3, col: 12 }, ','),
],
top_lefts(&lines)
);
}
#[test]
fn test_scan_dir() {
let lines = lines();
let tl = Point { row: 1, col: 1 };
let tr = Point { row: 1, col: 5 };
let bl = Point { row: 4, col: 1 };
let br = Point { row: 4, col: 5 };
assert_eq!(Some((tr, '.')), scan_dir(&lines, tl, Direction::Rt),);
assert_eq!(Some((bl, '\'')), scan_dir(&lines, tl, Direction::Dn),);
assert_eq!(Some((br, '\'')), scan_dir(&lines, bl, Direction::Rt),);
assert_eq!(
Some((Point { row: 1, col: 18 }, '.')),
scan_dir(&lines, Point { row: 1, col: 6 }, Direction::Rt),
);
assert_eq!(
Some((Point { row: 2, col: 6 }, '\'')),
scan_dir(&lines, Point { row: 1, col: 6 }, Direction::Dn),
);
assert_eq!(
Some((Point { row: 1, col: 6 }, ',')),
scan_dir(&lines, Point { row: 1, col: 6 }, Direction::Lt),
);
}
#[test]
fn test_boxes() {
let lines = lines();
assert_eq!(
vec![
TBox(Point { row: 1, col: 1 }, Point { row: 4, col: 5 }),
TBox(Point { row: 2, col: 7 }, Point { row: 6, col: 9 }),
],
boxes(&lines),
);
}
#[test]
fn test_scan_path() {
let lines = lines();
let mut pth = vec![
Point { row: 2, col: 6 },
Point { row: 1, col: 6 },
Point { row: 1, col: 18 },
Point { row: 3, col: 18 },
Point { row: 3, col: 12 },
Point { row: 5, col: 12 },
Point { row: 5, col: 10 },
];
assert_eq!(pth, scan_path(&lines, pth[0], Direction::Rt),);
// should work in reverse
pth.reverse();
assert_eq!(pth, scan_path(&lines, pth[0], Direction::Rt),);
// |--' |--'
// ^ ^
// instead of the beginning, start a little aways
pth[0].col += 1;
assert_eq!(pth, scan_path(&lines, pth[0], Direction::Rt),);
}
#[test]
fn test_box_contains() {
let lb = TBox(Point { row: 1, col: 1 }, Point { row: 4, col: 5 });
assert_eq!(true, lb.contains(lb.0) && lb.contains(lb.1));
assert_eq!(false, lb.contains(Point { row: 5, col: 4 }),);
}
#[test]
fn test_border() | {
let b = TBox(Point { row: 1, col: 1 }, Point { row: 3, col: 4 });
use Direction::*;
assert_eq!(
vec![
(Point { row: 0, col: 1 }, Up),
(Point { row: 0, col: 2 }, Up),
(Point { row: 0, col: 3 }, Up),
(Point { row: 0, col: 4 }, Up),
(Point { row: 4, col: 1 }, Dn),
(Point { row: 4, col: 2 }, Dn),
(Point { row: 4, col: 3 }, Dn),
(Point { row: 4, col: 4 }, Dn),
(Point { row: 1, col: 0 }, Lt),
(Point { row: 2, col: 0 }, Lt),
(Point { row: 3, col: 0 }, Lt),
(Point { row: 1, col: 5 }, Rt),
(Point { row: 2, col: 5 }, Rt),
(Point { row: 3, col: 5 }, Rt),
], | identifier_body |
|
parse.rs | ::Formatter) -> std::fmt::Result {
write!(f, "[{:?} {:?}]", self.0, self.1)
}
}
impl TBox {
#[inline]
pub fn contains(&self, p: Point) -> bool {
["hey", "there"].into_iter().flat_map(|s| s.chars());
p.row >= self.0.row && p.row <= self.1.row && p.col >= self.0.col && p.col <= self.1.col
}
#[inline]
pub fn intersects(&self, b: TBox) -> bool {
!(self.1.row < b.0.row
|| self.0.row > b.1.row
|| self.1.col < b.0.col
|| self.0.col > b.1.col)
}
#[inline]
fn points<'p>(self) -> impl Iterator<Item = Point> + 'p {
let col_iter = move |row| {
(self.0.col..=self.1.col)
.into_iter()
.map(move |col| Point { row, col })
};
(self.0.row..=self.1.row).into_iter().flat_map(col_iter)
}
#[inline]
pub fn in_dir(&self, d: Direction) -> Option<TBox> {
self
.0
.in_dir(d)
.and_then(|p0| self.1.in_dir(d).map(|p1| TBox(p0, p1)))
}
}
impl std::ops::Index<Point> for Lines {
type Output = char;
fn index(&self, p: Point) -> &char {
self.0[p.row].index(p.col)
}
}
impl std::ops::IndexMut<Point> for Lines {
fn index_mut(&mut self, p: Point) -> &mut char {
self.0[p.row].index_mut(p.col)
}
}
impl Lines {
fn at(&self, p: Point) -> Option<char> {
if p.row as usize >= self.0.len() {
return None;
}
let line = &self.0[p.row as usize];
if p.col as usize >= line.len() {
return None;
}
Some(line[p.col as usize])
}
fn in_dir(&self, p: Point, d: Direction) -> Option<(Point, char)> {
p.in_dir(d).and_then(|p| self.at(p).map(|c| (p, c)))
}
fn visit(&self, mut pred: impl FnMut(Point, char)) {
for r in 0..self.0.len() {
for c in 0..self.0[r].len() {
pred((r, c).into(), self.0[r][c]);
}
}
}
}
fn top_lefts(lines: &Lines) -> Vec<(Point, char)> {
let mut ret = vec![];
for row in 0..lines.0.len() {
for col in 0..lines.0[row].len() {
let c = lines.0[row][col];
let p = Point { row, col };
if can_go(c, Direction::Dn)
&& can_go(c, Direction::Rt)
&& lines
.in_dir(p, Direction::Rt)
.map(|(_, c)| can_go(c, Direction::Lt))
.unwrap_or(false)
&& lines
.in_dir(p, Direction::Dn)
.map(|(_, c)| can_go(c, Direction::Up))
.unwrap_or(false)
{
ret.push((p, c));
}
}
}
ret
}
fn | (lines: &Lines, mut p: Point, d: Direction) -> Option<(Point, char)> {
while let Some((q, c)) = lines.in_dir(p, d) {
// p
// --* < can't connect
//
if !can_go(c, d.rev()) {
return lines.at(p).map(|c| (p, c));
}
p = q;
// p
// --. < can connect, can't continue
//
if !can_go(c, d) {
return Some((p, c));
}
}
lines.at(p).map(|c| (p, c))
}
struct PathIter<'l> {
start: bool,
lines: &'l Lines,
p: Point,
d: Direction,
}
impl<'l> PathIter<'l> {
fn new(lines: &'l Lines, p: Point, d: Direction) -> PathIter<'l> {
PathIter {
start: true,
lines: lines,
p: p,
d: d,
}
}
}
// * 4
// 1 2 |
// |----' 3
//
// 1. start, returns point, begins path-scan
// 2. edge, while current can send, and next can recv, advance cursor
// 3. turn, return point, find next direction (if you can)
// 4. end, current can't send or next can't recv, return final point (if not already returned)
// 5. exit, same as end, but signal end of iteration
//
//
// * > point and direction
//
// 0. test if point exists
// 1. test if you can go that direction
// 2. if so, scan in that direction (returns last point *after* initial, character)
// 2a. mark last point as path point
// 3. if not, pick a direction you haven't tried, go back to 1.
impl<'l> Iterator for PathIter<'l> {
type Item = Point;
fn next(&mut self) -> Option<Self::Item> {
if self.lines.at(self.p).is_none() {
return None;
} else if self.start {
self.start = false;
return Some(self.p);
}
let mut cant_go = vec![self.d.rev()];
loop {
// println!("PathIter {{ p: {:?}, d: {:?} }}", self.p, self.d);
if let (Some(true), Some(true)) = (
self.lines.at(self.p).map(|c| can_go(c, self.d)),
self
.lines
.in_dir(self.p, self.d)
.map(|(_, c)| can_go(c, self.d.rev())),
) {
if let Some((pnext, c)) = scan_dir(self.lines, self.p, self.d) {
// println!("scan_dir = Some(({:?}, {:?}))", pnext, c);
self.p = pnext;
return Some(pnext);
}
}
cant_go.push(self.d);
if let Some(dnext) = Direction::VALUES
.into_iter()
.filter(|d| !cant_go.contains(d))
.next()
{
self.d = dnext;
continue;
} else {
return None;
}
}
}
}
fn scan_path(lines: &Lines, p: Point, d: Direction) -> Vec<Point> {
if !lines.at(p).map(|c| can_go(c, d)).unwrap_or(false) {
return vec![];
}
let mut ret = vec![];
let mut it = PathIter::new(&lines, p, d);
while let Some(next) = it.next() {
if ret.contains(&next) {
return ret;
}
ret.push(next);
}
ret
}
pub fn boxes(lines: &Lines) -> Vec<TBox> {
top_lefts(lines)
.into_iter()
.filter_map(|tl| {
let tr = scan_dir(lines, tl.0, Direction::Rt)?;
let bl = scan_dir(lines, tl.0, Direction::Dn)?;
let br = scan_dir(lines, bl.0, Direction::Rt)?;
let br2 = scan_dir(lines, tr.0, Direction::Dn)?;
if br2 != br {
return None;
}
Some(TBox(tl.0, br.0))
})
.collect()
}
pub fn border_in_dir(b: TBox, d: Direction) -> TBox {
use Direction::*;
match d {
Up => TBox::from((b.0, Point::from((b.0.row, b.1.col)))),
Dn => TBox::from((Point::from((b.1.row, b.0.col)), b.1)),
Lt => TBox::from((b.0, Point::from((b.1.row, b.0.col)))),
Rt => TBox::from((Point::from((b.0.row, b.1.col)), b.1)),
}
}
fn border(b: TBox) -> Vec<(Point, Direction)> {
Direction::VALUES
.into_iter()
// get eg top border
.map(|d| (border_in_dir(b, d), d))
// push top border up to get just outside top border
.filter_map(|(b, d)| b.in_dir(d).map(|b| (b, d)))
// get points of just-outside-top-border
.flat_map | scan_dir | identifier_name |
parse.rs | ::Formatter) -> std::fmt::Result {
write!(f, "[{:?} {:?}]", self.0, self.1)
}
}
impl TBox {
#[inline]
pub fn contains(&self, p: Point) -> bool {
["hey", "there"].into_iter().flat_map(|s| s.chars());
p.row >= self.0.row && p.row <= self.1.row && p.col >= self.0.col && p.col <= self.1.col
}
#[inline]
pub fn intersects(&self, b: TBox) -> bool {
!(self.1.row < b.0.row
|| self.0.row > b.1.row
|| self.1.col < b.0.col
|| self.0.col > b.1.col)
}
#[inline]
fn points<'p>(self) -> impl Iterator<Item = Point> + 'p {
let col_iter = move |row| {
(self.0.col..=self.1.col)
.into_iter()
.map(move |col| Point { row, col })
};
(self.0.row..=self.1.row).into_iter().flat_map(col_iter)
}
#[inline]
pub fn in_dir(&self, d: Direction) -> Option<TBox> {
self
.0
.in_dir(d)
.and_then(|p0| self.1.in_dir(d).map(|p1| TBox(p0, p1)))
}
}
impl std::ops::Index<Point> for Lines {
type Output = char;
fn index(&self, p: Point) -> &char {
self.0[p.row].index(p.col)
}
}
impl std::ops::IndexMut<Point> for Lines {
fn index_mut(&mut self, p: Point) -> &mut char {
self.0[p.row].index_mut(p.col)
}
}
impl Lines {
fn at(&self, p: Point) -> Option<char> {
if p.row as usize >= self.0.len() {
return None;
}
let line = &self.0[p.row as usize];
if p.col as usize >= line.len() {
return None;
}
Some(line[p.col as usize])
}
fn in_dir(&self, p: Point, d: Direction) -> Option<(Point, char)> {
p.in_dir(d).and_then(|p| self.at(p).map(|c| (p, c)))
}
fn visit(&self, mut pred: impl FnMut(Point, char)) {
for r in 0..self.0.len() {
for c in 0..self.0[r].len() {
pred((r, c).into(), self.0[r][c]);
}
}
}
}
fn top_lefts(lines: &Lines) -> Vec<(Point, char)> {
let mut ret = vec![];
for row in 0..lines.0.len() {
for col in 0..lines.0[row].len() {
let c = lines.0[row][col];
let p = Point { row, col };
if can_go(c, Direction::Dn)
&& can_go(c, Direction::Rt)
&& lines
.in_dir(p, Direction::Rt)
.map(|(_, c)| can_go(c, Direction::Lt))
.unwrap_or(false)
&& lines
.in_dir(p, Direction::Dn)
.map(|(_, c)| can_go(c, Direction::Up))
.unwrap_or(false)
|
}
}
ret
}
fn scan_dir(lines: &Lines, mut p: Point, d: Direction) -> Option<(Point, char)> {
while let Some((q, c)) = lines.in_dir(p, d) {
// p
// --* < can't connect
//
if !can_go(c, d.rev()) {
return lines.at(p).map(|c| (p, c));
}
p = q;
// p
// --. < can connect, can't continue
//
if !can_go(c, d) {
return Some((p, c));
}
}
lines.at(p).map(|c| (p, c))
}
struct PathIter<'l> {
start: bool,
lines: &'l Lines,
p: Point,
d: Direction,
}
impl<'l> PathIter<'l> {
fn new(lines: &'l Lines, p: Point, d: Direction) -> PathIter<'l> {
PathIter {
start: true,
lines: lines,
p: p,
d: d,
}
}
}
// * 4
// 1 2 |
// |----' 3
//
// 1. start, returns point, begins path-scan
// 2. edge, while current can send, and next can recv, advance cursor
// 3. turn, return point, find next direction (if you can)
// 4. end, current can't send or next can't recv, return final point (if not already returned)
// 5. exit, same as end, but signal end of iteration
//
//
// * > point and direction
//
// 0. test if point exists
// 1. test if you can go that direction
// 2. if so, scan in that direction (returns last point *after* initial, character)
// 2a. mark last point as path point
// 3. if not, pick a direction you haven't tried, go back to 1.
impl<'l> Iterator for PathIter<'l> {
type Item = Point;
fn next(&mut self) -> Option<Self::Item> {
if self.lines.at(self.p).is_none() {
return None;
} else if self.start {
self.start = false;
return Some(self.p);
}
let mut cant_go = vec![self.d.rev()];
loop {
// println!("PathIter {{ p: {:?}, d: {:?} }}", self.p, self.d);
if let (Some(true), Some(true)) = (
self.lines.at(self.p).map(|c| can_go(c, self.d)),
self
.lines
.in_dir(self.p, self.d)
.map(|(_, c)| can_go(c, self.d.rev())),
) {
if let Some((pnext, c)) = scan_dir(self.lines, self.p, self.d) {
// println!("scan_dir = Some(({:?}, {:?}))", pnext, c);
self.p = pnext;
return Some(pnext);
}
}
cant_go.push(self.d);
if let Some(dnext) = Direction::VALUES
.into_iter()
.filter(|d| !cant_go.contains(d))
.next()
{
self.d = dnext;
continue;
} else {
return None;
}
}
}
}
fn scan_path(lines: &Lines, p: Point, d: Direction) -> Vec<Point> {
if !lines.at(p).map(|c| can_go(c, d)).unwrap_or(false) {
return vec![];
}
let mut ret = vec![];
let mut it = PathIter::new(&lines, p, d);
while let Some(next) = it.next() {
if ret.contains(&next) {
return ret;
}
ret.push(next);
}
ret
}
pub fn boxes(lines: &Lines) -> Vec<TBox> {
top_lefts(lines)
.into_iter()
.filter_map(|tl| {
let tr = scan_dir(lines, tl.0, Direction::Rt)?;
let bl = scan_dir(lines, tl.0, Direction::Dn)?;
let br = scan_dir(lines, bl.0, Direction::Rt)?;
let br2 = scan_dir(lines, tr.0, Direction::Dn)?;
if br2 != br {
return None;
}
Some(TBox(tl.0, br.0))
})
.collect()
}
pub fn border_in_dir(b: TBox, d: Direction) -> TBox {
use Direction::*;
match d {
Up => TBox::from((b.0, Point::from((b.0.row, b.1.col)))),
Dn => TBox::from((Point::from((b.1.row, b.0.col)), b.1)),
Lt => TBox::from((b.0, Point::from((b.1.row, b.0.col)))),
Rt => TBox::from((Point::from((b.0.row, b.1.col)), b.1)),
}
}
fn border(b: TBox) -> Vec<(Point, Direction)> {
Direction::VALUES
.into_iter()
// get eg top border
.map(|d| (border_in_dir(b, d), d))
// push top border up to get just outside top border
.filter_map(|(b, d)| b.in_dir(d).map(|b| (b, d)))
// get points of just-outside-top-border
.flat_map(| | {
ret.push((p, c));
} | conditional_block |
parse.rs | ::Formatter) -> std::fmt::Result {
write!(f, "[{:?} {:?}]", self.0, self.1)
}
}
impl TBox {
#[inline]
pub fn contains(&self, p: Point) -> bool {
["hey", "there"].into_iter().flat_map(|s| s.chars());
p.row >= self.0.row && p.row <= self.1.row && p.col >= self.0.col && p.col <= self.1.col
}
#[inline]
pub fn intersects(&self, b: TBox) -> bool {
!(self.1.row < b.0.row
|| self.0.row > b.1.row
|| self.1.col < b.0.col
|| self.0.col > b.1.col)
}
#[inline]
fn points<'p>(self) -> impl Iterator<Item = Point> + 'p {
let col_iter = move |row| {
(self.0.col..=self.1.col)
.into_iter()
.map(move |col| Point { row, col })
};
(self.0.row..=self.1.row).into_iter().flat_map(col_iter)
}
#[inline]
pub fn in_dir(&self, d: Direction) -> Option<TBox> {
self
.0
.in_dir(d)
.and_then(|p0| self.1.in_dir(d).map(|p1| TBox(p0, p1)))
}
}
impl std::ops::Index<Point> for Lines {
type Output = char;
fn index(&self, p: Point) -> &char {
self.0[p.row].index(p.col)
}
}
impl std::ops::IndexMut<Point> for Lines {
fn index_mut(&mut self, p: Point) -> &mut char {
self.0[p.row].index_mut(p.col)
}
}
impl Lines {
fn at(&self, p: Point) -> Option<char> {
if p.row as usize >= self.0.len() {
return None;
}
let line = &self.0[p.row as usize];
if p.col as usize >= line.len() {
return None;
}
Some(line[p.col as usize])
}
fn in_dir(&self, p: Point, d: Direction) -> Option<(Point, char)> {
p.in_dir(d).and_then(|p| self.at(p).map(|c| (p, c)))
}
fn visit(&self, mut pred: impl FnMut(Point, char)) {
for r in 0..self.0.len() {
for c in 0..self.0[r].len() {
pred((r, c).into(), self.0[r][c]);
}
}
}
}
fn top_lefts(lines: &Lines) -> Vec<(Point, char)> {
let mut ret = vec![];
for row in 0..lines.0.len() {
for col in 0..lines.0[row].len() {
let c = lines.0[row][col];
let p = Point { row, col };
if can_go(c, Direction::Dn)
&& can_go(c, Direction::Rt)
&& lines
.in_dir(p, Direction::Rt)
.map(|(_, c)| can_go(c, Direction::Lt))
.unwrap_or(false)
&& lines
.in_dir(p, Direction::Dn)
.map(|(_, c)| can_go(c, Direction::Up))
.unwrap_or(false)
{
ret.push((p, c));
}
}
}
ret
}
fn scan_dir(lines: &Lines, mut p: Point, d: Direction) -> Option<(Point, char)> {
while let Some((q, c)) = lines.in_dir(p, d) {
// p
// --* < can't connect
//
if !can_go(c, d.rev()) {
return lines.at(p).map(|c| (p, c));
}
p = q;
// p
// --. < can connect, can't continue
//
if !can_go(c, d) {
return Some((p, c));
}
}
lines.at(p).map(|c| (p, c))
}
struct PathIter<'l> {
start: bool,
lines: &'l Lines,
p: Point,
d: Direction,
}
impl<'l> PathIter<'l> {
fn new(lines: &'l Lines, p: Point, d: Direction) -> PathIter<'l> {
PathIter {
start: true,
lines: lines,
p: p,
d: d,
}
}
}
// * 4
// 1 2 |
// |----' 3
//
// 1. start, returns point, begins path-scan
// 2. edge, while current can send, and next can recv, advance cursor
// 3. turn, return point, find next direction (if you can)
// 4. end, current can't send or next can't recv, return final point (if not already returned)
// 5. exit, same as end, but signal end of iteration
//
//
// * > point and direction
//
// 0. test if point exists
// 1. test if you can go that direction
// 2. if so, scan in that direction (returns last point *after* initial, character)
// 2a. mark last point as path point
// 3. if not, pick a direction you haven't tried, go back to 1.
impl<'l> Iterator for PathIter<'l> {
type Item = Point;
fn next(&mut self) -> Option<Self::Item> {
if self.lines.at(self.p).is_none() {
return None;
} else if self.start {
self.start = false;
return Some(self.p);
}
let mut cant_go = vec![self.d.rev()];
loop {
// println!("PathIter {{ p: {:?}, d: {:?} }}", self.p, self.d);
if let (Some(true), Some(true)) = (
self.lines.at(self.p).map(|c| can_go(c, self.d)),
self
.lines
.in_dir(self.p, self.d)
.map(|(_, c)| can_go(c, self.d.rev())),
) {
if let Some((pnext, c)) = scan_dir(self.lines, self.p, self.d) {
// println!("scan_dir = Some(({:?}, {:?}))", pnext, c);
self.p = pnext;
return Some(pnext);
}
}
cant_go.push(self.d);
if let Some(dnext) = Direction::VALUES
.into_iter()
.filter(|d| !cant_go.contains(d))
.next()
{
self.d = dnext;
continue;
} else {
return None; | }
fn scan_path(lines: &Lines, p: Point, d: Direction) -> Vec<Point> {
if !lines.at(p).map(|c| can_go(c, d)).unwrap_or(false) {
return vec![];
}
let mut ret = vec![];
let mut it = PathIter::new(&lines, p, d);
while let Some(next) = it.next() {
if ret.contains(&next) {
return ret;
}
ret.push(next);
}
ret
}
pub fn boxes(lines: &Lines) -> Vec<TBox> {
top_lefts(lines)
.into_iter()
.filter_map(|tl| {
let tr = scan_dir(lines, tl.0, Direction::Rt)?;
let bl = scan_dir(lines, tl.0, Direction::Dn)?;
let br = scan_dir(lines, bl.0, Direction::Rt)?;
let br2 = scan_dir(lines, tr.0, Direction::Dn)?;
if br2 != br {
return None;
}
Some(TBox(tl.0, br.0))
})
.collect()
}
pub fn border_in_dir(b: TBox, d: Direction) -> TBox {
use Direction::*;
match d {
Up => TBox::from((b.0, Point::from((b.0.row, b.1.col)))),
Dn => TBox::from((Point::from((b.1.row, b.0.col)), b.1)),
Lt => TBox::from((b.0, Point::from((b.1.row, b.0.col)))),
Rt => TBox::from((Point::from((b.0.row, b.1.col)), b.1)),
}
}
fn border(b: TBox) -> Vec<(Point, Direction)> {
Direction::VALUES
.into_iter()
// get eg top border
.map(|d| (border_in_dir(b, d), d))
// push top border up to get just outside top border
.filter_map(|(b, d)| b.in_dir(d).map(|b| (b, d)))
// get points of just-outside-top-border
.flat_map(| | }
}
} | random_line_split |
client.go | the server to send a greeting message, and then
// requests server capabilities if they weren't included in the greeting. An
// error is returned if either operation fails or does not complete before the
// timeout, which must be positive to have any effect. If an error is returned,
// it is the caller's responsibility to close the connection.
func NewClient(conn net.Conn, host string, timeout time.Duration) (c *Client, err error) {
log := newDebugLog(DefaultLogger, DefaultLogMask)
cch := make(chan chan<- *response, 1)
c = &Client{
Caps: make(map[string]bool),
CommandConfig: defaultCommands(),
host: host,
state: unknown,
tag: *newTagGen(0),
cmds: make(map[string]*Command),
t: newTransport(conn, log),
debugLog: log,
}
c.r = newReader(c.t, MemoryReader{}, string(c.tag.id))
c.Logf(LogConn, "Connected to %v (Tag=%s)", conn.RemoteAddr(), c.tag.id)
if err = c.greeting(timeout); err != nil {
c.Logln(LogConn, "Greeting error:", err)
return nil, err
}
c.cch = cch
go c.receiver(cch)
runtime.Gosched()
return
}
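// Example (illustrative; host and address are placeholders):
//
//	conn, err := net.Dial("tcp", "mail.example.com:143")
//	if err != nil {
//		log.Fatal(err)
//	}
//	c, err := NewClient(conn, "mail.example.com", 30*time.Second)
//	if err != nil {
//		conn.Close() // on error the caller must close the connection
//		log.Fatal(err)
//	}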
// State returns the current connection state (Login, Auth, Selected, Logout, or
// Closed). See RFC 3501 page 15 for a state diagram. The caller must continue
// receiving responses until this method returns Closed (same as c.Recv
// returning io.EOF). Failure to do so may result in memory leaks.
func (c *Client) State() ConnState {
return c.state
}
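// Illustrative receive loop satisfying the requirement above (a sketch):
//
//	for c.State() != Closed {
//		if err := c.Recv(-1); err == io.EOF {
//			break
//		}
//	}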
// Send issues a new command, returning as soon as the last line is flushed from
// the send buffer. This may involve waiting for continuation requests if
// non-synchronizing literals (RFC 2088) are not supported by the server.
//
// This is the raw command interface that does not encode or perform any
// validation of the supplied fields. It should only be used for implementing
// new commands that do not change the connection state. For commands already
// supported by this package, use the provided wrapper methods instead.
func (c *Client) Send(name string, fields ...Field) (cmd *Command, err error) {
if cmd = newCommand(c, name); cmd == nil {
return nil, NotAvailableError(name)
} else if cmd.config.States&c.state == 0 {
return nil, ErrNotAllowed
} else if len(c.tags) > 0 {
other := c.cmds[c.tags[0]]
if cmd.config.Exclusive || other.config.Exclusive {
return nil, ErrExclusive
}
}
// Build command
raw, err := cmd.build(c.tag.Next(), fields)
if err != nil {
return nil, err
}
// Write first line and update command state
c.Logln(LogCmd, ">>>", cmd)
if err = c.t.WriteLine(raw.ReadLine()); err != nil {
return nil, err
}
c.tags = append(c.tags, cmd.tag)
c.cmds[cmd.tag] = cmd
// Write remaining parts, flushing the transport buffer as needed
var rsp *Response
for i := 0; i < len(raw.literals) && err == nil; i++ {
if rsp, err = c.checkContinue(cmd, !raw.nonsync); err == nil {
if rsp == nil || rsp.Type == Continue {
if _, err = raw.literals[i].WriteTo(c.t); err == nil {
err = c.t.WriteLine(raw.ReadLine())
}
} else {
err = ResponseError{rsp, "unexpected command completion"}
}
}
}
// Flush buffer after the last line
if err == nil {
if err = c.t.Flush(); err == nil {
return
}
}
c.done(cmd, abort)
return nil, err
}
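// Illustrative raw usage (a sketch; prefer the wrapper methods):
//
//	cmd, err := c.Send("NOOP")
//	for err == nil && cmd.InProgress() {
//		err = c.Recv(-1) // pump responses until the command completes
//	}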
// Recv receives at most one response from the server, updates the client state,
// and delivers the response to its final destination (c.Data or one of the
// commands in progress). io.EOF is returned once all responses have been
// received and the connection is closed.
//
// If the timeout is negative, Recv blocks indefinitely until a response is
// received or an error is encountered. If the timeout is zero, Recv polls for
// buffered responses, returning ErrTimeout immediately if none are available.
// Otherwise, Recv blocks until a response is received or the timeout expires.
func (c *Client) Recv(timeout time.Duration) error {
rsp, err := c.recv(timeout)
if err == nil && !c.deliver(rsp) {
if rsp.Type == Continue {
err = ResponseError{rsp, "unexpected continuation request"}
} else {
err = ResponseError{rsp, "undeliverable response"}
}
}
return err
}
// SetLiteralReader installs a custom LiteralReader implementation into the
// response receiver pipeline. It returns the previously installed LiteralReader
// instance.
func (c *Client) SetLiteralReader(lr LiteralReader) LiteralReader {
prev := c.r.LiteralReader
if lr != nil {
c.r.LiteralReader = lr
}
return prev
}
// Quote attempts to represent v, which must be string, []byte, or fmt.Stringer,
// as a quoted string for use with Client.Send. A literal string representation
// is used if v cannot be quoted.
func (c *Client) Quote(v interface{}) Field {
var b []byte
var cp bool
switch s := v.(type) {
case string:
b = []byte(s)
case []byte:
b, cp = s, true
case fmt.Stringer:
b = []byte(s.String())
default:
return nil
}
if q := QuoteBytes(b, false); q != nil {
return string(q)
} else if cp {
b = append([]byte(nil), b...)
}
return NewLiteral(b)
}
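// Illustrative behavior (QuoteBytes decides what is quotable):
//
//	f := c.Quote("box")              // representable as a quoted string
//	g := c.Quote([]byte{0x00, 0xFF}) // unquotable, becomes a copied Literal
//	_, _ = f, g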
// next returns the next server response obtained directly from the reader.
func (c *Client) next() (rsp *Response, err error) {
raw, err := c.r.Next()
if err == nil {
rsp, err = raw.Parse()
}
return
}
// greeting receives the server greeting, sets initial connection state, and
// requests server capabilities if they weren't included in the greeting.
func (c *Client) greeting(timeout time.Duration) (err error) {
if timeout > 0 {
// If c.recv fails, c.t.conn may be nil by the time the deferred
// function executes; keep a reference to avoid a panic.
conn := c.t.conn
conn.SetDeadline(time.Now().Add(timeout))
defer func() {
conn.SetDeadline(time.Time{})
if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
err = ErrTimeout
}
}()
}
// Wait for server greeting
rsp, err := c.recv(block)
if err != nil {
return
} else if rsp.Type != Status || !c.deliver(rsp) {
return ResponseError{rsp, "invalid server greeting"}
}
// Set initial connection state
switch rsp.Status {
case OK:
c.setState(Login)
case PREAUTH:
c.setState(Auth)
case BYE:
c.setState(Logout)
fallthrough
default:
return ResponseError{rsp, "invalid greeting status"}
}
c.Logln(LogConn, "Server greeting:", rsp.Info)
// Request capabilities if not included in the greeting
if len(c.Caps) == 0 {
_, err = c.Capability()
}
return
}
// receiver runs in a separate goroutine, reading a single server response for
// each request sent on the cch channel.
func (c *Client) receiver(cch <-chan chan<- *response) {
recv := func() (r *response) {
defer func() {
if err := recover(); err != nil {
r = &response{nil, fmt.Errorf("imap: receiver panic: %v", err)}
c.Logf(LogGo, "Receiver panic (Tag=%s): %v\n%s", c.tag.id, err, debug.Stack())
}
}()
rsp, err := c.next()
return &response{rsp, err}
}
c.Logf(LogGo, "Receiver started (Tag=%s)", c.tag.id)
defer c.Logf(LogGo, "Receiver finished (Tag=%s)", c.tag.id)
for rch := range cch {
rch <- recv()
}
}
// recv returns the next server response, updating the client state beforehand.
func (c *Client) recv(timeout time.Duration) (rsp *Response, err error) {
if c.state == Closed {
return nil, io.EOF
} else if c.rch == nil && (timeout < 0 || c.cch == nil) {
rsp, err = c.next()
} else {
if c.rch == nil {
rch := make(chan *response, 1)
c.cch <- rch
c.rch = rch
runtime.Gosched()
}
var r *response
if timeout < 0 {
r = <-c.rch
} else {
select {
case r = <-c.rch:
default:
if timeout == 0 {
return nil, ErrTimeout
}
// client.go

// If c.recv fails, c.t.conn may be nil by the time the deferred
// function executes; keep a reference to avoid a panic.
conn := c.t.conn
conn.SetDeadline(time.Now().Add(timeout))
defer func() {
conn.SetDeadline(time.Time{})
if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
err = ErrTimeout
}
}()
}
// Wait for server greeting
rsp, err := c.recv(block)
if err != nil {
return
} else if rsp.Type != Status || !c.deliver(rsp) {
return ResponseError{rsp, "invalid server greeting"}
}
// Set initial connection state
switch rsp.Status {
case OK:
c.setState(Login)
case PREAUTH:
c.setState(Auth)
case BYE:
c.setState(Logout)
fallthrough
default:
return ResponseError{rsp, "invalid greeting status"}
}
c.Logln(LogConn, "Server greeting:", rsp.Info)
// Request capabilities if not included in the greeting
if len(c.Caps) == 0 {
_, err = c.Capability()
}
return
}
// receiver runs in a separate goroutine, reading a single server response for
// each request sent on the cch channel.
func (c *Client) receiver(cch <-chan chan<- *response) {
recv := func() (r *response) {
defer func() {
if err := recover(); err != nil {
r = &response{nil, fmt.Errorf("imap: receiver panic: %v", err)}
c.Logf(LogGo, "Receiver panic (Tag=%s): %v\n%s", c.tag.id, err, debug.Stack())
}
}()
rsp, err := c.next()
return &response{rsp, err}
}
c.Logf(LogGo, "Receiver started (Tag=%s)", c.tag.id)
defer c.Logf(LogGo, "Receiver finished (Tag=%s)", c.tag.id)
for rch := range cch {
rch <- recv()
}
}
// recv returns the next server response, updating the client state beforehand.
func (c *Client) recv(timeout time.Duration) (rsp *Response, err error) {
if c.state == Closed {
return nil, io.EOF
} else if c.rch == nil && (timeout < 0 || c.cch == nil) {
rsp, err = c.next()
} else {
if c.rch == nil {
rch := make(chan *response, 1)
c.cch <- rch
c.rch = rch
runtime.Gosched()
}
var r *response
if timeout < 0 {
r = <-c.rch
} else {
select {
case r = <-c.rch:
default:
if timeout == 0 {
return nil, ErrTimeout
}
select {
case r = <-c.rch:
case <-time.After(timeout):
return nil, ErrTimeout
}
}
}
c.rch = nil
rsp, err = r.rsp, r.err
}
if err == nil {
c.update(rsp)
} else if rsp == nil {
defer c.setState(Closed)
if err != io.EOF {
c.close("protocol error")
} else if err = c.close("end of stream"); err == nil {
err = io.EOF
}
}
return
}
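// Illustrative timeout semantics, mirroring the Recv documentation:
//
//	rsp, err := c.recv(0)              // poll; ErrTimeout if nothing is buffered
//	rsp, err = c.recv(5 * time.Second) // wait up to five seconds
//	rsp, err = c.recv(block)           // negative duration: block indefinitely
//	_, _ = rsp, err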
// update examines server responses and updates client state as needed.
func (c *Client) update(rsp *Response) {
if rsp.Label == "CAPABILITY" {
c.setCaps(rsp.Fields[1:])
return
}
switch rsp.Type {
case Data:
if c.Mailbox == nil {
return
}
switch rsp.Label {
case "FLAGS":
c.Mailbox.Flags.Replace(rsp.Fields[1])
case "EXISTS":
c.Mailbox.Messages = rsp.Value()
case "RECENT":
c.Mailbox.Recent = rsp.Value()
case "EXPUNGE":
c.Mailbox.Messages--
if c.Mailbox.Recent > c.Mailbox.Messages {
c.Mailbox.Recent = c.Mailbox.Messages
}
if c.Mailbox.Unseen == rsp.Value() {
c.Mailbox.Unseen = 0
}
}
case Status:
switch rsp.Status {
case BAD:
// RFC 3501 is a bit vague on how the client is expected to react to
// an untagged BAD response. It's probably best to close this
// connection and open a new one; leave this up to the caller. For
// now, abort all active commands to avoid waiting for completion
// responses that may never come.
c.Logln(LogCmd, "ABORT!", rsp.Info)
c.deliver(abort)
case BYE:
c.Logln(LogConn, "Logout reason:", rsp.Info)
c.setState(Logout)
}
fallthrough
case Done:
if rsp.Label == "ALERT" {
c.Logln(LogConn, "ALERT!", rsp.Info)
return
} else if c.Mailbox == nil {
return
}
switch selected := (c.state == Selected); rsp.Label {
case "PERMANENTFLAGS":
c.Mailbox.PermFlags.Replace(rsp.Fields[1])
case "READ-ONLY":
if selected && !c.Mailbox.ReadOnly {
c.Logln(LogState, "Mailbox access change: RW -> RO")
}
c.Mailbox.ReadOnly = true
case "READ-WRITE":
if selected && c.Mailbox.ReadOnly {
c.Logln(LogState, "Mailbox access change: RO -> RW")
}
c.Mailbox.ReadOnly = false
case "UIDNEXT":
c.Mailbox.UIDNext = rsp.Value()
case "UIDVALIDITY":
v := rsp.Value()
if u := c.Mailbox.UIDValidity; selected && u != v {
c.Logf(LogState, "Mailbox UIDVALIDITY change: %d -> %d", u, v)
}
c.Mailbox.UIDValidity = v
case "UNSEEN":
c.Mailbox.Unseen = rsp.Value()
case "UIDNOTSTICKY":
c.Mailbox.UIDNotSticky = true
}
}
}
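// Illustrative effect of the cases above: an untagged "* 5 EXPUNGE" decrements
// Mailbox.Messages and clamps Mailbox.Recent, while an "UNSEEN" response code
// sets Mailbox.Unseen to the reported message number.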
// deliver saves the response to its final destination. It returns false for
// continuation requests and unknown command completions. The abort response is
// delivered to all commands in progress.
func (c *Client) deliver(rsp *Response) bool {
if rsp.Type&(Data|Status) != 0 {
for _, tag := range c.tags {
cmd := c.cmds[tag]
if filter := cmd.config.Filter; filter != nil && filter(cmd, rsp) {
cmd.Data = append(cmd.Data, rsp)
return true
}
}
c.Data = append(c.Data, rsp)
return true
} else if rsp.Type == Done {
if cmd := c.cmds[rsp.Tag]; cmd != nil {
c.done(cmd, rsp)
return true
}
c.Logln(LogCmd, "<<<", rsp.Tag, "(Unknown)")
} else if rsp == abort {
for _, tag := range c.tags {
c.done(c.cmds[tag], abort)
}
return true
}
return false
}
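// Illustrative routing: untagged data goes to the oldest in-progress command
// whose config.Filter claims it; anything unclaimed is appended to c.Data for
// the caller to inspect after Recv returns.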
// done completes command execution by setting cmd.result to rsp and updating
// the client's command state.
func (c *Client) done(cmd *Command, rsp *Response) {
if cmd.result != nil {
return
}
cmd.result = rsp
if tag := cmd.tag; c.cmds[tag] != nil {
delete(c.cmds, tag)
if c.tags[0] == tag {
c.tags = c.tags[1:]
} else if n := len(c.tags); c.tags[n-1] == tag {
c.tags = c.tags[:n-1]
} else {
for i, v := range c.tags {
if v == tag {
c.tags = append(c.tags[:i], c.tags[i+1:]...)
break
}
}
}
}
if rsp == abort {
c.Logln(LogCmd, "<<<", cmd.tag, "(Abort)")
} else {
c.Logln(LogCmd, "<<<", rsp)
}
}
// checkContinue returns the next continuation request or completion result of
// cmd. In synchronous mode (sync == true), it flushes the buffer and blocks
// until a continuation request or cmd completion response is received. In
// asynchronous mode, it polls for cmd completion, returning as soon as all
// buffered responses are processed. A continuation request is not expected in
// asynchronous mode and results in an error.
func (c *Client) checkContinue(cmd *Command, sync bool) (rsp *Response, err error) {
mode := poll
if sync {
if err = c.t.Flush(); err != nil {
return
}
mode = block
}
for cmd.InProgress() {
if rsp, err = c.recv(mode); err != nil {
if err == ErrTimeout {
err = nil
}
return
} else if !c.deliver(rsp) {
if rsp.Type == Continue {
if !sync {
err = ResponseError{rsp, "unexpected continuation request"}
}
} else {
err = ResponseError{rsp, "undeliverable response"}
}
return
}
}
return cmd.Result(0)
}
// client.go

c.Logf(LogGo, "Receiver started (Tag=%s)", c.tag.id)
defer c.Logf(LogGo, "Receiver finished (Tag=%s)", c.tag.id)
for rch := range cch {
rch <- recv()
}
}
// recv returns the next server response, updating the client state beforehand.
func (c *Client) recv(timeout time.Duration) (rsp *Response, err error) {
if c.state == Closed {
return nil, io.EOF
} else if c.rch == nil && (timeout < 0 || c.cch == nil) {
rsp, err = c.next()
} else {
if c.rch == nil {
rch := make(chan *response, 1)
c.cch <- rch
c.rch = rch
runtime.Gosched()
}
var r *response
if timeout < 0 {
r = <-c.rch
} else {
select {
case r = <-c.rch:
default:
if timeout == 0 {
return nil, ErrTimeout
}
select {
case r = <-c.rch:
case <-time.After(timeout):
return nil, ErrTimeout
}
}
}
c.rch = nil
rsp, err = r.rsp, r.err
}
if err == nil {
c.update(rsp)
} else if rsp == nil {
defer c.setState(Closed)
if err != io.EOF {
c.close("protocol error")
} else if err = c.close("end of stream"); err == nil {
err = io.EOF
}
}
return
}
// update examines server responses and updates client state as needed.
func (c *Client) update(rsp *Response) {
if rsp.Label == "CAPABILITY" {
c.setCaps(rsp.Fields[1:])
return
}
switch rsp.Type {
case Data:
if c.Mailbox == nil {
return
}
switch rsp.Label {
case "FLAGS":
c.Mailbox.Flags.Replace(rsp.Fields[1])
case "EXISTS":
c.Mailbox.Messages = rsp.Value()
case "RECENT":
c.Mailbox.Recent = rsp.Value()
case "EXPUNGE":
c.Mailbox.Messages--
if c.Mailbox.Recent > c.Mailbox.Messages {
c.Mailbox.Recent = c.Mailbox.Messages
}
if c.Mailbox.Unseen == rsp.Value() {
c.Mailbox.Unseen = 0
}
}
case Status:
switch rsp.Status {
case BAD:
// RFC 3501 is a bit vague on how the client is expected to react to
// an untagged BAD response. It's probably best to close this
// connection and open a new one; leave this up to the caller. For
// now, abort all active commands to avoid waiting for completion
// responses that may never come.
c.Logln(LogCmd, "ABORT!", rsp.Info)
c.deliver(abort)
case BYE:
c.Logln(LogConn, "Logout reason:", rsp.Info)
c.setState(Logout)
}
fallthrough
case Done:
if rsp.Label == "ALERT" {
c.Logln(LogConn, "ALERT!", rsp.Info)
return
} else if c.Mailbox == nil {
return
}
switch selected := (c.state == Selected); rsp.Label {
case "PERMANENTFLAGS":
c.Mailbox.PermFlags.Replace(rsp.Fields[1])
case "READ-ONLY":
if selected && !c.Mailbox.ReadOnly {
c.Logln(LogState, "Mailbox access change: RW -> RO")
}
c.Mailbox.ReadOnly = true
case "READ-WRITE":
if selected && c.Mailbox.ReadOnly {
c.Logln(LogState, "Mailbox access change: RO -> RW")
}
c.Mailbox.ReadOnly = false
case "UIDNEXT":
c.Mailbox.UIDNext = rsp.Value()
case "UIDVALIDITY":
v := rsp.Value()
if u := c.Mailbox.UIDValidity; selected && u != v {
c.Logf(LogState, "Mailbox UIDVALIDITY change: %d -> %d", u, v)
}
c.Mailbox.UIDValidity = v
case "UNSEEN":
c.Mailbox.Unseen = rsp.Value()
case "UIDNOTSTICKY":
c.Mailbox.UIDNotSticky = true
}
}
}
// deliver saves the response to its final destination. It returns false for
// continuation requests and unknown command completions. The abort response is
// delivered to all commands in progress.
func (c *Client) deliver(rsp *Response) bool {
if rsp.Type&(Data|Status) != 0 {
for _, tag := range c.tags {
cmd := c.cmds[tag]
if filter := cmd.config.Filter; filter != nil && filter(cmd, rsp) {
cmd.Data = append(cmd.Data, rsp)
return true
}
}
c.Data = append(c.Data, rsp)
return true
} else if rsp.Type == Done {
if cmd := c.cmds[rsp.Tag]; cmd != nil {
c.done(cmd, rsp)
return true
}
c.Logln(LogCmd, "<<<", rsp.Tag, "(Unknown)")
} else if rsp == abort {
for _, tag := range c.tags {
c.done(c.cmds[tag], abort)
}
return true
}
return false
}
// done completes command execution by setting cmd.result to rsp and updating
// the client's command state.
func (c *Client) done(cmd *Command, rsp *Response) {
if cmd.result != nil {
return
}
cmd.result = rsp
if tag := cmd.tag; c.cmds[tag] != nil {
delete(c.cmds, tag)
if c.tags[0] == tag {
c.tags = c.tags[1:]
} else if n := len(c.tags); c.tags[n-1] == tag {
c.tags = c.tags[:n-1]
} else {
for i, v := range c.tags {
if v == tag {
c.tags = append(c.tags[:i], c.tags[i+1:]...)
break
}
}
}
}
if rsp == abort {
c.Logln(LogCmd, "<<<", cmd.tag, "(Abort)")
} else {
c.Logln(LogCmd, "<<<", rsp)
}
}
// checkContinue returns the next continuation request or completion result of
// cmd. In synchronous mode (sync == true), it flushes the buffer and blocks
// until a continuation request or cmd completion response is received. In
// asynchronous mode, it polls for cmd completion, returning as soon as all
// buffered responses are processed. A continuation request is not expected in
// asynchronous mode and results in an error.
func (c *Client) checkContinue(cmd *Command, sync bool) (rsp *Response, err error) {
mode := poll
if sync {
if err = c.t.Flush(); err != nil {
return
}
mode = block
}
for cmd.InProgress() {
if rsp, err = c.recv(mode); err != nil {
if err == ErrTimeout {
err = nil
}
return
} else if !c.deliver(rsp) {
if rsp.Type == Continue {
if !sync {
err = ResponseError{rsp, "unexpected continuation request"}
}
} else {
err = ResponseError{rsp, "undeliverable response"}
}
return
}
}
return cmd.Result(0)
}
// setState changes connection state and performs the associated client updates.
// If the new state is Selected, it is assumed that c.Mailbox is already set.
func (c *Client) setState(s ConnState) {
prev := c.state
if prev == s || prev == Closed {
return
}
c.state = s
if s != Selected {
c.Logf(LogState, "State change: %v -> %v", prev, s)
c.Mailbox = nil
if s == Closed {
if c.cch != nil {
close(c.cch)
runtime.Gosched()
}
c.setCaps(nil)
c.deliver(abort)
}
} else if c.debugLog.mask&LogState != 0 {
mb, rw := c.Mailbox.Name, "[RW]"
if c.Mailbox.ReadOnly {
rw = "[RO]"
}
c.Logf(LogState, "State change: %v -> %v (%+q %s)", prev, s, mb, rw)
}
}
// setCaps updates the server capability set.
func (c *Client) setCaps(caps []Field) {
for v := range c.Caps {
delete(c.Caps, v)
}
for _, f := range caps {
if v := toUpper(AsAtom(f)); v != "" {
c.Caps[v] = true
} else {
c.Logln(LogState, "Invalid capability:", f)
}
}
if c.debugLog.mask&LogState != 0 {
caps := strings.Join(c.getCaps(""), " ")
if caps == "" {
caps = "(none)"
}
c.Logln(LogState, "Capabilities:", caps)
}
}
// client.go

c.rch = nil
rsp, err = r.rsp, r.err
}
if err == nil {
c.update(rsp)
} else if rsp == nil {
defer c.setState(Closed)
if err != io.EOF {
c.close("protocol error")
} else if err = c.close("end of stream"); err == nil {
err = io.EOF
}
}
return
}
// update examines server responses and updates client state as needed.
func (c *Client) update(rsp *Response) {
if rsp.Label == "CAPABILITY" {
c.setCaps(rsp.Fields[1:])
return
}
switch rsp.Type {
case Data:
if c.Mailbox == nil {
return
}
switch rsp.Label {
case "FLAGS":
c.Mailbox.Flags.Replace(rsp.Fields[1])
case "EXISTS":
c.Mailbox.Messages = rsp.Value()
case "RECENT":
c.Mailbox.Recent = rsp.Value()
case "EXPUNGE":
c.Mailbox.Messages--
if c.Mailbox.Recent > c.Mailbox.Messages {
c.Mailbox.Recent = c.Mailbox.Messages
}
if c.Mailbox.Unseen == rsp.Value() {
c.Mailbox.Unseen = 0
}
}
case Status:
switch rsp.Status {
case BAD:
// RFC 3501 is a bit vague on how the client is expected to react to
// an untagged BAD response. It's probably best to close this
// connection and open a new one; leave this up to the caller. For
// now, abort all active commands to avoid waiting for completion
// responses that may never come.
c.Logln(LogCmd, "ABORT!", rsp.Info)
c.deliver(abort)
case BYE:
c.Logln(LogConn, "Logout reason:", rsp.Info)
c.setState(Logout)
}
fallthrough
case Done:
if rsp.Label == "ALERT" {
c.Logln(LogConn, "ALERT!", rsp.Info)
return
} else if c.Mailbox == nil {
return
}
switch selected := (c.state == Selected); rsp.Label {
case "PERMANENTFLAGS":
c.Mailbox.PermFlags.Replace(rsp.Fields[1])
case "READ-ONLY":
if selected && !c.Mailbox.ReadOnly {
c.Logln(LogState, "Mailbox access change: RW -> RO")
}
c.Mailbox.ReadOnly = true
case "READ-WRITE":
if selected && c.Mailbox.ReadOnly {
c.Logln(LogState, "Mailbox access change: RO -> RW")
}
c.Mailbox.ReadOnly = false
case "UIDNEXT":
c.Mailbox.UIDNext = rsp.Value()
case "UIDVALIDITY":
v := rsp.Value()
if u := c.Mailbox.UIDValidity; selected && u != v {
c.Logf(LogState, "Mailbox UIDVALIDITY change: %d -> %d", u, v)
}
c.Mailbox.UIDValidity = v
case "UNSEEN":
c.Mailbox.Unseen = rsp.Value()
case "UIDNOTSTICKY":
c.Mailbox.UIDNotSticky = true
}
}
}
// deliver saves the response to its final destination. It returns false for
// continuation requests and unknown command completions. The abort response is
// delivered to all commands in progress.
func (c *Client) deliver(rsp *Response) bool {
if rsp.Type&(Data|Status) != 0 {
for _, tag := range c.tags {
cmd := c.cmds[tag]
if filter := cmd.config.Filter; filter != nil && filter(cmd, rsp) {
cmd.Data = append(cmd.Data, rsp)
return true
}
}
c.Data = append(c.Data, rsp)
return true
} else if rsp.Type == Done {
if cmd := c.cmds[rsp.Tag]; cmd != nil {
c.done(cmd, rsp)
return true
}
c.Logln(LogCmd, "<<<", rsp.Tag, "(Unknown)")
} else if rsp == abort {
for _, tag := range c.tags {
c.done(c.cmds[tag], abort)
}
return true
}
return false
}
// done completes command execution by setting cmd.result to rsp and updating
// the client's command state.
func (c *Client) done(cmd *Command, rsp *Response) {
if cmd.result != nil {
return
}
cmd.result = rsp
if tag := cmd.tag; c.cmds[tag] != nil {
delete(c.cmds, tag)
if c.tags[0] == tag {
c.tags = c.tags[1:]
} else if n := len(c.tags); c.tags[n-1] == tag {
c.tags = c.tags[:n-1]
} else {
for i, v := range c.tags {
if v == tag {
c.tags = append(c.tags[:i], c.tags[i+1:]...)
break
}
}
}
}
if rsp == abort {
c.Logln(LogCmd, "<<<", cmd.tag, "(Abort)")
} else {
c.Logln(LogCmd, "<<<", rsp)
}
}
// checkContinue returns the next continuation request or completion result of
// cmd. In synchronous mode (sync == true), it flushes the buffer and blocks
// until a continuation request or cmd completion response is received. In
// asynchronous mode, it polls for cmd completion, returning as soon as all
// buffered responses are processed. A continuation request is not expected in
// asynchronous mode and results in an error.
func (c *Client) checkContinue(cmd *Command, sync bool) (rsp *Response, err error) {
mode := poll
if sync {
if err = c.t.Flush(); err != nil {
return
}
mode = block
}
for cmd.InProgress() {
if rsp, err = c.recv(mode); err != nil {
if err == ErrTimeout {
err = nil
}
return
} else if !c.deliver(rsp) {
if rsp.Type == Continue {
if !sync {
err = ResponseError{rsp, "unexpected continuation request"}
}
} else {
err = ResponseError{rsp, "undeliverable response"}
}
return
}
}
return cmd.Result(0)
}
// setState changes connection state and performs the associated client updates.
// If the new state is Selected, it is assumed that c.Mailbox is already set.
func (c *Client) setState(s ConnState) {
prev := c.state
if prev == s || prev == Closed {
return
}
c.state = s
if s != Selected {
c.Logf(LogState, "State change: %v -> %v", prev, s)
c.Mailbox = nil
if s == Closed {
if c.cch != nil {
close(c.cch)
runtime.Gosched()
}
c.setCaps(nil)
c.deliver(abort)
}
} else if c.debugLog.mask&LogState != 0 {
mb, rw := c.Mailbox.Name, "[RW]"
if c.Mailbox.ReadOnly {
rw = "[RO]"
}
c.Logf(LogState, "State change: %v -> %v (%+q %s)", prev, s, mb, rw)
}
}
// setCaps updates the server capability set.
func (c *Client) setCaps(caps []Field) {
for v := range c.Caps {
delete(c.Caps, v)
}
for _, f := range caps {
if v := toUpper(AsAtom(f)); v != "" {
c.Caps[v] = true
} else {
c.Logln(LogState, "Invalid capability:", f)
}
}
if c.debugLog.mask&LogState != 0 {
caps := strings.Join(c.getCaps(""), " ")
if caps == "" {
caps = "(none)"
}
c.Logln(LogState, "Capabilities:", caps)
}
}
// getCaps returns a sorted list of capabilities that share a common prefix. The
// prefix is stripped from the returned strings.
func (c *Client) getCaps(prefix string) []string {
caps := make([]string, 0, len(c.Caps))
if n := len(prefix); n == 0 {
for v := range c.Caps {
caps = append(caps, v)
}
} else {
for v := range c.Caps {
if strings.HasPrefix(v, prefix) {
caps = append(caps, v[n:])
}
}
}
sort.Strings(caps)
return caps
}
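// Example (illustrative): with Caps = {"IMAP4REV1", "AUTH=PLAIN", "AUTH=LOGIN"},
// c.getCaps("AUTH=") returns []string{"LOGIN", "PLAIN"}, sorted with the prefix
// stripped, while c.getCaps("") lists every capability.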
// close closes the connection without sending any additional data or updating
// client state. After the first invocation this method becomes a no-op.
func (c *Client) close(reason string) (err error) {
c.closer.Do(func() {
if reason != "" {
c.Logln(LogConn, "Close reason:", reason)
}
if err = c.t.Close(false); err != nil {
c.Logln(LogConn, "Close error:", err)
}
|
// main.ts

class Player{
wins: number = 0;
points: number = 100;
constructor(public name:string){
}
}
abstract class Character{
health: number = 100;
position: number;
abstract damage: number;
abstract image: string;
abstract distance: number;
abstract cost: number;
abstract type: CharacterTypes;
constructor(position: number){
this.position = position;
}
showInfo(){
return ("Type: " + CharacterTypes[this.type] + "</br> Distance: " + this.distance + "</br>Cost: " + this.cost);
}
}
class Warrior extends Character{
damage: number = 20;
image: string = "src/images/warrior.png";
distance: number = 1;
cost: number = 15;
type: CharacterTypes = CharacterTypes.Warrior;
position: number;
constructor(position){
super(position);
}
}
class Archer extends Character{
type: CharacterTypes = CharacterTypes.Archer;
image: string = "src/images/archer.png";
damage: number = 25;
distance: number = 2;
cost: number = 20;
constructor(position){
super(position);
}
}
class Thrower extends Character{
type: CharacterTypes = CharacterTypes.Thrower;
image: string = "src/images/thrower.png";
damage: number = 25;
distance: number = 2;
cost: number = 20;
constructor(position){
super(position);
}
}
class Squad{
private _resources: Character[] = [];
positions = [];
types = [];
constructor(){}
addMember(value: Character):void{
this._resources.push(value);
this.positions.push(value.position);
this.types.push(value.type);
}
deleteMember(value: Character):void{
this._resources.splice(this._resources.indexOf(value), 1);
}
findMember(position: number){
for(let i=0; i<this._resources.length; i++){
if(this._resources[i].position === position){
return this._resources[i];
};
}
}
get resources(): Character[]{
return this._resources;
}
}
class EnemySquad{
wins: number = 0;
private _resources: Character[] = [];
constructor(){}
addMember(value: Character):void{
this._resources.push(value);
}
deleteMember(value: Character):void{
this._resources.splice(this._resources.indexOf(value), 1);
}
findMember(position: number): Character{
for(let i=0; i<this._resources.length; i++){
if(this._resources[i].position === position){
return this._resources[i];
};
}
}
get resources(): Character[]{
return this._resources;
}
}
let currentPlayer: Player;
let playersSquad = new Squad();
let enemies = new EnemySquad();
let enemyPositions = [16,17,18,26,27,28,36,37,38,46,47,48,56,57,58];
$("form").submit(function(event){
event.preventDefault();
let name: string = <string>($('input[name="name"]').val());
$(this).addClass("hidden");
$(this).parent().hide();
if(!name) name = "Player";
currentPlayer = new Player(name);
$("#player-score").html(currentPlayer.name + "'s squad");
updatePoints();
})
$("#characters td").click(function(e){
let clicked = true;
let that = $(this);
$(this).addClass("clicked");
$('#field tr td:nth-child(-n+3)').addClass("available-cells");
$("#field td").click(function(){
$('#field tr td:nth-child(-n+3)').removeClass("available-cells");
if(!clicked) return;
clicked = false;
that.removeClass("clicked");
let chosenChar = that.attr("id");
let position = $(this).attr("id");
$(this).append($("<div/>").attr("class", "healt-points"));
if(+position[1] !== 1 && +position[1] !== 2 && +position[1] !== 3) {return};
if(chosenChar === "warrior"){
let cost = (new Warrior("")).cost;
if(currentPlayer.points < cost) return;
playersSquad.addMember(new Warrior(position));
$(this).addClass("warrior");
currentPlayer.points -= cost;
that.clone().css("border", "0").appendTo($(this));
updatePoints();
}
else if(chosenChar === "archer"){
let cost = (new Archer("")).cost;
if(currentPlayer.points < cost) return;
playersSquad.addMember(new Archer(position));
that.clone().css("border", "0").appendTo($(this));
$(this).addClass("archer");
currentPlayer.points -= cost;
updatePoints();
}
else if(chosenChar === "thrower"){
let cost = (new Thrower("")).cost;
if(currentPlayer.points < cost) return;
playersSquad.addMember(new Thrower(position));
that.clone().css("border", "0").appendTo($(this));
$(this).addClass("thrower");
currentPlayer.points -= cost;
updatePoints();
}
})
})
.hover(function(){
$(this).addClass("hover");
$("#info").css("visibility", "visible");
let char;
if ($(this).attr("id") === "warrior"){
char = new Warrior("");
}
else if($(this).attr("id") === "archer"){
char = new Archer("");
}
else if($(this).attr("id") === "thrower"){
char = new Thrower("");
}
$("#info").html(char.showInfo());
}, function(){
$(this).removeClass("hover");
$("#info").html("");
$("#info").css("visibility", "hidden");
})
$("body").click(function(e)
{
var container = $("#characters td");
if (!container.is(e.target) && !$("#field td").is(e.target))
{
container.removeClass("clicked");
}
})
$("#enemy-squad").one("click", function(){
let number = playersSquad.resources.length;
for(let i=0; i<number;i++){
let type = playersSquad.types[i];
placeEnemy(type);
}
})
$("#field td").click(function(){
$("#field td").each(function(){
$(this).removeClass("clicked");
})
let cellFrom = $(this);
let chose = false;
if($(this).html()){
let position = ($(this).attr("id"));
if($(this).hasClass("warrior")){
highlight(1, position);
chose = true;
}
if($(this).hasClass("archer")||$(this).hasClass("thrower")){
highlight(2, position);
chose = true;
}
}
$("#field td").click(function(){
if(chose && !$(this).html() && $(this).hasClass("highlighted")){
moveChar(cellFrom, $(this));
chose = false;
$("#field td").each(function(){
$(this).removeClass("highlighted");
});
setTimeout(function(){
enemyMoves();
}, 1000)
}
if(chose && $(this).hasClass("enemy") && $(this).hasClass("highlighted")){
playerAttacks(enemies.findMember(+$(this).attr("id")), playersSquad.findMember(+cellFrom.attr("id")));
setTimeout(function(){
enemyMoves();
}, 1000)
global.console.log("player attacks")
}
})
})
function enemyMoves(){
let i= Math.floor(Math.random()*enemies.resources.length);
let enemy = enemies.resources[i];
let position = (enemy.position).toString();
let cells = findPossibleMoves(enemy, position);
let playersChar;
cells.forEach(function(item, index, array){
if(item.hasClass("warrior") || item.hasClass("archer") || item.hasClass("thrower")){
playersChar = playersSquad.findMember(item.attr("id"));
global.console.log(playersChar);
}
})
let endCell;
if(playersChar){
enemyAttacks(enemy, playersChar)
}
else{
endCell = cells[Math.floor(Math.random()*cells.length)];
while(endCell.html()){
endCell = cells[Math.floor(Math.random()*cells.length)];
}
endCell.append($("#field #"+position).find("img")).addClass($("#field #"+position).attr("class"));
endCell.addClass("enemy");
$("#field #"+position).empty().removeClass();
enemy.position = endCell.attr("id");
}
}
function moveChar(cellFrom, cellTo){
let char = playersSquad.findMember(cellFrom.attr("id"));
cellTo.append(cellFrom.find("img")).addClass(cellFrom.attr("class"));
char.position = cellTo.attr("id");
cellFrom.empty().removeClass();
}
function findPossibleMoves(char, position){
let distance;
if(char.type === 0) distance = 1;
else if(char.type === 1 || char.type === 2) distance = 2;
let result = [];
$("#field td").each(function(){
let id = $(this).attr("id");
if((id[1]===position[1] && Math.abs(+id[0]-+position[0])<=distance) ||
(id[0]===position[0] && Math.abs(+id[1]-+position[1])<=distance)) {
result.push($(this))
}
})
return result
}
// main.ts

class Player{
wins: number = 0;
points: number = 100;
constructor(public name:string){
}
}
abstract class Character{
health: number = 100;
position: number;
abstract damage: number;
abstract image: string;
abstract distance: number;
abstract cost: number;
abstract type: CharacterTypes;
constructor(position: number){
this.position = position;
}
showInfo(){
return ("Type: " + CharacterTypes[this.type] + "</br> Distance: " + this.distance + "</br>Cost: " + this.cost);
}
}
class Warrior extends Character{
damage: number = 20;
image: string = "src/images/warrior.png";
distance: number = 1;
cost: number = 15;
type: CharacterTypes = CharacterTypes.Warrior;
position: number;
constructor(position){
super(position);
}
}
class Archer extends Character{
type: CharacterTypes = CharacterTypes.Archer;
image: string = "src/images/archer.png";
damage: number = 25;
distance: number = 2;
cost: number = 20;
constructor(position){
super(position);
}
}
class Thrower extends Character{
type: CharacterTypes = CharacterTypes.Thrower;
image: string = "src/images/thrower.png";
damage: number = 25;
distance: number = 2;
cost: number = 20;
constructor(position){
super(position);
}
}
class Squad{
private _resources: Character[] = [];
positions = [];
types = [];
constructor(){}
addMember(value: Character):void{
this._resources.push(value);
this.positions.push(value.position);
this.types.push(value.type);
}
deleteMember(value: Character):void{
this._resources.splice(this._resources.indexOf(value), 1);
}
findMember(position: number){
for(let i=0; i<this._resources.length; i++){
if(this._resources[i].position === position){
return this._resources[i];
}
}
}
get resources(): Character[]{
return this._resources;
}
}
class EnemySquad{
wins: number = 0;
private _resources: Character[] = [];
constructor(){}
addMember(value: Character):void{
this._resources.push(value);
}
deleteMember(value: Character):void{
this._resources.splice(this._resources.indexOf(value), 1);
}
findMember(position: number): Character{
for(let i=0; i<this._resources.length; i++){
if(this._resources[i].position === position){
return this._resources[i];
};
}
}
get resources(): Character[]{
return this._resources;
}
}
let currentPlayer: Player;
let playersSquad = new Squad();
let enemies = new EnemySquad();
let enemyPositions = [16,17,18,26,27,28,36,37,38,46,47,48,56,57,58];
$("form").submit(function(event){
event.preventDefault();
let name: string = <string>($('input[name="name"]').val());
$(this).addClass("hidden");
$(this).parent().hide();
if(!name) name = "Player";
currentPlayer = new Player(name);
$("#player-score").html(currentPlayer.name + "'s squad");
updatePoints();
})
$("#characters td").click(function(e){
let clicked = true;
let that = $(this);
$(this).addClass("clicked");
$('#field tr td:nth-child(-n+3)').addClass("available-cells");
$("#field td").click(function(){
$('#field tr td:nth-child(-n+3)').removeClass("available-cells");
if(!clicked) return;
clicked = false;
that.removeClass("clicked");
let chosenChar = that.attr("id");
let position = $(this).attr("id");
$(this).append($("<div/>").attr("class", "healt-points"));
if(+position[1] !== 1 && +position[1] !== 2 && +position[1] !== 3) {return};
if(chosenChar === "warrior"){
let cost = (new Warrior("")).cost;
if(currentPlayer.points < cost) return;
playersSquad.addMember(new Warrior(position));
$(this).addClass("warrior");
currentPlayer.points -= cost;
that.clone().css("border", "0").appendTo($(this));
updatePoints();
}
else if(chosenChar === "archer"){
let cost = (new Archer("")).cost;
if(currentPlayer.points < cost) return;
playersSquad.addMember(new Archer(position));
that.clone().css("border", "0").appendTo($(this));
$(this).addClass("archer");
currentPlayer.points -= cost;
updatePoints();
}
else if(chosenChar === "thrower"){
let cost = (new Thrower("")).cost;
if(currentPlayer.points < cost) return;
playersSquad.addMember(new Thrower(position));
that.clone().css("border", "0").appendTo($(this));
$(this).addClass("thrower");
currentPlayer.points -= cost;
updatePoints();
}
})
})
.hover(function(){
$(this).addClass("hover");
$("#info").css("visibility", "visible");
let char;
if ($(this).attr("id") === "warrior"){
char = new Warrior("");
}
else if($(this).attr("id") === "archer"){
char = new Archer("");
}
else if($(this).attr("id") === "thrower"){
char = new Thrower("");
}
$("#info").html(char.showInfo());
}, function(){
$(this).removeClass("hover");
$("#info").html("");
$("#info").css("visibility", "hidden");
})
$("body").click(function(e)
{
var container = $("#characters td");
if (!container.is(e.target) && !$("#field td").is(e.target))
{
container.removeClass("clicked");
}
})
$("#enemy-squad").one("click", function(){
let number = playersSquad.resources.length;
for(let i=0; i<number;i++){
let type = playersSquad.types[i];
placeEnemy(type);
}
})
$("#field td").click(function(){
$("#field td").each(function(){
$(this).removeClass("clicked");
})
let cellFrom = $(this);
let chose = false;
if($(this).html()){
let position = ($(this).attr("id"));
if($(this).hasClass("warrior")){
highlight(1, position);
chose = true;
}
if($(this).hasClass("archer")||$(this).hasClass("thrower")){
highlight(2, position);
chose = true;
}
}
$("#field td").click(function(){
if(chose && !$(this).html() && $(this).hasClass("highlighted")){
moveChar(cellFrom, $(this));
chose = false;
$("#field td").each(function(){
$(this).removeClass("highlighted");
});
setTimeout(function(){
enemyMoves();
}, 1000)
}
if(chose && $(this).hasClass("enemy") && $(this).hasClass("highlighted")){
playerAttacks(enemies.findMember(+$(this).attr("id")), playersSquad.findMember(+cellFrom.attr("id")));
setTimeout(function(){
enemyMoves();
}, 1000)
global.console.log("player attacks")
}
})
})
function enemyMoves(){
let i= Math.floor(Math.random()*enemies.resources.length);
let enemy = enemies.resources[i];
let position = (enemy.position).toString();
let cells = findPossibleMoves(enemy, position);
let playersChar;
cells.forEach(function(item, index, array){
if(item.hasClass("warrior") || item.hasClass("archer") || item.hasClass("thrower")){
playersChar = playersSquad.findMember(item.attr("id"));
global.console.log(playersChar);
}
})
let endCell;
if(playersChar){
enemyAttacks(enemy, playersChar)
}
else{
endCell = cells[Math.floor(Math.random()*cells.length)];
while(endCell.html()){
endCell = cells[Math.floor(Math.random()*cells.length)];
}
endCell.append($("#field #"+position).find("img")).addClass($("#field #"+position).attr("class"));
endCell.addClass("enemy");
$("#field #"+position).empty().removeClass();
enemy.position = endCell.attr("id");
}
}
function moveChar(cellFrom, cellTo){
let char = playersSquad.findMember(cellFrom.attr("id"));
cellTo.append(cellFrom.find("img")).addClass(cellFrom.attr("class"));
char.position = cellTo.attr("id");
cellFrom.empty().removeClass();
}
function findPossibleMoves(char, position){
let distance;
if(char.type === 0) distance = 1;
else if(char.type === 1 || char.type === 2) distance = 2;
let result = [];
$("#field td").each(function(){
let id = $(this).attr("id");
if((id[1]===position[1] && Math.abs(+id[0]-+position[0])<=distance) ||
(id[0]===position[0] && Math.abs(+id[1]-+position[1])<=distance)) {
result.push($(this))
}
})
return result
}
// main.ts

abstract class Character{
health: number = 100;
position: number;
abstract damage: number;
abstract image: string;
abstract distance: number;
abstract cost: number;
abstract type: CharacterTypes;
constructor(position: number){
this.position = position;
}
showInfo(){
return ("Type: " + CharacterTypes[this.type] + "</br> Distance: " + this.distance + "</br>Cost: " + this.cost);
}
}
class Warrior extends Character{
damage: number = 20;
image: string = "src/images/warrior.png";
distance: number = 1;
cost: number = 15;
type: CharacterTypes = CharacterTypes.Warrior;
position: number;
constructor(position){
super(position);
}
}
class Archer extends Character{
type: CharacterTypes = CharacterTypes.Archer;
image: string = "src/images/archer.png";
damage: number = 25;
distance: number = 2;
cost: number = 20;
constructor(position){
super(position);
}
}
class Thrower extends Character{
type: CharacterTypes = CharacterTypes.Thrower;
image: string = "src/images/thrower.png";
damage: number = 25;
distance: number = 2;
cost: number = 20;
constructor(position){
super(position);
}
}
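// Illustrative (assumes the CharacterTypes enum maps Warrior to the string
// "Warrior"): new Warrior("11").showInfo() renders
// "Type: Warrior</br> Distance: 1</br>Cost: 15".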
class Squad{
private _resources: Character[] = [];
positions = [];
types = [];
constructor(){}
addMember(value: Character):void{
this._resources.push(value);
this.positions.push(value.position);
this.types.push(value.type);
}
deleteMember(value: Character):void{
this._resources.splice(this._resources.indexOf(value), 1);
}
findMember(position: number){
for(let i=0; i<this._resources.length; i++){
if(this._resources[i].position === position){
return this._resources[i];
};
}
}
get resources(): Character[]{
return this._resources;
}
}
class EnemySquad{
wins: number = 0;
private _resources: Character[] = [];
constructor(){}
addMember(value: Character):void{
this._resources.push(value);
}
deleteMember(value: Character):void{
this._resources.splice(this._resources.indexOf(value), 1);
}
findMember(position: number): Character{
for(let i=0; i<this._resources.length; i++){
if(this._resources[i].position === position){
return this._resources[i];
};
}
}
get resources(): Character[]{
return this._resources;
}
}
let currentPlayer: Player;
let playersSquad = new Squad();
let enemies = new EnemySquad();
let enemyPositions = [16,17,18,26,27,28,36,37,38,46,47,48,56,57,58];
$("form").submit(function(event){
event.preventDefault();
let name: string = <string>($('input[name="name"]').val());
$(this).addClass("hidden");
$(this).parent().hide();
if(!name) name = "Player";
currentPlayer = new Player(name);
$("#player-score").html(currentPlayer.name + "'s squad");
updatePoints();
})
$("#characters td").click(function(e){
let clicked = true;
let that = $(this);
$(this).addClass("clicked");
$('#field tr td:nth-child(-n+3)').addClass("available-cells");
$("#field td").click(function(){
$('#field tr td:nth-child(-n+3)').removeClass("available-cells");
if(!clicked) return;
clicked = false;
that.removeClass("clicked");
let chosenChar = that.attr("id");
let position = $(this).attr("id");
$(this).append($("<div/>").attr("class", "healt-points"));
if(+position[1] !== 1 && +position[1] !== 2 && +position[1] !== 3) {return};
if(chosenChar === "warrior"){
let cost = (new Warrior("")).cost;
if(currentPlayer.points < cost) return;
playersSquad.addMember(new Warrior(position));
$(this).addClass("warrior");
currentPlayer.points -= cost;
that.clone().css("border", "0").appendTo($(this));
updatePoints();
}
else if(chosenChar === "archer"){
let cost = (new Archer("")).cost;
if(currentPlayer.points < cost) return;
playersSquad.addMember(new Archer(position));
that.clone().css("border", "0").appendTo($(this));
$(this).addClass("archer");
currentPlayer.points -= cost;
updatePoints();
}
else if(chosenChar === "thrower"){
let cost = (new Thrower("")).cost;
if(currentPlayer.points < cost) return;
playersSquad.addMember(new Thrower(position));
that.clone().css("border", "0").appendTo($(this));
$(this).addClass("thrower");
currentPlayer.points -= cost;
updatePoints();
}
})
})
.hover(function(){
$(this).addClass("hover");
$("#info").css("visibility", "visible");
let char;
if ($(this).attr("id") === "warrior"){
char = new Warrior("");
}
else if($(this).attr("id") === "archer"){
char = new Archer("");
}
else if($(this).attr("id") === "thrower"){
char = new Thrower("");
}
$("#info").html(char.showInfo());
}, function(){
$(this).removeClass("hover");
$("#info").html("");
$("#info").css("visibility", "hidden");
})
$("body").click(function(e)
{
var container = $("#characters td");
if (!container.is(e.target) && !$("#field td").is(e.target))
{
container.removeClass("clicked");
}
})
$("#enemy-squad").one("click", function(){
let number = playersSquad.resources.length;
for(let i=0; i<number;i++){
let type = playersSquad.types[i];
placeEnemy(type);
}
})
$("#field td").click(function(){
$("#field td").each(function(){
$(this).removeClass("clicked");
})
let cellFrom = $(this);
let chose = false;
if($(this).html()){
let position = ($(this).attr("id"));
if($(this).hasClass("warrior")){
highlight(1, position);
chose = true;
}
if($(this).hasClass("archer")||$(this).hasClass("thrower")){
highlight(2, position);
chose = true;
}
}
$("#field td").click(function(){
if(chose && !$(this).html() && $(this).hasClass("highlighted")){
moveChar(cellFrom, $(this));
chose = false;
$("#field td").each(function(){
$(this).removeClass("highlighted");
});
setTimeout(function(){
enemyMoves();
}, 1000)
}
if(chose && $(this).hasClass("enemy") && $(this).hasClass("highlighted")){
playerAttacks(enemies.findMember(+$(this).attr("id")), playersSquad.findMember(+cellFrom.attr("id")));
setTimeout(function(){
enemyMoves();
}, 1000)
global.console.log("player attacks")
}
})
})
function enemyMoves(){
let i= Math.floor(Math.random()*enemies.resources.length);
let enemy = enemies.resources[i];
let position = (enemy.position).toString();
let cells = findPossibleMoves(enemy, position);
let playersChar;
cells.forEach(function(item, index, array){
if(item.hasClass("warrior") || item.hasClass("archer") || item.hasClass("thrower")){
playersChar = playersSquad.findMember(item.attr("id"));
global.console.log(playersChar);
}
})
let endCell;
if(playersChar){
enemyAttacks(enemy, playersChar)
}
else{
endCell = cells[Math.floor(Math.random()*cells.length)];
while(endCell.html()){
endCell = cells[Math.floor(Math.random()*cells.length)];
}
endCell.append($("#field #"+position).find("img")).addClass($("#field #"+position).attr("class"));
endCell.addClass("enemy");
$("#field #"+position).empty().removeClass();
enemy.position = endCell.attr("id");
}
}
function moveChar(cellFrom, cellTo){
let char = playersSquad.findMember(cellFrom.attr("id"));
cellTo.append(cellFrom.find("img")).addClass(cellFrom.attr("class"));
char.position = cellTo.attr("id");
cellFrom.empty().removeClass();
}
function findPossibleMoves(char, position){
let distance;
if(char.type === 0) distance = 1;
else if(char.type === 1 || char.type === 2) distance = 2;
let result = [];
$("#field td").each(function(){
let id = $(this).attr("id");
if((id[1]===position[1] && Math.abs(+id[0]-+position[0])<=distance) ||
(id[0]===position[0] && Math.abs(+id[1]-+position[1])<=distance)) {
result.push($(this))
}
})
return result
}
// highlight() is truncated in this excerpt; only its name survives. The
// signature is inferred from the calls highlight(1, position) and
// highlight(2, position) above; the body below is an assumed sketch, not the
// original implementation.
function highlight(distance, position){
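// Assumed: mark same-row/column cells within `distance`, mirroring
// findPossibleMoves and the "highlighted" class checks earlier.
$("#field td").each(function(){
let id = $(this).attr("id");
if((id[1]===position[1] && Math.abs(+id[0]-+position[0])<=distance) ||
(id[0]===position[0] && Math.abs(+id[1]-+position[1])<=distance)) {
$(this).addClass("highlighted");
}
})
}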
|
// image.go

Properties map[string]string `json:"properties"`
Protected bool `json:"protected"`
Status string `json:"status"`
Size int64 `json:"size"`
VirtualSize *int64 `json:"virtual_size"` // Note: Property exists in OpenStack dev stack payloads but not Helion public cloud.
}
// QueryParameters is a structure that
// contains the filter, sort, and paging parameters for
// an image or imagedetail query.
type QueryParameters struct {
Name string
Status string
ContainerFormat string
DiskFormat string
MinSize int64
MaxSize int64
SortKey string
SortDirection SortDirection
Marker string
Limit int64
}
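// Example (illustrative): list up to 20 active qcow2 images, newest first.
//
//	params := &QueryParameters{
//		Status:        "active",
//		DiskFormat:    "qcow2",
//		SortKey:       "created_at",
//		SortDirection: Desc,
//		Limit:         20,
//	}
//	images, err := imageService.QueryImages(params)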
type UploadParameters struct {
Name string
DiskFormat string
ContainerFormat string
IsPublic bool
MinDisk int64
MinRam int64
Owner string
CopyFromUrl string
}
// AddImageResponse is a structure containing relevant properties for new images
type AddImageResponse struct {
CheckSum string `json:"checksum"`
ContainerFormat string `json:"container_format"`
CreatedAt misc.RFC8601DateTime `json:"created_at"`
DiskFormat string `json:"disk_format"`
ID string `json:"id"`
IsPublic bool `json:"is_public"`
MinDisk int64 `json:"min_disk"`
MinRAM int64 `json:"min_ram"`
Name string `json:"name"`
Owner *string `json:"owner"`
UpdatedAt misc.RFC8601DateTime `json:"updated_at"`
Status string `json:"status"`
}
type GlanceLinks struct {
HRef string `json:"href"`
Relationship string `json:"rel"`
}
type GlanceVersion struct {
Status string `json:"status"`
Id string `json:"id"`
Links []GlanceLinks `json:"links"`
}
type glanceVersionResponse struct {
Versions []GlanceVersion `json:"versions"`
}
type glanceAddImageResponse struct {
Image AddImageResponse `json:"image"`
}
// SortDirection of the sort, ascending or descending.
type SortDirection string
const (
// Desc specifies the sort direction to be descending.
Desc SortDirection = "desc"
// Asc specifies the sort direction to be ascending.
Asc SortDirection = "asc"
)
// GetV1Interface checks the version list advertised by the Glance endpoint for
// a supported v1.0 interface and, when found, returns the URL to use for it.
func (imageService Service) GetV1Interface() (correctVersion bool, updateUrl string, err error) {
versionContainer := glanceVersionResponse{}
err = misc.GetJSON(imageService.URL, imageService.TokenID, imageService.Client, &versionContainer)
if err != nil {
return false, "", err
}
for _, version := range versionContainer.Versions {
if version.Status == "SUPPORTED" && version.Id == "v1.0" {
updateUrl = version.Links[0].HRef
correctVersion = true
}
}
return correctVersion, updateUrl, nil
}
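// Example (illustrative): prefer the advertised v1.0 endpoint when present.
//
//	if ok, v1URL, err := imageService.GetV1Interface(); err == nil && ok {
//		imageService.URL = v1URL
//	}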
// Images will issue a get request to OpenStack to retrieve the list of images.
func (imageService Service) Images() (image []Response, err error) {
return imageService.QueryImages(nil)
}
// ImagesDetail will issue a get request to OpenStack to retrieve the list of images complete with
// additional details.
func (imageService Service) ImagesDetail() (image []DetailResponse, err error) {
return imageService.QueryImagesDetail(nil)
}
// ReserveImage will issue a post request to OpenStack to reserve an image instance
func (imageService Service) ReserveImage(uploadParameters UploadParameters, hypervisor string, mode string) (ID string, Status string, err error) {
addImageContainer := glanceAddImageResponse{}
headers := make([]string, 10)
i := 0
headers[i] = "x-image-meta-name^" + uploadParameters.Name
i++
headers[i] = "x-image-meta-disk_format^" + uploadParameters.DiskFormat
i++
headers[i] = "x-image-meta-container_format^" + uploadParameters.ContainerFormat
i++
headers[i] = "x-image-meta-property-hypervisor_type^" + hypervisor
i++
headers[i] = "x-image-meta-property-vm_mode^" + mode
i++
if uploadParameters.CopyFromUrl != "" {
headers[i] = "x-glance-api-copy-from^" + uploadParameters.CopyFromUrl
i++
}
if uploadParameters.Owner != "" {
headers[i] = "x-glance-meta-owner^" + uploadParameters.Owner
i++
}
if uploadParameters.IsPublic {
headers[i] = "x-image-meta-is_public^true"
i++
}
if uploadParameters.MinRam != 0 {
headers[i] = "x-image-meta-min_ram^" + fmt.Sprintf("%d", uploadParameters.MinRam)
i++
}
if uploadParameters.MinDisk != 0 {
headers[i] = "x-image-meta-min_disk^" + fmt.Sprintf("%d", uploadParameters.MinDisk)
i++
}
// Trim to the entries actually filled in, so no empty headers are sent.
headers = headers[:i]
url := strings.TrimSuffix(imageService.URL, "/") + "/images"
err = misc.PostHeader(url, imageService.TokenID, imageService.Client, headers, &addImageContainer)
if err != nil {
return "", "", err
}
ID = addImageContainer.Image.ID
Status = addImageContainer.Image.Status
return
}
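// Example (illustrative; the URL and names are placeholders): have Glance copy
// an image from a remote location, with qemu/hvm hypervisor hints.
//
//	id, status, err := imageService.ReserveImage(UploadParameters{
//		Name:            "cirros-test",
//		DiskFormat:      "qcow2",
//		ContainerFormat: "bare",
//		CopyFromUrl:     "http://example.com/cirros.img",
//	}, "qemu", "hvm")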
// ImageStatus issues a request for the image with the given ID and returns the
// value of its x-image-meta-status response header.
func (imageService Service) ImageStatus(Id string) (Status string, err error) {
url := strings.TrimSuffix(imageService.URL, "/") + "/images/" + Id
headers, err := misc.GetHeader(url, imageService.TokenID, imageService.Client)
if err != nil {
return "", err
}
for header, value := range headers {
//log.Printf ("header '%s'='%s'", header, value[0])
if strings.ToLower(header) == "x-image-meta-status" {
Status = value[0]
break
}
}
return Status, nil
}
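// Example (illustrative): poll a reserved image until Glance finishes with it.
// "active" and "killed" are assumed terminal statuses here.
//
//	for {
//		status, err := imageService.ImageStatus(id)
//		if err != nil || status == "active" || status == "killed" {
//			break
//		}
//		time.Sleep(2 * time.Second)
//	}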
// QueryImages will issue a get request with the specified ImageQueryParameters to retrieve the list of
// images.
func (imageService Service) QueryImages(queryParameters *QueryParameters) ([]Response, error) {
imagesContainer := imagesResponse{}
err := imageService.queryImages(false /*includeDetails*/, &imagesContainer, queryParameters)
if err != nil {
return nil, err
}
return imagesContainer.Images, nil
}
// QueryImagesDetail will issue a get request with the specified QueryParameters to retrieve the list of
// images with additional details.
func (imageService Service) QueryImagesDetail(queryParameters *QueryParameters) ([]DetailResponse, error) {
imagesDetailContainer := imagesDetailResponse{}
err := imageService.queryImages(true /*includeDetails*/, &imagesDetailContainer, queryParameters)
if err != nil {
return nil, err
}
return imagesDetailContainer.Images, nil
}
func (imageService Service) queryImages(includeDetails bool, imagesResponseContainer interface{}, queryParameters *QueryParameters) error {
urlPostFix := "/images"
if includeDetails {
urlPostFix = urlPostFix + "/detail"
}
reqURL, err := buildQueryURL(imageService, queryParameters, urlPostFix)
if err != nil {
return err
}
err = misc.GetJSON(reqURL.String(), imageService.TokenID, imageService.Client, &imagesResponseContainer)
if err != nil {
return err
}
return nil
}
func buildQueryURL(imageService Service, queryParameters *QueryParameters, imagePartialURL string) (*url.URL, error) {
reqURL, err := url.Parse(imageService.URL)
if err != nil {
return nil, err
}
if queryParameters != nil {
values := url.Values{}
if queryParameters.Name != "" {
values.Set("name", queryParameters.Name)
}
if queryParameters.ContainerFormat != "" {
values.Set("container_format", queryParameters.ContainerFormat)
}
if queryParameters.DiskFormat != "" {
values.Set("disk_format", queryParameters.DiskFormat)
}
if queryParameters.Status != "" {
values.Set("status", queryParameters.Status)
}
if queryParameters.MinSize != 0 {
values.Set("size_min", fmt.Sprintf("%d", queryParameters.MinSize))
}
if queryParameters.MaxSize != 0 {
values.Set("size_max", fmt.Sprintf("%d", queryParameters.MaxSize))
}
if queryParameters.Limit != 0 {
values.Set("limit", fmt.Sprintf("%d", queryParameters.Limit))
}
if queryParameters.Marker != "" {
values.Set("marker", queryParameters.Marker)
}
if queryParameters.SortKey != "" {
values.Set("sort_key", queryParameters.SortKey)
}
if queryParameters.SortDirection != "" {
values.Set("sort_dir", string(queryParameters.SortDirection))
}
if len(values) > 0 {
reqURL.RawQuery = values.Encode()
}
}
reqURL.Path = strings.TrimSuffix(reqURL.Path, "/") + imagePartialURL |
return reqURL, nil
}
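// Illustrative result: Name "cirros" with Limit 10 against http://glance:9292/v1
// yields http://glance:9292/v1/images?limit=10&name=cirros, since
// url.Values.Encode sorts keys and escapes values.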
type imagesDetailResponse struct {
Images []DetailResponse `json:"images"` // field inferred from QueryImagesDetail's use of .Images
}
|
// image.go

DeletedAt *misc.RFC8601DateTime `json:"deleted_at"`
DiskFormat string `json:"disk_format"`
ID string `json:"id"`
IsPublic bool `json:"is_public"`
MinDisk int64 `json:"min_disk"`
MinRAM int64 `json:"min_ram"`
Name string `json:"name"`
Owner *string `json:"owner"`
UpdatedAt misc.RFC8601DateTime `json:"updated_at"`
Properties map[string]string `json:"properties"`
Protected bool `json:"protected"`
Status string `json:"status"`
Size int64 `json:"size"`
VirtualSize *int64 `json:"virtual_size"` // Note: Property exists in OpenStack dev stack payloads but not Helion public cloud.
}
// QueryParameters is a structure that
// contains the filter, sort, and paging parameters for
// an image or imagedetail query.
type QueryParameters struct {
Name string
Status string
ContainerFormat string
DiskFormat string
MinSize int64
MaxSize int64
SortKey string
SortDirection SortDirection
Marker string
Limit int64
}
type UploadParameters struct {
Name string
DiskFormat string
ContainerFormat string
IsPublic bool
MinDisk int64
MinRam int64
Owner string
CopyFromUrl string
}
// AddImageResponse is a structure containing relevant properties for new images
type AddImageResponse struct {
CheckSum string `json:"checksum"`
ContainerFormat string `json:"container_format"`
CreatedAt misc.RFC8601DateTime `json:"created_at"`
DiskFormat string `json:"disk_format"`
ID string `json:"id"`
IsPublic bool `json:"is_public"`
MinDisk int64 `json:"min_disk"`
MinRAM int64 `json:"min_ram"`
Name string `json:"name"`
Owner *string `json:"owner"`
UpdatedAt misc.RFC8601DateTime `json:"updated_at"`
Status string `json:"status"`
}
type GlanceLinks struct {
HRef string `json:"href"`
Relationship string `json:"rel"`
}
type GlanceVersion struct {
Status string `json:"status"`
Id string `json:"id"`
Links []GlanceLinks `json:"links"`
}
type glanceVersionResponse struct {
Versions []GlanceVersion `json:"versions"`
}
type glanceAddImageResponse struct {
Image AddImageResponse `json:"image"`
}
// SortDirection of the sort, ascending or descending.
type SortDirection string
const (
// Desc specifies the sort direction to be descending.
Desc SortDirection = "desc"
// Asc specifies the sort direction to be ascending.
Asc SortDirection = "asc"
)
// GetV1Interface checks the version list advertised by the Glance endpoint for
// a supported v1.0 interface and, when found, returns the URL to use for it.
func (imageService Service) GetV1Interface() (correctVersion bool, updateUrl string, err error) {
versionContainer := glanceVersionResponse{}
err = misc.GetJSON(imageService.URL, imageService.TokenID, imageService.Client, &versionContainer)
if err != nil {
return false, "", err
}
for _, version := range versionContainer.Versions {
if version.Status == "SUPPORTED" && version.Id == "v1.0" {
updateUrl = version.Links[0].HRef
correctVersion = true
}
}
return correctVersion, updateUrl, nil
}
// Images will issue a get request to OpenStack to retrieve the list of images.
func (imageService Service) Images() (image []Response, err error) {
return imageService.QueryImages(nil)
}
// ImagesDetail will issue a get request to OpenStack to retrieve the list of images complete with
// additional details.
func (imageService Service) ImagesDetail() (image []DetailResponse, err error) {
return imageService.QueryImagesDetail(nil)
}
// ReserveImage will issue a post request to OpenStack to reserve an image instance
func (imageService Service) ReserveImage (uploadParameters UploadParameters, hypervisor string, mode string) (ID string, Status string, err error) {
addImageContainer := glanceAddImageResponse {}
headers := make( []string, 10 )
i := 0
headers[i] = "x-image-meta-name^" + uploadParameters.Name
i++
headers[i] = "x-image-meta-disk_format^" + uploadParameters.DiskFormat
i++
headers[i] = "x-image-meta-container_format^" + uploadParameters.ContainerFormat
i++
headers[i] = "x-image-meta-property-hypervisor_type^" + hypervisor
i++
headers[i] = "x-image-meta-property-vm_mode^" + mode
i++
if uploadParameters.CopyFromUrl != "" {
headers[i] = "x-glance-api-copy-from^" + uploadParameters.CopyFromUrl
i++
}
if uploadParameters.Owner != "" {
headers[i] = "x-glance-meta-owner^" + uploadParameters.Owner
i++
}
if uploadParameters.IsPublic {
headers[i] = "x-image-meta-is_public^true"
i++
}
if uploadParameters.MinRam != 0 {
headers[i] = "x-image-meta-min_ram^" + fmt.Sprintf("%d", uploadParameters.MinRam)
i++
}
if uploadParameters.MinDisk != 0 {
headers[i] = "x-image-meta-min_disk^" + fmt.Sprintf("%d", uploadParameters.MinDisk)
i++
}
url := strings.TrimSuffix(imageService.URL, "/") + "/images"
err = misc.PostHeader(url, imageService.TokenID, imageService.Client, headers, &addImageContainer)
if err != nil {
return "", "", err
}
ID = addImageContainer.Image.ID
Status = addImageContainer.Image.Status
return
}
// QueryImages will issue a get request with the specified ImageQueryParameters to retrieve the list of
// images.
func (imageService Service) ImageStatus(Id string) (Status string, err error) {
url := strings.TrimSuffix(imageService.URL, "/") + "/images/" + Id
var headers http.Header
headers, err = misc.GetHeader(url, imageService.TokenID, imageService.Client)
if err != nil {
return "", err
} else {
for header, value := range headers {
//log.Printf ("header '%s'='%s'", header, value[0])
if strings.ToLower(header) == "x-image-meta-status" {
Status = value[0]
break
}
}
}
return Status, nil
}
// QueryImages will issue a get request with the specified ImageQueryParameters to retrieve the list of
// images.
func (imageService Service) QueryImages(queryParameters *QueryParameters) ([]Response, error) {
imagesContainer := imagesResponse{}
err := imageService.queryImages(false /*includeDetails*/, &imagesContainer, queryParameters)
if err != nil {
return nil, err
}
return imagesContainer.Images, nil
}
// QueryImagesDetail will issue a get request with the specified QueryParameters to retrieve the list of
// images with additional details.
func (imageService Service) QueryImagesDetail(queryParameters *QueryParameters) ([]DetailResponse, error) {
imagesDetailContainer := imagesDetailResponse{}
err := imageService.queryImages(true /*includeDetails*/, &imagesDetailContainer, queryParameters)
if err != nil {
return nil, err
}
return imagesDetailContainer.Images, nil
}
func (imageService Service) queryImages(includeDetails bool, imagesResponseContainer interface{}, queryParameters *QueryParameters) error {
urlPostFix := "/images"
if includeDetails {
urlPostFix = urlPostFix + "/detail"
}
reqURL, err := buildQueryURL(imageService, queryParameters, urlPostFix)
if err != nil {
return err
}
err = misc.GetJSON(reqURL.String(), imageService.TokenID, imageService.Client, &imagesResponseContainer)
if err != nil {
return err
}
return nil
}
func buildQueryURL(imageService Service, queryParameters *QueryParameters, imagePartialURL string) (*url.URL, error) {
reqURL, err := url.Parse(imageService.URL)
if err != nil {
return nil, err
}
if queryParameters != nil {
values := url.Values{}
if queryParameters.Name != "" {
values.Set("name", queryParameters.Name)
}
if queryParameters.ContainerFormat != "" {
values.Set("container_format", queryParameters.ContainerFormat)
}
if queryParameters.DiskFormat != "" {
values.Set("disk_format", queryParameters.DiskFormat)
}
if queryParameters.Status != "" {
values.Set("status", queryParameters.Status)
}
if queryParameters.MinSize != 0 {
values.Set("size_min", fmt.Sprintf("%d", queryParameters.MinSize))
}
if queryParameters.MaxSize != 0 {
values.Set("size_max", fmt.Sprintf("%d", queryParameters.MaxSize))
}
if queryParameters.Limit != 0 | {
values.Set("limit", fmt.Sprintf("%d", queryParameters.Limit))
} | conditional_block |
|
image.go | QueryParameters.
*/
package image
import (
"fmt"
"github.com/xenserverarmy/go-osglance/misc"
"net/http"
"net/url"
"strings"
)
// Service is a client service that can make
// requests against a OpenStack version 1 image service.
// Below is an example on creating an image service and getting images:
// imageService := image.ImageService{Client: *http.DefaultClient, TokenId: tokenId, Url: "http://imageservicelocation"}
// images:= imageService.Images()
type Service struct {
Client http.Client
TokenID string
URL string
}
// Response is a structure for all properties of
// an image for a non detailed query
type Response struct {
CheckSum string `json:"checksum"`
ContainerFormat string `json:"container_format"`
DiskFormat string `json:"disk_format"`
ID string `json:"id"`
Name string `json:"name"`
Size int64 `json:"size"`
}
// DetailResponse is a structure for all properties of
// an image for a detailed query
type DetailResponse struct {
CheckSum string `json:"checksum"`
ContainerFormat string `json:"container_format"`
CreatedAt misc.RFC8601DateTime `json:"created_at"`
Deleted bool `json:"deleted"`
DeletedAt *misc.RFC8601DateTime `json:"deleted_at"`
DiskFormat string `json:"disk_format"`
ID string `json:"id"`
IsPublic bool `json:"is_public"`
MinDisk int64 `json:"min_disk"`
MinRAM int64 `json:"min_ram"`
Name string `json:"name"`
Owner *string `json:"owner"`
UpdatedAt misc.RFC8601DateTime `json:"updated_at"`
Properties map[string]string `json:"properties"`
Protected bool `json:"protected"`
Status string `json:"status"`
Size int64 `json:"size"`
VirtualSize *int64 `json:"virtual_size"` // Note: Property exists in OpenStack dev stack payloads but not Helion public cloud.
}
// QueryParameters is a structure that
// contains the filter, sort, and paging parameters for
// an image or imagedetail query.
type QueryParameters struct {
Name string
Status string
ContainerFormat string
DiskFormat string
MinSize int64
MaxSize int64
SortKey string
SortDirection SortDirection
Marker string
Limit int64
}
type UploadParameters struct {
Name string
DiskFormat string
ContainerFormat string
IsPublic bool
MinDisk int64
MinRam int64
Owner string
CopyFromUrl string
}
// AddImageResponse is a structure containing relevant properties for new images
type AddImageResponse struct {
CheckSum string `json:"checksum"`
ContainerFormat string `json:"container_format"`
CreatedAt misc.RFC8601DateTime `json:"created_at"`
DiskFormat string `json:"disk_format"`
ID string `json:"id"`
IsPublic bool `json:"is_public"`
MinDisk int64 `json:"min_disk"`
MinRAM int64 `json:"min_ram"`
Name string `json:"name"`
Owner *string `json:"owner"`
UpdatedAt misc.RFC8601DateTime `json:"updated_at"`
Status string `json:"status"`
}
type GlanceLinks struct {
HRef string `json:"href"`
Relationship string `json:"rel"`
}
type GlanceVersion struct {
Status string `json:"status"`
Id string `json:"id"`
Links []GlanceLinks `json:"links"`
}
type glanceVersionResponse struct {
Versions []GlanceVersion `json:"versions"`
}
type glanceAddImageResponse struct {
Image AddImageResponse `json:"image"`
}
// SortDirection of the sort, ascending or descending.
type SortDirection string
const (
// Desc specifies the sort direction to be descending.
Desc SortDirection = "desc"
// Asc specifies the sort direction to be ascending.
Asc SortDirection = "asc"
)
// we support a v1.0 interface, so lets ensure we can find that interface in the list
func (imageService Service) GetV1Interface () (correctVersion bool, updateUrl string, err error) {
versionContainer := glanceVersionResponse {}
err = misc.GetJSON(imageService.URL, imageService.TokenID, imageService.Client, &versionContainer )
if err != nil {
return false, "", err
} else {
for _, version := range versionContainer.Versions {
if version.Status == "SUPPORTED" && version.Id == "v1.0" {
updateUrl = version.Links[0].HRef
correctVersion = true
}
}
}
return correctVersion , updateUrl , nil
}
// Images will issue a get request to OpenStack to retrieve the list of images.
func (imageService Service) Images() (image []Response, err error) {
return imageService.QueryImages(nil)
}
// ImagesDetail will issue a get request to OpenStack to retrieve the list of images complete with
// additional details.
func (imageService Service) ImagesDetail() (image []DetailResponse, err error) {
return imageService.QueryImagesDetail(nil)
}
// ReserveImage will issue a post request to OpenStack to reserve an image instance
func (imageService Service) ReserveImage (uploadParameters UploadParameters, hypervisor string, mode string) (ID string, Status string, err error) {
addImageContainer := glanceAddImageResponse {}
headers := make( []string, 10 )
i := 0
headers[i] = "x-image-meta-name^" + uploadParameters.Name
i++
headers[i] = "x-image-meta-disk_format^" + uploadParameters.DiskFormat
i++
headers[i] = "x-image-meta-container_format^" + uploadParameters.ContainerFormat
i++
headers[i] = "x-image-meta-property-hypervisor_type^" + hypervisor
i++
headers[i] = "x-image-meta-property-vm_mode^" + mode
i++
if uploadParameters.CopyFromUrl != "" {
headers[i] = "x-glance-api-copy-from^" + uploadParameters.CopyFromUrl
i++
}
if uploadParameters.Owner != "" {
headers[i] = "x-glance-meta-owner^" + uploadParameters.Owner
i++
}
if uploadParameters.IsPublic {
headers[i] = "x-image-meta-is_public^true"
i++
}
if uploadParameters.MinRam != 0 {
headers[i] = "x-image-meta-min_ram^" + fmt.Sprintf("%d", uploadParameters.MinRam)
i++
}
if uploadParameters.MinDisk != 0 {
headers[i] = "x-image-meta-min_disk^" + fmt.Sprintf("%d", uploadParameters.MinDisk)
i++
}
url := strings.TrimSuffix(imageService.URL, "/") + "/images"
err = misc.PostHeader(url, imageService.TokenID, imageService.Client, headers, &addImageContainer)
if err != nil {
return "", "", err
}
ID = addImageContainer.Image.ID
Status = addImageContainer.Image.Status
return
}
// QueryImages will issue a get request with the specified ImageQueryParameters to retrieve the list of
// images.
func (imageService Service) ImageStatus(Id string) (Status string, err error) {
url := strings.TrimSuffix(imageService.URL, "/") + "/images/" + Id
var headers http.Header
headers, err = misc.GetHeader(url, imageService.TokenID, imageService.Client)
if err != nil {
return "", err
} else {
for header, value := range headers {
//log.Printf ("header '%s'='%s'", header, value[0])
if strings.ToLower(header) == "x-image-meta-status" {
Status = value[0]
break
}
}
}
return Status, nil
}
// QueryImages will issue a get request with the specified ImageQueryParameters to retrieve the list of
// images.
func (imageService Service) | (queryParameters *QueryParameters) ([]Response, error) {
imagesContainer := imagesResponse{}
err := imageService.queryImages(false /*includeDetails*/, &imagesContainer, queryParameters)
if err != nil {
return nil, err
}
return imagesContainer.Images, nil
}
// QueryImagesDetail will issue a get request with the specified QueryParameters to retrieve the list of
// images with additional details.
func (imageService Service) QueryImagesDetail(queryParameters *QueryParameters) ([]DetailResponse, error) {
imagesDetailContainer := imagesDetailResponse{}
err := imageService.queryImages(true /*includeDetails*/, &imagesDetailContainer, queryParameters)
if err != nil {
return nil, err
}
return imagesDetailContainer.Images, nil
}
func (imageService Service) queryImages(includeDetails bool, imagesResponseContainer interface{}, queryParameters *QueryParameters) error {
urlPostFix := "/images"
if includeDetails {
urlPostFix = urlPostFix + "/detail"
}
reqURL, err := buildQueryURL(imageService, queryParameters, urlPostFix)
if err != nil {
| QueryImages | identifier_name |
image.go | json:"name"`
Size int64 `json:"size"`
}
// DetailResponse is a structure for all properties of
// an image for a detailed query
type DetailResponse struct {
CheckSum string `json:"checksum"`
ContainerFormat string `json:"container_format"`
CreatedAt misc.RFC8601DateTime `json:"created_at"`
Deleted bool `json:"deleted"`
DeletedAt *misc.RFC8601DateTime `json:"deleted_at"`
DiskFormat string `json:"disk_format"`
ID string `json:"id"`
IsPublic bool `json:"is_public"`
MinDisk int64 `json:"min_disk"`
MinRAM int64 `json:"min_ram"`
Name string `json:"name"`
Owner *string `json:"owner"`
UpdatedAt misc.RFC8601DateTime `json:"updated_at"`
Properties map[string]string `json:"properties"`
Protected bool `json:"protected"`
Status string `json:"status"`
Size int64 `json:"size"`
VirtualSize *int64 `json:"virtual_size"` // Note: Property exists in OpenStack dev stack payloads but not Helion public cloud.
}
// QueryParameters is a structure that
// contains the filter, sort, and paging parameters for
// an image or imagedetail query.
type QueryParameters struct {
Name string
Status string
ContainerFormat string
DiskFormat string
MinSize int64
MaxSize int64
SortKey string
SortDirection SortDirection
Marker string
Limit int64
}
type UploadParameters struct {
Name string
DiskFormat string
ContainerFormat string
IsPublic bool
MinDisk int64
MinRam int64
Owner string
CopyFromUrl string
}
// AddImageResponse is a structure containing relevant properties for new images
type AddImageResponse struct {
CheckSum string `json:"checksum"`
ContainerFormat string `json:"container_format"`
CreatedAt misc.RFC8601DateTime `json:"created_at"`
DiskFormat string `json:"disk_format"`
ID string `json:"id"`
IsPublic bool `json:"is_public"`
MinDisk int64 `json:"min_disk"`
MinRAM int64 `json:"min_ram"`
Name string `json:"name"`
Owner *string `json:"owner"`
UpdatedAt misc.RFC8601DateTime `json:"updated_at"`
Status string `json:"status"`
}
type GlanceLinks struct {
HRef string `json:"href"`
Relationship string `json:"rel"`
}
type GlanceVersion struct {
Status string `json:"status"`
Id string `json:"id"`
Links []GlanceLinks `json:"links"`
}
type glanceVersionResponse struct {
Versions []GlanceVersion `json:"versions"`
}
type glanceAddImageResponse struct {
Image AddImageResponse `json:"image"`
}
// SortDirection of the sort, ascending or descending.
type SortDirection string
const (
// Desc specifies the sort direction to be descending.
Desc SortDirection = "desc"
// Asc specifies the sort direction to be ascending.
Asc SortDirection = "asc"
)
// we support a v1.0 interface, so lets ensure we can find that interface in the list
func (imageService Service) GetV1Interface () (correctVersion bool, updateUrl string, err error) {
versionContainer := glanceVersionResponse {}
err = misc.GetJSON(imageService.URL, imageService.TokenID, imageService.Client, &versionContainer )
if err != nil {
return false, "", err
} else {
for _, version := range versionContainer.Versions {
if version.Status == "SUPPORTED" && version.Id == "v1.0" {
updateUrl = version.Links[0].HRef
correctVersion = true
}
}
}
return correctVersion , updateUrl , nil
}
// Images will issue a get request to OpenStack to retrieve the list of images.
func (imageService Service) Images() (image []Response, err error) {
return imageService.QueryImages(nil)
}
// ImagesDetail will issue a get request to OpenStack to retrieve the list of images complete with
// additional details.
func (imageService Service) ImagesDetail() (image []DetailResponse, err error) {
return imageService.QueryImagesDetail(nil)
}
// ReserveImage will issue a post request to OpenStack to reserve an image instance
func (imageService Service) ReserveImage (uploadParameters UploadParameters, hypervisor string, mode string) (ID string, Status string, err error) {
addImageContainer := glanceAddImageResponse {}
headers := make( []string, 10 )
i := 0
headers[i] = "x-image-meta-name^" + uploadParameters.Name
i++
headers[i] = "x-image-meta-disk_format^" + uploadParameters.DiskFormat
i++
headers[i] = "x-image-meta-container_format^" + uploadParameters.ContainerFormat
i++
headers[i] = "x-image-meta-property-hypervisor_type^" + hypervisor
i++
headers[i] = "x-image-meta-property-vm_mode^" + mode
i++
if uploadParameters.CopyFromUrl != "" {
headers[i] = "x-glance-api-copy-from^" + uploadParameters.CopyFromUrl
i++
}
if uploadParameters.Owner != "" {
headers[i] = "x-glance-meta-owner^" + uploadParameters.Owner
i++
}
if uploadParameters.IsPublic {
headers[i] = "x-image-meta-is_public^true"
i++
}
if uploadParameters.MinRam != 0 {
headers[i] = "x-image-meta-min_ram^" + fmt.Sprintf("%d", uploadParameters.MinRam)
i++
}
if uploadParameters.MinDisk != 0 {
headers[i] = "x-image-meta-min_disk^" + fmt.Sprintf("%d", uploadParameters.MinDisk)
i++
}
url := strings.TrimSuffix(imageService.URL, "/") + "/images"
err = misc.PostHeader(url, imageService.TokenID, imageService.Client, headers, &addImageContainer)
if err != nil {
return "", "", err
}
ID = addImageContainer.Image.ID
Status = addImageContainer.Image.Status
return
}
// QueryImages will issue a get request with the specified ImageQueryParameters to retrieve the list of
// images.
func (imageService Service) ImageStatus(Id string) (Status string, err error) {
url := strings.TrimSuffix(imageService.URL, "/") + "/images/" + Id
var headers http.Header
headers, err = misc.GetHeader(url, imageService.TokenID, imageService.Client)
if err != nil {
return "", err
} else {
for header, value := range headers {
//log.Printf ("header '%s'='%s'", header, value[0])
if strings.ToLower(header) == "x-image-meta-status" {
Status = value[0]
break
}
}
}
return Status, nil
}
// QueryImages will issue a get request with the specified ImageQueryParameters to retrieve the list of
// images.
func (imageService Service) QueryImages(queryParameters *QueryParameters) ([]Response, error) {
imagesContainer := imagesResponse{}
err := imageService.queryImages(false /*includeDetails*/, &imagesContainer, queryParameters)
if err != nil {
return nil, err
}
return imagesContainer.Images, nil
}
// QueryImagesDetail will issue a get request with the specified QueryParameters to retrieve the list of
// images with additional details.
func (imageService Service) QueryImagesDetail(queryParameters *QueryParameters) ([]DetailResponse, error) {
imagesDetailContainer := imagesDetailResponse{}
err := imageService.queryImages(true /*includeDetails*/, &imagesDetailContainer, queryParameters)
if err != nil {
return nil, err
}
return imagesDetailContainer.Images, nil
}
func (imageService Service) queryImages(includeDetails bool, imagesResponseContainer interface{}, queryParameters *QueryParameters) error {
urlPostFix := "/images"
if includeDetails {
urlPostFix = urlPostFix + "/detail"
}
reqURL, err := buildQueryURL(imageService, queryParameters, urlPostFix)
if err != nil {
return err
}
err = misc.GetJSON(reqURL.String(), imageService.TokenID, imageService.Client, &imagesResponseContainer)
if err != nil {
return err
}
return nil
}
func buildQueryURL(imageService Service, queryParameters *QueryParameters, imagePartialURL string) (*url.URL, error) | {
reqURL, err := url.Parse(imageService.URL)
if err != nil {
return nil, err
}
if queryParameters != nil {
values := url.Values{}
if queryParameters.Name != "" {
values.Set("name", queryParameters.Name)
}
if queryParameters.ContainerFormat != "" {
values.Set("container_format", queryParameters.ContainerFormat)
}
if queryParameters.DiskFormat != "" {
values.Set("disk_format", queryParameters.DiskFormat)
}
if queryParameters.Status != "" {
values.Set("status", queryParameters.Status)
} | identifier_body |
|
thread.rs | create()`], but only start executing
/// when either [`Thread::start()`] or [`Process::start()`] are called. Both syscalls
/// take as an argument the entrypoint of the initial routine to execute.
///
/// The thread passed to [`Process::start()`] should be the first thread to start execution
/// on a process.
///
/// A thread terminates execution:
/// - by calling [`Thread::exit()`]
/// - when the parent process terminates
/// - by calling [`Task::kill()`]
/// - after generating an exception for which there is no handler or the handler
/// decides to terminate the thread.
///
/// Returning from the entrypoint routine does not terminate execution. The last
/// action of the entrypoint should be to call [`Thread::exit()`].
///
/// Closing the last handle to a thread does not terminate execution. In order to
/// forcefully kill a thread for which there is no available handle, use
/// `KernelObject::get_child()` to obtain a handle to the thread. This method is strongly
/// discouraged. Killing a thread that is executing might leave the process in a
/// corrupt state.
///
/// Fuchsia native threads are always *detached*. That is, there is no *join()* operation
/// needed to do a clean termination. However, some runtimes above the kernel, such as
/// C11 or POSIX might require threads to be joined.
///
/// ### Signals
/// Threads provide the following signals:
/// - [`THREAD_TERMINATED`]
/// - [`THREAD_SUSPENDED`]
/// - [`THREAD_RUNNING`]
///
/// When a thread is started [`THREAD_RUNNING`] is asserted. When it is suspended
/// [`THREAD_RUNNING`] is deasserted, and [`THREAD_SUSPENDED`] is asserted. When
/// the thread is resumed [`THREAD_SUSPENDED`] is deasserted and
/// [`THREAD_RUNNING`] is asserted. When a thread terminates both
/// [`THREAD_RUNNING`] and [`THREAD_SUSPENDED`] are deasserted and
/// [`THREAD_TERMINATED`] is asserted.
///
/// Note that signals are OR'd into the state maintained by the
/// `KernelObject::wait_signal_async()` family of functions thus
/// you may see any combination of requested signals when they return.
///
/// [`Thread::create()`]: Thread::create
/// [`Thread::exit()`]: Thread::exit
/// [`Process::exit()`]: crate::task::Process::exit
/// [`THREAD_TERMINATED`]: crate::object::Signal::THREAD_TERMINATED
/// [`THREAD_SUSPENDED`]: crate::object::Signal::THREAD_SUSPENDED
/// [`THREAD_RUNNING`]: crate::object::Signal::THREAD_RUNNING
pub struct Thread {
base: KObjectBase,
proc: Arc<Process>,
ext: Box<dyn Any + Send + Sync>,
inner: Mutex<ThreadInner>,
}
impl_kobject!(Thread);
#[no_mangle]
extern "C" fn thread_check_runnable(
thread: &'static Arc<Thread>,
) -> Pin<Box<dyn Future<Output = ()>>> {
Box::pin(check_runnable_async(thread))
}
/// Check whether a thread is runnable
async fn check_runnable_async(thread: &Arc<Thread>) {
thread.check_runnable().await
}
#[export_name = "thread_set_state"]
pub fn thread_set_state(thread: &'static Arc<Thread>, state: &'static mut ThreadState) {
let mut inner = thread.inner.lock();
if let Some(old_state) = inner.state.take() {
state.general = old_state.general;
}
inner.state = Some(state);
}
#[derive(Default)]
struct ThreadInner {
/// HAL thread handle
///
/// Should be `None` before start or after terminated.
hal_thread: Option<kernel_hal::Thread>,
/// Thread state
///
/// Only be `Some` on suspended.
state: Option<&'static mut ThreadState>,
suspend_count: usize,
waker: Option<Waker>,
}
impl Thread {
/// Create a new thread.
pub fn create(proc: &Arc<Process>, name: &str, _options: u32) -> ZxResult<Arc<Self>> {
Self::create_with_ext(proc, name, ())
}
/// Create a new thread with extension info.
pub fn create_with_ext(
proc: &Arc<Process>,
name: &str,
ext: impl Any + Send + Sync,
) -> ZxResult<Arc<Self>> {
// TODO: options
let thread = Arc::new(Thread {
base: {
let base = KObjectBase::new();
base.set_name(name);
base
},
proc: proc.clone(),
ext: Box::new(ext),
inner: Mutex::new(ThreadInner::default()),
});
proc.add_thread(thread.clone());
Ok(thread)
}
/// Get the process.
pub fn proc(&self) -> &Arc<Process> {
&self.proc
}
/// Get the extension.
pub fn ext(&self) -> &Box<dyn Any + Send + Sync> {
&self.ext
}
/// Start execution on the thread.
pub fn | (
self: &Arc<Self>,
entry: usize,
stack: usize,
arg1: usize,
arg2: usize,
) -> ZxResult<()> {
let regs = GeneralRegs::new_fn(entry, stack, arg1, arg2);
self.start_with_regs(regs)
}
/// Start execution with given registers.
pub fn start_with_regs(self: &Arc<Self>, regs: GeneralRegs) -> ZxResult<()> {
let mut inner = self.inner.lock();
if inner.hal_thread.is_some() {
return Err(ZxError::BAD_STATE);
}
let hal_thread =
kernel_hal::Thread::spawn(self.clone(), regs, self.proc.vmar().table_phys());
inner.hal_thread = Some(hal_thread);
self.base.signal_set(Signal::THREAD_RUNNING);
Ok(())
}
/// Terminate the current running thread.
/// TODO: move to CurrentThread
pub fn exit(&self) {
self.proc().remove_thread(self.base.id);
self.base.signal_set(Signal::THREAD_TERMINATED);
}
/// Read one aspect of thread state.
pub fn read_state(&self, kind: ThreadStateKind, buf: &mut [u8]) -> ZxResult<usize> {
let inner = self.inner.lock();
let state = inner.state.as_ref().ok_or(ZxError::BAD_STATE)?;
let len = state.read(kind, buf)?;
Ok(len)
}
#[allow(unsafe_code)]
/// Write one aspect of thread state.
pub fn write_state(&self, kind: ThreadStateKind, buf: &[u8]) -> ZxResult<()> {
let mut inner = self.inner.lock();
//let state = inner.state.as_mut().ok_or(ZxError::BAD_STATE)?;
let state = inner.state.get_or_insert({
unsafe {
static mut STATE: ThreadState = ThreadState {
general: GeneralRegs::zero(),
};
&mut STATE
}
});
state.write(kind, buf)?;
Ok(())
}
pub fn suspend(&self) {
let mut inner = self.inner.lock();
inner.suspend_count += 1;
self.base.signal_set(Signal::THREAD_SUSPENDED);
info!(
"thread {} suspend_count {}",
self.base.get_name(),
inner.suspend_count
);
}
pub fn check_runnable(self: &Arc<Thread>) -> impl Future<Output = ()> {
struct RunnableChecker {
thread: Arc<Thread>,
}
impl Future for RunnableChecker {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let count = self.thread.inner.lock().suspend_count;
if count == 0 {
Poll::Ready(())
} else {
// 把waker存起来,比如self.thread.get_waker
let mut inner = self.thread.inner.lock();
inner.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
RunnableChecker {
thread: self.clone(),
}
}
pub fn resume(&self) {
let mut inner = self.inner.lock();
assert_ne!(inner.suspend_count, 0);
inner.suspend_count -= 1;
if inner.suspend_count == 0 {
if let Some(waker) = inner.waker.take() {
waker.wake();
}
}
}
}
#[cfg(test)]
mod tests {
use super::job::Job;
use super::*;
use std::sync::atomic::*;
use std::vec;
#[test]
fn create() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc", 0).expect("failed to create process");
let _thread = Thread::create(&proc, "thread", 0).expect("failed to create thread");
}
#[test]
fn start() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc", 0).expect("failed to create process");
let thread = Thread::create(&proc, "thread", 0).expect("failed to create thread");
let thread1 = Thread::create(&proc, "thread1", 0).expect("failed to create thread");
// allocate stack for new thread
let mut stack = vec![0u8; 0x10 | start | identifier_name |
thread.rs | create()`], but only start executing
/// when either [`Thread::start()`] or [`Process::start()`] are called. Both syscalls
/// take as an argument the entrypoint of the initial routine to execute.
///
/// The thread passed to [`Process::start()`] should be the first thread to start execution
/// on a process.
///
/// A thread terminates execution:
/// - by calling [`Thread::exit()`]
/// - when the parent process terminates
/// - by calling [`Task::kill()`]
/// - after generating an exception for which there is no handler or the handler
/// decides to terminate the thread.
///
/// Returning from the entrypoint routine does not terminate execution. The last
/// action of the entrypoint should be to call [`Thread::exit()`].
///
/// Closing the last handle to a thread does not terminate execution. In order to
/// forcefully kill a thread for which there is no available handle, use
/// `KernelObject::get_child()` to obtain a handle to the thread. This method is strongly
/// discouraged. Killing a thread that is executing might leave the process in a
/// corrupt state.
///
/// Fuchsia native threads are always *detached*. That is, there is no *join()* operation
/// needed to do a clean termination. However, some runtimes above the kernel, such as
/// C11 or POSIX might require threads to be joined.
///
/// ### Signals
/// Threads provide the following signals:
/// - [`THREAD_TERMINATED`]
/// - [`THREAD_SUSPENDED`]
/// - [`THREAD_RUNNING`]
///
/// When a thread is started [`THREAD_RUNNING`] is asserted. When it is suspended
/// [`THREAD_RUNNING`] is deasserted, and [`THREAD_SUSPENDED`] is asserted. When
/// the thread is resumed [`THREAD_SUSPENDED`] is deasserted and
/// [`THREAD_RUNNING`] is asserted. When a thread terminates both
/// [`THREAD_RUNNING`] and [`THREAD_SUSPENDED`] are deasserted and
/// [`THREAD_TERMINATED`] is asserted.
///
/// Note that signals are OR'd into the state maintained by the
/// `KernelObject::wait_signal_async()` family of functions thus
/// you may see any combination of requested signals when they return.
///
/// [`Thread::create()`]: Thread::create
/// [`Thread::exit()`]: Thread::exit
/// [`Process::exit()`]: crate::task::Process::exit
/// [`THREAD_TERMINATED`]: crate::object::Signal::THREAD_TERMINATED
/// [`THREAD_SUSPENDED`]: crate::object::Signal::THREAD_SUSPENDED
/// [`THREAD_RUNNING`]: crate::object::Signal::THREAD_RUNNING
pub struct Thread {
base: KObjectBase,
proc: Arc<Process>,
ext: Box<dyn Any + Send + Sync>,
inner: Mutex<ThreadInner>,
}
impl_kobject!(Thread);
#[no_mangle]
extern "C" fn thread_check_runnable(
thread: &'static Arc<Thread>,
) -> Pin<Box<dyn Future<Output = ()>>> {
Box::pin(check_runnable_async(thread))
}
/// Check whether a thread is runnable
async fn check_runnable_async(thread: &Arc<Thread>) {
thread.check_runnable().await
}
#[export_name = "thread_set_state"]
pub fn thread_set_state(thread: &'static Arc<Thread>, state: &'static mut ThreadState) {
let mut inner = thread.inner.lock();
if let Some(old_state) = inner.state.take() |
inner.state = Some(state);
}
#[derive(Default)]
struct ThreadInner {
/// HAL thread handle
///
/// Should be `None` before start or after terminated.
hal_thread: Option<kernel_hal::Thread>,
/// Thread state
///
/// Only be `Some` on suspended.
state: Option<&'static mut ThreadState>,
suspend_count: usize,
waker: Option<Waker>,
}
impl Thread {
/// Create a new thread.
pub fn create(proc: &Arc<Process>, name: &str, _options: u32) -> ZxResult<Arc<Self>> {
Self::create_with_ext(proc, name, ())
}
/// Create a new thread with extension info.
pub fn create_with_ext(
proc: &Arc<Process>,
name: &str,
ext: impl Any + Send + Sync,
) -> ZxResult<Arc<Self>> {
// TODO: options
let thread = Arc::new(Thread {
base: {
let base = KObjectBase::new();
base.set_name(name);
base
},
proc: proc.clone(),
ext: Box::new(ext),
inner: Mutex::new(ThreadInner::default()),
});
proc.add_thread(thread.clone());
Ok(thread)
}
/// Get the process.
pub fn proc(&self) -> &Arc<Process> {
&self.proc
}
/// Get the extension.
pub fn ext(&self) -> &Box<dyn Any + Send + Sync> {
&self.ext
}
/// Start execution on the thread.
pub fn start(
self: &Arc<Self>,
entry: usize,
stack: usize,
arg1: usize,
arg2: usize,
) -> ZxResult<()> {
let regs = GeneralRegs::new_fn(entry, stack, arg1, arg2);
self.start_with_regs(regs)
}
/// Start execution with given registers.
pub fn start_with_regs(self: &Arc<Self>, regs: GeneralRegs) -> ZxResult<()> {
let mut inner = self.inner.lock();
if inner.hal_thread.is_some() {
return Err(ZxError::BAD_STATE);
}
let hal_thread =
kernel_hal::Thread::spawn(self.clone(), regs, self.proc.vmar().table_phys());
inner.hal_thread = Some(hal_thread);
self.base.signal_set(Signal::THREAD_RUNNING);
Ok(())
}
/// Terminate the current running thread.
/// TODO: move to CurrentThread
pub fn exit(&self) {
self.proc().remove_thread(self.base.id);
self.base.signal_set(Signal::THREAD_TERMINATED);
}
/// Read one aspect of thread state.
pub fn read_state(&self, kind: ThreadStateKind, buf: &mut [u8]) -> ZxResult<usize> {
let inner = self.inner.lock();
let state = inner.state.as_ref().ok_or(ZxError::BAD_STATE)?;
let len = state.read(kind, buf)?;
Ok(len)
}
#[allow(unsafe_code)]
/// Write one aspect of thread state.
pub fn write_state(&self, kind: ThreadStateKind, buf: &[u8]) -> ZxResult<()> {
let mut inner = self.inner.lock();
//let state = inner.state.as_mut().ok_or(ZxError::BAD_STATE)?;
let state = inner.state.get_or_insert({
unsafe {
static mut STATE: ThreadState = ThreadState {
general: GeneralRegs::zero(),
};
&mut STATE
}
});
state.write(kind, buf)?;
Ok(())
}
pub fn suspend(&self) {
let mut inner = self.inner.lock();
inner.suspend_count += 1;
self.base.signal_set(Signal::THREAD_SUSPENDED);
info!(
"thread {} suspend_count {}",
self.base.get_name(),
inner.suspend_count
);
}
pub fn check_runnable(self: &Arc<Thread>) -> impl Future<Output = ()> {
struct RunnableChecker {
thread: Arc<Thread>,
}
impl Future for RunnableChecker {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let count = self.thread.inner.lock().suspend_count;
if count == 0 {
Poll::Ready(())
} else {
// 把waker存起来,比如self.thread.get_waker
let mut inner = self.thread.inner.lock();
inner.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
RunnableChecker {
thread: self.clone(),
}
}
pub fn resume(&self) {
let mut inner = self.inner.lock();
assert_ne!(inner.suspend_count, 0);
inner.suspend_count -= 1;
if inner.suspend_count == 0 {
if let Some(waker) = inner.waker.take() {
waker.wake();
}
}
}
}
#[cfg(test)]
mod tests {
use super::job::Job;
use super::*;
use std::sync::atomic::*;
use std::vec;
#[test]
fn create() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc", 0).expect("failed to create process");
let _thread = Thread::create(&proc, "thread", 0).expect("failed to create thread");
}
#[test]
fn start() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc", 0).expect("failed to create process");
let thread = Thread::create(&proc, "thread", 0).expect("failed to create thread");
let thread1 = Thread::create(&proc, "thread1", 0).expect("failed to create thread");
// allocate stack for new thread
let mut stack = vec![0u8; 0x1 | {
state.general = old_state.general;
} | conditional_block |
thread.rs | ::create()`], but only start executing
/// when either [`Thread::start()`] or [`Process::start()`] are called. Both syscalls
/// take as an argument the entrypoint of the initial routine to execute.
///
/// The thread passed to [`Process::start()`] should be the first thread to start execution
/// on a process.
///
/// A thread terminates execution:
/// - by calling [`Thread::exit()`]
/// - when the parent process terminates
/// - by calling [`Task::kill()`]
/// - after generating an exception for which there is no handler or the handler
/// decides to terminate the thread.
///
/// Returning from the entrypoint routine does not terminate execution. The last
/// action of the entrypoint should be to call [`Thread::exit()`].
///
/// Closing the last handle to a thread does not terminate execution. In order to
/// forcefully kill a thread for which there is no available handle, use
/// `KernelObject::get_child()` to obtain a handle to the thread. This method is strongly
/// discouraged. Killing a thread that is executing might leave the process in a
/// corrupt state.
///
/// Fuchsia native threads are always *detached*. That is, there is no *join()* operation
/// needed to do a clean termination. However, some runtimes above the kernel, such as
/// C11 or POSIX might require threads to be joined.
///
/// ### Signals
/// Threads provide the following signals:
/// - [`THREAD_TERMINATED`]
/// - [`THREAD_SUSPENDED`]
/// - [`THREAD_RUNNING`]
///
/// When a thread is started [`THREAD_RUNNING`] is asserted. When it is suspended
/// [`THREAD_RUNNING`] is deasserted, and [`THREAD_SUSPENDED`] is asserted. When
/// the thread is resumed [`THREAD_SUSPENDED`] is deasserted and
/// [`THREAD_RUNNING`] is asserted. When a thread terminates both
/// [`THREAD_RUNNING`] and [`THREAD_SUSPENDED`] are deasserted and
/// [`THREAD_TERMINATED`] is asserted.
///
/// Note that signals are OR'd into the state maintained by the
/// `KernelObject::wait_signal_async()` family of functions thus
/// you may see any combination of requested signals when they return.
///
/// [`Thread::create()`]: Thread::create
/// [`Thread::exit()`]: Thread::exit
/// [`Process::exit()`]: crate::task::Process::exit
/// [`THREAD_TERMINATED`]: crate::object::Signal::THREAD_TERMINATED
/// [`THREAD_SUSPENDED`]: crate::object::Signal::THREAD_SUSPENDED
/// [`THREAD_RUNNING`]: crate::object::Signal::THREAD_RUNNING
pub struct Thread {
base: KObjectBase,
proc: Arc<Process>,
ext: Box<dyn Any + Send + Sync>,
inner: Mutex<ThreadInner>,
}
impl_kobject!(Thread);
#[no_mangle]
extern "C" fn thread_check_runnable(
thread: &'static Arc<Thread>,
) -> Pin<Box<dyn Future<Output = ()>>> {
Box::pin(check_runnable_async(thread))
}
/// Check whether a thread is runnable
async fn check_runnable_async(thread: &Arc<Thread>) {
thread.check_runnable().await
}
#[export_name = "thread_set_state"]
pub fn thread_set_state(thread: &'static Arc<Thread>, state: &'static mut ThreadState) {
let mut inner = thread.inner.lock();
if let Some(old_state) = inner.state.take() {
state.general = old_state.general;
}
inner.state = Some(state);
}
#[derive(Default)]
struct ThreadInner {
/// HAL thread handle
///
/// Should be `None` before start or after terminated.
hal_thread: Option<kernel_hal::Thread>,
/// Thread state
///
/// Only be `Some` on suspended.
state: Option<&'static mut ThreadState>,
suspend_count: usize,
waker: Option<Waker>,
}
impl Thread {
/// Create a new thread.
pub fn create(proc: &Arc<Process>, name: &str, _options: u32) -> ZxResult<Arc<Self>> {
Self::create_with_ext(proc, name, ())
}
/// Create a new thread with extension info.
pub fn create_with_ext(
proc: &Arc<Process>,
name: &str,
ext: impl Any + Send + Sync,
) -> ZxResult<Arc<Self>> {
// TODO: options
let thread = Arc::new(Thread {
base: {
let base = KObjectBase::new();
base.set_name(name);
base
},
proc: proc.clone(),
ext: Box::new(ext),
inner: Mutex::new(ThreadInner::default()),
});
proc.add_thread(thread.clone());
Ok(thread)
}
/// Get the process.
pub fn proc(&self) -> &Arc<Process> {
&self.proc
}
/// Get the extension.
pub fn ext(&self) -> &Box<dyn Any + Send + Sync> {
&self.ext
}
/// Start execution on the thread.
pub fn start(
self: &Arc<Self>,
entry: usize,
stack: usize,
arg1: usize,
arg2: usize,
) -> ZxResult<()> {
let regs = GeneralRegs::new_fn(entry, stack, arg1, arg2);
self.start_with_regs(regs)
}
/// Start execution with given registers.
pub fn start_with_regs(self: &Arc<Self>, regs: GeneralRegs) -> ZxResult<()> {
let mut inner = self.inner.lock();
if inner.hal_thread.is_some() {
return Err(ZxError::BAD_STATE);
}
let hal_thread =
kernel_hal::Thread::spawn(self.clone(), regs, self.proc.vmar().table_phys());
inner.hal_thread = Some(hal_thread);
self.base.signal_set(Signal::THREAD_RUNNING);
Ok(())
}
/// Terminate the current running thread.
/// TODO: move to CurrentThread
pub fn exit(&self) {
self.proc().remove_thread(self.base.id);
self.base.signal_set(Signal::THREAD_TERMINATED);
}
/// Read one aspect of thread state.
pub fn read_state(&self, kind: ThreadStateKind, buf: &mut [u8]) -> ZxResult<usize> {
let inner = self.inner.lock();
let state = inner.state.as_ref().ok_or(ZxError::BAD_STATE)?;
let len = state.read(kind, buf)?;
Ok(len)
}
#[allow(unsafe_code)]
/// Write one aspect of thread state.
pub fn write_state(&self, kind: ThreadStateKind, buf: &[u8]) -> ZxResult<()> {
let mut inner = self.inner.lock();
//let state = inner.state.as_mut().ok_or(ZxError::BAD_STATE)?;
let state = inner.state.get_or_insert({
unsafe {
static mut STATE: ThreadState = ThreadState {
general: GeneralRegs::zero(),
};
&mut STATE
}
});
state.write(kind, buf)?;
Ok(())
}
pub fn suspend(&self) {
let mut inner = self.inner.lock();
inner.suspend_count += 1;
self.base.signal_set(Signal::THREAD_SUSPENDED);
info!(
"thread {} suspend_count {}",
self.base.get_name(),
inner.suspend_count
);
}
pub fn check_runnable(self: &Arc<Thread>) -> impl Future<Output = ()> {
struct RunnableChecker {
thread: Arc<Thread>,
}
impl Future for RunnableChecker {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let count = self.thread.inner.lock().suspend_count;
if count == 0 {
Poll::Ready(())
} else {
// 把waker存起来,比如self.thread.get_waker
let mut inner = self.thread.inner.lock();
inner.waker = Some(cx.waker().clone());
Poll::Pending | thread: self.clone(),
}
}
pub fn resume(&self) {
let mut inner = self.inner.lock();
assert_ne!(inner.suspend_count, 0);
inner.suspend_count -= 1;
if inner.suspend_count == 0 {
if let Some(waker) = inner.waker.take() {
waker.wake();
}
}
}
}
#[cfg(test)]
mod tests {
use super::job::Job;
use super::*;
use std::sync::atomic::*;
use std::vec;
#[test]
fn create() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc", 0).expect("failed to create process");
let _thread = Thread::create(&proc, "thread", 0).expect("failed to create thread");
}
#[test]
fn start() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc", 0).expect("failed to create process");
let thread = Thread::create(&proc, "thread", 0).expect("failed to create thread");
let thread1 = Thread::create(&proc, "thread1", 0).expect("failed to create thread");
// allocate stack for new thread
let mut stack = vec![0u8; 0x100 | }
}
}
RunnableChecker { | random_line_split |
thread.rs | does not terminate execution. The last
/// action of the entrypoint should be to call [`Thread::exit()`].
///
/// Closing the last handle to a thread does not terminate execution. In order to
/// forcefully kill a thread for which there is no available handle, use
/// `KernelObject::get_child()` to obtain a handle to the thread. This method is strongly
/// discouraged. Killing a thread that is executing might leave the process in a
/// corrupt state.
///
/// Fuchsia native threads are always *detached*. That is, there is no *join()* operation
/// needed to do a clean termination. However, some runtimes above the kernel, such as
/// C11 or POSIX might require threads to be joined.
///
/// ### Signals
/// Threads provide the following signals:
/// - [`THREAD_TERMINATED`]
/// - [`THREAD_SUSPENDED`]
/// - [`THREAD_RUNNING`]
///
/// When a thread is started [`THREAD_RUNNING`] is asserted. When it is suspended
/// [`THREAD_RUNNING`] is deasserted, and [`THREAD_SUSPENDED`] is asserted. When
/// the thread is resumed [`THREAD_SUSPENDED`] is deasserted and
/// [`THREAD_RUNNING`] is asserted. When a thread terminates both
/// [`THREAD_RUNNING`] and [`THREAD_SUSPENDED`] are deasserted and
/// [`THREAD_TERMINATED`] is asserted.
///
/// Note that signals are OR'd into the state maintained by the
/// `KernelObject::wait_signal_async()` family of functions thus
/// you may see any combination of requested signals when they return.
///
/// [`Thread::create()`]: Thread::create
/// [`Thread::exit()`]: Thread::exit
/// [`Process::exit()`]: crate::task::Process::exit
/// [`THREAD_TERMINATED`]: crate::object::Signal::THREAD_TERMINATED
/// [`THREAD_SUSPENDED`]: crate::object::Signal::THREAD_SUSPENDED
/// [`THREAD_RUNNING`]: crate::object::Signal::THREAD_RUNNING
pub struct Thread {
base: KObjectBase,
proc: Arc<Process>,
ext: Box<dyn Any + Send + Sync>,
inner: Mutex<ThreadInner>,
}
impl_kobject!(Thread);
#[no_mangle]
extern "C" fn thread_check_runnable(
thread: &'static Arc<Thread>,
) -> Pin<Box<dyn Future<Output = ()>>> {
Box::pin(check_runnable_async(thread))
}
/// Check whether a thread is runnable
async fn check_runnable_async(thread: &Arc<Thread>) {
thread.check_runnable().await
}
#[export_name = "thread_set_state"]
pub fn thread_set_state(thread: &'static Arc<Thread>, state: &'static mut ThreadState) {
let mut inner = thread.inner.lock();
if let Some(old_state) = inner.state.take() {
state.general = old_state.general;
}
inner.state = Some(state);
}
#[derive(Default)]
struct ThreadInner {
/// HAL thread handle
///
/// Should be `None` before start or after terminated.
hal_thread: Option<kernel_hal::Thread>,
/// Thread state
///
/// Only be `Some` on suspended.
state: Option<&'static mut ThreadState>,
suspend_count: usize,
waker: Option<Waker>,
}
impl Thread {
/// Create a new thread.
pub fn create(proc: &Arc<Process>, name: &str, _options: u32) -> ZxResult<Arc<Self>> {
Self::create_with_ext(proc, name, ())
}
/// Create a new thread with extension info.
pub fn create_with_ext(
proc: &Arc<Process>,
name: &str,
ext: impl Any + Send + Sync,
) -> ZxResult<Arc<Self>> {
// TODO: options
let thread = Arc::new(Thread {
base: {
let base = KObjectBase::new();
base.set_name(name);
base
},
proc: proc.clone(),
ext: Box::new(ext),
inner: Mutex::new(ThreadInner::default()),
});
proc.add_thread(thread.clone());
Ok(thread)
}
/// Get the process.
pub fn proc(&self) -> &Arc<Process> {
&self.proc
}
/// Get the extension.
pub fn ext(&self) -> &Box<dyn Any + Send + Sync> {
&self.ext
}
/// Start execution on the thread.
pub fn start(
self: &Arc<Self>,
entry: usize,
stack: usize,
arg1: usize,
arg2: usize,
) -> ZxResult<()> {
let regs = GeneralRegs::new_fn(entry, stack, arg1, arg2);
self.start_with_regs(regs)
}
/// Start execution with given registers.
pub fn start_with_regs(self: &Arc<Self>, regs: GeneralRegs) -> ZxResult<()> {
let mut inner = self.inner.lock();
if inner.hal_thread.is_some() {
return Err(ZxError::BAD_STATE);
}
let hal_thread =
kernel_hal::Thread::spawn(self.clone(), regs, self.proc.vmar().table_phys());
inner.hal_thread = Some(hal_thread);
self.base.signal_set(Signal::THREAD_RUNNING);
Ok(())
}
/// Terminate the current running thread.
/// TODO: move to CurrentThread
pub fn exit(&self) {
self.proc().remove_thread(self.base.id);
self.base.signal_set(Signal::THREAD_TERMINATED);
}
/// Read one aspect of thread state.
pub fn read_state(&self, kind: ThreadStateKind, buf: &mut [u8]) -> ZxResult<usize> {
let inner = self.inner.lock();
let state = inner.state.as_ref().ok_or(ZxError::BAD_STATE)?;
let len = state.read(kind, buf)?;
Ok(len)
}
#[allow(unsafe_code)]
/// Write one aspect of thread state.
pub fn write_state(&self, kind: ThreadStateKind, buf: &[u8]) -> ZxResult<()> {
let mut inner = self.inner.lock();
//let state = inner.state.as_mut().ok_or(ZxError::BAD_STATE)?;
let state = inner.state.get_or_insert({
unsafe {
static mut STATE: ThreadState = ThreadState {
general: GeneralRegs::zero(),
};
&mut STATE
}
});
state.write(kind, buf)?;
Ok(())
}
pub fn suspend(&self) {
let mut inner = self.inner.lock();
inner.suspend_count += 1;
self.base.signal_set(Signal::THREAD_SUSPENDED);
info!(
"thread {} suspend_count {}",
self.base.get_name(),
inner.suspend_count
);
}
pub fn check_runnable(self: &Arc<Thread>) -> impl Future<Output = ()> {
struct RunnableChecker {
thread: Arc<Thread>,
}
impl Future for RunnableChecker {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let count = self.thread.inner.lock().suspend_count;
if count == 0 {
Poll::Ready(())
} else {
// 把waker存起来,比如self.thread.get_waker
let mut inner = self.thread.inner.lock();
inner.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
RunnableChecker {
thread: self.clone(),
}
}
pub fn resume(&self) {
let mut inner = self.inner.lock();
assert_ne!(inner.suspend_count, 0);
inner.suspend_count -= 1;
if inner.suspend_count == 0 {
if let Some(waker) = inner.waker.take() {
waker.wake();
}
}
}
}
#[cfg(test)]
mod tests {
use super::job::Job;
use super::*;
use std::sync::atomic::*;
use std::vec;
#[test]
fn create() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc", 0).expect("failed to create process");
let _thread = Thread::create(&proc, "thread", 0).expect("failed to create thread");
}
#[test]
fn start() {
let | root_job = Job::root();
let proc = Process::create(&root_job, "proc", 0).expect("failed to create process");
let thread = Thread::create(&proc, "thread", 0).expect("failed to create thread");
let thread1 = Thread::create(&proc, "thread1", 0).expect("failed to create thread");
// allocate stack for new thread
let mut stack = vec![0u8; 0x1000];
let stack_top = stack.as_mut_ptr() as usize + 0x1000;
// global variable for validation
static ARG1: AtomicUsize = AtomicUsize::new(0);
static ARG2: AtomicUsize = AtomicUsize::new(0);
// function for new thread
#[allow(unsafe_code)]
unsafe extern "C" fn entry(arg1: usize, arg2: usize) -> ! {
ARG1.store(arg1, Ordering::SeqCst);
ARG2.store(arg2, Ordering::SeqCst);
kernel_hal_unix::syscall_entry();
unreachable!(); | identifier_body |
|
lib.rs | https://github.com/Geal/nom) parser combinator
//! framework.
//!
//! It is written in pure Rust, fast, and makes extensive use of zero-copy. A lot of care is taken
//! to ensure security and safety of this crate, including design (recursion limit, defensive
//! programming), tests, and fuzzing. It also aims to be panic-free.
//!
//! Historically, this parser was intended for DER only, and BER support was added later. This may
//! still reflect on some naming schemes, but has no other consequence: the `BerObject` and
//! `DerObject` used in this crate are type aliases, so all functions are compatible.
//!
//! DER parsing functions have additional constraints verification, however.
//!
//! Serialization has also been added (see [Serialization](#serialization) )
//!
//! The code is available on [Github](https://github.com/rusticata/der-parser)
//! and is part of the [Rusticata](https://github.com/rusticata) project.
//!
//! # BER/DER parsers
//!
//! BER stands for Basic Encoding Rules, and is defined in [X.690]. It defines a set of rules to
//! encode and decode ASN.1 objects in binary.
//!
//! [X.690] also defines Distinguished Encoding Rules (DER), which is BER with added rules to
//! ensure canonical and unequivocal binary representation of objects.
//!
//! The choice of which one to use is usually guided by the speficication of the data format based
//! on BER or DER: for example, X.509 uses DER as encoding representation.
//!
//! See the related modules for object definitions, functions, and example:
//! - [`ber`]: Basic Encoding Rules
//! - [`der`]: Distinguished Encoding Rules
//!
//! ## Examples
//!
//! Parse two BER integers (see [BER/DER Integers](#berder-integers)):
//!
//! ```rust
//! use der_parser::ber::parse_ber_integer;
//!
//! let bytes = [ 0x02, 0x03, 0x01, 0x00, 0x01,
//! 0x02, 0x03, 0x01, 0x00, 0x00,
//! ];
//!
//! let (rem, obj1) = parse_ber_integer(&bytes).expect("parsing failed");
//! let (rem, obj2) = parse_ber_integer(&rem).expect("parsing failed");
//! ```
//!
//! Parse a DER sequence of integers:
//!
//! ```rust
//! use der_parser::der::{parse_der_integer, parse_der_sequence_of};
//!
//! let bytes = [ 0x30, 0x0a,
//! 0x02, 0x03, 0x01, 0x00, 0x01,
//! 0x02, 0x03, 0x01, 0x00, 0x00,
//! ];
//!
//! let (rem, seq) = parse_der_sequence_of(parse_der_integer)(&bytes)
//! .expect("parsing failed");
//! ```
//!
//! Note: all parsing functions return the remaining (unparsed) bytes and the parsed object, or an
//! error.
//!
//! # DER parser design
//!
//! Parsing functions are inspired from `nom`, and follow the same interface. The most common
//! return type is [`BerResult`](error/type.BerResult.html), that stores the remaining bytes and
//! parsed [`BerObject`](ber/struct.BerObject.html), or an error. Reading the nom documentation may
//! help understanding how to write parsers and use the output.
//!
//! There are two different approaches for parsing DER objects: reading the objects recursively as
//! long as the tags are known, or specifying a description of the expected objects (generally from
//! the [ASN.1][X.680] description).
//!
//! The first parsing method can be done using the [`parse_ber`](ber/fn.parse_ber.html) and
//! [`parse_der`](der/fn.parse_der.html) methods.
//! It is useful when decoding an arbitrary DER object.
//! However, it cannot fully parse all objects, especially those containing IMPLICIT, OPTIONAL, or
//! DEFINED BY items.
//!
//! ```rust
//! use der_parser::parse_der;
//!
//! let bytes = [ 0x30, 0x0a,
//! 0x02, 0x03, 0x01, 0x00, 0x01,
//! 0x02, 0x03, 0x01, 0x00, 0x00,
//! ];
//!
//! let parsed = parse_der(&bytes);
//! ```
//!
//! The second (and preferred) parsing method is to specify the expected objects recursively. The
//! following functions can be used:
//! - [`parse_ber_sequence_defined`](ber/fn.parse_ber_sequence_defined.html) and similar functions
//! for sequences and sets variants
//! - [`parse_ber_tagged_explicit`](ber/fn.parse_ber_tagged_explicit.html) for tagged explicit
//! - [`parse_ber_tagged_implicit`](ber/fn.parse_ber_tagged_implicit.html) for tagged implicit
//! - [`parse_ber_container`](ber/fn.parse_ber_container.html) for generic parsing, etc.
//! - DER objects use the `_der_` variants
//!
//! For example, to read a BER sequence containing two integers:
//!
//! ```rust
//! use der_parser::ber::*;
//! use der_parser::error::BerResult;
//!
//! fn localparse_seq(i:&[u8]) -> BerResult {
//! parse_ber_sequence_defined(|data| {
//! let (rem, a) = parse_ber_integer(data)?;
//! let (rem, b) = parse_ber_integer(rem)?;
//! Ok((rem, vec![a, b]))
//! })(i)
//! }
//!
//! let bytes = [ 0x30, 0x0a,
//! 0x02, 0x03, 0x01, 0x00, 0x01,
//! 0x02, 0x03, 0x01, 0x00, 0x00,
//! ];
//!
//! let (_, parsed) = localparse_seq(&bytes).expect("parsing failed");
//!
//! assert_eq!(parsed[0].as_u64(), Ok(65537));
//! assert_eq!(parsed[1].as_u64(), Ok(65536));
//! ```
//!
//! All functions return a [`BerResult`](error/type.BerResult.html) object: the parsed
//! [`BerObject`](ber/struct.BerObject.html), an `Incomplete` value, or an error.
//!
//! Note that this type is also a `Result`, so usual functions (`map`, `unwrap` etc.) are available.
//!
//! # Notes
//!
//! ## BER/DER Integers
//!
//! DER integers can be of any size, so it is not possible to store them as simple integers (they
//! are stored as raw bytes).
//!
//! Note that, by default, BER/DER integers are signed. Functions are provided to request reading
//! unsigned values, but they will fail if the integer value is negative.
//!
//! To get the integer value for all possible integer sign and size, use
//! [`BerObject::as_bigint`](ber/struct.BerObject.html#method.as_bigint)) (requires the `bigint` feature).
//!
//! To get a simple value expected to be in a known range, use methods like
//! [`BerObject::as_i32`](ber/struct.BerObject.html#method.as_i32)) and
//! [`BerObject::as_i64`](ber/struct.BerObject.html#method.as_i64) (or the unsigned versions
//! [`BerObject::as_u32`](ber/struct.BerObject.html#method.as_u32) and
//! [`BerObject::as_u64`](ber/struct.BerObject.html#method.as_u64)
//!),
//! which will return the value, or an error if the integer is too large (or is negative).
//!
//! ```rust
//! use der_parser::ber::*;
//!
//! let data = &[0x02, 0x03, 0x01, 0x00, 0x01];
//!
//! let (_, object) = parse_ber_integer(data).expect("parsing failed");
//! assert_eq!(object.as_u64(), Ok(65537));
//!
//! #[cfg(feature = "bigint")]
//! assert_eq!(object.as_bigint(), Ok(65537.into()))
//! ```
//!
//! Access to the raw value is possible using the `as_slice` method.
//!
//! ## Parsers, combinators, macros
//!
//! Some parsing tools (for ex for tagged objects) are available in different forms:
//! - parsers: (regular) functions that takes input and create an object
//! - combinators: functions that takes parsers (or combinators) as input, and return a function
//! (usually, the parser). They are used (combined) as building blocks to create more complex
//! parsers. | //! - macros: these are generally previous (historic) versions of parsers, kept for compatibility.
//! They can sometime reduce the amount of code to write, but are hard to debug.
//! Parsers should be preferred when possible.
//!
//! ## Misc Notes | random_line_split |
|
/// Determines the `ResponseCode` from the code byte returned by an EZO chip
/// (the original header was lost here; the name `response_code` is assumed).
pub fn response_code(code_byte: u8) -> ResponseCode {
use self::ResponseCode::*;
match code_byte {
x if x == NoDataExpected as u8 => NoDataExpected,
x if x == Pending as u8 => Pending,
x if x == DeviceError as u8 => DeviceError,
x if x == Success as u8 => Success,
_ => UnknownError,
}
}
/// Allowable baudrates used when changing the chip to UART mode.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BpsRate {
Bps300 = 300,
Bps1200 = 1200,
Bps2400 = 2400,
Bps9600 = 9600,
Bps19200 = 19200,
Bps38400 = 38400,
Bps57600 = 57600,
Bps115200 = 115200,
}
impl BpsRate {
/// Returns the `BpsRate` from a `u32` value.
pub fn parse_u32(bps_rate: u32) -> Result<BpsRate, EzoError> {
let bps = match bps_rate {
x if x == BpsRate::Bps300 as u32 => BpsRate::Bps300,
x if x == BpsRate::Bps1200 as u32 => BpsRate::Bps1200,
x if x == BpsRate::Bps2400 as u32 => BpsRate::Bps2400,
x if x == BpsRate::Bps9600 as u32 => BpsRate::Bps9600,
x if x == BpsRate::Bps19200 as u32 => BpsRate::Bps19200,
x if x == BpsRate::Bps38400 as u32 => BpsRate::Bps38400,
x if x == BpsRate::Bps57600 as u32 => BpsRate::Bps57600,
x if x == BpsRate::Bps115200 as u32 => BpsRate::Bps115200,
_ => return Err(ErrorKind::BpsRateParse)?,
};
Ok(bps)
}
/// Returns the BpsRate as a `u32` value.
pub fn parse(&self) -> u32 {
match *self {
BpsRate::Bps300 => BpsRate::Bps300 as u32,
BpsRate::Bps1200 => BpsRate::Bps1200 as u32,
BpsRate::Bps2400 => BpsRate::Bps2400 as u32,
BpsRate::Bps9600 => BpsRate::Bps9600 as u32,
BpsRate::Bps19200 => BpsRate::Bps19200 as u32,
BpsRate::Bps38400 => BpsRate::Bps38400 as u32,
BpsRate::Bps57600 => BpsRate::Bps57600 as u32,
BpsRate::Bps115200 => BpsRate::Bps115200 as u32,
}
}
}
/// Known response codes from EZO chip interactions.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ResponseCode {
NoDataExpected = 0xFF,
Pending = 0xFE,
DeviceError = 0x02,
Success = 0x01,
UnknownError = 0x00, // This code is NOT implemented by the EZO chips
}
/// Writes the ASCII command to the EZO chip, with one retry.
pub fn write_to_ezo(dev: &mut LinuxI2CDevice, cmd_str: &str) -> Result<(), EzoError> {
let cmd = CString::new(cmd_str).context(ErrorKind::UnreadableCommand)?;
if let Err(_) = dev.write(cmd.as_bytes_with_nul()) {
thread::sleep(Duration::from_millis(100));
dev.write(cmd.as_bytes_with_nul())
.context(ErrorKind::UnwritableCommand)?;
};
Ok(())
}
/// Turns off the high bit in each of the bytes of `v`. Raspberry Pi
/// for some reason outputs i2c buffers with some of the high bits
/// turned on.
fn turn_off_high_bits(v: &mut [u8]) {
for b in v.iter_mut() {
*b = *b & 0x7f;
}
}
/// Converts a slice of bytes, as they come raw from the i2c buffer,
/// into an owned String. Due to a hardware glitch in the Broadcom
/// I2C module, we need to strip off the high bit of each byte in the
/// response strings.
///
/// This function ensures that the response is a nul-terminated string
/// and that it is valid UTF-8 (a superset of ASCII).
///
/// After reading your buffer from the i2c device, check the first
/// byte for the response code. Then, pass a slice with the rest of
/// the buffer (without that first byte) to this function to get a
/// UTF-8 string.
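///
/// For example (a sketch; the 4-byte buffer stands in for a raw i2c read,
/// with response code 0x01 = Success in the first byte):
///
/// ```rust,ignore
/// let buf: [u8; 4] = [0x01, b'h', b'i', 0x00];
/// assert_eq!(response_code(buf[0]), ResponseCode::Success);
/// assert_eq!(string_from_response_data(&buf[1..]).unwrap(), "hi");
/// ```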
pub fn string_from_response_data(response: &[u8]) -> Result<String, EzoError> {
let mut buf = response.to_owned();
turn_off_high_bits(&mut buf);
let terminated = CStr::from_bytes_with_nul(&buf).context(ErrorKind::MalformedResponse)?;
let s = terminated
.to_str()
.context(ErrorKind::MalformedResponse)?
.to_owned();
Ok(s)
}
#[cfg(test)]
mod tests {
use super::*;
use super::response::ResponseStatus;
#[test]
fn converts_baud_rates_to_numbers() {
assert_eq!(BpsRate::Bps300.parse(), 300);
assert_eq!(BpsRate::Bps1200.parse(), 1200);
assert_eq!(BpsRate::Bps2400.parse(), 2400);
assert_eq!(BpsRate::Bps9600.parse(), 9600);
assert_eq!(BpsRate::Bps19200.parse(), 19200);
assert_eq!(BpsRate::Bps38400.parse(), 38400);
assert_eq!(BpsRate::Bps57600.parse(), 57600);
assert_eq!(BpsRate::Bps115200.parse(), 115200);
}
#[test]
fn converts_numbers_to_baud_rates() {
assert_eq!(BpsRate::Bps300, BpsRate::parse_u32(300).unwrap());
assert_eq!(BpsRate::Bps1200, BpsRate::parse_u32(1200).unwrap());
assert_eq!(BpsRate::Bps2400, BpsRate::parse_u32(2400).unwrap());
assert_eq!(BpsRate::Bps9600, BpsRate::parse_u32(9600).unwrap());
assert_eq!(BpsRate::Bps19200, BpsRate::parse_u32(19200).unwrap());
assert_eq!(BpsRate::Bps38400, BpsRate::parse_u32(38400).unwrap());
assert_eq!(BpsRate::Bps57600, BpsRate::parse_u32(57600).unwrap());
assert_eq!(BpsRate::Bps115200, BpsRate::parse_u32(115200).unwrap());
}
#[test]
fn turns_off_high_bits() {
let data: [u8; 11] = [63, 73, 44, 112, 72, 44, 49, 46, 57, 56, 0];
let mut flipped_data: [u8; 11] = [63, 73, 172, 112, 200, 172, 49, 46, 57, 56, 0];
turn_off_high_bits(&mut flipped_data);
assert_eq!(data, flipped_data);
}
#[test]
fn converts_valid_response_to_string() {
// empty nul-terminated string
assert_eq!(string_from_response_data(&b"\0"[..]).unwrap(), "");
// non-empty n | response_code | identifier_name |
|
lib.rs | ,
Bps115200 = 115200,
}
impl BpsRate {
/// Returns the `BpsRate` from a `u32` value.
pub fn parse_u32(bps_rate: u32) -> Result<BpsRate, EzoError> {
let bps = match bps_rate {
x if x == BpsRate::Bps300 as u32 => BpsRate::Bps300,
x if x == BpsRate::Bps1200 as u32 => BpsRate::Bps1200,
x if x == BpsRate::Bps2400 as u32 => BpsRate::Bps2400,
x if x == BpsRate::Bps9600 as u32 => BpsRate::Bps9600,
x if x == BpsRate::Bps19200 as u32 => BpsRate::Bps19200,
x if x == BpsRate::Bps38400 as u32 => BpsRate::Bps38400,
x if x == BpsRate::Bps57600 as u32 => BpsRate::Bps57600,
x if x == BpsRate::Bps115200 as u32 => BpsRate::Bps115200,
_ => return Err(ErrorKind::BpsRateParse)?,
};
Ok(bps)
}
/// Returns the BpsRate as a `u32` value.
pub fn parse(&self) -> u32 {
match *self {
BpsRate::Bps300 => BpsRate::Bps300 as u32,
BpsRate::Bps1200 => BpsRate::Bps1200 as u32,
BpsRate::Bps2400 => BpsRate::Bps2400 as u32,
BpsRate::Bps9600 => BpsRate::Bps9600 as u32,
BpsRate::Bps19200 => BpsRate::Bps19200 as u32,
BpsRate::Bps38400 => BpsRate::Bps38400 as u32,
BpsRate::Bps57600 => BpsRate::Bps57600 as u32,
BpsRate::Bps115200 => BpsRate::Bps115200 as u32,
}
}
}
/// Known response codes from EZO chip interactions.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ResponseCode {
NoDataExpected = 0xFF,
Pending = 0xFE,
DeviceError = 0x02,
Success = 0x01,
UnknownError = 0x00, // This code is NOT implemented by the EZO chips
}
/// Writes the ASCII command to the EZO chip, with one retry.
pub fn write_to_ezo(dev: &mut LinuxI2CDevice, cmd_str: &str) -> Result<(), EzoError> {
let cmd = CString::new(cmd_str).context(ErrorKind::UnreadableCommand)?;
if let Err(_) = dev.write(cmd.as_bytes_with_nul()) {
thread::sleep(Duration::from_millis(100));
dev.write(cmd.as_bytes_with_nul())
.context(ErrorKind::UnwritableCommand)?;
};
Ok(())
}
/// Turns off the high bit in each of the bytes of `v`. Raspberry Pi
/// for some reason outputs i2c buffers with some of the high bits
/// turned on.
fn turn_off_high_bits(v: &mut [u8]) {
for b in v.iter_mut() {
*b = *b & 0x7f;
}
}
/// Converts a slice of bytes, as they come raw from the i2c buffer,
/// into an owned String. Due to a hardware glitch in the Broadcom
/// I2C module, we need to strip off the high bit of each byte in the
/// response strings.
///
/// This function ensures that the response is a nul-terminated string
/// and that it is valid UTF-8 (a superset of ASCII).
///
/// After reading your buffer from the i2c device, check the first
/// byte for the response code. Then, pass a slice with the rest of
/// the buffer (without that first byte) to this function to get a
/// UTF-8 string.
pub fn string_from_response_data(response: &[u8]) -> Result<String, EzoError> {
let mut buf = response.to_owned();
turn_off_high_bits(&mut buf);
let terminated = CStr::from_bytes_with_nul(&buf).context(ErrorKind::MalformedResponse)?;
let s = terminated
.to_str()
.context(ErrorKind::MalformedResponse)?
.to_owned();
Ok(s)
}
#[cfg(test)]
mod tests {
use super::*;
use super::response::ResponseStatus;
#[test]
fn converts_baud_rates_to_numbers() {
assert_eq!(BpsRate::Bps300.parse(), 300);
assert_eq!(BpsRate::Bps1200.parse(), 1200);
assert_eq!(BpsRate::Bps2400.parse(), 2400);
assert_eq!(BpsRate::Bps9600.parse(), 9600);
assert_eq!(BpsRate::Bps19200.parse(), 19200);
assert_eq!(BpsRate::Bps38400.parse(), 38400);
assert_eq!(BpsRate::Bps57600.parse(), 57600);
assert_eq!(BpsRate::Bps115200.parse(), 115200);
}
#[test]
fn converts_numbers_to_baud_rates() {
assert_eq!(BpsRate::Bps300, BpsRate::parse_u32(300).unwrap());
assert_eq!(BpsRate::Bps1200, BpsRate::parse_u32(1200).unwrap());
assert_eq!(BpsRate::Bps2400, BpsRate::parse_u32(2400).unwrap());
assert_eq!(BpsRate::Bps9600, BpsRate::parse_u32(9600).unwrap());
assert_eq!(BpsRate::Bps19200, BpsRate::parse_u32(19200).unwrap());
assert_eq!(BpsRate::Bps38400, BpsRate::parse_u32(38400).unwrap());
assert_eq!(BpsRate::Bps57600, BpsRate::parse_u32(57600).unwrap());
assert_eq!(BpsRate::Bps115200, BpsRate::parse_u32(115200).unwrap());
}
#[test]
fn turns_off_high_bits() {
let data: [u8; 11] = [63, 73, 44, 112, 72, 44, 49, 46, 57, 56, 0];
let mut flipped_data: [u8; 11] = [63, 73, 172, 112, 200, 172, 49, 46, 57, 56, 0];
turn_off_high_bits(&mut flipped_data);
assert_eq!(data, flipped_data);
}
#[test]
fn converts_valid_response_to_string() {
// empty nul-terminated string
assert_eq!(string_from_response_data(&b"\0"[..]).unwrap(), "");
// non-empty nul-terminated string
assert_eq!(string_from_response_data(&b"hello\0"[..]).unwrap(), "hello");
// high bit is on in the last character
assert_eq!(
string_from_response_data(&b"hell\xef\0"[..]).unwrap(),
"hello"
);
}
fn assert_converts_to_malformed_response(data: &[u8]) {
let result = string_from_response_data(&data);
match result {
Err(e) => assert_eq!(e.kind(), ErrorKind::MalformedResponse),
_ => unreachable!(),
}
}
#[test]
fn converts_invalid_response_to_error() {
// No nul terminator in either of these
assert_converts_to_malformed_response(&b""[..]);
assert_converts_to_malformed_response(&b"\xff"[..]);
}
#[test]
fn process_no_data_response_code() | {
assert_eq!(response_code(255), ResponseCode::NoDataExpected);
} | identifier_body |
|
lib.rs | => Pending,
x if x == DeviceError as u8 => DeviceError,
x if x == Success as u8 => Success,
_ => UnknownError,
}
}
/// Allowable baudrates used when changing the chip to UART mode.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum BpsRate {
Bps300 = 300,
Bps1200 = 1200,
Bps2400 = 2400,
Bps9600 = 9600,
Bps19200 = 19200,
Bps38400 = 38400,
Bps57600 = 57600,
Bps115200 = 115200,
}
impl BpsRate {
/// Returns the `BpsRate` from a `u32` value.
pub fn parse_u32(bps_rate: u32) -> Result<BpsRate, EzoError> {
let bps = match bps_rate {
x if x == BpsRate::Bps300 as u32 => BpsRate::Bps300,
x if x == BpsRate::Bps1200 as u32 => BpsRate::Bps1200,
x if x == BpsRate::Bps2400 as u32 => BpsRate::Bps2400,
x if x == BpsRate::Bps9600 as u32 => BpsRate::Bps9600,
x if x == BpsRate::Bps19200 as u32 => BpsRate::Bps19200,
x if x == BpsRate::Bps38400 as u32 => BpsRate::Bps38400,
x if x == BpsRate::Bps57600 as u32 => BpsRate::Bps57600,
x if x == BpsRate::Bps115200 as u32 => BpsRate::Bps115200,
_ => return Err(ErrorKind::BpsRateParse)?,
};
Ok(bps)
}
/// Returns the BpsRate as a `u32` value.
pub fn parse(&self) -> u32 {
match *self {
BpsRate::Bps300 => BpsRate::Bps300 as u32,
BpsRate::Bps1200 => BpsRate::Bps1200 as u32,
BpsRate::Bps2400 => BpsRate::Bps2400 as u32,
BpsRate::Bps9600 => BpsRate::Bps9600 as u32,
BpsRate::Bps19200 => BpsRate::Bps19200 as u32,
BpsRate::Bps38400 => BpsRate::Bps38400 as u32,
BpsRate::Bps57600 => BpsRate::Bps57600 as u32,
BpsRate::Bps115200 => BpsRate::Bps115200 as u32,
}
}
}
/// Known response codes from EZO chip interactions.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ResponseCode {
NoDataExpected = 0xFF,
Pending = 0xFE,
DeviceError = 0x02,
Success = 0x01,
UnknownError = 0x00, // This code is NOT implemented by the EZO chips
}
/// Writes the ASCII command to the EZO chip, with one retry.
pub fn write_to_ezo(dev: &mut LinuxI2CDevice, cmd_str: &str) -> Result<(), EzoError> {
let cmd = CString::new(cmd_str).context(ErrorKind::UnreadableCommand)?;
if let Err(_) = dev.write(cmd.as_bytes_with_nul()) {
thread::sleep(Duration::from_millis(100));
dev.write(cmd.as_bytes_with_nul())
.context(ErrorKind::UnwritableCommand)?;
};
Ok(())
}
/// Turns off the high bit in each of the bytes of `v`. Raspberry Pi
/// for some reason outputs i2c buffers with some of the high bits
/// turned on.
fn turn_off_high_bits(v: &mut [u8]) {
for b in v.iter_mut() {
*b = *b & 0x7f;
}
}
/// Converts a slice of bytes, as they come raw from the i2c buffer,
/// into an owned String. Due to a hardware glitch in the Broadcom
/// I2C module, we need to strip off the high bit of each byte in the
/// response strings.
///
/// This function ensures that the response is a nul-terminated string
/// and that it is valid UTF-8 (a superset of ASCII).
///
/// After reading your buffer from the i2c device, check the first
/// byte for the response code. Then, pass a slice with the rest of
/// the buffer (without that first byte) to this function to get a
/// UTF-8 string.
pub fn string_from_response_data(response: &[u8]) -> Result<String, EzoError> {
let mut buf = response.to_owned();
turn_off_high_bits(&mut buf);
let terminated = CStr::from_bytes_with_nul(&buf).context(ErrorKind::MalformedResponse)?;
let s = terminated
.to_str()
.context(ErrorKind::MalformedResponse)?
.to_owned();
Ok(s)
}
#[cfg(test)]
mod tests {
use super::*;
use super::response::ResponseStatus;
#[test]
fn converts_baud_rates_to_numbers() {
assert_eq!(BpsRate::Bps300.parse(), 300);
assert_eq!(BpsRate::Bps1200.parse(), 1200);
assert_eq!(BpsRate::Bps2400.parse(), 2400);
assert_eq!(BpsRate::Bps9600.parse(), 9600);
assert_eq!(BpsRate::Bps19200.parse(), 19200);
assert_eq!(BpsRate::Bps38400.parse(), 38400);
assert_eq!(BpsRate::Bps57600.parse(), 57600);
assert_eq!(BpsRate::Bps115200.parse(), 115200);
}
#[test]
fn converts_numbers_to_baud_rates() {
assert_eq!(BpsRate::Bps300, BpsRate::parse_u32(300).unwrap());
assert_eq!(BpsRate::Bps1200, BpsRate::parse_u32(1200).unwrap());
assert_eq!(BpsRate::Bps2400, BpsRate::parse_u32(2400).unwrap());
assert_eq!(BpsRate::Bps9600, BpsRate::parse_u32(9600).unwrap());
assert_eq!(BpsRate::Bps19200, BpsRate::parse_u32(19200).unwrap());
assert_eq!(BpsRate::Bps38400, BpsRate::parse_u32(38400).unwrap());
assert_eq!(BpsRate::Bps57600, BpsRate::parse_u32(57600).unwrap());
assert_eq!(BpsRate::Bps115200, BpsRate::parse_u32(115200).unwrap());
}
| assert_eq!(data, flipped_data);
}
#[test]
fn converts_valid_response_to_string() {
// empty nul-terminated string
assert_eq!(string_from_response_data(&b"\0"[..]).unwrap(), "");
// non-empty nul-terminated string
assert_eq!(string_from_response_data(&b"hello\0"[..]).unwrap(), "hello");
// high bit is on in the last character
assert_eq!(
string_from_response_data(&b | #[test]
fn turns_off_high_bits() {
let data: [u8; 11] = [63, 73, 44, 112, 72, 44, 49, 46, 57, 56, 0];
let mut flipped_data: [u8; 11] = [63, 73, 172, 112, 200, 172, 49, 46, 57, 56, 0];
turn_off_high_bits(&mut flipped_data); | random_line_split |
resource.go | _one WHERE state=0`
// index-icon
_indexIconSQL = `SELECT id,type,title,state,link,icon,weight,user_name,sttime,endtime,deltime,ctime,mtime FROM icon WHERE state=1 AND deltime=0 AND (type=1 OR (type=2 AND sttime>0))`
_playIconSQL = `SELECT icon1,hash1,icon2,hash2,stime FROM bar_icon WHERE stime<? AND etime>? AND is_deleted=0`
// cmtbox
_cmtboxSQL = `SELECT id,load_cid,server,port,size_factor,speed_factor,max_onscreen,style,style_param,top_margin,state,ctime,mtime FROM cmtbox WHERE state=1`
// update resource assignment etime
_updateResourceAssignmentEtime = `UPDATE resource_assignment SET etime=? WHERE id=?`
// update resource apply status
_updateResourceApplyStatus = `UPDATE resource_apply SET audit_state=? WHERE apply_group_id IN (%s)`
// insert resource logs
_inResourceLogger = `INSERT INTO resource_logger (uname,uid,module,oid,content) VALUES (?,?,?,?,?)`
)
// Resources get resource infos from db
func (d *Dao) Resources(c context.Context) (rscs []*model.Resource, err error) {
var size sql.NullString
rows, err := d.db.Query(c, _allResSQL)
if err != nil {
log.Error("d.Resources query error (%v)", err)
return
}
defer rows.Close()
for rows.Next() {
rsc := &model.Resource{}
if err = rows.Scan(&rsc.ID, &rsc.Platform, &rsc.Name, &rsc.Parent, &rsc.Counter, &rsc.Position, &rsc.Rule, &size, &rsc.Previce,
&rsc.Desc, &rsc.Mark, &rsc.CTime, &rsc.MTime, &rsc.Level, &rsc.Type, &rsc.IsAd); err != nil {
log.Error("Resources rows.Scan err (%v)", err)
return
}
rsc.Size = size.String
rscs = append(rscs, rsc)
}
err = rows.Err()
return
}
// Assignment gets assignments from the db
func (d *Dao) Assignment(c context.Context) (asgs []*model.Assignment, err error) {
rows, err := d.db.Query(c, _allAssignSQL, time.Now(), time.Now())
if err != nil {
log.Error("d.Assignment query error (%v)", err)
return
}
defer rows.Close()
for rows.Next() {
asg := &model.Assignment{}
if err = rows.Scan(&asg.ID, &asg.Name, &asg.ContractID, &asg.ResID, &asg.Pic, &asg.LitPic,
&asg.URL, &asg.Rule, &asg.Weight, &asg.Agency, &asg.Price, &asg.Atype, &asg.Username); err != nil {
log.Error("Assignment rows.Scan err (%v)", err)
return
}
asg.AsgID = asg.ID
asgs = append(asgs, asg)
}
err = rows.Err()
return
}
// AssignmentNew gets resource_assignment rows from the new db
func (d *Dao) AssignmentNew(c context.Context) (asgs []*model.Assignment, err error) {
var (
ok bool
pm map[string]string
)
rows, err := d.db.Query(c, _allAssignNewSQL, time.Now(), time.Now())
if err != nil {
log.Error("d.AssignmentNew query error (%v)", err)
return
}
defer rows.Close()
pm = make(map[string]string)
for rows.Next() {
asg := &model.Assignment{}
if err = rows.Scan(&asg.AsgID, &asg.ID, &asg.Name, &asg.ContractID, &asg.ResID, &asg.Pic, &asg.LitPic,
&asg.URL, &asg.Rule, &asg.Weight, &asg.Agency, &asg.Price, &asg.STime, &asg.ETime, &asg.ApplyGroupID, &asg.CTime, &asg.MTime, &asg.Atype, &asg.Username, &asg.PlayerCategory); err != nil {
log.Error("AssignmentNew rows.Scan err (%v)", err)
return
}
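		// NOTE: the resource IDs hard-coded below are (presumably) the banner
		// positions whose assignments must be rendered as recommended-video
		// cards, so their contract ID is overridden to "rec_video".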
if (asg.ResID == 2054) || (asg.ResID == 2055) || (asg.ResID == 2056) ||
(asg.ResID == 2073) || (asg.ResID == 2074) || (asg.ResID == 2075) ||
(asg.ResID == 1671) || (asg.ResID == 1672) || (asg.ResID == 1673) ||
(asg.ResID == 2315) || (asg.ResID == 2316) || (asg.ResID == 2317) ||
(asg.ResID == 2489) || (asg.ResID == 2490) || (asg.ResID == 2491) ||
(asg.ResID == 2459) || (asg.ResID == 2460) || (asg.ResID == 2461) ||
(asg.ResID == 2469) || (asg.ResID == 2470) || (asg.ResID == 2471) ||
(asg.ResID == 2479) || (asg.ResID == 2480) || (asg.ResID == 2481) ||
(asg.ResID == 2499) || (asg.ResID == 2500) || (asg.ResID == 2501) ||
(asg.ResID == 2606) || (asg.ResID == 2607) || (asg.ResID == 2608) || (asg.ResID == 2609) || (asg.ResID == 2610) ||
(asg.ResID == 2618) || (asg.ResID == 2619) || (asg.ResID == 2620) || (asg.ResID == 2621) || (asg.ResID == 2622) || (asg.ResID == 2623) ||
(asg.ResID == 2556) || (asg.ResID == 2557) || (asg.ResID == 2558) || (asg.ResID == 2559) || (asg.ResID == 2560) ||
(asg.ResID == 2991) || (asg.ResID == 2992) || (asg.ResID == 2993) {
asg.ContractID = "rec_video"
}
pindex := fmt.Sprintf("%d_%d", asg.ResID, asg.Weight)
if _, ok = pm[pindex]; ok {
continue
}
asgs = append(asgs, asg)
pm[pindex] = pindex
}
err = rows.Err()
return
}
// CategoryAssignment gets recommended resource_assignment rows from the db
func (d *Dao) CategoryAssignment(c context.Context) (asgs []*model.Assignment, err error) {
rows, err := d.db.Query(c, _categoryAssignSQL, time.Now(), time.Now())
if err != nil {
log.Error("d.CategoryAssignment query error (%v)", err)
return
}
defer rows.Close()
for rows.Next() {
asg := &model.Assignment{}
if err = rows.Scan(&asg.AsgID, &asg.ID, &asg.Name, &asg.ContractID, &asg.ResID, &asg.Pic, &asg.LitPic,
&asg.URL, &asg.Rule, &asg.Weight, &asg.Agency, &asg.Price, &asg.STime, &asg.ETime, &asg.ApplyGroupID, &asg.CTime, &asg.MTime, &asg.Atype, &asg.Username, &asg.PlayerCategory); err != nil {
log.Error("CategoryAssignment rows.Scan err (%v)", err)
return
}
if (asg.ResID == 2048) || (asg.ResID == 2066) || (asg.ResID == 1670) || (asg.ResID == 2308) || (asg.ResID == 2521) || (asg.ResID == 2979) {
asg.ContractID = "rec_video"
}
asgs = append(asgs, asg)
}
err = rows.Err()
return
}
// DefaultBanner get default banner info
func (d *Dao) | DefaultBanner | identifier_name |
|
resource.go | ,style,style_param,top_margin,state,ctime,mtime FROM cmtbox WHERE state=1`
// update resource assignment etime
_updateResourceAssignmentEtime = `UPDATE resource_assignment SET etime=? WHERE id=?`
// update resource apply status
_updateResourceApplyStatus = `UPDATE resource_apply SET audit_state=? WHERE apply_group_id IN (%s)`
// insert resource logs
_inResourceLogger = `INSERT INTO resource_logger (uname,uid,module,oid,content) VALUES (?,?,?,?,?)`
)
// Resources get resource infos from db
func (d *Dao) Resources(c context.Context) (rscs []*model.Resource, err error) {
var size sql.NullString
rows, err := d.db.Query(c, _allResSQL)
if err != nil {
log.Error("d.Resources query error (%v)", err)
return
}
defer rows.Close()
for rows.Next() {
rsc := &model.Resource{}
if err = rows.Scan(&rsc.ID, &rsc.Platform, &rsc.Name, &rsc.Parent, &rsc.Counter, &rsc.Position, &rsc.Rule, &size, &rsc.Previce,
&rsc.Desc, &rsc.Mark, &rsc.CTime, &rsc.MTime, &rsc.Level, &rsc.Type, &rsc.IsAd); err != nil {
log.Error("Resources rows.Scan err (%v)", err)
return
}
rsc.Size = size.String
rscs = append(rscs, rsc)
}
err = rows.Err()
return
}
// Assignment gets assignments from the db
func (d *Dao) Assignment(c context.Context) (asgs []*model.Assignment, err error) {
rows, err := d.db.Query(c, _allAssignSQL, time.Now(), time.Now())
if err != nil {
log.Error("d.Assignment query error (%v)", err)
return
}
defer rows.Close()
for rows.Next() {
asg := &model.Assignment{}
if err = rows.Scan(&asg.ID, &asg.Name, &asg.ContractID, &asg.ResID, &asg.Pic, &asg.LitPic,
&asg.URL, &asg.Rule, &asg.Weight, &asg.Agency, &asg.Price, &asg.Atype, &asg.Username); err != nil {
log.Error("Assignment rows.Scan err (%v)", err)
return
}
asg.AsgID = asg.ID
asgs = append(asgs, asg)
}
err = rows.Err()
return
}
// AssignmentNew gets resource_assignment rows from the new db
func (d *Dao) AssignmentNew(c context.Context) (asgs []*model.Assignment, err error) {
var (
ok bool
pm map[string]string
)
rows, err := d.db.Query(c, _allAssignNewSQL, time.Now(), time.Now())
if err != nil {
log.Error("d.AssignmentNew query error (%v)", err)
return
}
defer rows.Close()
pm = make(map[string]string)
for rows.Next() {
asg := &model.Assignment{}
if err = rows.Scan(&asg.AsgID, &asg.ID, &asg.Name, &asg.ContractID, &asg.ResID, &asg.Pic, &asg.LitPic,
&asg.URL, &asg.Rule, &asg.Weight, &asg.Agency, &asg.Price, &asg.STime, &asg.ETime, &asg.ApplyGroupID, &asg.CTime, &asg.MTime, &asg.Atype, &asg.Username, &asg.PlayerCategory); err != nil {
log.Error("AssignmentNew rows.Scan err (%v)", err)
return
}
if (asg.ResID == 2054) || (asg.ResID == 2055) || (asg.ResID == 2056) ||
(asg.ResID == 2073) || (asg.ResID == 2074) || (asg.ResID == 2075) ||
(asg.ResID == 1671) || (asg.ResID == 1672) || (asg.ResID == 1673) ||
(asg.ResID == 2315) || (asg.ResID == 2316) || (asg.ResID == 2317) ||
(asg.ResID == 2489) || (asg.ResID == 2490) || (asg.ResID == 2491) ||
(asg.ResID == 2459) || (asg.ResID == 2460) || (asg.ResID == 2461) ||
(asg.ResID == 2469) || (asg.ResID == 2470) || (asg.ResID == 2471) ||
(asg.ResID == 2479) || (asg.ResID == 2480) || (asg.ResID == 2481) ||
(asg.ResID == 2499) || (asg.ResID == 2500) || (asg.ResID == 2501) ||
(asg.ResID == 2606) || (asg.ResID == 2607) || (asg.ResID == 2608) || (asg.ResID == 2609) || (asg.ResID == 2610) ||
(asg.ResID == 2618) || (asg.ResID == 2619) || (asg.ResID == 2620) || (asg.ResID == 2621) || (asg.ResID == 2622) || (asg.ResID == 2623) ||
(asg.ResID == 2556) || (asg.ResID == 2557) || (asg.ResID == 2558) || (asg.ResID == 2559) || (asg.ResID == 2560) ||
(asg.ResID == 2991) || (asg.ResID == 2992) || (asg.ResID == 2993) {
asg.ContractID = "rec_video"
}
pindex := fmt.Sprintf("%d_%d", asg.ResID, asg.Weight)
if _, ok = pm[pindex]; ok {
continue
}
asgs = append(asgs, asg)
pm[pindex] = pindex
}
err = rows.Err()
return
}
// CategoryAssignment gets recommended resource_assignment rows from the db
func (d *Dao) CategoryAssignment(c context.Context) (asgs []*model.Assignment, err error) {
rows, err := d.db.Query(c, _categoryAssignSQL, time.Now(), time.Now())
if err != nil {
log.Error("d.CategoryAssignment query error (%v)", err)
return
}
defer rows.Close()
for rows.Next() {
asg := &model.Assignment{}
if err = rows.Scan(&asg.AsgID, &asg.ID, &asg.Name, &asg.ContractID, &asg.ResID, &asg.Pic, &asg.LitPic,
&asg.URL, &asg.Rule, &asg.Weight, &asg.Agency, &asg.Price, &asg.STime, &asg.ETime, &asg.ApplyGroupID, &asg.CTime, &asg.MTime, &asg.Atype, &asg.Username, &asg.PlayerCategory); err != nil {
log.Error("CategoryAssignment rows.Scan err (%v)", err)
return
}
if (asg.ResID == 2048) || (asg.ResID == 2066) || (asg.ResID == 1670) || (asg.ResID == 2308) || (asg.ResID == 2521) || (asg.ResID == 2979) {
asg.ContractID = "rec_video"
}
asgs = append(asgs, asg)
}
err = rows.Err()
return
}
// DefaultBanner get default banner info
func (d *Dao) DefaultBanner(c context.Context) (asg *model.Assignment, err error) {
row := d.db.QueryRow(c, _defBannerSQL)
asg = &model.Assignment{}
if err = row.Scan(&asg.ID, &asg.Name, &asg.ContractID, &asg.ResID, &asg.Pic, &asg.LitPic,
&asg.URL, &asg.Rule, &asg.Weight, &asg.Agency, &asg.Price, &asg.Atype, &asg.Username); err != nil {
if err == sql.ErrNoRows {
asg = nil | err = nil | random_line_split |
|
resource.go | , &rsc.Mark, &rsc.CTime, &rsc.MTime, &rsc.Level, &rsc.Type, &rsc.IsAd); err != nil {
log.Error("Resources rows.Scan err (%v)", err)
return
}
rsc.Size = size.String
rscs = append(rscs, rsc)
}
err = rows.Err()
return
}
// Assignment gets assignments from the db
func (d *Dao) Assignment(c context.Context) (asgs []*model.Assignment, err error) {
rows, err := d.db.Query(c, _allAssignSQL, time.Now(), time.Now())
if err != nil {
log.Error("d.Assignment query error (%v)", err)
return
}
defer rows.Close()
for rows.Next() {
asg := &model.Assignment{}
if err = rows.Scan(&asg.ID, &asg.Name, &asg.ContractID, &asg.ResID, &asg.Pic, &asg.LitPic,
&asg.URL, &asg.Rule, &asg.Weight, &asg.Agency, &asg.Price, &asg.Atype, &asg.Username); err != nil {
log.Error("Assignment rows.Scan err (%v)", err)
return
}
asg.AsgID = asg.ID
asgs = append(asgs, asg)
}
err = rows.Err()
return
}
// AssignmentNew gets resource_assignment rows from the new db
func (d *Dao) AssignmentNew(c context.Context) (asgs []*model.Assignment, err error) {
var (
ok bool
pm map[string]string
)
rows, err := d.db.Query(c, _allAssignNewSQL, time.Now(), time.Now())
if err != nil {
log.Error("d.AssignmentNew query error (%v)", err)
return
}
defer rows.Close()
pm = make(map[string]string)
for rows.Next() {
asg := &model.Assignment{}
if err = rows.Scan(&asg.AsgID, &asg.ID, &asg.Name, &asg.ContractID, &asg.ResID, &asg.Pic, &asg.LitPic,
&asg.URL, &asg.Rule, &asg.Weight, &asg.Agency, &asg.Price, &asg.STime, &asg.ETime, &asg.ApplyGroupID, &asg.CTime, &asg.MTime, &asg.Atype, &asg.Username, &asg.PlayerCategory); err != nil {
log.Error("AssignmentNew rows.Scan err (%v)", err)
return
}
if (asg.ResID == 2054) || (asg.ResID == 2055) || (asg.ResID == 2056) ||
(asg.ResID == 2073) || (asg.ResID == 2074) || (asg.ResID == 2075) ||
(asg.ResID == 1671) || (asg.ResID == 1672) || (asg.ResID == 1673) ||
(asg.ResID == 2315) || (asg.ResID == 2316) || (asg.ResID == 2317) ||
(asg.ResID == 2489) || (asg.ResID == 2490) || (asg.ResID == 2491) ||
(asg.ResID == 2459) || (asg.ResID == 2460) || (asg.ResID == 2461) ||
(asg.ResID == 2469) || (asg.ResID == 2470) || (asg.ResID == 2471) ||
(asg.ResID == 2479) || (asg.ResID == 2480) || (asg.ResID == 2481) ||
(asg.ResID == 2499) || (asg.ResID == 2500) || (asg.ResID == 2501) ||
(asg.ResID == 2606) || (asg.ResID == 2607) || (asg.ResID == 2608) || (asg.ResID == 2609) || (asg.ResID == 2610) ||
(asg.ResID == 2618) || (asg.ResID == 2619) || (asg.ResID == 2620) || (asg.ResID == 2621) || (asg.ResID == 2622) || (asg.ResID == 2623) ||
(asg.ResID == 2556) || (asg.ResID == 2557) || (asg.ResID == 2558) || (asg.ResID == 2559) || (asg.ResID == 2560) ||
(asg.ResID == 2991) || (asg.ResID == 2992) || (asg.ResID == 2993) {
asg.ContractID = "rec_video"
}
pindex := fmt.Sprintf("%d_%d", asg.ResID, asg.Weight)
if _, ok = pm[pindex]; ok {
continue
}
asgs = append(asgs, asg)
pm[pindex] = pindex
}
err = rows.Err()
return
}
// CategoryAssignment gets recommended resource_assignment rows from the db
func (d *Dao) CategoryAssignment(c context.Context) (asgs []*model.Assignment, err error) {
rows, err := d.db.Query(c, _categoryAssignSQL, time.Now(), time.Now())
if err != nil {
log.Error("d.CategoryAssignment query error (%v)", err)
return
}
defer rows.Close()
for rows.Next() {
asg := &model.Assignment{}
if err = rows.Scan(&asg.AsgID, &asg.ID, &asg.Name, &asg.ContractID, &asg.ResID, &asg.Pic, &asg.LitPic,
&asg.URL, &asg.Rule, &asg.Weight, &asg.Agency, &asg.Price, &asg.STime, &asg.ETime, &asg.ApplyGroupID, &asg.CTime, &asg.MTime, &asg.Atype, &asg.Username, &asg.PlayerCategory); err != nil {
log.Error("CategoryAssignment rows.Scan err (%v)", err)
return
}
if (asg.ResID == 2048) || (asg.ResID == 2066) || (asg.ResID == 1670) || (asg.ResID == 2308) || (asg.ResID == 2521) || (asg.ResID == 2979) {
asg.ContractID = "rec_video"
}
asgs = append(asgs, asg)
}
err = rows.Err()
return
}
// DefaultBanner get default banner info
func (d *Dao) DefaultBanner(c context.Context) (asg *model.Assignment, err error) {
row := d.db.QueryRow(c, _defBannerSQL)
asg = &model.Assignment{}
if err = row.Scan(&asg.ID, &asg.Name, &asg.ContractID, &asg.ResID, &asg.Pic, &asg.LitPic,
&asg.URL, &asg.Rule, &asg.Weight, &asg.Agency, &asg.Price, &asg.Atype, &asg.Username); err != nil {
if err == sql.ErrNoRows {
asg = nil
err = nil
} else {
log.Error("d.DefaultBanner.Scan error(%v)", err)
}
}
return
}
// IndexIcon get index icon.
func (d *Dao) IndexIcon(c context.Context) (icons map[int][]*model.IndexIcon, err error) {
rows, err := d.db.Query(c, _indexIconSQL)
if err != nil {
log.Error("d.IndexIcon query error (%v)", err)
return
}
defer rows.Close()
icons = make(map[int][]*model.IndexIcon)
for rows.Next() | {
var link string
icon := &model.IndexIcon{}
if err = rows.Scan(&icon.ID, &icon.Type, &icon.Title, &icon.State, &link, &icon.Icon,
&icon.Weight, &icon.UserName, &icon.StTime, &icon.EndTime, &icon.DelTime, &icon.CTime, &icon.MTime); err != nil {
log.Error("IndexIcon rows.Scan err (%v)", err)
return
}
icon.Links = strings.Split(link, ",")
icons[icon.Type] = append(icons[icon.Type], icon)
} | conditional_block |
|
resource.go | _updateResourceAssignmentEtime = `UPDATE resource_assignment SET etime=? WHERE id=?`
// update resource apply status
_updateResourceApplyStatus = `UPDATE resource_apply SET audit_state=? WHERE apply_group_id IN (%s)`
// insert resource logs
_inResourceLogger = `INSERT INTO resource_logger (uname,uid,module,oid,content) VALUES (?,?,?,?,?)`
)
// Resources get resource infos from db
func (d *Dao) Resources(c context.Context) (rscs []*model.Resource, err error) {
var size sql.NullString
rows, err := d.db.Query(c, _allResSQL)
if err != nil {
log.Error("d.Resources query error (%v)", err)
return
}
defer rows.Close()
for rows.Next() {
rsc := &model.Resource{}
if err = rows.Scan(&rsc.ID, &rsc.Platform, &rsc.Name, &rsc.Parent, &rsc.Counter, &rsc.Position, &rsc.Rule, &size, &rsc.Previce,
&rsc.Desc, &rsc.Mark, &rsc.CTime, &rsc.MTime, &rsc.Level, &rsc.Type, &rsc.IsAd); err != nil {
log.Error("Resources rows.Scan err (%v)", err)
return
}
rsc.Size = size.String
rscs = append(rscs, rsc)
}
err = rows.Err()
return
}
// Assignment gets assignments from the db
func (d *Dao) Assignment(c context.Context) (asgs []*model.Assignment, err error) {
rows, err := d.db.Query(c, _allAssignSQL, time.Now(), time.Now())
if err != nil {
log.Error("d.Assignment query error (%v)", err)
return
}
defer rows.Close()
for rows.Next() {
asg := &model.Assignment{}
if err = rows.Scan(&asg.ID, &asg.Name, &asg.ContractID, &asg.ResID, &asg.Pic, &asg.LitPic,
&asg.URL, &asg.Rule, &asg.Weight, &asg.Agency, &asg.Price, &asg.Atype, &asg.Username); err != nil {
log.Error("Assignment rows.Scan err (%v)", err)
return
}
asg.AsgID = asg.ID
asgs = append(asgs, asg)
}
err = rows.Err()
return
}
// AssignmentNew gets resource_assignment rows from the new db
func (d *Dao) AssignmentNew(c context.Context) (asgs []*model.Assignment, err error) {
var (
ok bool
pm map[string]string
)
rows, err := d.db.Query(c, _allAssignNewSQL, time.Now(), time.Now())
if err != nil {
log.Error("d.AssignmentNew query error (%v)", err)
return
}
defer rows.Close()
pm = make(map[string]string)
for rows.Next() {
asg := &model.Assignment{}
if err = rows.Scan(&asg.AsgID, &asg.ID, &asg.Name, &asg.ContractID, &asg.ResID, &asg.Pic, &asg.LitPic,
&asg.URL, &asg.Rule, &asg.Weight, &asg.Agency, &asg.Price, &asg.STime, &asg.ETime, &asg.ApplyGroupID, &asg.CTime, &asg.MTime, &asg.Atype, &asg.Username, &asg.PlayerCategory); err != nil {
log.Error("AssignmentNew rows.Scan err (%v)", err)
return
}
if (asg.ResID == 2054) || (asg.ResID == 2055) || (asg.ResID == 2056) ||
(asg.ResID == 2073) || (asg.ResID == 2074) || (asg.ResID == 2075) ||
(asg.ResID == 1671) || (asg.ResID == 1672) || (asg.ResID == 1673) ||
(asg.ResID == 2315) || (asg.ResID == 2316) || (asg.ResID == 2317) ||
(asg.ResID == 2489) || (asg.ResID == 2490) || (asg.ResID == 2491) ||
(asg.ResID == 2459) || (asg.ResID == 2460) || (asg.ResID == 2461) ||
(asg.ResID == 2469) || (asg.ResID == 2470) || (asg.ResID == 2471) ||
(asg.ResID == 2479) || (asg.ResID == 2480) || (asg.ResID == 2481) ||
(asg.ResID == 2499) || (asg.ResID == 2500) || (asg.ResID == 2501) ||
(asg.ResID == 2606) || (asg.ResID == 2607) || (asg.ResID == 2608) || (asg.ResID == 2609) || (asg.ResID == 2610) ||
(asg.ResID == 2618) || (asg.ResID == 2619) || (asg.ResID == 2620) || (asg.ResID == 2621) || (asg.ResID == 2622) || (asg.ResID == 2623) ||
(asg.ResID == 2556) || (asg.ResID == 2557) || (asg.ResID == 2558) || (asg.ResID == 2559) || (asg.ResID == 2560) ||
(asg.ResID == 2991) || (asg.ResID == 2992) || (asg.ResID == 2993) {
asg.ContractID = "rec_video"
}
pindex := fmt.Sprintf("%d_%d", asg.ResID, asg.Weight)
if _, ok = pm[pindex]; ok {
continue
}
asgs = append(asgs, asg)
pm[pindex] = pindex
}
err = rows.Err()
return
}
// CategoryAssignment gets recommended resource_assignment rows from the db
func (d *Dao) CategoryAssignment(c context.Context) (asgs []*model.Assignment, err error) {
rows, err := d.db.Query(c, _categoryAssignSQL, time.Now(), time.Now())
if err != nil {
log.Error("d.CategoryAssignment query error (%v)", err)
return
}
defer rows.Close()
for rows.Next() {
asg := &model.Assignment{}
if err = rows.Scan(&asg.AsgID, &asg.ID, &asg.Name, &asg.ContractID, &asg.ResID, &asg.Pic, &asg.LitPic,
&asg.URL, &asg.Rule, &asg.Weight, &asg.Agency, &asg.Price, &asg.STime, &asg.ETime, &asg.ApplyGroupID, &asg.CTime, &asg.MTime, &asg.Atype, &asg.Username, &asg.PlayerCategory); err != nil {
log.Error("CategoryAssignment rows.Scan err (%v)", err)
return
}
if (asg.ResID == 2048) || (asg.ResID == 2066) || (asg.ResID == 1670) || (asg.ResID == 2308) || (asg.ResID == 2521) || (asg.ResID == 2979) {
asg.ContractID = "rec_video"
}
asgs = append(asgs, asg)
}
err = rows.Err()
return
}
// DefaultBanner get default banner info
func (d *Dao) DefaultBanner(c context.Context) (asg *model.Assignment, err error) | {
row := d.db.QueryRow(c, _defBannerSQL)
asg = &model.Assignment{}
if err = row.Scan(&asg.ID, &asg.Name, &asg.ContractID, &asg.ResID, &asg.Pic, &asg.LitPic,
&asg.URL, &asg.Rule, &asg.Weight, &asg.Agency, &asg.Price, &asg.Atype, &asg.Username); err != nil {
if err == sql.ErrNoRows {
asg = nil
err = nil
} else {
log.Error("d.DefaultBanner.Scan error(%v)", err)
}
}
return
} | identifier_body |
|
btrfs_tree_h.go | }
type btrfs_extent_inline_ref struct {
type_ uint8
offset uint64
}
/* old style backrefs item */
type btrfs_extent_ref_v0 struct {
root uint64
generation uint64
objectid uint64
count uint32
}
/* dev extents record free space on individual devices. The owner
* field points back to the chunk allocation mapping tree that allocated
* the extent. The chunk tree uuid field is a way to double check the owner
*/
type btrfs_dev_extent struct {
chunk_tree uint64
chunk_objectid uint64
chunk_offset uint64
length uint64
chunk_tree_uuid UUID
}
type btrfs_inode_ref struct {
index uint64
name_len uint16
}
/* name goes here */
type btrfs_inode_extref struct {
parent_objectid uint64
index uint64
name_len uint16
//name [0]uint8
}
/* name goes here */
type btrfs_timespec struct {
sec uint64
nsec uint32
}
type btrfs_inode_item struct {
generation uint64
transid uint64
size uint64
nbytes uint64
block_group uint64
nlink uint32
uid uint32
gid uint32
mode uint32
rdev uint64
flags uint64
sequence uint64
reserved [4]uint64
atime struct {
sec uint64
nsec uint32
}
ctime struct {
sec uint64
nsec uint32
}
mtime struct {
sec uint64
nsec uint32
}
otime struct {
sec uint64
nsec uint32
}
}
type btrfs_dir_log_item struct {
end uint64
}
type btrfs_dir_item struct {
location struct {
objectid uint64
type_ uint8
offset uint64
}
transid uint64
data_len uint16
name_len uint16
type_ uint8
}
/*
* Internal in-memory flag that a subvolume has been marked for deletion but
* still visible as a directory
*/
type btrfs_root_item struct {
inode struct {
generation uint64
transid uint64
size uint64
nbytes uint64
block_group uint64
nlink uint32
uid uint32
gid uint32
mode uint32
rdev uint64
flags uint64
sequence uint64
reserved [4]uint64
atime struct {
sec uint64
nsec uint32
}
ctime struct {
sec uint64
nsec uint32
}
mtime struct {
sec uint64
nsec uint32
}
otime struct {
sec uint64
nsec uint32
}
}
generation uint64
root_dirid uint64
bytenr uint64
byte_limit uint64
bytes_used uint64
last_snapshot uint64
flags uint64
refs uint32
drop_progress struct {
objectid uint64
type_ uint8
offset uint64
}
drop_level uint8
level uint8
generation_v2 uint64
uuid UUID
parent_uuid UUID
received_uuid UUID
ctransid uint64
otransid uint64
stransid uint64
rtransid uint64
ctime struct {
sec uint64
nsec uint32
}
otime struct {
sec uint64
nsec uint32
}
stime struct {
sec uint64
nsec uint32
}
rtime struct {
sec uint64
nsec uint32
}
reserved [8]uint64
}
/*
* this is used for both forward and backward root refs
*/
type btrfs_root_ref struct {
dirid uint64
sequence uint64
name_len uint16
}
type btrfs_disk_balance_args struct {
profiles uint64
usage uint64
usage_min uint32
usage_max uint32
devid uint64
pstart uint64
pend uint64
vstart uint64
vend uint64
target uint64
flags uint64
limit uint64
limit_min uint32
limit_max uint32
stripes_min uint32
stripes_max uint32
unused [6]uint64
}
/*
* store balance parameters to disk so that balance can be properly
* resumed after crash or unmount
*/
type btrfs_balance_item struct {
flags uint64
data struct {
profiles uint64
usage uint64
usage_min uint32
usage_max uint32
devid uint64
pstart uint64
pend uint64
vstart uint64
vend uint64
target uint64
flags uint64
limit uint64
limit_min uint32
limit_max uint32
stripes_min uint32
stripes_max uint32
unused [6]uint64
}
meta struct {
profiles uint64
usage uint64
usage_min uint32
usage_max uint32
devid uint64
pstart uint64
pend uint64
vstart uint64
vend uint64
target uint64
flags uint64
limit uint64
limit_min uint32
limit_max uint32
stripes_min uint32
stripes_max uint32
unused [6]uint64
}
sys struct {
profiles uint64
usage uint64
usage_min uint32
usage_max uint32
devid uint64
pstart uint64
pend uint64
vstart uint64
vend uint64
target uint64
flags uint64
limit uint64
limit_min uint32
limit_max uint32
stripes_min uint32
stripes_max uint32
unused [6]uint64
}
unused [4]uint64
}
type btrfs_file_extent_item struct {
generation uint64
ram_bytes uint64
compression uint8
encryption uint8
other_encoding uint16
type_ uint8
disk_bytenr uint64
disk_num_bytes uint64
offset uint64
num_bytes uint64
}
type btrfs_csum_item struct {
csum uint8
}
type btrfs_dev_stats_item struct {
values [_BTRFS_DEV_STAT_VALUES_MAX]uint64
}
type btrfs_dev_replace_item struct {
src_devid uint64
cursor_left uint64
cursor_right uint64
cont_reading_from_srcdev_mode uint64
replace_state uint64
time_started uint64
time_stopped uint64
num_write_errors uint64
num_uncorrectable_read_errors uint64
}
/* different types of block groups (and chunks) */
const (
_BTRFS_RAID_RAID10 = iota
_BTRFS_RAID_RAID1
_BTRFS_RAID_DUP
_BTRFS_RAID_RAID0
_BTRFS_RAID_SINGLE
_BTRFS_RAID_RAID5
_BTRFS_RAID_RAID6
_BTRFS_NR_RAID_TYPES
)
/*
* We need a bit for restriper to be able to tell when chunks of type
* SINGLE are available. This "extended" profile format is used in
* fs_info->avail_*_alloc_bits (in-memory) and balance item fields
* (on-disk). The corresponding on-disk bit in chunk.type is reserved
* to avoid remappings between two formats in future.
*/
/*
* A fake block group type that is used to communicate global block reserve
* size to userspace via the SPACE_INFO ioctl.
*/
func | chunk_to_extended | identifier_name |
|
btrfs_tree_h.go | key per qgroup, (0, _BTRFS_QGROUP_INFO_KEY, qgroupid).
*/
/*
* Contains the user configured limits for the qgroup.
* One key per qgroup, (0, _BTRFS_QGROUP_LIMIT_KEY, qgroupid).
*/
/*
* Records the child-parent relationship of qgroups. For
* each relation, 2 keys are present:
* (childid, _BTRFS_QGROUP_RELATION_KEY, parentid)
* (parentid, _BTRFS_QGROUP_RELATION_KEY, childid)
*/
/*
* Obsolete name, see _BTRFS_TEMPORARY_ITEM_KEY.
*/
/*
* The key type for tree items that are stored persistently, but do not need to
* exist for extended period of time. The items can exist in any tree.
*
* [subtype, _BTRFS_TEMPORARY_ITEM_KEY, data]
*
* Existing items:
*
* - balance status item
* (_BTRFS_BALANCE_OBJECTID, _BTRFS_TEMPORARY_ITEM_KEY, 0)
*/
/*
* Obsolete name, see _BTRFS_PERSISTENT_ITEM_KEY
*/
/*
* The key type for tree items that are stored persistently and usually exist
* for a long period, eg. filesystem lifetime. The item kinds can be status
* information, stats or preference values. The item can exist in any tree.
*
* [subtype, _BTRFS_PERSISTENT_ITEM_KEY, data]
*
* Existing items:
*
* - device statistics, store IO stats in the device tree, one key for all
* stats
* (_BTRFS_DEV_STATS_OBJECTID, _BTRFS_DEV_STATS_KEY, 0)
*/
/*
* Persistently stores the device replace state in the device tree.
* The key is built like this: (0, _BTRFS_DEV_REPLACE_KEY, 0).
*/
/*
* Stores items that allow UUIDs to be quickly mapped to something else.
* These items are part of the filesystem UUID tree.
* The key is built like this:
* (UUID_upper_64_bits, _BTRFS_UUID_KEY*, UUID_lower_64_bits).
*/
/* for UUIDs assigned to received subvols */
/*
* string items are for debugging. They just store a short string of
* data in the FS
*/
/* 32 bytes in various csum fields */
/* csum types */
/*
* flags definitions for directory entry item type
*
* Used by:
* struct btrfs_dir_item.type
*/
/*
* The key defines the order in the tree, and so it also defines (optimal)
* block layout.
*
* objectid corresponds to the inode number.
*
* type tells us things about the object, and is a kind of stream selector.
* so for a given inode, keys with type of 1 might refer to the inode data,
* type of 2 may point to file data in the btree and type == 3 may point to
* extents.
*
* offset is the starting byte offset for this key in the stream.
*
* btrfs_disk_key is in disk byte order. struct btrfs_key is always
* in cpu native order. Otherwise they are identical and their sizes
* should be the same (ie both packed)
*/
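// For example (a sketch of the layout described above, assuming the key-type
// constants follow the _BTRFS_*_KEY naming used elsewhere in this file): the
// inode item of inode 257 would live at key (objectid=257,
// type=_BTRFS_INODE_ITEM_KEY, offset=0), with its file extents following at
// (257, _BTRFS_EXTENT_DATA_KEY, file_offset).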
type btrfs_disk_key struct {
objectid uint64
type_ uint8
offset uint64
}
type btrfs_key struct {
objectid uint64
type_ uint8
offset uint64
}
type btrfs_dev_item struct {
devid uint64
total_bytes uint64
bytes_used uint64
io_align uint32
io_width uint32
sector_size uint32
type_ uint64
generation uint64
start_offset uint64
dev_group uint32
seek_speed uint8
bandwidth uint8
uuid UUID
fsid FSID
}
type btrfs_stripe struct {
devid uint64
offset uint64
dev_uuid UUID
}
type btrfs_chunk struct {
length uint64
owner uint64
stripe_len uint64
type_ uint64
io_align uint32
io_width uint32
sector_size uint32
num_stripes uint16
sub_stripes uint16
stripe struct {
devid uint64
offset uint64
dev_uuid UUID
}
}
/* additional stripes go here */
type btrfs_free_space_entry struct {
offset uint64
bytes uint64
type_ uint8
}
type btrfs_free_space_header struct {
location struct {
objectid uint64
type_ uint8
offset uint64
}
generation uint64
num_entries uint64
num_bitmaps uint64
}
/* Super block flags */
/* Errors detected */
/*
* items in the extent btree are used to record the objectid of the
* owner of the block and the number of references
*/
type btrfs_extent_item struct {
refs uint64
generation uint64
flags uint64
}
type btrfs_extent_item_v0 struct {
refs uint32
}
/* following flags only apply to tree blocks */
/* use full backrefs for extent pointers in the block */
/*
* this flag is only used internally by scrub and may be changed at any time
* it is only declared here to avoid collisions
*/
type btrfs_tree_block_info struct {
key struct {
objectid uint64
type_ uint8
offset uint64
}
level uint8
}
type btrfs_extent_data_ref struct {
root uint64
objectid uint64
offset uint64
count uint32
}
type btrfs_shared_data_ref struct {
count uint32
}
type btrfs_extent_inline_ref struct {
type_ uint8
offset uint64
}
/* old style backrefs item */
type btrfs_extent_ref_v0 struct {
root uint64
generation uint64
objectid uint64
count uint32
}
/* dev extents record free space on individual devices. The owner
* field points back to the chunk allocation mapping tree that allocated
* the extent. The chunk tree uuid field is a way to double check the owner
*/
type btrfs_dev_extent struct {
chunk_tree uint64
chunk_objectid uint64
chunk_offset uint64
length uint64
chunk_tree_uuid UUID
}
type btrfs_inode_ref struct {
index uint64
name_len uint16
}
/* name goes here */
type btrfs_inode_extref struct {
parent_objectid uint64
index uint64
name_len uint16
//name [0]uint8
}
/* name goes here */
type btrfs_timespec struct {
sec uint64
nsec uint32
}
type btrfs_inode_item struct {
generation uint64
transid uint64
size uint64
nbytes uint64
block_group uint64
nlink uint32
uid uint32
gid uint32
mode uint32
rdev uint64
flags uint64
sequence uint64
reserved [4]uint64
atime struct {
sec uint64
nsec uint32
}
ctime struct {
sec uint64
nsec uint32
}
mtime struct {
sec uint64
nsec uint32
}
otime struct {
sec uint64
nsec uint32
}
}
type btrfs_dir_log_item struct {
end uint64
}
type btrfs_dir_item struct {
location struct {
objectid uint64
type_ uint8
offset uint64
}
transid uint64
data_len uint16
name_len uint16
type_ uint8
}
/*
* Internal in-memory flag that a subvolume has been marked for deletion but
* still visible as a directory
*/
type btrfs_root_item struct {
inode struct {
generation uint64
transid uint64
size uint64
nbytes uint64
block_group uint64
nlink uint32
uid uint32
gid uint32
mode uint32
rdev uint64
flags uint64
sequence uint64
reserved [4]uint64
atime struct {
sec uint64
nsec uint32
}
ctime struct {
sec uint64
nsec uint32
}
mtime struct {
sec uint64
nsec uint32
}
otime struct {
sec uint64
nsec uint32
}
}
generation uint64
root_dirid uint64
bytenr uint64
byte_limit uint64
bytes_used uint64 | last_snapshot uint64 | random_line_split |
|
btrfs_tree_h.go | }
type btrfs_inode_ref struct {
index uint64
name_len uint16
}
/* name goes here */
type btrfs_inode_extref struct {
parent_objectid uint64
index uint64
name_len uint16
//name [0]uint8
}
/* name goes here */
type btrfs_timespec struct {
sec uint64
nsec uint32
}
type btrfs_inode_item struct {
generation uint64
transid uint64
size uint64
nbytes uint64
block_group uint64
nlink uint32
uid uint32
gid uint32
mode uint32
rdev uint64
flags uint64
sequence uint64
reserved [4]uint64
atime struct {
sec uint64
nsec uint32
}
ctime struct {
sec uint64
nsec uint32
}
mtime struct {
sec uint64
nsec uint32
}
otime struct {
sec uint64
nsec uint32
}
}
type btrfs_dir_log_item struct {
end uint64
}
type btrfs_dir_item struct {
location struct {
objectid uint64
type_ uint8
offset uint64
}
transid uint64
data_len uint16
name_len uint16
type_ uint8
}
/*
* Internal in-memory flag that a subvolume has been marked for deletion but
* still visible as a directory
*/
type btrfs_root_item struct {
inode struct {
generation uint64
transid uint64
size uint64
nbytes uint64
block_group uint64
nlink uint32
uid uint32
gid uint32
mode uint32
rdev uint64
flags uint64
sequence uint64
reserved [4]uint64
atime struct {
sec uint64
nsec uint32
}
ctime struct {
sec uint64
nsec uint32
}
mtime struct {
sec uint64
nsec uint32
}
otime struct {
sec uint64
nsec uint32
}
}
generation uint64
root_dirid uint64
bytenr uint64
byte_limit uint64
bytes_used uint64
last_snapshot uint64
flags uint64
refs uint32
drop_progress struct {
objectid uint64
type_ uint8
offset uint64
}
drop_level uint8
level uint8
generation_v2 uint64
uuid UUID
parent_uuid UUID
received_uuid UUID
ctransid uint64
otransid uint64
stransid uint64
rtransid uint64
ctime struct {
sec uint64
nsec uint32
}
otime struct {
sec uint64
nsec uint32
}
stime struct {
sec uint64
nsec uint32
}
rtime struct {
sec uint64
nsec uint32
}
reserved [8]uint64
}
/*
* this is used for both forward and backward root refs
*/
type btrfs_root_ref struct {
dirid uint64
sequence uint64
name_len uint16
}
type btrfs_disk_balance_args struct {
profiles uint64
usage uint64
usage_min uint32
usage_max uint32
devid uint64
pstart uint64
pend uint64
vstart uint64
vend uint64
target uint64
flags uint64
limit uint64
limit_min uint32
limit_max uint32
stripes_min uint32
stripes_max uint32
unused [6]uint64
}
/*
* store balance parameters to disk so that balance can be properly
* resumed after crash or unmount
*/
type btrfs_balance_item struct {
flags uint64
data struct {
profiles uint64
usage uint64
usage_min uint32
usage_max uint32
devid uint64
pstart uint64
pend uint64
vstart uint64
vend uint64
target uint64
flags uint64
limit uint64
limit_min uint32
limit_max uint32
stripes_min uint32
stripes_max uint32
unused [6]uint64
}
meta struct {
profiles uint64
usage uint64
usage_min uint32
usage_max uint32
devid uint64
pstart uint64
pend uint64
vstart uint64
vend uint64
target uint64
flags uint64
limit uint64
limit_min uint32
limit_max uint32
stripes_min uint32
stripes_max uint32
unused [6]uint64
}
sys struct {
profiles uint64
usage uint64
usage_min uint32
usage_max uint32
devid uint64
pstart uint64
pend uint64
vstart uint64
vend uint64
target uint64
flags uint64
limit uint64
limit_min uint32
limit_max uint32
stripes_min uint32
stripes_max uint32
unused [6]uint64
}
unused [4]uint64
}
type btrfs_file_extent_item struct {
generation uint64
ram_bytes uint64
compression uint8
encryption uint8
other_encoding uint16
type_ uint8
disk_bytenr uint64
disk_num_bytes uint64
offset uint64
num_bytes uint64
}
type btrfs_csum_item struct {
csum uint8
}
type btrfs_dev_stats_item struct {
values [_BTRFS_DEV_STAT_VALUES_MAX]uint64
}
type btrfs_dev_replace_item struct {
src_devid uint64
cursor_left uint64
cursor_right uint64
cont_reading_from_srcdev_mode uint64
replace_state uint64
time_started uint64
time_stopped uint64
num_write_errors uint64
num_uncorrectable_read_errors uint64
}
/* different types of block groups (and chunks) */
const (
_BTRFS_RAID_RAID10 = iota
_BTRFS_RAID_RAID1
_BTRFS_RAID_DUP
_BTRFS_RAID_RAID0
_BTRFS_RAID_SINGLE
_BTRFS_RAID_RAID5
_BTRFS_RAID_RAID6
_BTRFS_NR_RAID_TYPES
)
/*
* We need a bit for restriper to be able to tell when chunks of type
* SINGLE are available. This "extended" profile format is used in
* fs_info->avail_*_alloc_bits (in-memory) and balance item fields
* (on-disk). The corresponding on-disk bit in chunk.type is reserved
* to avoid remappings between two formats in future.
*/
/*
* A fake block group type that is used to communicate global block reserve
* size to userspace via the SPACE_INFO ioctl.
*/
func chunk_to_extended(flags uint64) uint64 {
if flags&uint64(_BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 {
flags |= uint64(availAllocBitSingle)
}
return flags
}
func extended_to_chunk(flags uint64) uint64 {
return flags &^ uint64(availAllocBitSingle)
}
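// A quick sanity check of the round-trip (sketch): a SINGLE chunk has no
// profile bits set, so chunk_to_extended(0) turns on availAllocBitSingle,
// and extended_to_chunk(chunk_to_extended(0)) == 0 recovers the on-disk form.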
type btrfs_block_group_item struct {
used uint64
chunk_objectid uint64
flags uint64
}
type btrfs_free_space_info struct {
extent_count uint32
flags uint32
}
func btrfs_qgroup_level(qgroupid uint64) uint64 | {
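	// A qgroup ID packs the level into the top bits (assuming
	// qgroupLevelShift is 48, as in the kernel header): qgroup "1/5" is
	// encoded as (1 << 48) + 5, for which this function returns 1.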
return qgroupid >> uint32(qgroupLevelShift)
} | identifier_body |
|
btrfs_tree_h.go |
generation uint64
objectid uint64
count uint32
}
/* dev extents record free space on individual devices. The owner
* field points back to the chunk allocation mapping tree that allocated
* the extent. The chunk tree uuid field is a way to double check the owner
*/
type btrfs_dev_extent struct {
chunk_tree uint64
chunk_objectid uint64
chunk_offset uint64
length uint64
chunk_tree_uuid UUID
}
type btrfs_inode_ref struct {
index uint64
name_len uint16
}
/* name goes here */
type btrfs_inode_extref struct {
parent_objectid uint64
index uint64
name_len uint16
//name [0]uint8
}
/* name goes here */
type btrfs_timespec struct {
sec uint64
nsec uint32
}
type btrfs_inode_item struct {
generation uint64
transid uint64
size uint64
nbytes uint64
block_group uint64
nlink uint32
uid uint32
gid uint32
mode uint32
rdev uint64
flags uint64
sequence uint64
reserved [4]uint64
atime struct {
sec uint64
nsec uint32
}
ctime struct {
sec uint64
nsec uint32
}
mtime struct {
sec uint64
nsec uint32
}
otime struct {
sec uint64
nsec uint32
}
}
type btrfs_dir_log_item struct {
end uint64
}
type btrfs_dir_item struct {
location struct {
objectid uint64
type_ uint8
offset uint64
}
transid uint64
data_len uint16
name_len uint16
type_ uint8
}
/*
* Internal in-memory flag that a subvolume has been marked for deletion but
* still visible as a directory
*/
type btrfs_root_item struct {
inode struct {
generation uint64
transid uint64
size uint64
nbytes uint64
block_group uint64
nlink uint32
uid uint32
gid uint32
mode uint32
rdev uint64
flags uint64
sequence uint64
reserved [4]uint64
atime struct {
sec uint64
nsec uint32
}
ctime struct {
sec uint64
nsec uint32
}
mtime struct {
sec uint64
nsec uint32
}
otime struct {
sec uint64
nsec uint32
}
}
generation uint64
root_dirid uint64
bytenr uint64
byte_limit uint64
bytes_used uint64
last_snapshot uint64
flags uint64
refs uint32
drop_progress struct {
objectid uint64
type_ uint8
offset uint64
}
drop_level uint8
level uint8
generation_v2 uint64
uuid UUID
parent_uuid UUID
received_uuid UUID
ctransid uint64
otransid uint64
stransid uint64
rtransid uint64
ctime struct {
sec uint64
nsec uint32
}
otime struct {
sec uint64
nsec uint32
}
stime struct {
sec uint64
nsec uint32
}
rtime struct {
sec uint64
nsec uint32
}
reserved [8]uint64
}
/*
* this is used for both forward and backward root refs
*/
type btrfs_root_ref struct {
dirid uint64
sequence uint64
name_len uint16
}
type btrfs_disk_balance_args struct {
profiles uint64
usage uint64
usage_min uint32
usage_max uint32
devid uint64
pstart uint64
pend uint64
vstart uint64
vend uint64
target uint64
flags uint64
limit uint64
limit_min uint32
limit_max uint32
stripes_min uint32
stripes_max uint32
unused [6]uint64
}
/*
* store balance parameters to disk so that balance can be properly
* resumed after crash or unmount
*/
type btrfs_balance_item struct {
flags uint64
data struct {
profiles uint64
usage uint64
usage_min uint32
usage_max uint32
devid uint64
pstart uint64
pend uint64
vstart uint64
vend uint64
target uint64
flags uint64
limit uint64
limit_min uint32
limit_max uint32
stripes_min uint32
stripes_max uint32
unused [6]uint64
}
meta struct {
profiles uint64
usage uint64
usage_min uint32
usage_max uint32
devid uint64
pstart uint64
pend uint64
vstart uint64
vend uint64
target uint64
flags uint64
limit uint64
limit_min uint32
limit_max uint32
stripes_min uint32
stripes_max uint32
unused [6]uint64
}
sys struct {
profiles uint64
usage uint64
usage_min uint32
usage_max uint32
devid uint64
pstart uint64
pend uint64
vstart uint64
vend uint64
target uint64
flags uint64
limit uint64
limit_min uint32
limit_max uint32
stripes_min uint32
stripes_max uint32
unused [6]uint64
}
unused [4]uint64
}
type btrfs_file_extent_item struct {
generation uint64
ram_bytes uint64
compression uint8
encryption uint8
other_encoding uint16
type_ uint8
disk_bytenr uint64
disk_num_bytes uint64
offset uint64
num_bytes uint64
}
type btrfs_csum_item struct {
csum uint8
}
type btrfs_dev_stats_item struct {
values [_BTRFS_DEV_STAT_VALUES_MAX]uint64
}
type btrfs_dev_replace_item struct {
src_devid uint64
cursor_left uint64
cursor_right uint64
cont_reading_from_srcdev_mode uint64
replace_state uint64
time_started uint64
time_stopped uint64
num_write_errors uint64
num_uncorrectable_read_errors uint64
}
/* different types of block groups (and chunks) */
const (
_BTRFS_RAID_RAID10 = iota
_BTRFS_RAID_RAID1
_BTRFS_RAID_DUP
_BTRFS_RAID_RAID0
_BTRFS_RAID_SINGLE
_BTRFS_RAID_RAID5
_BTRFS_RAID_RAID6
_BTRFS_NR_RAID_TYPES
)
/*
* We need a bit for restriper to be able to tell when chunks of type
* SINGLE are available. This "extended" profile format is used in
* fs_info->avail_*_alloc_bits (in-memory) and balance item fields
* (on-disk). The corresponding on-disk bit in chunk.type is reserved
* to avoid remappings between the two formats in the future.
*/
/*
* A fake block group type that is used to communicate global block reserve
* size to userspace via the SPACE_INFO ioctl.
*/
func chunk_to_extended(flags uint64) uint64 {
if flags&uint64(_BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 | {
flags |= uint64(availAllocBitSingle)
} | conditional_block |
|
wordlattice.py | __(self):
return "\t".join([str(self.start_node), str(self.end_node), \
self.word.encode(sys.stdout.encoding), str(self.ac_score), str(self.lm_score)])
class Path:
# Constructs a path from the given list of links.
def __init__(self, links=None):
self.__links = links if links is not None else []
def __repr__(self):
result = "\n".join(str(x) for x in self.__links) + "\n"
result += "\t\t\t" + str(self.total_ac_score())
result += "\t" + str(self.total_lm_score())
return result
def empty(self):
return len(self.__links) == 0
# Returns the node ID of the final node in this path, or -1 if this is
# an empty path.
def final_node(self):
if self.empty():
return -1
else:
return self.__links[-1].end_node
def append(self, link):
self.__links.append(link)
# Returns a list of expansions of the path, one for each of the given
# links.
def create_expansions(self, links):
return [WordLattice.Path(self.__links + [x]) for x in links]
def total_ac_score(self):
return sum(x.ac_score for x in self.__links)
def total_lm_score(self):
return sum(x.lm_score for x in self.__links)
# A list of links and nodes.
class LNList:
def __init__(self):
self.links = []
self.nodes = []
def extend(self, other):
self.links.extend(other.links)
self.nodes.extend(other.nodes)
def __init__(self):
# A regular expression for fields such as E=997788 or W="that is" in an
# SLF file.
self.assignment_re = re.compile(r'(\S+)=(?:"((?:[^\\"]+|\\.)*)"|(\S+))')
def __deepcopy__(self, memo={}):
result = WordLattice()
memo[id(self)] = result
result.__nodes = deepcopy(self.__nodes, memo)
result.__links = deepcopy(self.__links, memo)
result.__start_nodes_of_links = deepcopy(self.__start_nodes_of_links, memo)
result.start_node = self.start_node
result.end_node = self.end_node
result.lm_scale = self.lm_scale
return result
def read_slf(self, input_file):
self.__nodes = []
self.__links = []
self.lm_scale = 1
at_header = True
for line in input_file:
if line.startswith('#'):
continue
fields = dict([(x[0], x[1] or x[2]) for x in self.assignment_re.findall(line.rstrip())])
if at_header:
if 'start' in fields:
self.start_node = int(fields['start'])
if 'end' in fields:
self.end_node = int(fields['end'])
if 'lmscale' in fields:
self.lm_scale = float(fields['lmscale'])
if ('I' in fields) or ('J' in fields):
at_header = False
if not at_header:
if 'I' in fields:
node_id = int(fields['I'])
if 't' in fields:
node_time = int(fields['t'])
else:
node_time = 0
self.__nodes.append(self.Node(node_id, node_time))
elif 'J' in fields:
link_id = int(fields['J'])
start_node = int(fields['S'])
end_node = int(fields['E'])
word = fields['W']
if 'a' in fields:
ac_score = float(fields['a'])
else:
ac_score = 0
lm_score = float(fields['l'])
self.__links.append(self.Link(link_id, start_node, end_node,
word, ac_score, lm_score))
if len(self.__nodes) == 0:
raise Exception("No nodes read.")
if not hasattr(self, 'start_node'):
self.start_node = self.__nodes[0].id
if not hasattr(self, 'end_node'):
self.end_node = self.__nodes[-1].id
self.__nodes_updated()
self.__links_updated()
self.__nodes[self.start_node].refer()
for link in self.__links:
self.__nodes[link.end_node].refer()
def write_slf(self, output_file):
output_file.write("# Header\n")
output_file.write("VERSION=1.1\n")
output_file.write("base=10\n")
output_file.write("dir=f\n")
output_file.write("lmscale=" + str(self.lm_scale) + "\n")
output_file.write("start=" + str(self.start_node) + "\n")
output_file.write("end=" + str(self.end_node) + "\n")
output_file.write("NODES=" + str(len(self.__nodes)))
output_file.write(" LINKS=" + str(len(self.__links)) + "\n")
output_file.write("# Nodes\n")
for node in self.__nodes:
output_file.write("I=" + str(node.id))
output_file.write("\tt=" + str(node.time) + "\n")
output_file.write("# Links\n")
for link in self.__links:
output_file.write("J=" + str(link.id))
output_file.write("\tS=" + str(link.start_node))
output_file.write("\tE=" + str(link.end_node))
output_file.write("\tW=" + link.word)
output_file.write("\ta=" + str(link.ac_score))
output_file.write("\tv=0")
output_file.write("\tl=" + str(link.lm_score) + "\n")
# Finds the paths from the start node to the end node that pass through the given words.
def find_paths(self, words):
tokens = self.expand_path_to_null_links(self.Path())
for word in words:
new_tokens = []
for path in tokens:
|
tokens = new_tokens
new_tokens = []
for path in tokens:
new_tokens.extend(self.expand_path_to_null_links(path))
tokens = new_tokens
print(len(tokens), "tokens @", word)
if tokens == []:
return []
result = []
for path in tokens:
if path.final_node() == self.end_node:
result.append(path)
return result
# Returns the range of links with given start node.
def links_from(self, node_id):
first = bisect_left(self.__start_nodes_of_links, node_id)
last = bisect_right(self.__start_nodes_of_links, node_id)
return self.__links[first:last]
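# Illustrative sketch, not part of the original file: links_from() relies on
# self.__links being kept sorted by start_node, with those keys mirrored in
# self.__start_nodes_of_links; the __links_updated() helper (outside this
# excerpt) presumably maintains that invariant along the lines of:
#
#   self.__links.sort(key=lambda link: link.start_node)
#   self.__start_nodes_of_links = [link.start_node for link in self.__links]
#
# bisect_left/bisect_right then locate the half-open range of outgoing links
# in O(log n) time instead of scanning every link.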
# Returns a list of paths that have been formed by advancing from the given
# path to all the !NULL links and recursively to the next !NULL links. The given
# path is also included. If the given path is empty, starts from the global
# start node.
def expand_path_to_null_links(self, path):
if path.empty():
start_node = self.start_node
else:
start_node = path.final_node()
expansion_links = []
for link in self.links_from(start_node):
if link.word == "!NULL":
expansion_links.append(link)
expanded_paths = path.create_expansions(expansion_links)
result = [path]
for expanded_path in expanded_paths:
result.extend(self.expand_path_to_null_links(expanded_path))
return result
# Returns a list of paths that have been formed by advancing from given path
# to all the links with given word.
def find_extensions(self, path, word):
links = []
for link in self.links_from(path.final_node()):
if link.word == word:
links.append(link)
return path.create_expansions(links)
# Returns the set of words present in this lattice.
def words(self):
result = set()
for link in self.__links:
if link.word != '!NULL':
result.add(link.word)
return result
# Returns the set of node IDs present in this lattice.
def node_ids(self):
return set(x.id for x in self.__nodes)
# Returns the set of reachable nodes in the lattice.
def reachable_nodes(self, start_node=None):
if start_node is None:
start_node = self.start_node
result = set([start_node])
for link in self.links_from(start_node):
result.update(self.reachable_nodes(link.end_node))
return result
# Returns the set of unreachable nodes in the lattice.
def unreachable_nodes(self):
return self.node_ids() - self.reachable_nodes()
# Remove links that contain a word from the given list.
def remove_words(self, words):
to_delete = self.LNList()
for link in self.__links:
if link.word in words:
to_delete.links.append(link.id)
to_delete.extend(self.__unlink(link.end_node))
for link in self.__links:
if (link.start_node in to_delete.nodes) or \
(link.end_node in to_delete.nodes):
to_delete.links.append(link.id)
if self.end_node in to_delete.nodes:
self.end_node = -1
self.__links = [x for x in self.__links | new_tokens.extend(self.find_extensions(path, word)) | conditional_block |
wordlattice.py | __(self):
return "\t".join([str(self.start_node), str(self.end_node), \
self.word.encode(sys.stdout.encoding), str(self.ac_score), str(self.lm_score)])
class Path:
# Constructs a path from the given list of links.
def __init__(self, links=None):
self.__links = links if links is not None else []
def __repr__(self):
result = "\n".join(str(x) for x in self.__links) + "\n"
result += "\t\t\t" + str(self.total_ac_score())
result += "\t" + str(self.total_lm_score())
return result
def empty(self):
return len(self.__links) == 0
# Returns the node ID of the final node in this path, or -1 if this is
# an empty path.
def final_node(self):
if self.empty():
return -1
else:
return self.__links[-1].end_node
def append(self, link):
self.__links.append(link)
# Returns a list of expansions of the path, one for each of the given
# links.
def create_expansions(self, links):
return [WordLattice.Path(self.__links + [x]) for x in links]
def total_ac_score(self):
return sum(x.ac_score for x in self.__links)
def total_lm_score(self):
return sum(x.lm_score for x in self.__links)
# A list of links and nodes.
class LNList:
def __init__(self):
self.links = []
self.nodes = []
def extend(self, other):
self.links.extend(other.links)
self.nodes.extend(other.nodes)
def __init__(self):
# A regular expression for fields such as E=997788 or W="that is" in an
# SLF file.
self.assignment_re = re.compile(r'(\S+)=(?:"((?:[^\\"]+|\\.)*)"|(\S+))')
def __deepcopy__(self, memo={}):
result = WordLattice()
memo[id(self)] = result
result.__nodes = deepcopy(self.__nodes, memo)
result.__links = deepcopy(self.__links, memo)
result.__start_nodes_of_links = deepcopy(self.__start_nodes_of_links, memo)
result.start_node = self.start_node
result.end_node = self.end_node
result.lm_scale = self.lm_scale
return result
def read_slf(self, input_file):
self.__nodes = []
self.__links = []
self.lm_scale = 1
at_header = True
for line in input_file:
if line.startswith('#'):
continue
fields = dict([(x[0], x[1] or x[2]) for x in self.assignment_re.findall(line.rstrip())])
if at_header:
if 'start' in fields:
self.start_node = int(fields['start'])
if 'end' in fields:
self.end_node = int(fields['end'])
if 'lmscale' in fields:
self.lm_scale = float(fields['lmscale'])
if ('I' in fields) or ('J' in fields):
at_header = False
if not at_header:
if 'I' in fields:
node_id = int(fields['I'])
if 't' in fields:
node_time = int(fields['t'])
else:
node_time = 0
self.__nodes.append(self.Node(node_id, node_time))
elif 'J' in fields:
link_id = int(fields['J'])
start_node = int(fields['S'])
end_node = int(fields['E'])
word = fields['W']
if 'a' in fields:
ac_score = float(fields['a'])
else:
ac_score = 0
lm_score = float(fields['l'])
self.__links.append(self.Link(link_id, start_node, end_node,
word, ac_score, lm_score))
if len(self.__nodes) == 0:
raise Exception("No nodes read.")
if not hasattr(self, 'start_node'):
self.start_node = self.__nodes[0].id
if not hasattr(self, 'end_node'):
self.end_node = self.__nodes[-1].id
self.__nodes_updated()
self.__links_updated()
self.__nodes[self.start_node].refer()
for link in self.__links:
self.__nodes[link.end_node].refer()
def write_slf(self, output_file):
| output_file.write("\tW=" + link.word)
output_file.write("\ta=" + str(link.ac_score))
output_file.write("\tv=0")
output_file.write("\tl=" + str(link.lm_score) + "\n")
# Finds the paths from the start node to the end node that pass through the given words.
def find_paths(self, words):
tokens = self.expand_path_to_null_links(self.Path())
for word in words:
new_tokens = []
for path in tokens:
new_tokens.extend(self.find_extensions(path, word))
tokens = new_tokens
new_tokens = []
for path in tokens:
new_tokens.extend(self.expand_path_to_null_links(path))
tokens = new_tokens
print(len(tokens), "tokens @", word)
if tokens == []:
return []
result = []
for path in tokens:
if path.final_node() == self.end_node:
result.append(path)
return result
# Returns the range of links with given start node.
def links_from(self, node_id):
first = bisect_left(self.__start_nodes_of_links, node_id)
last = bisect_right(self.__start_nodes_of_links, node_id)
return self.__links[first:last]
# Returns a list of paths that have been formed by advancing from the given
# path to all the !NULL links and recursively to the next !NULL links. The given
# path is also included. If the given path is empty, starts from the global
# start node.
def expand_path_to_null_links(self, path):
if path.empty():
start_node = self.start_node
else:
start_node = path.final_node()
expansion_links = []
for link in self.links_from(start_node):
if link.word == "!NULL":
expansion_links.append(link)
expanded_paths = path.create_expansions(expansion_links)
result = [path]
for expanded_path in expanded_paths:
result.extend(self.expand_path_to_null_links(expanded_path))
return result
# Returns a list of paths that have been formed by advancing from given path
# to all the links with given word.
def find_extensions(self, path, word):
links = []
for link in self.links_from(path.final_node()):
if link.word == word:
links.append(link)
return path.create_expansions(links)
# Returns the set of words present in this lattice.
def words(self):
result = set()
for link in self.__links:
if link.word != '!NULL':
result.add(link.word)
return result
# Returns the set of node IDs present in this lattice.
def node_ids(self):
return set(x.id for x in self.__nodes)
# Returns the set of reachable nodes in the lattice.
def reachable_nodes(self, start_node=None):
if start_node is None:
start_node = self.start_node
result = set([start_node])
for link in self.links_from(start_node):
result.update(self.reachable_nodes(link.end_node))
return result
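# Illustrative variant, not part of the original file: the recursion above
# revisits a node once per distinct path reaching it, which can blow up on
# dense lattices; an iterative version with a visited set stays linear:
#
#   def reachable_nodes_iter(self, start_node=None):
#       stack = [self.start_node if start_node is None else start_node]
#       seen = set()
#       while stack:
#           node = stack.pop()
#           if node not in seen:
#               seen.add(node)
#               stack.extend(link.end_node for link in self.links_from(node))
#       return seen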
# Returns the set of unreachable nodes in the lattice.
def unreachable_nodes(self):
return self.node_ids() - self.reachable_nodes()
# Remove links that contain a word from the given list.
def remove_words(self, words):
to_delete = self.LNList()
for link in self.__links:
if link.word in words:
to_delete.links.append(link.id)
to_delete.extend(self.__unlink(link.end_node))
for link in self.__links:
if (link.start_node in to_delete.nodes) or \
(link.end_node in to_delete.nodes):
to_delete.links.append(link.id)
if self.end_node in to_delete.nodes:
self.end_node = -1
self.__links = [x for x in self.__links if | output_file.write("# Header\n")
output_file.write("VERSION=1.1\n")
output_file.write("base=10\n")
output_file.write("dir=f\n")
output_file.write("lmscale=" + str(self.lm_scale) + "\n")
output_file.write("start=" + str(self.start_node) + "\n")
output_file.write("end=" + str(self.end_node) + "\n")
output_file.write("NODES=" + str(len(self.__nodes)))
output_file.write(" LINKS=" + str(len(self.__links)) + "\n")
output_file.write("# Nodes\n")
for node in self.__nodes:
output_file.write("I=" + str(node.id))
output_file.write("\tt=" + str(node.time) + "\n")
output_file.write("# Links\n")
for link in self.__links:
output_file.write("J=" + str(link.id))
output_file.write("\tS=" + str(link.start_node))
output_file.write("\tE=" + str(link.end_node)) | identifier_body |
wordlattice.py | __(self):
return "\t".join([str(self.start_node), str(self.end_node), \
self.word.encode(sys.stdout.encoding), str(self.ac_score), str(self.lm_score)])
class Path:
# Constructs a path from the given list of links.
def __init__(self, links=None):
self.__links = links if links is not None else []
def __repr__(self):
result = "\n".join(str(x) for x in self.__links) + "\n"
result += "\t\t\t" + str(self.total_ac_score())
result += "\t" + str(self.total_lm_score())
return result
def empty(self):
return len(self.__links) == 0
# Returns the node ID of the final node in this path, or -1 if this is
# an empty path.
def final_node(self):
if self.empty():
return -1
else:
return self.__links[-1].end_node
def append(self, link):
self.__links.append(link)
# Returns a list of expansions of the path, one for each of the given
# links.
def create_expansions(self, links):
return [WordLattice.Path(self.__links + [x]) for x in links]
def total_ac_score(self):
return sum(x.ac_score for x in self.__links)
def total_lm_score(self):
return sum(x.lm_score for x in self.__links)
# A list of links and nodes.
class LNList:
def __init__(self):
self.links = []
self.nodes = []
def extend(self, other):
self.links.extend(other.links)
self.nodes.extend(other.nodes)
def __init__(self):
# A regular expression for fields such as E=997788 or W="that is" in an
# SLF file.
self.assignment_re = re.compile(r'(\S+)=(?:"((?:[^\\"]+|\\.)*)"|(\S+))')
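# Illustrative examples, not part of the original file, of what the pattern
# captures -- either a quoted value (with escapes) or a bare token:
#
#   'W="that is"'  ->  ('W', 'that is', '')
#   'E=997788'     ->  ('E', '', '997788')
#
# which is why read_slf() below builds its field dict with `x[1] or x[2]`.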
def | (self, memo={}):
result = WordLattice()
memo[id(self)] = result
result.__nodes = deepcopy(self.__nodes, memo)
result.__links = deepcopy(self.__links, memo)
result.__start_nodes_of_links = deepcopy(self.__start_nodes_of_links, memo)
result.start_node = self.start_node
result.end_node = self.end_node
result.lm_scale = self.lm_scale
return result
def read_slf(self, input_file):
self.__nodes = []
self.__links = []
self.lm_scale = 1
at_header = True
for line in input_file:
if line.startswith('#'):
continue
fields = dict([(x[0], x[1] or x[2]) for x in self.assignment_re.findall(line.rstrip())])
if at_header:
if 'start' in fields:
self.start_node = int(fields['start'])
if 'end' in fields:
self.end_node = int(fields['end'])
if 'lmscale' in fields:
self.lm_scale = float(fields['lmscale'])
if ('I' in fields) or ('J' in fields):
at_header = False
if not at_header:
if 'I' in fields:
node_id = int(fields['I'])
if 't' in fields:
node_time = int(fields['t'])
else:
node_time = 0
self.__nodes.append(self.Node(node_id, node_time))
elif 'J' in fields:
link_id = int(fields['J'])
start_node = int(fields['S'])
end_node = int(fields['E'])
word = fields['W']
if 'a' in fields:
ac_score = float(fields['a'])
else:
ac_score = 0
lm_score = float(fields['l'])
self.__links.append(self.Link(link_id, start_node, end_node,
word, ac_score, lm_score))
if len(self.__nodes) == 0:
raise Exception("No nodes read.")
if not hasattr(self, 'start_node'):
self.start_node = self.__nodes[0].id
if not hasattr(self, 'end_node'):
self.end_node = self.__nodes[-1].id
self.__nodes_updated()
self.__links_updated()
self.__nodes[self.start_node].refer()
for link in self.__links:
self.__nodes[link.end_node].refer()
def write_slf(self, output_file):
output_file.write("# Header\n")
output_file.write("VERSION=1.1\n")
output_file.write("base=10\n")
output_file.write("dir=f\n")
output_file.write("lmscale=" + str(self.lm_scale) + "\n")
output_file.write("start=" + str(self.start_node) + "\n")
output_file.write("end=" + str(self.end_node) + "\n")
output_file.write("NODES=" + str(len(self.__nodes)))
output_file.write(" LINKS=" + str(len(self.__links)) + "\n")
output_file.write("# Nodes\n")
for node in self.__nodes:
output_file.write("I=" + str(node.id))
output_file.write("\tt=" + str(node.time) + "\n")
output_file.write("# Links\n")
for link in self.__links:
output_file.write("J=" + str(link.id))
output_file.write("\tS=" + str(link.start_node))
output_file.write("\tE=" + str(link.end_node))
output_file.write("\tW=" + link.word)
output_file.write("\ta=" + str(link.ac_score))
output_file.write("\tv=0")
output_file.write("\tl=" + str(link.lm_score) + "\n")
# Finds the paths from the start node to the end node that pass through the given words.
def find_paths(self, words):
tokens = self.expand_path_to_null_links(self.Path())
for word in words:
new_tokens = []
for path in tokens:
new_tokens.extend(self.find_extensions(path, word))
tokens = new_tokens
new_tokens = []
for path in tokens:
new_tokens.extend(self.expand_path_to_null_links(path))
tokens = new_tokens
print(len(tokens), "tokens @", word)
if tokens == []:
return []
result = []
for path in tokens:
if path.final_node() == self.end_node:
result.append(path)
return result
# Returns the range of links with given start node.
def links_from(self, node_id):
first = bisect_left(self.__start_nodes_of_links, node_id)
last = bisect_right(self.__start_nodes_of_links, node_id)
return self.__links[first:last]
# Returns a list of paths that have been formed by advancing from the given
# path to all the !NULL links and recursively to the next !NULL links. The given
# path is also included. If the given path is empty, starts from the global
# start node.
def expand_path_to_null_links(self, path):
if path.empty():
start_node = self.start_node
else:
start_node = path.final_node()
expansion_links = []
for link in self.links_from(start_node):
if link.word == "!NULL":
expansion_links.append(link)
expanded_paths = path.create_expansions(expansion_links)
result = [path]
for expanded_path in expanded_paths:
result.extend(self.expand_path_to_null_links(expanded_path))
return result
# Returns a list of paths that have been formed by advancing from given path
# to all the links with given word.
def find_extensions(self, path, word):
links = []
for link in self.links_from(path.final_node()):
if link.word == word:
links.append(link)
return path.create_expansions(links)
# Returns the set of words present in this lattice.
def words(self):
result = set()
for link in self.__links:
if link.word != '!NULL':
result.add(link.word)
return result
# Returns the set of node IDs present in this lattice.
def node_ids(self):
return set(x.id for x in self.__nodes)
# Returns the set of reachable nodes in the lattice.
def reachable_nodes(self, start_node=None):
if start_node is None:
start_node = self.start_node
result = set([start_node])
for link in self.links_from(start_node):
result.update(self.reachable_nodes(link.end_node))
return result
# Returns the set of unreachable nodes in the lattice.
def unreachable_nodes(self):
return self.node_ids() - self.reachable_nodes()
# Remove links that contain a word from the given list.
def remove_words(self, words):
to_delete = self.LNList()
for link in self.__links:
if link.word in words:
to_delete.links.append(link.id)
to_delete.extend(self.__unlink(link.end_node))
for link in self.__links:
if (link.start_node in to_delete.nodes) or \
(link.end_node in to_delete.nodes):
to_delete.links.append(link.id)
if self.end_node in to_delete.nodes:
self.end_node = -1
self.__links = [x for x in self.__links | __deepcopy__ | identifier_name |
wordlattice.py | __(self):
return "\t".join([str(self.start_node), str(self.end_node), \
self.word.encode(sys.stdout.encoding), str(self.ac_score), str(self.lm_score)])
class Path:
# Constructs a path from the given list of links.
def __init__(self, links=None):
self.__links = links if links is not None else []
def __repr__(self):
result = "\n".join(str(x) for x in self.__links) + "\n"
result += "\t\t\t" + str(self.total_ac_score())
result += "\t" + str(self.total_lm_score())
return result
def empty(self):
return len(self.__links) == 0
# Returns the node ID of the final node in this path, or -1 if this is
# an empty path.
def final_node(self):
if self.empty():
return -1
else:
return self.__links[-1].end_node
def append(self, link):
self.__links.append(link)
# Returns a list of expansions of the path, one for each of the given
# links.
def create_expansions(self, links):
return [WordLattice.Path(self.__links + [x]) for x in links]
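# Illustrative example, not part of the original file: expanding a path over
# two outgoing links yields two independent Path objects, each a copy of the
# original links plus one new link:
#
#   path.create_expansions([link_a, link_b])
#   # -> [Path(links + [link_a]), Path(links + [link_b])]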
def total_ac_score(self):
return sum(x.ac_score for x in self.__links)
def total_lm_score(self):
return sum(x.lm_score for x in self.__links)
# A list of links and nodes.
class LNList: | self.links.extend(other.links)
self.nodes.extend(other.nodes)
def __init__(self):
# A regular expression for fields such as E=997788 or W="that is" in an
# SLF file.
self.assignment_re = re.compile(r'(\S+)=(?:"((?:[^\\"]+|\\.)*)"|(\S+))')
def __deepcopy__(self, memo={}):
result = WordLattice()
memo[id(self)] = result
result.__nodes = deepcopy(self.__nodes, memo)
result.__links = deepcopy(self.__links, memo)
result.__start_nodes_of_links = deepcopy(self.__start_nodes_of_links, memo)
result.start_node = self.start_node
result.end_node = self.end_node
result.lm_scale = self.lm_scale
return result
def read_slf(self, input_file):
self.__nodes = []
self.__links = []
self.lm_scale = 1
at_header = True
for line in input_file:
if line.startswith('#'):
continue
fields = dict([(x[0], x[1] or x[2]) for x in self.assignment_re.findall(line.rstrip())])
if at_header:
if 'start' in fields:
self.start_node = int(fields['start'])
if 'end' in fields:
self.end_node = int(fields['end'])
if 'lmscale' in fields:
self.lm_scale = float(fields['lmscale'])
if ('I' in fields) or ('J' in fields):
at_header = False
if not at_header:
if 'I' in fields:
node_id = int(fields['I'])
if 't' in fields:
node_time = int(fields['t'])
else:
node_time = 0
self.__nodes.append(self.Node(node_id, node_time))
elif 'J' in fields:
link_id = int(fields['J'])
start_node = int(fields['S'])
end_node = int(fields['E'])
word = fields['W']
if 'a' in fields:
ac_score = float(fields['a'])
else:
ac_score = 0
lm_score = float(fields['l'])
self.__links.append(self.Link(link_id, start_node, end_node,
word, ac_score, lm_score))
if len(self.__nodes) == 0:
raise Exception("No nodes read.")
if not hasattr(self, 'start_node'):
self.start_node = self.__nodes[0].id
if not hasattr(self, 'end_node'):
self.end_node = self.__nodes[-1].id
self.__nodes_updated()
self.__links_updated()
self.__nodes[self.start_node].refer()
for link in self.__links:
self.__nodes[link.end_node].refer()
def write_slf(self, output_file):
output_file.write("# Header\n")
output_file.write("VERSION=1.1\n")
output_file.write("base=10\n")
output_file.write("dir=f\n")
output_file.write("lmscale=" + str(self.lm_scale) + "\n")
output_file.write("start=" + str(self.start_node) + "\n")
output_file.write("end=" + str(self.end_node) + "\n")
output_file.write("NODES=" + str(len(self.__nodes)))
output_file.write(" LINKS=" + str(len(self.__links)) + "\n")
output_file.write("# Nodes\n")
for node in self.__nodes:
output_file.write("I=" + str(node.id))
output_file.write("\tt=" + str(node.time) + "\n")
output_file.write("# Links\n")
for link in self.__links:
output_file.write("J=" + str(link.id))
output_file.write("\tS=" + str(link.start_node))
output_file.write("\tE=" + str(link.end_node))
output_file.write("\tW=" + link.word)
output_file.write("\ta=" + str(link.ac_score))
output_file.write("\tv=0")
output_file.write("\tl=" + str(link.lm_score) + "\n")
# Finds the paths from the start node to the end node that pass through the given words.
def find_paths(self, words):
tokens = self.expand_path_to_null_links(self.Path())
for word in words:
new_tokens = []
for path in tokens:
new_tokens.extend(self.find_extensions(path, word))
tokens = new_tokens
new_tokens = []
for path in tokens:
new_tokens.extend(self.expand_path_to_null_links(path))
tokens = new_tokens
print(len(tokens), "tokens @", word)
if tokens == []:
return []
result = []
for path in tokens:
if path.final_node() == self.end_node:
result.append(path)
return result
# Returns the range of links with given start node.
def links_from(self, node_id):
first = bisect_left(self.__start_nodes_of_links, node_id)
last = bisect_right(self.__start_nodes_of_links, node_id)
return self.__links[first:last]
# Returns a list of paths that have been formed by advancing from the given
# path to all the !NULL links and recursively to the next !NULL links. The given
# path is also included. If the given path is empty, starts from the global
# start node.
def expand_path_to_null_links(self, path):
if path.empty():
start_node = self.start_node
else:
start_node = path.final_node()
expansion_links = []
for link in self.links_from(start_node):
if link.word == "!NULL":
expansion_links.append(link)
expanded_paths = path.create_expansions(expansion_links)
result = [path]
for expanded_path in expanded_paths:
result.extend(self.expand_path_to_null_links(expanded_path))
return result
# Returns a list of paths that have been formed by advancing from given path
# to all the links with given word.
def find_extensions(self, path, word):
links = []
for link in self.links_from(path.final_node()):
if link.word == word:
links.append(link)
return path.create_expansions(links)
# Returns the set of words present in this lattice.
def words(self):
result = set()
for link in self.__links:
if link.word != '!NULL':
result.add(link.word)
return result
# Returns the set of node IDs present in this lattice.
def node_ids(self):
return set(x.id for x in self.__nodes)
# Returns the set of reachable nodes in the lattice.
def reachable_nodes(self, start_node=None):
if start_node is None:
start_node = self.start_node
result = set([start_node])
for link in self.links_from(start_node):
result.update(self.reachable_nodes(link.end_node))
return result
# Returns the set of unreachable nodes in the lattice.
def unreachable_nodes(self):
return self.node_ids() - self.reachable_nodes()
# Remove links that contain a word from the given list.
def remove_words(self, words):
to_delete = self.LNList()
for link in self.__links:
if link.word in words:
to_delete.links.append(link.id)
to_delete.extend(self.__unlink(link.end_node))
for link in self.__links:
if (link.start_node in to_delete.nodes) or \
(link.end_node in to_delete.nodes):
to_delete.links.append(link.id)
if self.end_node in to_delete.nodes:
self.end_node = -1
self.__links = [x for x in self.__links if | def __init__(self):
self.links = []
self.nodes = []
def extend(self, other): | random_line_split |
main01.go | ><body><h1>" +
"hello world</h1></body></html>"
w.Write([]byte(str))
}
func writeHeaderExample(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(501)
fmt.Fprintln(w, "No such service, try next door")
}
func headerExample(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Location", "http://baidu.com")
w.WriteHeader(302)
}
//type Post struct {
// User string
// Threads []string
//}
//
//func jsonExample(w http.ResponseWriter, r *http.Request) {
// w.Header().Set("Content-Type", "application/json")
// post := &Post{
// User: "Sau Sheong",
// Threads: []string{"first", "second", "third"},
// }
// json, _ := json.Marshal(post)
// w.Write(json)
//}
func setCookie(w http.ResponseWriter, r *http.Request) {
c1 := http.Cookie{
Name: "first_cookie",
Value: "Go Web Programming",
Path: "",
Domain: "",
Expires: time.Time{},
RawExpires: "",
MaxAge: 0,
Secure: false,
HttpOnly: true,
SameSite: 0,
Raw: "",
Unparsed: nil,
}
c2 := http.Cookie{
Name: "second_cookie",
Value: "Manning Publications Co",
Path: "",
Domain: "",
Expires: time.Time{},
RawExpires: "",
MaxAge: 0,
Secure: false,
HttpOnly: true,
SameSite: 0,
Raw: "",
Unparsed: nil,
}
//w.Header().Set("Set-Cookie", c1.String())
//w.Header().Add("Set-Cookie", c2.String())
http.SetCookie(w, &c1)
http.SetCookie(w, &c2)
}
func getCookie(w http.ResponseWriter, r *http.Request) {
//h := r.Header["Cookie"]
//fmt.Fprintln(w, h)
c1, err := r.Cookie("first_cookie")
if err != nil {
fmt.Fprintln(w, "Cannot get the first cookie")
}
cs := r.Cookies()
fmt.Fprintln(w, c1)
fmt.Fprintln(w, cs)
}
func setMessage(w http.ResponseWriter, r *http.Request) {
msg := []byte("Hello World!")
c := http.Cookie{
Name: "flash",
Value: base64.URLEncoding.EncodeToString(msg),
Path: "",
Domain: "",
Expires: time.Time{},
RawExpires: "",
MaxAge: 0,
Secure: false,
HttpOnly: false,
SameSite: 0,
Raw: "",
Unparsed: nil,
}
http.SetCookie(w, &c)
}
func showMessage(w http.ResponseWriter, r *http.Request) {
c, err := r.Cookie("flash")
if err != nil {
if err == http.ErrNoCookie {
fmt.Fprintln(w, "No message found")
}
} else {
rc := http.Cookie{
Name: "flash",
Value: "",
Path: "",
Domain: "",
Expires: time.Unix(1, 0),
RawExpires: "",
MaxAge: -1,
Secure: false,
HttpOnly: false,
SameSite: 0,
Raw: "",
Unparsed: nil,
}
http.SetCookie(w, &rc)
val, _ := base64.URLEncoding.DecodeString(c.Value)
fmt.Fprintln(w, string(val))
}
}
var mPath = "D:/MyProgram/Go/github/"
//var mPath = "E:/go/project/"
func process2(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl.html")
t.Execute(w, "Hello Workd!")
}
func process3(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl2.html")
rand.Seed(time.Now().Unix())
t.Execute(w, rand.Intn(10) > 5)
}
func process4(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl4.html")
daysOfWeek := []string{"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"}
//daysOfWeek := []string{}
t.Execute(w, daysOfWeek)
}
func process5(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl5.html")
t.Execute(w, "hello")
}
func process6(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath+"test02/src/test/t1.html", mPath+"test02/src/test/t2.html")
t.Execute(w, "Hello World!")
}
func formatDate(t time.Time) string {
layout := "2006-01-02"
return t.Format(layout)
}
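// Illustrative note, not part of the original file: Go layout strings are
// written in terms of the reference time Mon Jan 2 15:04:05 MST 2006, e.g.
//
//	time.Now().Format("2006-01-02")  // "2020-06-21"
//	time.Now().Format("Jan 2, 2006") // "Jun 21, 2020"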
func process7(w http.ResponseWriter, r *http.Request) {
funcMap := template.FuncMap{"fdate": formatDate}
t := template.New("tmpl7.html").Funcs(funcMap)
t, _ = t.ParseFiles(mPath + "test02/src/test/tmpl7.html")
t.Execute(w, time.Now())
}
func process8(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl8.html")
content := `I asked: <i>"What's up?"</i>`
t.Execute(w, content)
}
func | (w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl9.html")
t.Execute(w, r.FormValue("comment"))
}
func form(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/form.html")
t.Execute(w, nil)
}
func process10(w http.ResponseWriter, r *http.Request) {
rand.Seed(time.Now().Unix())
var t *template.Template
if rand.Intn(10) > 5 {
t, _ = template.ParseFiles(mPath+"test02/src/test/layout.html", mPath+"test02/src/test/red_hello.html")
} else {
//t, _ = template.ParseFiles(mPath + "test02/src/test/layout.html", mPath + "test02/src/test/blue_hello.html")
t, _ = template.ParseFiles(mPath + "test02/src/test/layout.html")
}
t.ExecuteTemplate(w, "layout", "")
}
//func main() {
// //hello := HelloHandler{}
// //world := WorldHandler{}
//
// //mux:= httprouter.New()
// //mux.GET("/hello/:name", hello)
//
// server := http.Server{
// Addr: "127.0.0.1:8080",
// }
//
// //http.Handle("/hello", &hello)
// //http.Handle("/world", &world)
//
// //http.HandleFunc("/hello", hello)
// //http.HandleFunc("/world", world)
//
// //http.HandleFunc("/hello", log(hello))
// //http.Handle("/hello", protect(log2(&hello)))
//
// //http.HandleFunc("/headers", headers)
// //http.HandleFunc("/body", body)
// //http.HandleFunc("/process", process)
// //http.HandleFunc("/write", writeExample)
// //http.HandleFunc("/writeheader", writeHeaderExample)
// //http.HandleFunc("/redirect", headerExample)
// //http.HandleFunc("/json", jsonExample)
// //http.HandleFunc("/set_cookie", setCookie)
// //http.HandleFunc("/get_cookie", getCookie)
// //http.HandleFunc("/set_message", setMessage)
// //http.HandleFunc("/show_message", showMessage)
// http.HandleFunc("/process2", process2)
// http.HandleFunc("/process3", process3)
// http.HandleFunc("/process4", process4)
// http.HandleFunc("/process5", process5)
// http.HandleFunc("/process6", process6)
// http.HandleFunc("/process7", process7)
// http.HandleFunc("/process8", process8)
// http.HandleFunc("/process9", process9)
// http.HandleFunc("/form", form)
// http.HandleFunc("/process10", process10)
//
// server.ListenAndServe()
//}
//type Post struct {
// Id int
// Content string
// Author string
//}
//
//var PostById map[int]*Post
//var PostsByAuthor map[string][]*Post
//
//func store(post Post) {
// PostById[post.Id] = &post
// PostsByAuthor[post.Author] = append(PostsByAuthor[post.Author], &post)
//}
//
//func main() {
// PostById = make(map[int]*Post)
// PostsByAuthor = make(map[string][]*Post)
//
// post1 := Post{Id: 1, Content: "Hello World!", Author: "Sau Sheong"}
// post2 | process9 | identifier_name |
main01.go | "github.com/julienschmidt/httprouter"
"html/template"
"io/ioutil"
"math/rand"
"net/http"
"reflect"
"runtime"
"time"
)
type HelloHandler struct {
}
func (h *HelloHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "Hello!")
}
type WorldHandler struct {
}
func (h *WorldHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "World!")
}
func hello(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
fmt.Fprintf(w, "Hello, %s!\n", p.ByName("name"))
}
func world(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "World!")
}
func log(h http.HandlerFunc) http.HandlerFunc {
return func(writer http.ResponseWriter, request *http.Request) {
name := runtime.FuncForPC(reflect.ValueOf(h).Pointer()).Name()
fmt.Println("Handler function called - " + name)
h(writer, request)
}
}
func log2(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Printf("Handler called - %T\n", h)
h.ServeHTTP(w, r)
})
}
func protect(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
h.ServeHTTP(w, r)
})
}
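// Illustrative note, not part of the original file: the wrappers above
// compose by nesting, as in the commented-out registration elsewhere in
// this file:
//
//	http.Handle("/hello", protect(log2(&hello)))
//
// A request first enters protect, then log2 (which logs the handler's
// type), and finally reaches the wrapped HelloHandler.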
func headers(w http.ResponseWriter, r *http.Request) {
h := r.Header
fmt.Fprintln(w, h)
}
func body(w http.ResponseWriter, r *http.Request) {
len := r.ContentLength
body := make([]byte, len)
r.Body.Read(body)
fmt.Fprintln(w, string(body))
}
func process(w http.ResponseWriter, r *http.Request) {
//r.ParseForm()
//fmt.Fprintln(w, r.Form)
r.ParseMultipartForm(1024)
//fmt.Fprintln(w, "(1)", r.FormValue("hello"))
//fmt.Fprintln(w, "(2)", r.PostFormValue("hello"))
//fmt.Fprintln(w, "(3)", r.PostForm)
//fmt.Fprintln(w, "(4)", r.MultipartForm)
//fileHeader := r.MultipartForm.File["uploaded"][0]
//file, err := fileHeader.Open()
//if err == nil {
// data, err := ioutil.ReadAll(file)
// if err == nil {
// fmt.Fprintln(w, string(data))
// }
//}
file, _, err := r.FormFile("uploaded")
if err == nil {
data, err := ioutil.ReadAll(file)
if err == nil {
fmt.Fprintln(w, string(data))
}
}
}
func writeExample(w http.ResponseWriter, r *http.Request) {
str := "<html> <head><title>Go web Programming</title></head><body><h1>" +
"hello world</h1></body></html>"
w.Write([]byte(str))
}
func writeHeaderExample(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(501)
fmt.Fprintln(w, "No such service, try next door")
}
func headerExample(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Location", "http://baidu.com")
w.WriteHeader(302)
}
//type Post struct {
// User string
// Threads []string
//}
//
//func jsonExample(w http.ResponseWriter, r *http.Request) {
// w.Header().Set("Content-Type", "application/json")
// post := &Post{
// User: "Sau Sheong",
// Threads: []string{"first", "second", "third"},
// }
// json, _ := json.Marshal(post)
// w.Write(json)
//}
func setCookie(w http.ResponseWriter, r *http.Request) {
c1 := http.Cookie{
Name: "first_cookie",
Value: "Go Web Programming",
Path: "",
Domain: "",
Expires: time.Time{},
RawExpires: "",
MaxAge: 0,
Secure: false,
HttpOnly: true,
SameSite: 0,
Raw: "",
Unparsed: nil,
}
c2 := http.Cookie{
Name: "second_cookie",
Value: "Manning Publications Co",
Path: "",
Domain: "",
Expires: time.Time{},
RawExpires: "",
MaxAge: 0,
Secure: false,
HttpOnly: true,
SameSite: 0,
Raw: "",
Unparsed: nil,
}
//w.Header().Set("Set-Cookie", c1.String())
//w.Header().Add("Set-Cookie", c2.String())
http.SetCookie(w, &c1)
http.SetCookie(w, &c2)
}
func getCookie(w http.ResponseWriter, r *http.Request) {
//h := r.Header["Cookie"]
//fmt.Fprintln(w, h)
c1, err := r.Cookie("first_cookie")
if err != nil {
fmt.Fprintln(w, "Cannot get the first cookie")
}
cs := r.Cookies()
fmt.Fprintln(w, c1)
fmt.Fprintln(w, cs)
}
func setMessage(w http.ResponseWriter, r *http.Request) {
msg := []byte("Hello World!")
c := http.Cookie{
Name: "flash",
Value: base64.URLEncoding.EncodeToString(msg),
Path: "",
Domain: "",
Expires: time.Time{},
RawExpires: "",
MaxAge: 0,
Secure: false,
HttpOnly: false,
SameSite: 0,
Raw: "",
Unparsed: nil,
}
http.SetCookie(w, &c)
}
func showMessage(w http.ResponseWriter, r *http.Request) {
c, err := r.Cookie("flash")
if err != nil {
if err == http.ErrNoCookie {
fmt.Fprintln(w, "No message found")
}
} else {
rc := http.Cookie{
Name: "flash",
Value: "",
Path: "",
Domain: "",
Expires: time.Unix(1, 0),
RawExpires: "",
MaxAge: -1,
Secure: false,
HttpOnly: false,
SameSite: 0,
Raw: "",
Unparsed: nil,
}
http.SetCookie(w, &rc)
val, _ := base64.URLEncoding.DecodeString(c.Value)
fmt.Fprintln(w, string(val))
}
}
var mPath = "D:/MyProgram/Go/github/"
//var mPath = "E:/go/project/"
func process2(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl.html")
t.Execute(w, "Hello Workd!")
}
func process3(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl2.html")
rand.Seed(time.Now().Unix())
t.Execute(w, rand.Intn(10) > 5)
}
func process4(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl4.html")
daysOfWeek := []string{"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"}
//daysOfWeek := []string{}
t.Execute(w, daysOfWeek)
}
func process5(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl5.html")
t.Execute(w, "hello")
}
func process6(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath+"test02/src/test/t1.html", mPath+"test02/src/test/t2.html")
t.Execute(w, "Hello World!")
}
func formatDate(t time.Time) string {
layout := "2006-01-02"
return t.Format(layout)
}
func process7(w http.ResponseWriter, r *http.Request) {
funcMap := template.FuncMap{"fdate": formatDate}
t := template.New("tmpl7.html").Funcs(funcMap)
t, _ = t.ParseFiles(mPath + "test02/src/test/tmpl7.html")
t.Execute(w, time.Now())
}
func process8(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl8.html")
content := `I asked: <i>"What's up?"</i>`
t.Execute(w, content)
}
func process9(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl9.html")
t.Execute(w, r.FormValue("comment"))
}
func form(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/form.html")
t.Execute(w, nil)
}
func process10(w http.ResponseWriter, r *http.Request) {
rand.Seed(time.Now().Unix())
var t * | "database/sql"
"encoding/base64"
"fmt"
_ "github.com/go-sql-driver/mysql" | random_line_split |
|
main01.go | Secure: false,
HttpOnly: false,
SameSite: 0,
Raw: "",
Unparsed: nil,
}
http.SetCookie(w, &rc)
val, _ := base64.URLEncoding.DecodeString(c.Value)
fmt.Fprintln(w, string(val))
}
}
var mPath = "D:/MyProgram/Go/github/"
//var mPath = "E:/go/project/"
func process2(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl.html")
t.Execute(w, "Hello Workd!")
}
func process3(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl2.html")
rand.Seed(time.Now().Unix())
t.Execute(w, rand.Intn(10) > 5)
}
func process4(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl4.html")
daysOfWeek := []string{"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"}
//daysOfWeek := []string{}
t.Execute(w, daysOfWeek)
}
func process5(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl5.html")
t.Execute(w, "hello")
}
func process6(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath+"test02/src/test/t1.html", mPath+"test02/src/test/t2.html")
t.Execute(w, "Hello World!")
}
func formatDate(t time.Time) string {
layout := "2006-01-02"
return t.Format(layout)
}
func process7(w http.ResponseWriter, r *http.Request) {
funcMap := template.FuncMap{"fdate": formatDate}
t := template.New("tmpl7.html").Funcs(funcMap)
t, _ = t.ParseFiles(mPath + "test02/src/test/tmpl7.html")
t.Execute(w, time.Now())
}
func process8(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl8.html")
content := `I asked: <i>"What's up?"</i>`
t.Execute(w, content)
}
func process9(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl9.html")
t.Execute(w, r.FormValue("comment"))
}
func form(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/form.html")
t.Execute(w, nil)
}
func process10(w http.ResponseWriter, r *http.Request) {
rand.Seed(time.Now().Unix())
var t *template.Template
if rand.Intn(10) > 5 {
t, _ = template.ParseFiles(mPath+"test02/src/test/layout.html", mPath+"test02/src/test/red_hello.html")
} else {
//t, _ = template.ParseFiles(mPath + "test02/src/test/layout.html", mPath + "test02/src/test/blue_hello.html")
t, _ = template.ParseFiles(mPath + "test02/src/test/layout.html")
}
t.ExecuteTemplate(w, "layout", "")
}
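// Illustrative note, not part of the original file: ExecuteTemplate looks a
// template up by name, so layout.html is expected to open with something like
//
//	{{ define "layout" }} ... {{ template "content" . }} ... {{ end }}
//
// with red_hello.html / blue_hello.html each supplying a {{ define }} block
// for the part that varies.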
//func main() {
// //hello := HelloHandler{}
// //world := WorldHandler{}
//
// //mux:= httprouter.New()
// //mux.GET("/hello/:name", hello)
//
// server := http.Server{
// Addr: "127.0.0.1:8080",
// }
//
// //http.Handle("/hello", &hello)
// //http.Handle("/world", &world)
//
// //http.HandleFunc("/hello", hello)
// //http.HandleFunc("/world", world)
//
// //http.HandleFunc("/hello", log(hello))
// //http.Handle("/hello", protect(log2(&hello)))
//
// //http.HandleFunc("/headers", headers)
// //http.HandleFunc("/body", body)
// //http.HandleFunc("/process", process)
// //http.HandleFunc("/write", writeExample)
// //http.HandleFunc("/writeheader", writeHeaderExample)
// //http.HandleFunc("/redirect", headerExample)
// //http.HandleFunc("/json", jsonExample)
// //http.HandleFunc("/set_cookie", setCookie)
// //http.HandleFunc("/get_cookie", getCookie)
// //http.HandleFunc("/set_message", setMessage)
// //http.HandleFunc("/show_message", showMessage)
// http.HandleFunc("/process2", process2)
// http.HandleFunc("/process3", process3)
// http.HandleFunc("/process4", process4)
// http.HandleFunc("/process5", process5)
// http.HandleFunc("/process6", process6)
// http.HandleFunc("/process7", process7)
// http.HandleFunc("/process8", process8)
// http.HandleFunc("/process9", process9)
// http.HandleFunc("/form", form)
// http.HandleFunc("/process10", process10)
//
// server.ListenAndServe()
//}
//type Post struct {
// Id int
// Content string
// Author string
//}
//
//var PostById map[int]*Post
//var PostsByAuthor map[string][]*Post
//
//func store(post Post) {
// PostById[post.Id] = &post
// PostsByAuthor[post.Author] = append(PostsByAuthor[post.Author], &post)
//}
//
//func main() {
// PostById = make(map[int]*Post)
// PostsByAuthor = make(map[string][]*Post)
//
// post1 := Post{Id: 1, Content: "Hello World!", Author: "Sau Sheong"}
// post2 := Post{Id: 2, Content: "Bonjour Monde!", Author: "Pierre"}
// post3 := Post{Id: 3, Content: "Hola Mundo!", Author: "Pedro"}
// post4 := Post{Id: 4, Content: "Greetings Earthlings", Author: "Sau Sheong"}
//
// store(post1)
// store(post2)
// store(post3)
// store(post4)
//
// fmt.Println(PostById[1])
// fmt.Println(PostById[2])
//
// for _, post := range PostsByAuthor["Sau Sheong"] {
// fmt.Println(post)
// }
//
// for _, post := range PostsByAuthor["Pedro"] {
// fmt.Println(post)
// }
//}
//func main() {
// data := []byte("Hello World!\n")
// err := ioutil.WriteFile("data1", data, 0644)
// if err != nil {
// panic(err)
// }
//
// read1, _ := ioutil.ReadFile("data1")
// fmt.Print(string(read1))
//
// file1, _ := os.Create("data2")
// defer file1.Close()
//
// bytes, _ := file1.Write(data)
// fmt.Printf("Wrote %d bytes to file\n", bytes)
//
// file2, _ := os.Open("data2")
// defer file2.Close()
//
// read2 := make([]byte, len(data))
// bytes, _ = file2.Read(read2)
// fmt.Printf("Read %d bytes from file\n", bytes)
// fmt.Println(string(read2))
//}
//type Post struct {
// Id int
// Content string
// Author string
//}
//func store(data interface{}, filename string) {
// buffer := new(bytes.Buffer)
// encoder := gob.NewEncoder(buffer)
// err := encoder.Encode(data)
// if err != nil {
// panic(err)
// }
// err = ioutil.WriteFile(filename, buffer.Bytes(), 0600)
// if err != nil {
// panic(err)
// }
//}
//
//func load(data interface{}, filename string) {
// raw, err := ioutil.ReadFile(filename)
// if err != nil {
// panic(err)
// }
// buffer := bytes.NewBuffer(raw)
// dec := gob.NewDecoder(buffer)
// err = dec.Decode(data)
// if err != nil {
// panic(err)
// }
//}
type Post struct {
Id int
Content string
Author string
}
var Db *sql.DB
func init() {
var err error
Db, err = sql.Open("mysql", "myuser:mypass@tcp(127.0.0.1:3306)/gwp?charset=utf8")
if err != nil {
panic(err)
}
}
func Posts(limit int) (posts []Post, err error) {
rows, err := Db.Query("select id, content, author from posts limit ?", limit)
if err != nil {
fmt.Println("err = " + err.Error())
return
}
for rows.Next() {
post := Post{}
err = rows.Scan(&post.Id, &post.Content, &post.Author)
if err != nil {
return
}
posts = append(posts, post)
}
rows.Close()
return
}
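// Illustrative note, not part of the original file: database/sql callers
// conventionally defer rows.Close() and check rows.Err() after the loop:
//
//	defer rows.Close()
//	for rows.Next() { ... }
//	err = rows.Err()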
func GetPost(id int) (post Post, err error) {
post = Post{}
err = Db.QueryRow("select id, content, author from posts where id = ?", id).Scan(&post.Id, &post.Content, &post.Author)
if err != nil | {
fmt.Println("err = " + err.Error())
} | conditional_block |
|
main01.go | data, err := ioutil.ReadAll(file)
if err == nil {
fmt.Fprintln(w, string(data))
}
}
}
func writeExample(w http.ResponseWriter, r *http.Request) {
str := "<html> <head><title>Go web Programming</title></head><body><h1>" +
"hello world</h1></body></html>"
w.Write([]byte(str))
}
func writeHeaderExample(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(501)
fmt.Fprintln(w, "No such service, try next door")
}
func headerExample(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Location", "http://baidu.com")
w.WriteHeader(302)
}
//type Post struct {
// User string
// Threads []string
//}
//
//func jsonExample(w http.ResponseWriter, r *http.Request) {
// w.Header().Set("Content-Type", "application/json")
// post := &Post{
// User: "Sau Sheong",
// Threads: []string{"first", "second", "third"},
// }
// json, _ := json.Marshal(post)
// w.Write(json)
//}
func setCookie(w http.ResponseWriter, r *http.Request) {
c1 := http.Cookie{
Name: "first_cookie",
Value: "Go Web Programming",
Path: "",
Domain: "",
Expires: time.Time{},
RawExpires: "",
MaxAge: 0,
Secure: false,
HttpOnly: true,
SameSite: 0,
Raw: "",
Unparsed: nil,
}
c2 := http.Cookie{
Name: "second_cookie",
Value: "Manning Publications Co",
Path: "",
Domain: "",
Expires: time.Time{},
RawExpires: "",
MaxAge: 0,
Secure: false,
HttpOnly: true,
SameSite: 0,
Raw: "",
Unparsed: nil,
}
//w.Header().Set("Set-Cookie", c1.String())
//w.Header().Add("Set-Cookie", c2.String())
http.SetCookie(w, &c1)
http.SetCookie(w, &c2)
}
func getCookie(w http.ResponseWriter, r *http.Request) {
//h := r.Header["Cookie"]
//fmt.Fprintln(w, h)
c1, err := r.Cookie("first_cookie")
if err != nil {
fmt.Fprintln(w, "Cannot get the first cookie")
}
cs := r.Cookies()
fmt.Fprintln(w, c1)
fmt.Fprintln(w, cs)
}
func setMessage(w http.ResponseWriter, r *http.Request) {
msg := []byte("Hello World!")
c := http.Cookie{
Name: "flash",
Value: base64.URLEncoding.EncodeToString(msg),
Path: "",
Domain: "",
Expires: time.Time{},
RawExpires: "",
MaxAge: 0,
Secure: false,
HttpOnly: false,
SameSite: 0,
Raw: "",
Unparsed: nil,
}
http.SetCookie(w, &c)
}
func showMessage(w http.ResponseWriter, r *http.Request) {
c, err := r.Cookie("flash")
if err != nil {
if err == http.ErrNoCookie {
fmt.Fprintln(w, "No message found")
}
} else {
rc := http.Cookie{
Name: "flash",
Value: "",
Path: "",
Domain: "",
Expires: time.Unix(1, 0),
RawExpires: "",
MaxAge: -1,
Secure: false,
HttpOnly: false,
SameSite: 0,
Raw: "",
Unparsed: nil,
}
http.SetCookie(w, &rc)
val, _ := base64.URLEncoding.DecodeString(c.Value)
fmt.Fprintln(w, string(val))
}
}
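// Illustrative note, not part of the original file: setMessage/showMessage
// implement a one-shot "flash" cookie. The value is base64url-encoded on
// write (cookie values cannot carry arbitrary bytes), and reading it
// immediately expires the cookie via MaxAge: -1 plus an Expires in the
// past, so the message is displayed at most once.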
var mPath = "D:/MyProgram/Go/github/"
//var mPath = "E:/go/project/"
func process2(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl.html")
t.Execute(w, "Hello Workd!")
}
func process3(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl2.html")
rand.Seed(time.Now().Unix())
t.Execute(w, rand.Intn(10) > 5)
}
func process4(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl4.html")
daysOfWeek := []string{"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"}
//daysOfWeek := []string{}
t.Execute(w, daysOfWeek)
}
func process5(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl5.html")
t.Execute(w, "hello")
}
func process6(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath+"test02/src/test/t1.html", mPath+"test02/src/test/t2.html")
t.Execute(w, "Hello World!")
}
func formatDate(t time.Time) string {
layout := "2006-01-02"
return t.Format(layout)
}
func process7(w http.ResponseWriter, r *http.Request) {
funcMap := template.FuncMap{"fdate": formatDate}
t := template.New("tmpl7.html").Funcs(funcMap)
t, _ = t.ParseFiles(mPath + "test02/src/test/tmpl7.html")
t.Execute(w, time.Now())
}
func process8(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl8.html")
content := `I asked: <i>"What's up?"</i>`
t.Execute(w, content)
}
func process9(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/tmpl9.html")
t.Execute(w, r.FormValue("comment"))
}
func form(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles(mPath + "test02/src/test/form.html")
t.Execute(w, nil)
}
func process10(w http.ResponseWriter, r *http.Request) {
rand.Seed(time.Now().Unix())
var t *template.Template
if rand.Intn(10) > 5 {
t, _ = template.ParseFiles(mPath+"test02/src/test/layout.html", mPath+"test02/src/test/red_hello.html")
} else {
//t, _ = template.ParseFiles(mPath + "test02/src/test/layout.html", mPath + "test02/src/test/blue_hello.html")
t, _ = template.ParseFiles(mPath + "test02/src/test/layout.html")
}
t.ExecuteTemplate(w, "layout", "")
}
//func main() {
// //hello := HelloHandler{}
// //world := WorldHandler{}
//
// //mux:= httprouter.New()
// //mux.GET("/hello/:name", hello)
//
// server := http.Server{
// Addr: "127.0.0.1:8080",
// }
//
// //http.Handle("/hello", &hello)
// //http.Handle("/world", &world)
//
// //http.HandleFunc("/hello", hello)
// //http.HandleFunc("/world", world)
//
// //http.HandleFunc("/hello", log(hello))
// //http.Handle("/hello", protect(log2(&hello)))
//
// //http.HandleFunc("/headers", headers)
// //http.HandleFunc("/body", body)
// //http.HandleFunc("/process", process)
// //http.HandleFunc("/write", writeExample)
// //http.HandleFunc("/writeheader", writeHeaderExample)
// //http.HandleFunc("/redirect", headerExample)
// //http.HandleFunc("/json", jsonExample)
// //http.HandleFunc("/set_cookie", setCookie)
// //http.HandleFunc("/get_cookie", getCookie)
// //http.HandleFunc("/set_message", setMessage)
// //http.HandleFunc("/show_message", showMessage)
// http.HandleFunc("/process2", process2)
// http.HandleFunc("/process3", process3)
// | {
//r.ParseForm()
//fmt.Fprintln(w, r.Form)
r.ParseMultipartForm(1024)
//fmt.Fprintln(w, "(1)", r.FormValue("hello"))
//fmt.Fprintln(w, "(2)", r.PostFormValue("hello"))
//fmt.Fprintln(w, "(3)", r.PostForm)
//fmt.Fprintln(w, "(4)", r.MultipartForm)
//fileHeader := r.MultipartForm.File["uploaded"][0]
//file, err := fileHeader.Open()
//if err == nil {
// data, err := ioutil.ReadAll(file)
// if err == nil {
// fmt.Fprintln(w, string(data))
// }
//}
file, _, err := r.FormFile("uploaded")
if err == nil { | identifier_body |
|
mod.rs | InitialSnapshot, RemoteTimestamp};
use self::progress::{LowerFrontier, UpperFrontier};
use self::sink::{EventSink, EventStream};
pub mod sink;
pub mod progress;
type SubscriberId = usize;
enum Event<T, D> {
Timely(TimelyEvent<T, D>),
Accepted((Sender, Receiver)),
Disconnected(SubscriberId),
Error(SubscriberId, io::Error),
ShutdownRequested,
}
/// State and logic of the publisher.
///
/// Maintains the upper and lower frontier of a Timely stream and broadcasts
/// their updated versions and any incoming data tuples to subscribed clients.
struct PublisherServer<T: Timestamp, D> {
// progress tracking state
lower: LowerFrontier<T>,
upper: UpperFrontier<T>,
// connected subscribers
subscribers: Slab<Sender>,
count: AtomicCounter,
// tokio event loop
events: Box<Stream<Item = Event<T, D>, Error = io::Error>>,
notificator: mpsc::UnboundedSender<Event<T, D>>,
core: Core,
handle: Handle,
}
impl<T: RemoteTimestamp, D: ExchangeData + Serialize> PublisherServer<T, D> {
/// Creates a new publisher, accepting subscribers on `socket`, publishing
/// the Timely events observed on `stream`.
fn new(socket: Listener, stream: EventStream<T, D>, count: AtomicCounter) -> io::Result<Self> {
let core = Core::new()?;
let handle = core.handle();
// queue for disconnection events from subscribers
let (notificator, subscribers) = mpsc::unbounded();
// we have three event sources:
let listener = socket.map(Event::Accepted);
let timely = stream
.map(Event::Timely)
.map_err(|_| unreachable!())
.chain(stream::once(Ok(Event::ShutdownRequested)));
let subscribers = subscribers.map_err(|_| unreachable!());
// all of which we merge into a single stream
let events = listener.select(subscribers).select(timely);
Ok(PublisherServer {
lower: LowerFrontier::default(),
upper: UpperFrontier::empty(),
subscribers: Slab::new(),
count: count,
events: Box::new(events),
notificator: notificator,
core: core,
handle: handle,
})
}
fn next_event(&mut self) -> io::Result<Event<T, D>> {
// run tokio reactor until we get the next event
let next_msg = self.events.by_ref().into_future();
match self.core.run(next_msg) {
Ok((msg, _)) => Ok(msg.unwrap()),
Err((err, _)) => Err(err),
}
}
/// Starts serving subscribers, blocks until the Timely stream completes
/// (or an error happens).
fn serve(mut self) -> io::Result<()> {
loop {
match self.next_event()? {
// processing incoming timely events
Event::Timely(ev) => self.timely_event(ev)?,
// handle networking events
Event::Accepted(sub) => self.add_subscriber(sub)?,
Event::Disconnected(id) => self.remove_subscriber(id),
Event::Error(id, err) => {
// subscriber errors should not be fatal. we just log
// them and forget about it.
error!("Subscriber {}: {}", id, err);
}
Event::ShutdownRequested => {
// this drops self, and thus drain the queues of | return Ok(());
}
}
}
}
/// Sends `msg` to all connected subscribers.
fn broadcast(&self, msg: MessageBuf) -> io::Result<()> {
if self.subscribers.len() == 0 {
// nothing to do here
return Ok(());
}
let last = self.subscribers.len() - 1;
for (id, sub) in self.subscribers.iter() {
if id < last {
sub.send(msg.clone());
} else {
// this case is a hint to the compiler that for the last
// iteration we can move `msg` directly, no need to clone
sub.send(msg);
break;
}
}
Ok(())
}
/// Processes a single Timely event, might cause multiple messages to be
/// sent to connected subscribers.
fn timely_event(&mut self, event: TimelyEvent<T, D>) -> io::Result<()> {
match event {
TimelyEvent::Progress(mut updates) => {
self.lower.update(&mut updates);
if !updates.is_empty() {
self.broadcast(Message::<T, D>::frontier_update(updates)?)?;
}
}
TimelyEvent::Messages(time, data) => {
self.upper.insert(time.clone());
self.broadcast(Message::<T, D>::data_message(time, data)?)?;
}
};
Ok(())
}
/// Registers a new subscriber.
///
/// Installs a "monitor" for the subscriber, making sure we get notified
/// when it disconnects.
fn add_subscriber(&mut self, (tx, rx): (Sender, Receiver)) -> io::Result<()> {
// inform new subscriber about current state of progress
let snapshot = InitialSnapshot::encode(self.lower.elements(), self.upper.elements())?;
tx.send(snapshot);
// add it to the list of listening subscribers
self.count.increment();
let id = self.subscribers.insert(tx);
// register event handler for disconnection
let notificator = self.notificator.clone();
let subscriber = rx.for_each(|_| {
Err(io::Error::new(
io::ErrorKind::InvalidData,
"unexpected message",
))
}).then(move |res| {
let event = match res {
Ok(()) => Event::Disconnected(id),
Err(err) => Event::Error(id, err),
};
notificator.unbounded_send(event).map_err(|_| ())
});
self.handle.spawn(subscriber);
Ok(())
}
/// Removes a subscriber from the broadcasting list.
///
/// This does not cancel the subscriber monitor registered above, so if the
/// subscriber is still alive, it will still emit events on errors or
/// when it disconnects.
fn remove_subscriber(&mut self, id: SubscriberId) {
self.count.decrement();
self.subscribers.remove(id);
}
}
impl<T: Timestamp, D> Drop for PublisherServer<T, D> {
fn drop(&mut self) {
self.subscribers.clear();
self.count.invalidate();
}
}
/// The host and port on which the publisher is accepting subscribers.
pub type Addr = (String, u16);
/// A handle for spawned publisher.
///
/// This implements `EventPusher`, so it can be used with Timely's `capture`.
/// When dropped, will block and drain any subscriber queues.
pub struct Publisher<T, D> {
/// Handle for events to be published by this instance.
sink: Option<EventSink<T, D>>,
/// A join handle for the spawned thread.
thread: Thread,
// The current subscriber count (wrapped in a mutex, so we can block on it)
subscribers: AtomicCounter,
}
impl<T, D> Publisher<T, D>
where
T: RemoteTimestamp,
D: ExchangeData + Serialize,
{
/// Spawns a new publisher thread on an ephemeral network port.
///
/// The corresponding address can be obtained from the first member of the
/// tuple. The publisher handle itself is used to send events into the
/// topic.
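///
/// A minimal usage sketch (hypothetical `network` handle and Timely `stream`
/// inside a dataflow scope):
///
/// ```ignore
/// let ((host, port), publisher) = Publisher::new(&network)?;
/// stream.capture_into(publisher);
/// ```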
pub fn new(network: &Network) -> io::Result<(Addr, Self)> {
// the queue between the Timely operator and this publisher thread
let (timely_sink, timely_stream) = sink::pair();
// the network socket on which subscribers are accepted
let listener = network.listen(None)?;
let addr = {
let (host, port) = listener.external_addr();
(String::from(host), port)
};
let subscribers = AtomicCounter::new();
let count = subscribers.clone();
// main event loop of the publisher thread
let handle = thread::spawn(move || {
PublisherServer::new(listener, timely_stream, count)
.and_then(|publisher| publisher.serve())
});
let publisher = Publisher {
sink: Some(timely_sink),
thread: Thread::new(handle),
subscribers: subscribers,
};
Ok((addr, publisher))
}
/// Blocks the current thread until some subscribers have connected.
///
/// Returns the number of currently connected subscribers. Note that this
/// does not actually guarantee that the subscribers are still connected,
/// only that there was some recent point in time when there were some
/// connected subscribers. This is mostly intended for testing purposes.
#[allow(dead_code)]
pub fn subscriber_barrier(&self) -> io::Result<usize> {
// important: this must unblock when the thread dies, so we make
// sure to call `count.invalidate()` in the publisher thread when it drops
let count = self.subscribers.wait_nonzero();
if count == COUNTER_INVALID {
Err(io::Error::new(io::ErrorKind::Other, "publisher terminated"))
} else {
Ok(count)
}
}
}
impl<T, D> EventPusher<T, D> for Publisher<T, D>
where
T: RemoteTimestamp,
D: ExchangeData + Serialize,
{
fn push(&mut self, event: TimelyEvent<T, | // all still connected subscribers | random_line_split |
mod.rs |
.map(Event::Timely)
.map_err(|_| unreachable!())
.chain(stream::once(Ok(Event::ShutdownRequested)));
let subscribers = subscribers.map_err(|_| unreachable!());
// all of which we merge into a single stream
let events = listener.select(subscribers).select(timely);
Ok(PublisherServer {
lower: LowerFrontier::default(),
upper: UpperFrontier::empty(),
subscribers: Slab::new(),
count: count,
events: Box::new(events),
notificator: notificator,
core: core,
handle: handle,
})
}
fn next_event(&mut self) -> io::Result<Event<T, D>> {
// run tokio reactor until we get the next event
let next_msg = self.events.by_ref().into_future();
match self.core.run(next_msg) {
Ok((msg, _)) => Ok(msg.unwrap()),
Err((err, _)) => Err(err),
}
}
/// Starts serving subscribers, blocks until the Timely stream completes
/// (or an error happens).
fn serve(mut self) -> io::Result<()> {
loop {
match self.next_event()? {
// processing incoming timely events
Event::Timely(ev) => self.timely_event(ev)?,
// handle networking events
Event::Accepted(sub) => self.add_subscriber(sub)?,
Event::Disconnected(id) => self.remove_subscriber(id),
Event::Error(id, err) => {
// subscriber errors should not be fatal. we just log
// them and forget about it.
error!("Subscriber {}: {}", id, err);
}
Event::ShutdownRequested => {
// this drops self, and thus drains the queues of
// all still-connected subscribers
return Ok(());
}
}
}
}
/// Sends `msg` to all connected subscribers.
fn broadcast(&self, msg: MessageBuf) -> io::Result<()> {
if self.subscribers.len() == 0 {
// nothing to do here
return Ok(());
}
let last = self.subscribers.len() - 1;
for (id, sub) in self.subscribers.iter() {
if id < last {
sub.send(msg.clone());
} else {
// this case is a hint to the compiler that for the last
// iteration we can move `msg` directly, no need to clone
sub.send(msg);
break;
}
}
Ok(())
}
/// Processes a single Timely event, might cause multiple messages to be
/// sent to connected subscribers.
fn timely_event(&mut self, event: TimelyEvent<T, D>) -> io::Result<()> {
match event {
TimelyEvent::Progress(mut updates) => {
self.lower.update(&mut updates);
if !updates.is_empty() {
self.broadcast(Message::<T, D>::frontier_update(updates)?)?;
}
}
TimelyEvent::Messages(time, data) => {
self.upper.insert(time.clone());
self.broadcast(Message::<T, D>::data_message(time, data)?)?;
}
};
Ok(())
}
/// Registers a new subscriber.
///
/// Installs a "monitor" for the subscriber, making sure we get notified
/// when it disconnects.
fn add_subscriber(&mut self, (tx, rx): (Sender, Receiver)) -> io::Result<()> {
// inform new subscriber about current state of progress
let snapshot = InitialSnapshot::encode(self.lower.elements(), self.upper.elements())?;
tx.send(snapshot);
// add it to the list of listening subscribers
self.count.increment();
let id = self.subscribers.insert(tx);
// register event handler for disconnection
let notificator = self.notificator.clone();
let subscriber = rx.for_each(|_| {
Err(io::Error::new(
io::ErrorKind::InvalidData,
"unexpected message",
))
}).then(move |res| {
let event = match res {
Ok(()) => Event::Disconnected(id),
Err(err) => Event::Error(id, err),
};
notificator.unbounded_send(event).map_err(|_| ())
});
self.handle.spawn(subscriber);
Ok(())
}
/// Removes a subscriber from the broadcasting list.
///
/// This does not cancel the subscriber monitor registered above, so if the
/// subscriber is still alive, it will still emit events on errors or
/// when it disconnects.
fn remove_subscriber(&mut self, id: SubscriberId) {
self.count.decrement();
self.subscribers.remove(id);
}
}
impl<T: Timestamp, D> Drop for PublisherServer<T, D> {
fn drop(&mut self) {
self.subscribers.clear();
self.count.invalidate();
}
}
/// The host and port on which the publisher is accepting subscribers.
pub type Addr = (String, u16);
/// A handle for spawned publisher.
///
/// This implements `EventPusher`, so it can be used with Timely's `capture`.
/// When dropped, will block and drain any subscriber queues.
pub struct Publisher<T, D> {
/// Handle for events to be published by this instance.
sink: Option<EventSink<T, D>>,
/// A join handle for the spawned thread.
thread: Thread,
// The current subscriber count (wrapped in a mutex, so we can block on it)
subscribers: AtomicCounter,
}
impl<T, D> Publisher<T, D>
where
T: RemoteTimestamp,
D: ExchangeData + Serialize,
{
/// Spawns a new publisher thread on an ephemeral network port.
///
/// The corresponding address can be obtained from the first member of the
/// tuple. The publisher handle itself is used to send events into the
/// topic.
pub fn new(network: &Network) -> io::Result<(Addr, Self)> {
// the queue between the Timely operator and this publisher thread
let (timely_sink, timely_stream) = sink::pair();
// the network socket on which subscribers are accepted
let listener = network.listen(None)?;
let addr = {
let (host, port) = listener.external_addr();
(String::from(host), port)
};
let subscribers = AtomicCounter::new();
let count = subscribers.clone();
// main event loop of the publisher thread
let handle = thread::spawn(move || {
PublisherServer::new(listener, timely_stream, count)
.and_then(|publisher| publisher.serve())
});
let publisher = Publisher {
sink: Some(timely_sink),
thread: Thread::new(handle),
subscribers: subscribers,
};
Ok((addr, publisher))
}
/// Blocks the current thread until some subscribers have connected.
///
/// Returns the number of currently connected subscribers. Note that this
/// does not actually guarantee that the subscribers are still connected,
/// only that there was some recent point in time when there were some
/// connected subscribers. This is mostly intended for testing purposes.
#[allow(dead_code)]
pub fn subscriber_barrier(&self) -> io::Result<usize> {
// important: this must unblock when the thread dies, so we make
// sure to call `count.invalidate()` in the publisher thread when it drops
let count = self.subscribers.wait_nonzero();
if count == COUNTER_INVALID {
Err(io::Error::new(io::ErrorKind::Other, "publisher terminated"))
} else {
Ok(count)
}
}
}
impl<T, D> EventPusher<T, D> for Publisher<T, D>
where
T: RemoteTimestamp,
D: ExchangeData + Serialize,
{
fn push(&mut self, event: TimelyEvent<T, D>) {
self.sink.as_mut().unwrap().push(event)
}
}
impl<T, D> Drop for Publisher<T, D> {
fn drop(&mut self) {
// Note that the drop order is important here: the `EventSink` must be
// dropped before `Thread` in order to avoid a deadlock: Dropping `EventSink`
// indicates to the publisher thread that it has to shut down, which will block
// the join operation until the shutdown is complete.
drop(self.sink.take());
if let Err(err) = self.thread.join() {
error!("failed to drain subscriber queues: {}", err);
}
}
}
type ThreadHandle = thread::JoinHandle<io::Result<()>>;
/// A join handle for the publisher thread.
///
/// This can be used to ensure all subscriber queues are drained properly.
struct Thread(Option<ThreadHandle>);
impl Thread {
fn new(handle: ThreadHandle) -> Self {
Thread(Some(handle))
}
fn join(&mut self) -> io::Result<()> {
match self.0.take().map(|t| t.join()) {
Some(Ok(res)) => res,
Some(Err(_)) => Err(io::Error::new(io::ErrorKind::Other, "thread panicked")),
None => Err(io::Error::new(io::ErrorKind::Other, "already joined")),
}
}
}
/// A counter which can block readers when it reaches zero.
#[derive(Debug, Clone)]
struct AtomicCounter(Arc<(Mutex<usize>, Condvar)>);
const COUNTER_INVALID: usize = ::std::usize::MAX;
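// Intended contract (from the call sites above): increment/decrement adjust
// the count, wait_nonzero blocks until it becomes non-zero, and invalidate
// sets it to COUNTER_INVALID so that readers blocked in wait_nonzero wake up
// when the publisher thread dies.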
impl AtomicCounter {
fn new() -> Self {
AtomicCounter(Default::default())
}
fn | lock | identifier_name |
|
tof_coincidences_jitters_correct_Paola.py |
### read sensor positions from database
DataSiPM = db.DataSiPMsim_only('petalo', 0)
DataSiPM_idx = DataSiPM.set_index('SensorID')
n_sipms = len(DataSiPM)
first_sipm = DataSiPM_idx.index.min()
### parameters for single photoelectron convolution in SiPM response
tau_sipm = [100, 15000]
time_window = 5000
#time_bin = 5 # ps
time = np.arange(0, 5000)
#time = time + (time_bin/2)
spe_resp, norm = tf.apply_spe_dist(time, tau_sipm)
sigma_sipm = 0 #80 #ps
sigma_elec = 0 #30 #ps
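# Both time jitters are switched off here (0 ps); the commented values
# (80 ps for the SiPM, 30 ps for the electronics) are presumably the
# realistic settings used in other runs.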
#n_pe = 1
arguments = parse_args(sys.argv)
start = arguments.first_file
numb = arguments.n_files
thr_r = arguments.thr_r
thr_phi = arguments.thr_phi
thr_z = arguments.thr_z
thr_e = arguments.thr_e
n_pe = arguments.n_pe
timestamp_thr = arguments.thr_charge
eventsPath = arguments.events_path
file_name = arguments.file_name
rpos_file = arguments.rpos_file
data_path = arguments.data_path
print(f'Using r map: {rpos_file}')
evt_file = f"{data_path}/tof_coincidences_Paola_npe{n_pe}_thr{timestamp_thr}_{start}_{numb}_{thr_r}_{thr_phi}_{thr_z}_{thr_e}"
Rpos = load_map(rpos_file,
group = "Radius",
node = f"f{int(thr_r)}pes150bins",
x_name = "PhiRms",
y_name = "Rpos",
u_name = "RposUncertainty")
#charge_range = (2000, 2250) # pde 0.30, n=1.6
#charge_range = (0, 5000)
charge_range = (1050, 1300)
print(f'Charge range = {charge_range}')
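# charge_range (in pes) is the coincidence selection window passed to
# reconstruct_coincidences below; the commented alternatives above
# presumably correspond to other detector configurations (e.g. PDE 0.30,
# n = 1.6).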
c0 = c1 = c2 = c3 = c4 = 0
bad = 0
boh0 = boh1 = boh2 = 0
below_thr = 0
true_r1, true_phi1, true_z1 = [], [], []
reco_r1, reco_phi1, reco_z1 = [], [], []
true_r2, true_phi2, true_z2 = [], [], []
reco_r2, reco_phi2, reco_z2 = [], [], []
sns_response1, sns_response2 = [], []
### PETsys thresholds to extract the timestamp
#timestamp_thr = 0.25
first_sipm1 = []
first_sipm2 = []
first_time1 = []
first_time2 = []
true_time1, true_time2 = [], []
touched_sipms1, touched_sipms2 = [], []
photo1, photo2 = [], []
max_hit_distance1, max_hit_distance2 = [], []
hit_energy1, hit_energy2 = [], []
event_ids = []
for number in range(start, start+numb):
number_str = "{:03d}".format(number)
filename = f"{eventsPath}/{file_name}.{number_str}.pet.h5"
try:
#sns_response = load_mcsns_response(filename)
sns_response = pd.read_hdf(filename, 'MC/waveforms')
except ValueError:
print(f'File {filename} not found')
continue
except OSError:
print(f'File {filename} not found')
continue
except KeyError:
print(f'No object named MC/waveforms in file {filename}')
continue
print(f'Analyzing file {filename}')
tof_bin_size = read_sensor_bin_width_from_conf(filename, tof=True)
particles = pd.read_hdf(filename, 'MC/particles')
hits = pd.read_hdf(filename, 'MC/hits')
#sns_response = snsf.apply_sipm_pde(sns_response, 0.3)
#sns_response = snsf.apply_charge_fluctuation(sns_response, DataSiPM_idx)
tof_response = pd.read_hdf(filename, 'MC/tof_waveforms')
events = particles.event_id.unique()
for evt in events[:]:
evt_sns = sns_response[sns_response.event_id == evt]
evt_sns = rf.find_SiPMs_over_threshold(evt_sns, threshold=thr_e)
if len(evt_sns) == 0:
boh0 += 1
continue
ids_over_thr = evt_sns.sensor_id.astype('int64').values
evt_parts = particles[particles.event_id == evt]
evt_hits = hits[hits.event_id == evt]
evt_tof = tof_response[tof_response.event_id == evt]
# if evt_hits.energy.sum() < 0.511:
# below_thr += 1
# continue
if len(evt_tof) == 0:
boh1 += 1
continue
evt_tof = evt_tof[evt_tof.sensor_id.isin(-ids_over_thr)]
if len(evt_tof) == 0:
boh2 += 1
continue
pos1, pos2, q1, q2, true_pos1, true_pos2, true_t1, true_t2, sns1, sns2 = rf.reconstruct_coincidences(evt_sns, charge_range, DataSiPM_idx, evt_parts, evt_hits)
if len(pos1) == 0 or len(pos2) == 0:
c0 += 1
continue
q1 = np.array(q1)
q2 = np.array(q2)
pos1 = np.array(pos1)
pos2 = np.array(pos2)
## Calculate R
r1 = r2 = None
sel1_r = q1>thr_r
q1r = q1[sel1_r]
pos1r = pos1[sel1_r]
sel2_r = q2>thr_r
q2r = q2[sel2_r]
pos2r = pos2[sel2_r]
if len(pos1r) == 0 or len(pos2r) == 0:
c1 += 1
continue
pos1_phi = rf.from_cartesian_to_cyl(np.array(pos1r))[:,1]
diff_sign = min(pos1_phi) < 0 < max(pos1_phi)
if diff_sign & (np.abs(np.min(pos1_phi))>np.pi/2.):
pos1_phi[pos1_phi<0] = np.pi + np.pi + pos1_phi[pos1_phi<0]
mean_phi = np.average(pos1_phi, weights=q1r)
var_phi1 = np.average((pos1_phi-mean_phi)**2, weights=q1r)
r1 = Rpos(np.sqrt(var_phi1)).value
pos2_phi = rf.from_cartesian_to_cyl(np.array(pos2r))[:,1]
diff_sign = min(pos2_phi) < 0 < max(pos2_phi)
if diff_sign & (np.abs(np.min(pos2_phi))>np.pi/2.):
pos2_phi[pos2_phi<0] = np.pi + np.pi + pos2_phi[pos2_phi<0]
mean_phi = np.average(pos2_phi, weights=q2r)
var_phi2 = np.average((pos2_phi-mean_phi)**2, weights=q2r)
r2 = Rpos(np.sqrt(var_phi2)).value
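# R of each interaction is estimated from the charge-weighted RMS of the
# SiPM phi positions, looked up in the precomputed Rpos map loaded above.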
sel1_phi = q1>thr_phi
q1phi = q1[sel1_phi]
pos1phi = pos1[sel1_phi]
sel2_phi = q2>thr_phi
q2phi = q2[sel2_phi]
pos2phi = pos2 | parser = argparse.ArgumentParser()
parser.add_argument('first_file' , type = int, help = "first file (inclusive)" )
parser.add_argument('n_files' , type = int, help = "number of files to analyze" )
parser.add_argument('thr_r' , type = int, help = "threshold in r coordinate" )
parser.add_argument('thr_phi' , type = int, help = "threshold in phi coordinate")
parser.add_argument('thr_z' , type = int, help = "threshold in z coordinate" )
parser.add_argument('thr_e' , type = int, help = "threshold in the energy" )
parser.add_argument('n_pe' , type = int, help = "number of pes" )
parser.add_argument('thr_charge' , type = float, help = "charge threshold used to extract the timestamp" )
parser.add_argument('events_path', help = "input files path" )
parser.add_argument('file_name' , help = "name of input files" )
parser.add_argument('rpos_file' , help = "File of the Rpos" )
parser.add_argument('data_path' , help = "output files path" )
return parser.parse_args() | identifier_body |
|
tof_coincidences_jitters_correct_Paola.py | = np.arange(0, 5000)
#time = time + (time_bin/2)
spe_resp, norm = tf.apply_spe_dist(time, tau_sipm)
sigma_sipm = 0 #80 #ps
sigma_elec = 0 #30 #ps
#n_pe = 1
arguments = parse_args(sys.argv)
start = arguments.first_file
numb = arguments.n_files
thr_r = arguments.thr_r
thr_phi = arguments.thr_phi
thr_z = arguments.thr_z
thr_e = arguments.thr_e
n_pe = arguments.n_pe
timestamp_thr = arguments.thr_charge
eventsPath = arguments.events_path
file_name = arguments.file_name
rpos_file = arguments.rpos_file
data_path = arguments.data_path
print(f'Using r map: {rpos_file}')
evt_file = f"{data_path}/tof_coincidences_Paola_npe{n_pe}_thr{timestamp_thr}_{start}_{numb}_{thr_r}_{thr_phi}_{thr_z}_{thr_e}"
Rpos = load_map(rpos_file,
group = "Radius",
node = f"f{int(thr_r)}pes150bins",
x_name = "PhiRms",
y_name = "Rpos",
u_name = "RposUncertainty")
#charge_range = (2000, 2250) # pde 0.30, n=1.6
#charge_range = (0, 5000)
charge_range = (1050, 1300)
print(f'Charge range = {charge_range}')
c0 = c1 = c2 = c3 = c4 = 0
bad = 0
boh0 = boh1 = boh2 = 0
below_thr = 0
true_r1, true_phi1, true_z1 = [], [], []
reco_r1, reco_phi1, reco_z1 = [], [], []
true_r2, true_phi2, true_z2 = [], [], []
reco_r2, reco_phi2, reco_z2 = [], [], []
sns_response1, sns_response2 = [], []
### PETsys thresholds to extract the timestamp
#timestamp_thr = 0.25
first_sipm1 = []
first_sipm2 = []
first_time1 = []
first_time2 = []
true_time1, true_time2 = [], []
touched_sipms1, touched_sipms2 = [], []
photo1, photo2 = [], []
max_hit_distance1, max_hit_distance2 = [], []
hit_energy1, hit_energy2 = [], []
event_ids = []
for number in range(start, start+numb):
number_str = "{:03d}".format(number)
filename = f"{eventsPath}/{file_name}.{number_str}.pet.h5"
try:
#sns_response = load_mcsns_response(filename)
sns_response = pd.read_hdf(filename, 'MC/waveforms')
except ValueError:
print(f'File {filename} not found')
continue
except OSError:
print(f'File {filename} not found')
continue
except KeyError:
print(f'No object named MC/waveforms in file {filename}')
continue
print(f'Analyzing file {filename}')
tof_bin_size = read_sensor_bin_width_from_conf(filename, tof=True)
particles = pd.read_hdf(filename, 'MC/particles')
hits = pd.read_hdf(filename, 'MC/hits')
#sns_response = snsf.apply_sipm_pde(sns_response, 0.3)
#sns_response = snsf.apply_charge_fluctuation(sns_response, DataSiPM_idx)
tof_response = pd.read_hdf(filename, 'MC/tof_waveforms')
events = particles.event_id.unique()
for evt in events[:]:
evt_sns = sns_response[sns_response.event_id == evt]
evt_sns = rf.find_SiPMs_over_threshold(evt_sns, threshold=thr_e)
if len(evt_sns) == 0:
boh0 += 1
continue
ids_over_thr = evt_sns.sensor_id.astype('int64').values
evt_parts = particles[particles.event_id == evt]
evt_hits = hits[hits.event_id == evt]
evt_tof = tof_response[tof_response.event_id == evt]
# if evt_hits.energy.sum() < 0.511:
# below_thr += 1
# continue
if len(evt_tof) == 0:
boh1 += 1
continue | evt_tof = evt_tof[evt_tof.sensor_id.isin(-ids_over_thr)]
if len(evt_tof) == 0:
boh2 += 1
continue
pos1, pos2, q1, q2, true_pos1, true_pos2, true_t1, true_t2, sns1, sns2 = rf.reconstruct_coincidences(evt_sns, charge_range, DataSiPM_idx, evt_parts, evt_hits)
if len(pos1) == 0 or len(pos2) == 0:
c0 += 1
continue
q1 = np.array(q1)
q2 = np.array(q2)
pos1 = np.array(pos1)
pos2 = np.array(pos2)
## Calculate R
r1 = r2 = None
sel1_r = q1>thr_r
q1r = q1[sel1_r]
pos1r = pos1[sel1_r]
sel2_r = q2>thr_r
q2r = q2[sel2_r]
pos2r = pos2[sel2_r]
if len(pos1r) == 0 or len(pos2r) == 0:
c1 += 1
continue
pos1_phi = rf.from_cartesian_to_cyl(np.array(pos1r))[:,1]
diff_sign = min(pos1_phi) < 0 < max(pos1_phi)
if diff_sign & (np.abs(np.min(pos1_phi))>np.pi/2.):
pos1_phi[pos1_phi<0] = np.pi + np.pi + pos1_phi[pos1_phi<0]
mean_phi = np.average(pos1_phi, weights=q1r)
var_phi1 = np.average((pos1_phi-mean_phi)**2, weights=q1r)
r1 = Rpos(np.sqrt(var_phi1)).value
pos2_phi = rf.from_cartesian_to_cyl(np.array(pos2r))[:,1]
diff_sign = min(pos2_phi) < 0 < max(pos2_phi)
if diff_sign & (np.abs(np.min(pos2_phi))>np.pi/2.):
pos2_phi[pos2_phi<0] = np.pi + np.pi + pos2_phi[pos2_phi<0]
mean_phi = np.average(pos2_phi, weights=q2r)
var_phi2 = np.average((pos2_phi-mean_phi)**2, weights=q2r)
r2 = Rpos(np.sqrt(var_phi2)).value
sel1_phi = q1>thr_phi
q1phi = q1[sel1_phi]
pos1phi = pos1[sel1_phi]
sel2_phi = q2>thr_phi
q2phi = q2[sel2_phi]
pos2phi = pos2[sel2_phi]
if len(q1phi) == 0 or len(q2phi) == 0:
c2 += 1
continue
phi1 = phi2 = None
reco_cart_pos = np.average(pos1phi, weights=q1phi, axis=0)
phi1 = np.arctan2(reco_cart_pos[1], reco_cart_pos[0])
reco_cart_pos = np.average(pos2phi, weights=q2phi, axis=0)
phi2 = np.arctan2(reco_cart_pos[1], reco_cart_pos[0])
sel1_z = q1>thr_z
q1z = q1[sel1_z]
pos1z = pos1[sel1_z]
sel2_z = q2>thr_z
q2z = q2[sel2_z]
pos2z = pos2[sel2_z]
if len(q1z) == 0 or len(q2z) == 0:
c3 += 1
continue
z1 = z2 = None
reco_cart_pos = np.average(pos1z, weights=q1z, axis=0)
z1 = reco_cart_pos[2]
reco_cart_pos = np.average(pos2z, weights=q2z, axis=0)
z2 = reco_cart_pos[2]
sel1_e = q1>thr_e
q1e = q1[sel1_e]
sel2_e = q2>thr_e
q2e = q2[sel2_e]
if len(q1e) == 0 or len(q2e) == 0:
c4 += 1
continue
times = evt_tof.time_bin.values * tof_bin_size / units.ps
| random_line_split |
|
tof_coincidences_jitters_correct_Paola.py | (args):
parser = argparse.ArgumentParser()
parser.add_argument('first_file' , type = int, help = "first file (inclusive)" )
parser.add_argument('n_files' , type = int, help = "number of files to analyze" )
parser.add_argument('thr_r' , type = int, help = "threshold in r coordinate" )
parser.add_argument('thr_phi' , type = int, help = "threshold in phi coordinate")
parser.add_argument('thr_z' , type = int, help = "threshold in z coordinate" )
parser.add_argument('thr_e' , type = int, help = "threshold in the energy" )
parser.add_argument('n_pe' , type = int, help = "number of pes" )
parser.add_argument('thr_charge' , type = float, help = "charge threshold used to extract the timestamp" )
parser.add_argument('events_path', help = "input files path" )
parser.add_argument('file_name' , help = "name of input files" )
parser.add_argument('rpos_file' , help = "File of the Rpos" )
parser.add_argument('data_path' , help = "output files path" )
return parser.parse_args()
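# Example invocation (hypothetical paths/values, following the positional
# order declared above):
# python tof_coincidences_jitters_correct_Paola.py 0 10 4 4 4 2 1 0.25 \
#     /data/in full_body rmap.h5 /data/out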
### read sensor positions from database
DataSiPM = db.DataSiPMsim_only('petalo', 0)
DataSiPM_idx = DataSiPM.set_index('SensorID')
n_sipms = len(DataSiPM)
first_sipm = DataSiPM_idx.index.min()
### parameters for single photoelectron convolution in SiPM response
tau_sipm = [100, 15000]
time_window = 5000
#time_bin = 5 # ps
time = np.arange(0, 5000)
#time = time + (time_bin/2)
spe_resp, norm = tf.apply_spe_dist(time, tau_sipm)
sigma_sipm = 0 #80 #ps
sigma_elec = 0 #30 #ps
#n_pe = 1
arguments = parse_args(sys.argv)
start = arguments.first_file
numb = arguments.n_files
thr_r = arguments.thr_r
thr_phi = arguments.thr_phi
thr_z = arguments.thr_z
thr_e = arguments.thr_e
n_pe = arguments.n_pe
timestamp_thr = arguments.thr_charge
eventsPath = arguments.events_path
file_name = arguments.file_name
rpos_file = arguments.rpos_file
data_path = arguments.data_path
print(f'Using r map: {rpos_file}')
evt_file = f"{data_path}/tof_coincidences_Paola_npe{n_pe}_thr{timestamp_thr}_{start}_{numb}_{thr_r}_{thr_phi}_{thr_z}_{thr_e}"
Rpos = load_map(rpos_file,
group = "Radius",
node = f"f{int(thr_r)}pes150bins",
x_name = "PhiRms",
y_name = "Rpos",
u_name = "RposUncertainty")
#charge_range = (2000, 2250) # pde 0.30, n=1.6
#charge_range = (0, 5000)
charge_range = (1050, 1300)
print(f'Charge range = {charge_range}')
c0 = c1 = c2 = c3 = c4 = 0
bad = 0
boh0 = boh1 = boh2 = 0
below_thr = 0
true_r1, true_phi1, true_z1 = [], [], []
reco_r1, reco_phi1, reco_z1 = [], [], []
true_r2, true_phi2, true_z2 = [], [], []
reco_r2, reco_phi2, reco_z2 = [], [], []
sns_response1, sns_response2 = [], []
### PETsys thresholds to extract the timestamp
#timestamp_thr = 0.25
first_sipm1 = []
first_sipm2 = []
first_time1 = []
first_time2 = []
true_time1, true_time2 = [], []
touched_sipms1, touched_sipms2 = [], []
photo1, photo2 = [], []
max_hit_distance1, max_hit_distance2 = [], []
hit_energy1, hit_energy2 = [], []
event_ids = []
for number in range(start, start+numb):
number_str = "{:03d}".format(number)
filename = f"{eventsPath}/{file_name}.{number_str}.pet.h5"
try:
#sns_response = load_mcsns_response(filename)
sns_response = pd.read_hdf(filename, 'MC/waveforms')
except ValueError:
print(f'File {filename} not found')
continue
except OSError:
print(f'File {filename} not found')
continue
except KeyError:
print(f'No object named MC/waveforms in file {filename}')
continue
print(f'Analyzing file {filename}')
tof_bin_size = read_sensor_bin_width_from_conf(filename, tof=True)
particles = pd.read_hdf(filename, 'MC/particles')
hits = pd.read_hdf(filename, 'MC/hits')
#sns_response = snsf.apply_sipm_pde(sns_response, 0.3)
#sns_response = snsf.apply_charge_fluctuation(sns_response, DataSiPM_idx)
tof_response = pd.read_hdf(filename, 'MC/tof_waveforms')
events = particles.event_id.unique()
for evt in events[:]:
evt_sns = sns_response[sns_response.event_id == evt]
evt_sns = rf.find_SiPMs_over_threshold(evt_sns, threshold=thr_e)
if len(evt_sns) == 0:
boh0 += 1
continue
ids_over_thr = evt_sns.sensor_id.astype('int64').values
evt_parts = particles[particles.event_id == evt]
evt_hits = hits[hits.event_id == evt]
evt_tof = tof_response[tof_response.event_id == evt]
# if evt_hits.energy.sum() < 0.511:
# below_thr += 1
# continue
if len(evt_tof) == 0:
boh1 += 1
continue
evt_tof = evt_tof[evt_tof.sensor_id.isin(-ids_over_thr)]
if len(evt_tof) == 0:
boh2 += 1
continue
pos1, pos2, q1, q2, true_pos1, true_pos2, true_t1, true_t2, sns1, sns2 = rf.reconstruct_coincidences(evt_sns, charge_range, DataSiPM_idx, evt_parts, evt_hits)
if len(pos1) == 0 or len(pos2) == 0:
c0 += 1
continue
q1 = np.array(q1)
q2 = np.array(q2)
pos1 = np.array(pos1)
pos2 = np.array(pos2)
## Calculate R
r1 = r2 = None
sel1_r = q1>thr_r
q1r = q1[sel1_r]
pos1r = pos1[sel1_r]
sel2_r = q2>thr_r
q2r = q2[sel2_r]
pos2r = pos2[sel2_r]
if len(pos1r) == 0 or len(pos2r) == 0:
c1 += 1
continue
pos1_phi = rf.from_cartesian_to_cyl(np.array(pos1r))[:,1]
diff_sign = min(pos1_phi) < 0 < max(pos1_phi)
if diff_sign & (np.abs(np.min(pos1_phi))>np.pi/2.):
pos1_phi[pos1_phi<0] = np.pi + np.pi + pos1_phi[pos1_phi<0]
mean_phi = np.average(pos1_phi, weights=q1r)
var_phi1 = np.average((pos1_phi-mean_phi)**2, weights=q1r)
r1 = Rpos(np.sqrt(var_phi1)).value
pos2_phi = rf.from_cartesian_to_cyl(np.array(pos2r))[:,1]
diff_sign = min(pos2_phi) < 0 < max(pos2_phi)
if diff_sign & (np.abs(np.min(pos2_phi))>np.pi/2.):
pos2_phi[pos2_phi<0] = np.pi + np.pi + pos2_phi[pos2_phi<0]
mean_phi = np.average(pos2_phi, weights=q2r)
var_phi2 = np.average((pos2_phi-mean_phi)**2, weights=q2r)
r2 = Rpos(np.sqrt(var_phi2)).value
sel1_phi = q1>thr_phi
q1phi = q1[sel1_phi]
pos1phi = pos1[sel1_phi]
sel2_phi = q2>thr_phi
q2phi = q2[sel2_phi]
pos2phi | parse_args | identifier_name |
|
tof_coincidences_jitters_correct_Paola.py | = np.arange(0, 5000)
#time = time + (time_bin/2)
spe_resp, norm = tf.apply_spe_dist(time, tau_sipm)
sigma_sipm = 0 #80 #ps
sigma_elec = 0 #30 #ps
#n_pe = 1
arguments = parse_args(sys.argv)
start = arguments.first_file
numb = arguments.n_files
thr_r = arguments.thr_r
thr_phi = arguments.thr_phi
thr_z = arguments.thr_z
thr_e = arguments.thr_e
n_pe = arguments.n_pe
timestamp_thr = arguments.thr_charge
eventsPath = arguments.events_path
file_name = arguments.file_name
rpos_file = arguments.rpos_file
data_path = arguments.data_path
print(f'Using r map: {rpos_file}')
evt_file = f"{data_path}/tof_coincidences_Paola_npe{n_pe}_thr{timestamp_thr}_{start}_{numb}_{thr_r}_{thr_phi}_{thr_z}_{thr_e}"
Rpos = load_map(rpos_file,
group = "Radius",
node = f"f{int(thr_r)}pes150bins",
x_name = "PhiRms",
y_name = "Rpos",
u_name = "RposUncertainty")
#charge_range = (2000, 2250) # pde 0.30, n=1.6
#charge_range = (0, 5000)
charge_range = (1050, 1300)
print(f'Charge range = {charge_range}')
c0 = c1 = c2 = c3 = c4 = 0
bad = 0
boh0 = boh1 = boh2 = 0
below_thr = 0
true_r1, true_phi1, true_z1 = [], [], []
reco_r1, reco_phi1, reco_z1 = [], [], []
true_r2, true_phi2, true_z2 = [], [], []
reco_r2, reco_phi2, reco_z2 = [], [], []
sns_response1, sns_response2 = [], []
### PETsys thresholds to extract the timestamp
#timestamp_thr = 0.25
first_sipm1 = []
first_sipm2 = []
first_time1 = []
first_time2 = []
true_time1, true_time2 = [], []
touched_sipms1, touched_sipms2 = [], []
photo1, photo2 = [], []
max_hit_distance1, max_hit_distance2 = [], []
hit_energy1, hit_energy2 = [], []
event_ids = []
for number in range(start, start+numb):
number_str = "{:03d}".format(number)
filename = f"{eventsPath}/{file_name}.{number_str}.pet.h5"
try:
#sns_response = load_mcsns_response(filename)
sns_response = pd.read_hdf(filename, 'MC/waveforms')
except ValueError:
print(f'File {filename} not found')
continue
except OSError:
print(f'File {filename} not found')
continue
except KeyError:
print(f'No object named MC/waveforms in file {filename}')
continue
print(f'Analyzing file {filename}')
tof_bin_size = read_sensor_bin_width_from_conf(filename, tof=True)
particles = pd.read_hdf(filename, 'MC/particles')
hits = pd.read_hdf(filename, 'MC/hits')
#sns_response = snsf.apply_sipm_pde(sns_response, 0.3)
#sns_response = snsf.apply_charge_fluctuation(sns_response, DataSiPM_idx)
tof_response = pd.read_hdf(filename, 'MC/tof_waveforms')
events = particles.event_id.unique()
for evt in events[:]:
evt_sns = sns_response[sns_response.event_id == evt]
evt_sns = rf.find_SiPMs_over_threshold(evt_sns, threshold=thr_e)
if len(evt_sns) == 0:
boh0 += 1
continue
ids_over_thr = evt_sns.sensor_id.astype('int64').values
evt_parts = particles[particles.event_id == evt]
evt_hits = hits[hits.event_id == evt]
evt_tof = tof_response[tof_response.event_id == evt]
# if evt_hits.energy.sum() < 0.511:
# below_thr += 1
# continue
if len(evt_tof) == 0:
boh1 += 1
continue
evt_tof = evt_tof[evt_tof.sensor_id.isin(-ids_over_thr)]
if len(evt_tof) == 0:
boh2 += 1
continue
pos1, pos2, q1, q2, true_pos1, true_pos2, true_t1, true_t2, sns1, sns2 = rf.reconstruct_coincidences(evt_sns, charge_range, DataSiPM_idx, evt_parts, evt_hits)
if len(pos1) == 0 or len(pos2) == 0:
c0 += 1
continue
q1 = np.array(q1)
q2 = np.array(q2)
pos1 = np.array(pos1)
pos2 = np.array(pos2)
## Calculate R
r1 = r2 = None
sel1_r = q1>thr_r
q1r = q1[sel1_r]
pos1r = pos1[sel1_r]
sel2_r = q2>thr_r
q2r = q2[sel2_r]
pos2r = pos2[sel2_r]
if len(pos1r) == 0 or len(pos2r) == 0:
|
pos1_phi = rf.from_cartesian_to_cyl(np.array(pos1r))[:,1]
diff_sign = min(pos1_phi) < 0 < max(pos1_phi)
if diff_sign & (np.abs(np.min(pos1_phi))>np.pi/2.):
pos1_phi[pos1_phi<0] = np.pi + np.pi + pos1_phi[pos1_phi<0]
mean_phi = np.average(pos1_phi, weights=q1r)
var_phi1 = np.average((pos1_phi-mean_phi)**2, weights=q1r)
r1 = Rpos(np.sqrt(var_phi1)).value
pos2_phi = rf.from_cartesian_to_cyl(np.array(pos2r))[:,1]
diff_sign = min(pos2_phi) < 0 < max(pos2_phi)
if diff_sign & (np.abs(np.min(pos2_phi))>np.pi/2.):
pos2_phi[pos2_phi<0] = np.pi + np.pi + pos2_phi[pos2_phi<0]
mean_phi = np.average(pos2_phi, weights=q2r)
var_phi2 = np.average((pos2_phi-mean_phi)**2, weights=q2r)
r2 = Rpos(np.sqrt(var_phi2)).value
sel1_phi = q1>thr_phi
q1phi = q1[sel1_phi]
pos1phi = pos1[sel1_phi]
sel2_phi = q2>thr_phi
q2phi = q2[sel2_phi]
pos2phi = pos2[sel2_phi]
if len(q1phi) == 0 or len(q2phi) == 0:
c2 += 1
continue
phi1 = phi2 = None
reco_cart_pos = np.average(pos1phi, weights=q1phi, axis=0)
phi1 = np.arctan2(reco_cart_pos[1], reco_cart_pos[0])
reco_cart_pos = np.average(pos2phi, weights=q2phi, axis=0)
phi2 = np.arctan2(reco_cart_pos[1], reco_cart_pos[0])
sel1_z = q1>thr_z
q1z = q1[sel1_z]
pos1z = pos1[sel1_z]
sel2_z = q2>thr_z
q2z = q2[sel2_z]
pos2z = pos2[sel2_z]
if len(q1z) == 0 or len(q2z) == 0:
c3 += 1
continue
z1 = z2 = None
reco_cart_pos = np.average(pos1z, weights=q1z, axis=0)
z1 = reco_cart_pos[2]
reco_cart_pos = np.average(pos2z, weights=q2z, axis=0)
z2 = reco_cart_pos[2]
sel1_e = q1>thr_e
q1e = q1[sel1_e]
sel2_e = q2>thr_e
q2e = q2[sel2_e]
if len(q1e) == 0 or len(q2e) == 0:
c4 += 1
continue
times = evt_tof.time_bin.values * tof_bin_size / units.ps | c1 += 1
continue | conditional_block |
tooltip.directive.ts | material design tooltip to the host element. Animates the showing and
* hiding of a tooltip at the provided position (defaults to below the element).
*/
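// Example usage in a host template (hypothetical markup; the input names
// are the ones declared on this directive):
//
//   <button appTooltip="Saves the form"
//           appTooltipPosition="above"
//           [appTooltipDisabled]="isSaving">
//     Save
//   </button>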
@Directive({
selector: '[appTooltip]',
exportAs: 'appTooltip',
host: {
'(longpress)': 'show()',
'(keydown)': '_handleKeydown($event)',
'(touchend)': 'hide(' + TOUCHEND_HIDE_DELAY + ')'
}
})
export class TooltipDirective implements OnDestroy {
_overlayRef: OverlayRef | null;
_tooltipInstance: TooltipComponent | null;
private _position: TooltipPosition = 'below';
private _disabled: boolean = false;
private _tooltipClass: string | string[] | Set<string> | { [key: string]: any };
/** Allows the user to define the position of the tooltip relative to the parent element */
@Input('appTooltipPosition')
get position(): TooltipPosition { return this._position; }
set position(value: TooltipPosition) {
if (value !== this._position) {
this._position = value;
// TODO(andrewjs): When the overlay's position can be dynamically changed, do not destroy
// the tooltip.
if (this._tooltipInstance) {
this._disposeTooltip();
}
}
}
/** Disables the display of the tooltip. */
@Input('appTooltipDisabled')
get disabled(): boolean { return this._disabled; }
set disabled(value) {
this._disabled = CoercionHelper.coerceBoolean(value);
// If tooltip is disabled, hide immediately.
if (this._disabled) {
this.hide(0);
}
}
/** The default delay in ms before showing the tooltip after show is called */
@Input('appTooltipShowDelay') showDelay = 0;
/** The default delay in ms before hiding the tooltip after hide is called */
@Input('appTooltipHideDelay') hideDelay = 0;
private _message = '';
/** The message to be displayed in the tooltip */
@Input('appTooltip')
get message() { return this._message; }
set message(value: string) {
// If the message is not a string (e.g. number), convert it to a string and trim it.
this._message = value != null ? `${value}`.trim() : '';
this._updateTooltipMessage();
}
/** Classes to be passed to the tooltip. Supports the same syntax as `ngClass`. */
@Input('appTooltipClass')
get tooltipClass() { return this._tooltipClass; }
set tooltipClass(value: string | string[] | Set<string> | { [key: string]: any }) {
this._tooltipClass = value;
if (this._tooltipInstance) {
this._setTooltipClass(this._tooltipClass);
}
}
private _enterListener: Function;
private _leaveListener: Function;
constructor (
renderer: Renderer2,
private _overlayService: OverlayService,
private _elementRef: ElementRef,
private _scrollDispatcher: ScrollDispatcherService,
private _viewContainerRef: ViewContainerRef,
private _ngZone: NgZone,
private _platform: Platform,
private _focusMonitorService: FocusMonitorService,
@Inject(TOOLTIP_SCROLL_STRATEGY) private _scrollStrategy) {
// The mouse events shouldn't be bound on iOS devices, because
// they can prevent the first tap from firing its click event.
if (!_platform.IOS) {
this._enterListener =
renderer.listen(_elementRef.nativeElement, 'mouseenter', () => this.show());
this._leaveListener =
renderer.listen(_elementRef.nativeElement, 'mouseleave', () => this.hide());
}
_focusMonitorService.monitor(_elementRef.nativeElement, false).subscribe(origin => {
// Note that the focus monitor runs outside the Angular zone.
if (!origin) {
_ngZone.run(() => this.hide(0));
} else if (origin !== 'program') {
_ngZone.run(() => this.show());
}
});
}
/**
* Dispose the tooltip when destroyed.
*/
ngOnDestroy() {
if (this._tooltipInstance) {
this._disposeTooltip();
}
// Clean up the event listeners set in the constructor
if (!this._platform.IOS) {
this._enterListener();
this._leaveListener();
}
this._focusMonitorService.stopMonitoring(this._elementRef.nativeElement);
}
/** Shows the tooltip after the delay in ms, defaults to tooltip-delay-show or 0ms if no input */
show(delay: number = this.showDelay): void {
if (this.disabled || !this.message) { return; }
if (!this._tooltipInstance) {
this._createTooltip();
}
this._setTooltipClass(this._tooltipClass);
this._updateTooltipMessage();
this._tooltipInstance!.show(this._position, delay);
}
/** Hides the tooltip after the delay in ms, defaults to tooltip-delay-hide or 0ms if no input */
hide(delay: number = this.hideDelay): void {
if (this._tooltipInstance) {
this._tooltipInstance.hide(delay);
}
}
/** Shows/hides the tooltip */
toggle(): void {
this._isTooltipVisible() ? this.hide() : this.show();
}
/** Returns true if the tooltip is currently visible to the user */
_isTooltipVisible(): boolean {
return !!this._tooltipInstance && this._tooltipInstance.isVisible();
}
/** Handles the keydown events on the host element. */
_handleKeydown(e: KeyboardEvent) {
if (this._isTooltipVisible() && e.keyCode === KeyCodes.ESCAPE) {
e.stopPropagation();
this.hide(0);
}
}
/** Create the tooltip to display */
private _createTooltip(): void {
const overlayRef = this._createOverlay();
const portal = new ComponentPortal(TooltipComponent, this._viewContainerRef);
this._tooltipInstance = overlayRef.attach(portal).instance;
// Dispose of the tooltip when the overlay is detached.
merge(this._tooltipInstance!.afterHidden(), overlayRef.detachments()).subscribe(() => {
// Check first if the tooltip has already been removed through this components destroy.
if (this._tooltipInstance) {
this._disposeTooltip();
}
});
}
/** Create the overlay config and position strategy */
private _createOverlay(): OverlayRef {
const origin = this._getOrigin();
const overlay = this._getOverlayPosition();
// Create connected position strategy that listens for scroll events to reposition.
const strategy = this._overlayService
.position()
.connectedTo(this._elementRef, origin.main, overlay.main)
.withFallbackPosition(origin.fallback, overlay.fallback);
const scrollableAncestors = this._scrollDispatcher
.getAncestorScrollContainers(this._elementRef);
strategy.withScrollableContainers(scrollableAncestors);
strategy.onPositionChange.subscribe(change => {
if (this._tooltipInstance) {
if (change.scrollableViewProperties.isOverlayClipped && this._tooltipInstance.isVisible()) {
// After position changes occur and the overlay is clipped by
// a parent scrollable then close the tooltip.
this.hide(0);
} else {
// Otherwise recalculate the origin based on the new position.
this._tooltipInstance._setTransformOrigin(change.connectionPair);
}
}
});
const config = new OverlayConfig({
positionStrategy: strategy,
panelClass: TOOLTIP_PANEL_CLASS,
scrollStrategy: this._scrollStrategy()
});
this._overlayRef = this._overlayService.create(config);
return this._overlayRef;
}
/** Disposes the current tooltip and the overlay it is attached to */
private | (): void {
if (this._overlayRef) {
this._overlayRef.dispose();
this._overlayRef = null;
}
this._tooltipInstance = null;
}
/**
* Returns the origin position and a fallback position based on the user's position preference.
* The fallback position is the inverse of the origin (e.g. 'below' -> 'above').
*/
_getOrigin(): { main: OriginConnectionPosition, fallback: OriginConnectionPosition } {
let position: OriginConnectionPosition;
if (this.position == 'above' || this.position == 'below') {
position = { originX: 'center', originY: this.position == 'above' ? 'top' : 'bottom' };
} else if (this.position == 'left') {
position = { originX: 'start', originY: 'center' };
} else if (this.position == 'right') {
position = { originX: 'end', originY: 'center' };
} else {
throw getAppTooltipInvalidPositionError(this.position);
}
const { x, y } = this._invertPosition(position.originX, position.originY);
return {
main: position,
fallback: { originX: x, originY: y }
};
}
/** Returns the overlay position and a fallback position based on the user's preference */
_getOverlayPosition(): { main: OverlayConnectionPosition, fallback: OverlayConnectionPosition } {
let position: OverlayConnectionPosition;
if (this.position == 'above') {
position = { overlayX: 'center', overlayY: 'bottom' };
} else if (this | _disposeTooltip | identifier_name |
tooltip.directive.ts | message = value != null ? `${value}`.trim() : '';
this._updateTooltipMessage();
}
/** Classes to be passed to the tooltip. Supports the same syntax as `ngClass`. */
@Input('appTooltipClass')
get tooltipClass() { return this._tooltipClass; }
set tooltipClass(value: string | string[] | Set<string> | { [key: string]: any }) {
this._tooltipClass = value;
if (this._tooltipInstance) {
this._setTooltipClass(this._tooltipClass);
}
}
private _enterListener: Function;
private _leaveListener: Function;
constructor (
renderer: Renderer2,
private _overlayService: OverlayService,
private _elementRef: ElementRef,
private _scrollDispatcher: ScrollDispatcherService,
private _viewContainerRef: ViewContainerRef,
private _ngZone: NgZone,
private _platform: Platform,
private _focusMonitorService: FocusMonitorService,
@Inject(TOOLTIP_SCROLL_STRATEGY) private _scrollStrategy) {
// The mouse events shouldn't be bound on iOS devices, because
// they can prevent the first tap from firing its click event.
if (!_platform.IOS) {
this._enterListener =
renderer.listen(_elementRef.nativeElement, 'mouseenter', () => this.show());
this._leaveListener =
renderer.listen(_elementRef.nativeElement, 'mouseleave', () => this.hide());
}
_focusMonitorService.monitor(_elementRef.nativeElement, false).subscribe(origin => {
// Note that the focus monitor runs outside the Angular zone.
if (!origin) {
_ngZone.run(() => this.hide(0));
} else if (origin !== 'program') {
_ngZone.run(() => this.show());
}
});
}
/**
* Dispose the tooltip when destroyed.
*/
ngOnDestroy() {
if (this._tooltipInstance) {
this._disposeTooltip();
}
// Clean up the event listeners set in the constructor
if (!this._platform.IOS) {
this._enterListener();
this._leaveListener();
}
this._focusMonitorService.stopMonitoring(this._elementRef.nativeElement);
}
/** Shows the tooltip after the delay in ms, defaults to tooltip-delay-show or 0ms if no input */
show(delay: number = this.showDelay): void {
if (this.disabled || !this.message) { return; }
if (!this._tooltipInstance) {
this._createTooltip();
}
this._setTooltipClass(this._tooltipClass);
this._updateTooltipMessage();
this._tooltipInstance!.show(this._position, delay);
}
/** Hides the tooltip after the delay in ms, defaults to tooltip-delay-hide or 0ms if no input */
hide(delay: number = this.hideDelay): void {
if (this._tooltipInstance) {
this._tooltipInstance.hide(delay);
}
}
/** Shows/hides the tooltip */
toggle(): void {
this._isTooltipVisible() ? this.hide() : this.show();
}
/** Returns true if the tooltip is currently visible to the user */
_isTooltipVisible(): boolean {
return !!this._tooltipInstance && this._tooltipInstance.isVisible();
}
/** Handles the keydown events on the host element. */
_handleKeydown(e: KeyboardEvent) {
if (this._isTooltipVisible() && e.keyCode === KeyCodes.ESCAPE) {
e.stopPropagation();
this.hide(0);
}
}
/** Create the tooltip to display */
private _createTooltip(): void {
const overlayRef = this._createOverlay();
const portal = new ComponentPortal(TooltipComponent, this._viewContainerRef);
this._tooltipInstance = overlayRef.attach(portal).instance;
// Dispose of the tooltip when the overlay is detached.
merge(this._tooltipInstance!.afterHidden(), overlayRef.detachments()).subscribe(() => {
// Check first if the tooltip has already been removed through this components destroy.
if (this._tooltipInstance) {
this._disposeTooltip();
}
});
}
/** Create the overlay config and position strategy */
private _createOverlay(): OverlayRef {
const origin = this._getOrigin();
const overlay = this._getOverlayPosition();
// Create connected position strategy that listens for scroll events to reposition.
const strategy = this._overlayService
.position()
.connectedTo(this._elementRef, origin.main, overlay.main)
.withFallbackPosition(origin.fallback, overlay.fallback);
const scrollableAncestors = this._scrollDispatcher
.getAncestorScrollContainers(this._elementRef);
strategy.withScrollableContainers(scrollableAncestors);
strategy.onPositionChange.subscribe(change => {
if (this._tooltipInstance) {
if (change.scrollableViewProperties.isOverlayClipped && this._tooltipInstance.isVisible()) {
// After position changes occur and the overlay is clipped by
// a parent scrollable then close the tooltip.
this.hide(0);
} else {
// Otherwise recalculate the origin based on the new position.
this._tooltipInstance._setTransformOrigin(change.connectionPair);
}
}
});
const config = new OverlayConfig({
positionStrategy: strategy,
panelClass: TOOLTIP_PANEL_CLASS,
scrollStrategy: this._scrollStrategy()
});
this._overlayRef = this._overlayService.create(config);
return this._overlayRef;
}
/** Disposes the current tooltip and the overlay it is attached to */
private _disposeTooltip(): void {
if (this._overlayRef) {
this._overlayRef.dispose();
this._overlayRef = null;
}
this._tooltipInstance = null;
}
/**
* Returns the origin position and a fallback position based on the user's position preference.
* The fallback position is the inverse of the origin (e.g. 'below' -> 'above').
*/
_getOrigin(): { main: OriginConnectionPosition, fallback: OriginConnectionPosition } {
let position: OriginConnectionPosition;
if (this.position == 'above' || this.position == 'below') {
position = { originX: 'center', originY: this.position == 'above' ? 'top' : 'bottom' };
} else if (this.position == 'left') {
position = { originX: 'start', originY: 'center' };
} else if (this.position == 'right') {
position = { originX: 'end', originY: 'center' };
} else {
throw getAppTooltipInvalidPositionError(this.position);
}
const { x, y } = this._invertPosition(position.originX, position.originY);
return {
main: position,
fallback: { originX: x, originY: y }
};
}
/** Returns the overlay position and a fallback position based on the user's preference */
_getOverlayPosition(): { main: OverlayConnectionPosition, fallback: OverlayConnectionPosition } {
let position: OverlayConnectionPosition;
if (this.position == 'above') {
position = { overlayX: 'center', overlayY: 'bottom' };
} else if (this.position == 'below') {
position = { overlayX: 'center', overlayY: 'top' };
} else if (this.position == 'left') {
position = { overlayX: 'end', overlayY: 'center' };
} else if (this.position == 'right') {
position = { overlayX: 'start', overlayY: 'center' };
} else {
throw getAppTooltipInvalidPositionError(this.position);
}
const { x, y } = this._invertPosition(position.overlayX, position.overlayY);
return {
main: position,
fallback: { overlayX: x, overlayY: y }
};
}
/** Updates the tooltip message and repositions the overlay according to the new message length */
private _updateTooltipMessage() {
// Must wait for the message to be painted to the tooltip so that the overlay can properly
// calculate the correct positioning based on the size of the text.
if (this._tooltipInstance) {
this._tooltipInstance.message = this.message;
this._tooltipInstance._markForCheck();
this._ngZone.onMicrotaskEmpty.asObservable().pipe(first()).subscribe(() => {
if (this._tooltipInstance) {
this._overlayRef!.updatePosition();
}
});
}
}
/** Updates the tooltip class */
private _setTooltipClass(tooltipClass: string | string[] | Set<string> | { [key: string]: any }) {
if (this._tooltipInstance) {
this._tooltipInstance.tooltipClass = tooltipClass;
this._tooltipInstance._markForCheck();
}
}
/** Inverts an overlay position. */
private _invertPosition(x: HorizontalConnectionPos, y: VerticalConnectionPos) {
if (this.position === 'above' || this.position === 'below') {
if (y === 'top') {
y = 'bottom';
} else if (y === 'bottom') {
y = 'top';
}
} else | {
if (x === 'end') {
x = 'start';
} else if (x === 'start') {
x = 'end';
}
} | conditional_block |
|
tooltip.directive.ts | material design tooltip to the host element. Animates the showing and
* hiding of a tooltip at the provided position (defaults to below the element).
*/
@Directive({
selector: '[appTooltip]',
exportAs: 'appTooltip',
host: {
'(longpress)': 'show()',
'(keydown)': '_handleKeydown($event)',
'(touchend)': 'hide(' + TOUCHEND_HIDE_DELAY + ')'
}
})
export class TooltipDirective implements OnDestroy {
_overlayRef: OverlayRef | null;
_tooltipInstance: TooltipComponent | null;
private _position: TooltipPosition = 'below';
private _disabled: boolean = false;
private _tooltipClass: string | string[] | Set<string> | { [key: string]: any };
/** Allows the user to define the position of the tooltip relative to the parent element */
@Input('appTooltipPosition')
get position(): TooltipPosition { return this._position; }
set position(value: TooltipPosition) {
if (value !== this._position) {
this._position = value;
// TODO(andrewjs): When the overlay's position can be dynamically changed, do not destroy
// the tooltip.
if (this._tooltipInstance) {
this._disposeTooltip();
}
}
}
/** Disables the display of the tooltip. */
@Input('appTooltipDisabled')
get disabled(): boolean { return this._disabled; }
set disabled(value) {
this._disabled = CoercionHelper.coerceBoolean(value);
// If tooltip is disabled, hide immediately.
if (this._disabled) {
this.hide(0);
}
}
/** The default delay in ms before showing the tooltip after show is called */
@Input('appTooltipShowDelay') showDelay = 0;
/** The default delay in ms before hiding the tooltip after hide is called */
@Input('appTooltipHideDelay') hideDelay = 0;
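// These inputs only set defaults: show()/hide() accept an explicit per-call
// delay that takes precedence (e.g. hide(0) hides immediately).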
private _message = '';
/** The message to be displayed in the tooltip */
@Input('appTooltip')
get message() { return this._message; }
set message(value: string) {
// If the message is not a string (e.g. number), convert it to a string and trim it.
this._message = value != null ? `${value}`.trim() : '';
this._updateTooltipMessage();
}
/** Classes to be passed to the tooltip. Supports the same syntax as `ngClass`. */
@Input('appTooltipClass')
get tooltipClass() |
set tooltipClass(value: string | string[] | Set<string> | { [key: string]: any }) {
this._tooltipClass = value;
if (this._tooltipInstance) {
this._setTooltipClass(this._tooltipClass);
}
}
private _enterListener: Function;
private _leaveListener: Function;
constructor (
renderer: Renderer2,
private _overlayService: OverlayService,
private _elementRef: ElementRef,
private _scrollDispatcher: ScrollDispatcherService,
private _viewContainerRef: ViewContainerRef,
private _ngZone: NgZone,
private _platform: Platform,
private _focusMonitorService: FocusMonitorService,
@Inject(TOOLTIP_SCROLL_STRATEGY) private _scrollStrategy) {
// The mouse events shouldn't be bound on iOS devices, because
// they can prevent the first tap from firing its click event.
if (!_platform.IOS) {
this._enterListener =
renderer.listen(_elementRef.nativeElement, 'mouseenter', () => this.show());
this._leaveListener =
renderer.listen(_elementRef.nativeElement, 'mouseleave', () => this.hide());
}
_focusMonitorService.monitor(_elementRef.nativeElement, false).subscribe(origin => {
// Note that the focus monitor runs outside the Angular zone.
if (!origin) {
_ngZone.run(() => this.hide(0));
} else if (origin !== 'program') {
_ngZone.run(() => this.show());
}
});
}
/**
* Dispose the tooltip when destroyed.
*/
ngOnDestroy() {
if (this._tooltipInstance) {
this._disposeTooltip();
}
// Clean up the event listeners set in the constructor
if (!this._platform.IOS) {
this._enterListener();
this._leaveListener();
}
this._focusMonitorService.stopMonitoring(this._elementRef.nativeElement);
}
/** Shows the tooltip after the delay in ms, defaults to tooltip-delay-show or 0ms if no input */
show(delay: number = this.showDelay): void {
if (this.disabled || !this.message) { return; }
if (!this._tooltipInstance) {
this._createTooltip();
}
this._setTooltipClass(this._tooltipClass);
this._updateTooltipMessage();
this._tooltipInstance!.show(this._position, delay);
}
/** Hides the tooltip after the delay in ms, defaults to tooltip-delay-hide or 0ms if no input */
hide(delay: number = this.hideDelay): void {
if (this._tooltipInstance) {
this._tooltipInstance.hide(delay);
}
}
/** Shows/hides the tooltip */
toggle(): void {
this._isTooltipVisible() ? this.hide() : this.show();
}
/** Returns true if the tooltip is currently visible to the user */
_isTooltipVisible(): boolean {
return !!this._tooltipInstance && this._tooltipInstance.isVisible();
}
/** Handles the keydown events on the host element. */
_handleKeydown(e: KeyboardEvent) {
if (this._isTooltipVisible() && e.keyCode === KeyCodes.ESCAPE) {
e.stopPropagation();
this.hide(0);
}
}
/** Create the tooltip to display */
private _createTooltip(): void {
const overlayRef = this._createOverlay();
const portal = new ComponentPortal(TooltipComponent, this._viewContainerRef);
this._tooltipInstance = overlayRef.attach(portal).instance;
// Dispose of the tooltip when the overlay is detached.
merge(this._tooltipInstance!.afterHidden(), overlayRef.detachments()).subscribe(() => {
// Check first if the tooltip has already been removed through this components destroy.
if (this._tooltipInstance) {
this._disposeTooltip();
}
});
}
/** Create the overlay config and position strategy */
private _createOverlay(): OverlayRef {
const origin = this._getOrigin();
const overlay = this._getOverlayPosition();
// Create connected position strategy that listens for scroll events to reposition.
const strategy = this._overlayService
.position()
.connectedTo(this._elementRef, origin.main, overlay.main)
.withFallbackPosition(origin.fallback, overlay.fallback);
const scrollableAncestors = this._scrollDispatcher
.getAncestorScrollContainers(this._elementRef);
strategy.withScrollableContainers(scrollableAncestors);
strategy.onPositionChange.subscribe(change => {
if (this._tooltipInstance) {
if (change.scrollableViewProperties.isOverlayClipped && this._tooltipInstance.isVisible()) {
// After position changes occur and the overlay is clipped by
// a parent scrollable then close the tooltip.
this.hide(0);
} else {
// Otherwise recalculate the origin based on the new position.
this._tooltipInstance._setTransformOrigin(change.connectionPair);
}
}
});
const config = new OverlayConfig({
positionStrategy: strategy,
panelClass: TOOLTIP_PANEL_CLASS,
scrollStrategy: this._scrollStrategy()
});
this._overlayRef = this._overlayService.create(config);
return this._overlayRef;
}
/** Disposes the current tooltip and the overlay it is attached to */
private _disposeTooltip(): void {
if (this._overlayRef) {
this._overlayRef.dispose();
this._overlayRef = null;
}
this._tooltipInstance = null;
}
/**
* Returns the origin position and a fallback position based on the user's position preference.
* The fallback position is the inverse of the origin (e.g. 'below' -> 'above').
*/
_getOrigin(): { main: OriginConnectionPosition, fallback: OriginConnectionPosition } {
let position: OriginConnectionPosition;
if (this.position == 'above' || this.position == 'below') {
position = { originX: 'center', originY: this.position == 'above' ? 'top' : 'bottom' };
} else if (this.position == 'left') {
position = { originX: 'start', originY: 'center' };
} else if (this.position == 'right') {
position = { originX: 'end', originY: 'center' };
} else {
throw getAppTooltipInvalidPositionError(this.position);
}
const { x, y } = this._invertPosition(position.originX, position.originY);
return {
main: position,
fallback: { originX: x, originY: y }
};
}
/** Returns the overlay position and a fallback position based on the user's preference */
_getOverlayPosition(): { main: OverlayConnectionPosition, fallback: OverlayConnectionPosition } {
let position: OverlayConnectionPosition;
if (this.position == 'above') {
position = { overlayX: 'center', overlayY: 'bottom' };
} else if ( | { return this._tooltipClass; } | identifier_body |
tooltip.directive.ts | material design tooltip to the host element. Animates the showing and
* hiding of a tooltip at the provided position (defaults to below the element).
*/
@Directive({
selector: '[appTooltip]',
exportAs: 'appTooltip',
host: {
'(longpress)': 'show()',
'(keydown)': '_handleKeydown($event)',
'(touchend)': 'hide(' + TOUCHEND_HIDE_DELAY + ')'
}
})
export class TooltipDirective implements OnDestroy {
_overlayRef: OverlayRef | null;
_tooltipInstance: TooltipComponent | null;
private _position: TooltipPosition = 'below';
private _disabled: boolean = false;
private _tooltipClass: string | string[] | Set<string> | { [key: string]: any };
/** Allows the user to define the position of the tooltip relative to the parent element */
@Input('appTooltipPosition')
get position(): TooltipPosition { return this._position; }
set position(value: TooltipPosition) {
if (value !== this._position) {
this._position = value;
// TODO(andrewjs): When the overlay's position can be dynamically changed, do not destroy
// the tooltip.
if (this._tooltipInstance) {
this._disposeTooltip();
}
}
}
/** Disables the display of the tooltip. */
@Input('appTooltipDisabled')
get disabled(): boolean { return this._disabled; }
set disabled(value) {
this._disabled = CoercionHelper.coerceBoolean(value);
// If tooltip is disabled, hide immediately.
if (this._disabled) {
this.hide(0);
}
}
/** The default delay in ms before showing the tooltip after show is called */
@Input('appTooltipShowDelay') showDelay = 0;
/** The default delay in ms before hiding the tooltip after hide is called */
@Input('appTooltipHideDelay') hideDelay = 0;
private _message = '';
/** The message to be displayed in the tooltip */
@Input('appTooltip')
get message() { return this._message; }
set message(value: string) {
// If the message is not a string (e.g. number), convert it to a string and trim it.
this._message = value != null ? `${value}`.trim() : '';
this._updateTooltipMessage();
}
/** Classes to be passed to the tooltip. Supports the same syntax as `ngClass`. */
@Input('appTooltipClass')
get tooltipClass() { return this._tooltipClass; }
set tooltipClass(value: string | string[] | Set<string> | { [key: string]: any }) {
this._tooltipClass = value;
if (this._tooltipInstance) {
this._setTooltipClass(this._tooltipClass);
}
}
private _enterListener: Function;
private _leaveListener: Function;
constructor (
renderer: Renderer2,
private _overlayService: OverlayService,
private _elementRef: ElementRef,
private _scrollDispatcher: ScrollDispatcherService,
private _viewContainerRef: ViewContainerRef,
private _ngZone: NgZone,
private _platform: Platform,
private _focusMonitorService: FocusMonitorService,
@Inject(TOOLTIP_SCROLL_STRATEGY) private _scrollStrategy) {
// The mouse events shouldn't be bound on iOS devices, because
// they can prevent the first tap from firing its click event.
if (!_platform.IOS) {
this._enterListener =
renderer.listen(_elementRef.nativeElement, 'mouseenter', () => this.show());
this._leaveListener =
renderer.listen(_elementRef.nativeElement, 'mouseleave', () => this.hide());
}
_focusMonitorService.monitor(_elementRef.nativeElement, false).subscribe(origin => {
// Note that the focus monitor runs outside the Angular zone.
if (!origin) {
_ngZone.run(() => this.hide(0));
} else if (origin !== 'program') {
_ngZone.run(() => this.show());
}
});
}
/**
* Dispose the tooltip when destroyed.
*/
ngOnDestroy() {
if (this._tooltipInstance) {
this._disposeTooltip();
}
// Clean up the event listeners set in the constructor
if (!this._platform.IOS) {
this._enterListener();
this._leaveListener();
}
this._focusMonitorService.stopMonitoring(this._elementRef.nativeElement);
}
/** Shows the tooltip after the delay in ms, defaults to tooltip-delay-show or 0ms if no input */
show(delay: number = this.showDelay): void {
if (this.disabled || !this.message) { return; }
if (!this._tooltipInstance) {
this._createTooltip();
}
this._setTooltipClass(this._tooltipClass);
this._updateTooltipMessage();
this._tooltipInstance!.show(this._position, delay);
}
/** Hides the tooltip after the delay in ms, defaults to tooltip-delay-hide or 0ms if no input */
hide(delay: number = this.hideDelay): void {
if (this._tooltipInstance) {
this._tooltipInstance.hide(delay);
}
}
/** Shows/hides the tooltip */
toggle(): void {
this._isTooltipVisible() ? this.hide() : this.show();
}
/** Returns true if the tooltip is currently visible to the user */
_isTooltipVisible(): boolean {
return !!this._tooltipInstance && this._tooltipInstance.isVisible();
}
/** Handles the keydown events on the host element. */
_handleKeydown(e: KeyboardEvent) {
if (this._isTooltipVisible() && e.keyCode === KeyCodes.ESCAPE) {
e.stopPropagation();
this.hide(0);
}
}
/** Create the tooltip to display */
private _createTooltip(): void {
const overlayRef = this._createOverlay();
const portal = new ComponentPortal(TooltipComponent, this._viewContainerRef);
this._tooltipInstance = overlayRef.attach(portal).instance;
// Dispose of the tooltip when the overlay is detached.
merge(this._tooltipInstance!.afterHidden(), overlayRef.detachments()).subscribe(() => {
// Check first whether the tooltip has already been removed through this component's destroy.
if (this._tooltipInstance) {
this._disposeTooltip();
}
});
}
/** Create the overlay config and position strategy */
private _createOverlay(): OverlayRef {
const origin = this._getOrigin();
const overlay = this._getOverlayPosition();
// Create connected position strategy that listens for scroll events to reposition.
const strategy = this._overlayService
.position()
.connectedTo(this._elementRef, origin.main, overlay.main)
.withFallbackPosition(origin.fallback, overlay.fallback);
const scrollableAncestors = this._scrollDispatcher
.getAncestorScrollContainers(this._elementRef);
strategy.withScrollableContainers(scrollableAncestors);
strategy.onPositionChange.subscribe(change => {
if (this._tooltipInstance) {
if (change.scrollableViewProperties.isOverlayClipped && this._tooltipInstance.isVisible()) {
// If a position change leaves the overlay clipped by
// a parent scrollable, close the tooltip.
this.hide(0);
} else {
// Otherwise recalculate the origin based on the new position.
this._tooltipInstance._setTransformOrigin(change.connectionPair);
}
}
});
const config = new OverlayConfig({
positionStrategy: strategy,
panelClass: TOOLTIP_PANEL_CLASS,
scrollStrategy: this._scrollStrategy()
});
this._overlayRef = this._overlayService.create(config);
return this._overlayRef;
}
/** Disposes the current tooltip and the overlay it is attached to */
private _disposeTooltip(): void {
if (this._overlayRef) {
this._overlayRef.dispose();
this._overlayRef = null;
}
this._tooltipInstance = null;
}
/**
* Returns the origin position and a fallback position based on the user's position preference.
* The fallback position is the inverse of the origin (e.g. 'below' -> 'above').
*/
_getOrigin(): { main: OriginConnectionPosition, fallback: OriginConnectionPosition } {
let position: OriginConnectionPosition;
if (this.position == 'above' || this.position == 'below') {
position = { originX: 'center', originY: this.position == 'above' ? 'top' : 'bottom' };
} else if (this.position == 'left') {
position = { originX: 'start', originY: 'center' };
} else if (this.position == 'right') {
position = { originX: 'end', originY: 'center' };
} else {
throw getAppTooltipInvalidPositionError(this.position);
}
const { x, y } = this._invertPosition(position.originX, position.originY);
return {
main: position,
fallback: { originX: x, originY: y }
};
}
/** Returns the overlay position and a fallback position based on the user's preference */
_getOverlayPosition(): { main: OverlayConnectionPosition, fallback: OverlayConnectionPosition } { | if (this.position == 'above') {
position = { overlayX: 'center', overlayY: 'bottom' };
} else if (this.position | let position: OverlayConnectionPosition;
| random_line_split |
lib.rs | _queriable_and_has_subresources() -> Result<(), Box<dyn std::error::Error>> {
use crate::runtime::wait::{await_condition, conditions};
use serde_json::json;
let client = Client::try_default().await?;
let ssapply = PatchParams::apply("kube").force();
let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
// Server-side apply CRD and wait for it to get ready
crds.patch("foos.clux.dev", &ssapply, &Patch::Apply(Foo::crd()))
.await?;
let establish = await_condition(crds.clone(), "foos.clux.dev", conditions::is_crd_established());
let _ = tokio::time::timeout(std::time::Duration::from_secs(10), establish).await?;
// Use it
let foos: Api<Foo> = Api::default_namespaced(client.clone());
// Apply from generated struct
{
let foo = Foo::new("baz", FooSpec {
name: "baz".into(),
info: Some("old baz".into()),
replicas: 1,
});
let o = foos.patch("baz", &ssapply, &Patch::Apply(&foo)).await?;
assert_eq!(o.spec.name, "baz");
let oref = o.object_ref(&());
assert_eq!(oref.name.unwrap(), "baz");
assert_eq!(oref.uid, o.uid());
}
// Apply from partial json!
{
let patch = json!({
"apiVersion": "clux.dev/v1",
"kind": "Foo",
"spec": {
"name": "foo",
"replicas": 2
}
});
let o = foos.patch("baz", &ssapply, &Patch::Apply(patch)).await?;
assert_eq!(o.spec.replicas, 2, "patching spec updated spec.replicas");
}
// check subresource
{
let scale = foos.get_scale("baz").await?;
assert_eq!(scale.spec.unwrap().replicas, Some(2));
let status = foos.get_status("baz").await?;
assert!(status.status.is_none(), "nothing has set status");
}
// set status subresource
{
let fs = serde_json::json!({"status": FooStatus { is_bad: false, replicas: 1 }});
let o = foos
.patch_status("baz", &Default::default(), &Patch::Merge(&fs))
.await?;
assert!(o.status.is_some(), "status set after patch_status");
}
// set scale subresource
{
let fs = serde_json::json!({"spec": { "replicas": 3 }});
let o = foos
.patch_scale("baz", &Default::default(), &Patch::Merge(&fs))
.await?;
assert_eq!(o.status.unwrap().replicas, 1, "scale replicas got patched");
let linked_replicas = o.spec.unwrap().replicas.unwrap();
assert_eq!(linked_replicas, 3, "patch_scale updates linked spec.replicas");
}
// cleanup
foos.delete_collection(&DeleteParams::default(), &Default::default())
.await?;
crds.delete("foos.clux.dev", &DeleteParams::default()).await?;
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (lists pods)"]
async fn custom_serialized_objects_are_queryable_and_iterable() -> Result<(), Box<dyn std::error::Error>>
{
use crate::core::{
object::{HasSpec, HasStatus, NotUsed, Object},
ApiResource,
};
use k8s_openapi::api::core::v1::Pod;
#[derive(Clone, Deserialize, Debug)]
struct PodSpecSimple {
containers: Vec<ContainerSimple>,
}
#[derive(Clone, Deserialize, Debug)]
struct ContainerSimple {
#[allow(dead_code)]
image: String,
}
type PodSimple = Object<PodSpecSimple, NotUsed>;
// use known type information from pod (can also use discovery for this)
let ar = ApiResource::erase::<Pod>(&());
let client = Client::try_default().await?;
let api: Api<PodSimple> = Api::default_namespaced_with(client, &ar);
let mut list = api.list(&Default::default()).await?;
// check we can mutably iterate over ObjectList
for pod in &mut list {
pod.spec_mut().containers = vec![];
*pod.status_mut() = None;
pod.annotations_mut()
.entry("kube-seen".to_string())
.or_insert_with(|| "yes".to_string());
pod.labels_mut()
.entry("kube.rs".to_string())
.or_insert_with(|| "hello".to_string());
pod.finalizers_mut().push("kube-finalizer".to_string());
pod.managed_fields_mut().clear();
// NB: we are **not** pushing these back upstream - (Api::apply or Api::replace needed for it)
}
// check we can iterate over ObjectList normally - and check the mutations worked
for pod in list {
assert!(pod.annotations().get("kube-seen").is_some());
assert!(pod.labels().get("kube.rs").is_some());
assert!(pod.finalizers().contains(&"kube-finalizer".to_string()));
assert!(pod.spec().containers.is_empty());
assert!(pod.managed_fields().is_empty());
}
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (fetches api resources, and lists all)"]
#[cfg(feature = "derive")]
async fn derived_resources_discoverable() -> Result<(), Box<dyn std::error::Error>> {
use crate::{
core::{DynamicObject, GroupVersion, GroupVersionKind},
discovery::{self, verbs, ApiGroup, Discovery, Scope},
runtime::wait::{await_condition, conditions, Condition},
};
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(group = "kube.rs", version = "v1", kind = "TestCr", namespaced)]
#[kube(crates(kube_core = "crate::core"))] // for dev-dep test structure
struct TestCrSpec {}
let client = Client::try_default().await?;
// ensure the crd is installed
let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
let ssapply = PatchParams::apply("kube").force();
crds.patch("testcrs.kube.rs", &ssapply, &Patch::Apply(TestCr::crd()))
.await?;
let establish = await_condition(crds.clone(), "testcrs.kube.rs", conditions::is_crd_established());
let crd = tokio::time::timeout(std::time::Duration::from_secs(10), establish).await??;
assert!(conditions::is_crd_established().matches_object(crd.as_ref()));
tokio::time::sleep(std::time::Duration::from_secs(2)).await; // Established condition is actually not enough for api discovery :(
// create partial information for it to discover
let gvk = GroupVersionKind::gvk("kube.rs", "v1", "TestCr");
let gv = GroupVersion::gv("kube.rs", "v1");
// discover by both (recommended kind on groupversion) and (pinned gvk) and they should equal
let apigroup = discovery::oneshot::pinned_group(&client, &gv).await?;
let (ar1, caps1) = apigroup.recommended_kind("TestCr").unwrap();
let (ar2, caps2) = discovery::pinned_kind(&client, &gvk).await?;
assert_eq!(caps1.operations.len(), caps2.operations.len(), "unequal caps");
assert_eq!(ar1, ar2, "unequal apiresource");
assert_eq!(DynamicObject::api_version(&ar2), "kube.rs/v1", "unequal dynver");
// run (almost) full discovery
let discovery = Discovery::new(client.clone())
// skip something in discovery (clux.dev crd being mutated in other tests)
.exclude(&["rbac.authorization.k8s.io", "clux.dev"])
.run()
.await?;
// check our custom resource first by resolving within groups
assert!(discovery.has_group("kube.rs"), "missing group kube.rs");
let (ar, _caps) = discovery.resolve_gvk(&gvk).unwrap();
assert_eq!(ar.group, gvk.group, "unexpected discovered group");
assert_eq!(ar.version, gvk.version, "unexpected discovered ver");
assert_eq!(ar.kind, gvk.kind, "unexpected discovered kind");
// check all non-excluded groups that are iterable
let mut groups = discovery.groups_alphabetical().into_iter();
let firstgroup = groups.next().unwrap();
assert_eq!(firstgroup.name(), ApiGroup::CORE_GROUP, "core not first");
for group in groups {
for (ar, caps) in group.recommended_resources() {
if !caps.supports_operation(verbs::LIST) {
continue;
}
let api: Api<DynamicObject> = if caps.scope == Scope::Namespaced | {
Api::default_namespaced_with(client.clone(), &ar)
} | conditional_block |
|
lib.rs | (10),
//! await_condition(crds, "foos.clux.dev", conditions::is_crd_established())
//! ).await?;
//!
//! // Watch for changes to foos in the configured namespace
//! let foos: Api<Foo> = Api::default_namespaced(client.clone());
//! let wc = watcher::Config::default();
//! let mut apply_stream = watcher(foos, wc).applied_objects().boxed();
//! while let Some(f) = apply_stream.try_next().await? {
//! println!("saw apply to {}", f.name_any());
//! }
//! Ok(())
//! }
//! ```
//!
//! For details, see:
//!
//! - [`CustomResource`](crate::CustomResource) for documentation on how to configure custom resources
//! - [`runtime::watcher`](crate::runtime::watcher()) for how long-running watches work and why you want to use this over [`Api::watch`](crate::Api::watch)
//! - [`runtime`](crate::runtime) for abstractions that help with more complicated Kubernetes applications
//!
//! # Examples
//! A large list of complete, runnable examples with explanations is available in the [examples folder](https://github.com/kube-rs/kube/tree/main/examples).
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(missing_docs)]
#![forbid(unsafe_code)]
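// Helper macros: each wraps items so they are compiled only when the given
// cargo feature is enabled, and tags them with the matching docs.rs cfg hint.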
macro_rules! cfg_client {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(feature = "client")))]
#[cfg(feature = "client")]
$item
)*
}
}
macro_rules! cfg_config {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(feature = "config")))]
#[cfg(feature = "config")]
$item
)*
}
}
macro_rules! cfg_error {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(any(feature = "config", feature = "client"))))]
#[cfg(any(feature = "config", feature = "client"))]
$item
)*
}
}
cfg_client! {
pub use kube_client::api;
pub use kube_client::discovery;
pub use kube_client::client;
#[doc(inline)]
pub use api::Api;
#[doc(inline)]
pub use client::Client;
#[doc(inline)]
pub use discovery::Discovery;
}
cfg_config! {
pub use kube_client::config;
#[doc(inline)]
pub use config::Config;
}
cfg_error! {
pub use kube_client::error;
#[doc(inline)] pub use error::Error;
/// Convenient alias for `Result<T, Error>`
pub type Result<T, E = Error> = std::result::Result<T, E>;
}
/// Re-exports from [`kube-derive`](kube_derive)
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use kube_derive::CustomResource;
/// Re-exports from `kube-runtime`
#[cfg(feature = "runtime")]
#[cfg_attr(docsrs, doc(cfg(feature = "runtime")))]
#[doc(inline)]
pub use kube_runtime as runtime;
pub use crate::core::{CustomResourceExt, Resource, ResourceExt};
/// Re-exports from `kube_core`
#[doc(inline)] | #[cfg(test)]
#[cfg(all(feature = "derive", feature = "client"))]
mod test {
use crate::{
api::{DeleteParams, Patch, PatchParams},
Api, Client, CustomResourceExt, Resource, ResourceExt,
};
use kube_derive::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(group = "clux.dev", version = "v1", kind = "Foo", namespaced)]
#[kube(status = "FooStatus")]
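// Wires up the CRD scale subresource: `kubectl scale` writes .spec.replicas
// while controllers report the observed count through .status.replicas.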
#[kube(scale = r#"{"specReplicasPath":".spec.replicas", "statusReplicasPath":".status.replicas"}"#)]
#[kube(crates(kube_core = "crate::core"))] // for dev-dep test structure
pub struct FooSpec {
name: String,
info: Option<String>,
replicas: isize,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
pub struct FooStatus {
is_bad: bool,
replicas: isize,
}
#[tokio::test]
#[ignore = "needs kubeconfig"]
async fn custom_resource_generates_correct_core_structs() {
use crate::core::{ApiResource, DynamicObject, GroupVersionKind};
let client = Client::try_default().await.unwrap();
let gvk = GroupVersionKind::gvk("clux.dev", "v1", "Foo");
let api_resource = ApiResource::from_gvk(&gvk);
let a1: Api<DynamicObject> = Api::namespaced_with(client.clone(), "myns", &api_resource);
let a2: Api<Foo> = Api::namespaced(client, "myns");
// make sure they return the same url_path through their impls
assert_eq!(a1.resource_url(), a2.resource_url());
}
use k8s_openapi::{
api::core::v1::ConfigMap,
apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
};
#[tokio::test]
#[ignore = "needs cluster (creates + patches foo crd)"]
#[cfg(all(feature = "derive", feature = "runtime"))]
async fn derived_resource_queriable_and_has_subresources() -> Result<(), Box<dyn std::error::Error>> {
use crate::runtime::wait::{await_condition, conditions};
use serde_json::json;
let client = Client::try_default().await?;
let ssapply = PatchParams::apply("kube").force();
let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
// Server-side apply CRD and wait for it to get ready
crds.patch("foos.clux.dev", &ssapply, &Patch::Apply(Foo::crd()))
.await?;
let establish = await_condition(crds.clone(), "foos.clux.dev", conditions::is_crd_established());
let _ = tokio::time::timeout(std::time::Duration::from_secs(10), establish).await?;
// Use it
let foos: Api<Foo> = Api::default_namespaced(client.clone());
// Apply from generated struct
{
let foo = Foo::new("baz", FooSpec {
name: "baz".into(),
info: Some("old baz".into()),
replicas: 1,
});
let o = foos.patch("baz", &ssapply, &Patch::Apply(&foo)).await?;
assert_eq!(o.spec.name, "baz");
let oref = o.object_ref(&());
assert_eq!(oref.name.unwrap(), "baz");
assert_eq!(oref.uid, o.uid());
}
// Apply from partial json!
{
let patch = json!({
"apiVersion": "clux.dev/v1",
"kind": "Foo",
"spec": {
"name": "foo",
"replicas": 2
}
});
let o = foos.patch("baz", &ssapply, &Patch::Apply(patch)).await?;
assert_eq!(o.spec.replicas, 2, "patching spec updated spec.replicas");
}
// check subresource
{
let scale = foos.get_scale("baz").await?;
assert_eq!(scale.spec.unwrap().replicas, Some(2));
let status = foos.get_status("baz").await?;
assert!(status.status.is_none(), "nothing has set status");
}
// set status subresource
{
let fs = serde_json::json!({"status": FooStatus { is_bad: false, replicas: 1 }});
let o = foos
.patch_status("baz", &Default::default(), &Patch::Merge(&fs))
.await?;
assert!(o.status.is_some(), "status set after patch_status");
}
// set scale subresource
{
let fs = serde_json::json!({"spec": { "replicas": 3 }});
let o = foos
.patch_scale("baz", &Default::default(), &Patch::Merge(&fs))
.await?;
assert_eq!(o.status.unwrap().replicas, 1, "scale replicas got patched");
let linked_replicas = o.spec.unwrap().replicas.unwrap();
assert_eq!(linked_replicas, 3, "patch_scale updates linked spec.replicas");
}
// cleanup
foos.delete_collection(&DeleteParams::default(), &Default::default())
.await?;
crds.delete("foos.clux.dev", &DeleteParams::default()).await?;
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (lists pods)"]
async fn custom_serialized_objects_are_queryable_and_iterable() -> Result<(), Box<dyn std::error::Error>>
{
use crate::core::{
| pub use kube_core as core;
// Tests that require a cluster and the complete feature set
// Can be run with `cargo test -p kube --lib --features=runtime,derive -- --ignored` | random_line_split |
lib.rs | ();
//! let mut apply_stream = watcher(foos, wc).applied_objects().boxed();
//! while let Some(f) = apply_stream.try_next().await? {
//! println!("saw apply to {}", f.name_any());
//! }
//! Ok(())
//! }
//! ```
//!
//! For details, see:
//!
//! - [`CustomResource`](crate::CustomResource) for documentation on how to configure custom resources
//! - [`runtime::watcher`](crate::runtime::watcher()) for how long-running watches work and why you want to use this over [`Api::watch`](crate::Api::watch)
//! - [`runtime`](crate::runtime) for abstractions that help with more complicated Kubernetes applications
//!
//! # Examples
//! A large list of complete, runnable examples with explanations is available in the [examples folder](https://github.com/kube-rs/kube/tree/main/examples).
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(missing_docs)]
#![forbid(unsafe_code)]
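// Helper macros: each wraps items so they are compiled only when the given
// cargo feature is enabled, and tags them with the matching docs.rs cfg hint.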
macro_rules! cfg_client {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(feature = "client")))]
#[cfg(feature = "client")]
$item
)*
}
}
macro_rules! cfg_config {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(feature = "config")))]
#[cfg(feature = "config")]
$item
)*
}
}
macro_rules! cfg_error {
($($item:item)*) => {
$(
#[cfg_attr(docsrs, doc(cfg(any(feature = "config", feature = "client"))))]
#[cfg(any(feature = "config", feature = "client"))]
$item
)*
}
}
cfg_client! {
pub use kube_client::api;
pub use kube_client::discovery;
pub use kube_client::client;
#[doc(inline)]
pub use api::Api;
#[doc(inline)]
pub use client::Client;
#[doc(inline)]
pub use discovery::Discovery;
}
cfg_config! {
pub use kube_client::config;
#[doc(inline)]
pub use config::Config;
}
cfg_error! {
pub use kube_client::error;
#[doc(inline)] pub use error::Error;
/// Convenient alias for `Result<T, Error>`
pub type Result<T, E = Error> = std::result::Result<T, E>;
}
/// Re-exports from [`kube-derive`](kube_derive)
#[cfg(feature = "derive")]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use kube_derive::CustomResource;
/// Re-exports from `kube-runtime`
#[cfg(feature = "runtime")]
#[cfg_attr(docsrs, doc(cfg(feature = "runtime")))]
#[doc(inline)]
pub use kube_runtime as runtime;
pub use crate::core::{CustomResourceExt, Resource, ResourceExt};
/// Re-exports from `kube_core`
#[doc(inline)]
pub use kube_core as core;
// Tests that require a cluster and the complete feature set
// Can be run with `cargo test -p kube --lib --features=runtime,derive -- --ignored`
#[cfg(test)]
#[cfg(all(feature = "derive", feature = "client"))]
mod test {
use crate::{
api::{DeleteParams, Patch, PatchParams},
Api, Client, CustomResourceExt, Resource, ResourceExt,
};
use kube_derive::CustomResource;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(group = "clux.dev", version = "v1", kind = "Foo", namespaced)]
#[kube(status = "FooStatus")]
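// Wires up the CRD scale subresource: `kubectl scale` writes .spec.replicas
// while controllers report the observed count through .status.replicas.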
#[kube(scale = r#"{"specReplicasPath":".spec.replicas", "statusReplicasPath":".status.replicas"}"#)]
#[kube(crates(kube_core = "crate::core"))] // for dev-dep test structure
pub struct FooSpec {
name: String,
info: Option<String>,
replicas: isize,
}
#[derive(Deserialize, Serialize, Clone, Debug, Default, JsonSchema)]
pub struct FooStatus {
is_bad: bool,
replicas: isize,
}
#[tokio::test]
#[ignore = "needs kubeconfig"]
async fn custom_resource_generates_correct_core_structs() {
use crate::core::{ApiResource, DynamicObject, GroupVersionKind};
let client = Client::try_default().await.unwrap();
let gvk = GroupVersionKind::gvk("clux.dev", "v1", "Foo");
let api_resource = ApiResource::from_gvk(&gvk);
let a1: Api<DynamicObject> = Api::namespaced_with(client.clone(), "myns", &api_resource);
let a2: Api<Foo> = Api::namespaced(client, "myns");
// make sure they return the same url_path through their impls
assert_eq!(a1.resource_url(), a2.resource_url());
}
use k8s_openapi::{
api::core::v1::ConfigMap,
apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition,
};
#[tokio::test]
#[ignore = "needs cluster (creates + patches foo crd)"]
#[cfg(all(feature = "derive", feature = "runtime"))]
async fn derived_resource_queriable_and_has_subresources() -> Result<(), Box<dyn std::error::Error>> {
use crate::runtime::wait::{await_condition, conditions};
use serde_json::json;
let client = Client::try_default().await?;
let ssapply = PatchParams::apply("kube").force();
let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
// Server-side apply CRD and wait for it to get ready
crds.patch("foos.clux.dev", &ssapply, &Patch::Apply(Foo::crd()))
.await?;
let establish = await_condition(crds.clone(), "foos.clux.dev", conditions::is_crd_established());
let _ = tokio::time::timeout(std::time::Duration::from_secs(10), establish).await?;
// Use it
let foos: Api<Foo> = Api::default_namespaced(client.clone());
// Apply from generated struct
{
let foo = Foo::new("baz", FooSpec {
name: "baz".into(),
info: Some("old baz".into()),
replicas: 1,
});
let o = foos.patch("baz", &ssapply, &Patch::Apply(&foo)).await?;
assert_eq!(o.spec.name, "baz");
let oref = o.object_ref(&());
assert_eq!(oref.name.unwrap(), "baz");
assert_eq!(oref.uid, o.uid());
}
// Apply from partial json!
{
let patch = json!({
"apiVersion": "clux.dev/v1",
"kind": "Foo",
"spec": {
"name": "foo",
"replicas": 2
}
});
let o = foos.patch("baz", &ssapply, &Patch::Apply(patch)).await?;
assert_eq!(o.spec.replicas, 2, "patching spec updated spec.replicas");
}
// check subresource
{
let scale = foos.get_scale("baz").await?;
assert_eq!(scale.spec.unwrap().replicas, Some(2));
let status = foos.get_status("baz").await?;
assert!(status.status.is_none(), "nothing has set status");
}
// set status subresource
{
let fs = serde_json::json!({"status": FooStatus { is_bad: false, replicas: 1 }});
let o = foos
.patch_status("baz", &Default::default(), &Patch::Merge(&fs))
.await?;
assert!(o.status.is_some(), "status set after patch_status");
}
// set scale subresource
{
let fs = serde_json::json!({"spec": { "replicas": 3 }});
let o = foos
.patch_scale("baz", &Default::default(), &Patch::Merge(&fs))
.await?;
assert_eq!(o.status.unwrap().replicas, 1, "scale replicas got patched");
let linked_replicas = o.spec.unwrap().replicas.unwrap();
assert_eq!(linked_replicas, 3, "patch_scale updates linked spec.replicas");
}
// cleanup
foos.delete_collection(&DeleteParams::default(), &Default::default())
.await?;
crds.delete("foos.clux.dev", &DeleteParams::default()).await?;
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (lists pods)"]
async fn custom_serialized_objects_are_queryable_and_iterable() -> Result<(), Box<dyn std::error::Error>>
{
use crate::core::{
object::{HasSpec, HasStatus, NotUsed, Object},
ApiResource,
};
use k8s_openapi::api::core::v1::Pod;
#[derive(Clone, Deserialize, Debug)]
struct PodSpecSimple {
containers: Vec<ContainerSimple>,
}
#[derive(Clone, Deserialize, Debug)]
struct | ContainerSimple | identifier_name |
|
lib.rs | !(o.status.unwrap().replicas, 1, "scale replicas got patched");
let linked_replicas = o.spec.unwrap().replicas.unwrap();
assert_eq!(linked_replicas, 3, "patch_scale updates linked spec.replicas");
}
// cleanup
foos.delete_collection(&DeleteParams::default(), &Default::default())
.await?;
crds.delete("foos.clux.dev", &DeleteParams::default()).await?;
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (lists pods)"]
async fn custom_serialized_objects_are_queryable_and_iterable() -> Result<(), Box<dyn std::error::Error>>
{
use crate::core::{
object::{HasSpec, HasStatus, NotUsed, Object},
ApiResource,
};
use k8s_openapi::api::core::v1::Pod;
#[derive(Clone, Deserialize, Debug)]
struct PodSpecSimple {
containers: Vec<ContainerSimple>,
}
#[derive(Clone, Deserialize, Debug)]
struct ContainerSimple {
#[allow(dead_code)]
image: String,
}
type PodSimple = Object<PodSpecSimple, NotUsed>;
// use known type information from pod (can also use discovery for this)
let ar = ApiResource::erase::<Pod>(&());
let client = Client::try_default().await?;
let api: Api<PodSimple> = Api::default_namespaced_with(client, &ar);
let mut list = api.list(&Default::default()).await?;
// check we can mutably iterate over ObjectList
for pod in &mut list {
pod.spec_mut().containers = vec![];
*pod.status_mut() = None;
pod.annotations_mut()
.entry("kube-seen".to_string())
.or_insert_with(|| "yes".to_string());
pod.labels_mut()
.entry("kube.rs".to_string())
.or_insert_with(|| "hello".to_string());
pod.finalizers_mut().push("kube-finalizer".to_string());
pod.managed_fields_mut().clear();
// NB: we are **not** pushing these back upstream - (Api::apply or Api::replace needed for it)
}
// check we can iterate over ObjectList normally - and check the mutations worked
for pod in list {
assert!(pod.annotations().get("kube-seen").is_some());
assert!(pod.labels().get("kube.rs").is_some());
assert!(pod.finalizers().contains(&"kube-finalizer".to_string()));
assert!(pod.spec().containers.is_empty());
assert!(pod.managed_fields().is_empty());
}
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (fetches api resources, and lists all)"]
#[cfg(feature = "derive")]
async fn derived_resources_discoverable() -> Result<(), Box<dyn std::error::Error>> {
use crate::{
core::{DynamicObject, GroupVersion, GroupVersionKind},
discovery::{self, verbs, ApiGroup, Discovery, Scope},
runtime::wait::{await_condition, conditions, Condition},
};
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[kube(group = "kube.rs", version = "v1", kind = "TestCr", namespaced)]
#[kube(crates(kube_core = "crate::core"))] // for dev-dep test structure
struct TestCrSpec {}
let client = Client::try_default().await?;
// ensure the crd is installed
let crds: Api<CustomResourceDefinition> = Api::all(client.clone());
let ssapply = PatchParams::apply("kube").force();
crds.patch("testcrs.kube.rs", &ssapply, &Patch::Apply(TestCr::crd()))
.await?;
let establish = await_condition(crds.clone(), "testcrs.kube.rs", conditions::is_crd_established());
let crd = tokio::time::timeout(std::time::Duration::from_secs(10), establish).await??;
assert!(conditions::is_crd_established().matches_object(crd.as_ref()));
tokio::time::sleep(std::time::Duration::from_secs(2)).await; // Established condition is actually not enough for api discovery :(
// create partial information for it to discover
let gvk = GroupVersionKind::gvk("kube.rs", "v1", "TestCr");
let gv = GroupVersion::gv("kube.rs", "v1");
// discover by both (recommended kind on groupversion) and (pinned gvk) and they should equal
let apigroup = discovery::oneshot::pinned_group(&client, &gv).await?;
let (ar1, caps1) = apigroup.recommended_kind("TestCr").unwrap();
let (ar2, caps2) = discovery::pinned_kind(&client, &gvk).await?;
assert_eq!(caps1.operations.len(), caps2.operations.len(), "unequal caps");
assert_eq!(ar1, ar2, "unequal apiresource");
assert_eq!(DynamicObject::api_version(&ar2), "kube.rs/v1", "unequal dynver");
// run (almost) full discovery
let discovery = Discovery::new(client.clone())
// skip something in discovery (clux.dev crd being mutated in other tests)
.exclude(&["rbac.authorization.k8s.io", "clux.dev"])
.run()
.await?;
// check our custom resource first by resolving within groups
assert!(discovery.has_group("kube.rs"), "missing group kube.rs");
let (ar, _caps) = discovery.resolve_gvk(&gvk).unwrap();
assert_eq!(ar.group, gvk.group, "unexpected discovered group");
assert_eq!(ar.version, gvk.version, "unexpected discovered ver");
assert_eq!(ar.kind, gvk.kind, "unexpected discovered kind");
// check all non-excluded groups that are iterable
let mut groups = discovery.groups_alphabetical().into_iter();
let firstgroup = groups.next().unwrap();
assert_eq!(firstgroup.name(), ApiGroup::CORE_GROUP, "core not first");
for group in groups {
for (ar, caps) in group.recommended_resources() {
if !caps.supports_operation(verbs::LIST) {
continue;
}
let api: Api<DynamicObject> = if caps.scope == Scope::Namespaced {
Api::default_namespaced_with(client.clone(), &ar)
} else {
Api::all_with(client.clone(), &ar)
};
api.list(&Default::default()).await?;
}
}
// cleanup
crds.delete("testcrs.kube.rs", &DeleteParams::default()).await?;
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (will create await a pod)"]
#[cfg(feature = "runtime")]
async fn pod_can_await_conditions() -> Result<(), Box<dyn std::error::Error>> {
use crate::{
api::{DeleteParams, PostParams},
runtime::wait::{await_condition, conditions, delete::delete_and_finalize, Condition},
Api, Client,
};
use k8s_openapi::api::core::v1::Pod;
use std::time::Duration;
use tokio::time::timeout;
let client = Client::try_default().await?;
let pods: Api<Pod> = Api::default_namespaced(client);
// create busybox pod that's alive for at most 20s
let data: Pod = serde_json::from_value(serde_json::json!({
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "busybox-kube4",
"labels": { "app": "kube-rs-test" },
},
"spec": {
"terminationGracePeriodSeconds": 1,
"restartPolicy": "Never",
"containers": [{
"name": "busybox",
"image": "busybox:1.34.1",
"command": ["sh", "-c", "sleep 20"],
}],
}
}))?;
let pp = PostParams::default();
assert_eq!(
data.name_unchecked(),
pods.create(&pp, &data).await?.name_unchecked()
);
// Watch it phase for a few seconds
let is_running = await_condition(pods.clone(), "busybox-kube4", conditions::is_pod_running());
let _ = timeout(Duration::from_secs(15), is_running).await?;
// Verify we can get it
let pod = pods.get("busybox-kube4").await?;
assert_eq!(pod.spec.as_ref().unwrap().containers[0].name, "busybox");
// Wait for a more complicated condition: ContainersReady AND Initialized
// TODO: remove these once we can write these functions generically
fn is_each_container_ready() -> impl Condition<Pod> | {
|obj: Option<&Pod>| {
if let Some(o) = obj {
if let Some(s) = &o.status {
if let Some(conds) = &s.conditions {
if let Some(pcond) = conds.iter().find(|c| c.type_ == "ContainersReady") {
return pcond.status == "True";
}
}
}
}
false
}
} | identifier_body |
|
testone.py | "0" or h1str == 0:
h1str = clientlist[i].get_pk()['pk']
print("h1str", h1str)
pk = group_signature.pkGen(h1str)
print("pk---------------\n", pk)
if (group_signature.verifyUsk(usklist[i], vklist[i], pk, GID, UID)):
count = count + 1
else:
print("key is invalide\n\n")
usk = group_signature.uskGen(usklist, pk, GID, UID, L, k)
print("usk---------------\n", usk)
return pk, usk
def get_lam(sig):
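# Collect opening shares (ok1, ok2) from at least k authorities and combine
# them into the blinding factor lam = e(h1, g2)^k hidden in the signature's e3.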
okliststr = []
i = 0
for client in clientlist:
okstr = client.get_ok(str(sig['e1']), str(sig['e2']))
print(okstr)
okliststr.append(okstr)
i = i + 1
if i < k:
print("the number of ok is not enough\n")
return
lam = group_signature.open(okliststr, L, k)
return lam
def tx_build_broad(op, steemd_instance, wallet_instance, account):
tx = TransactionBuilder(steemd_instance=steemd_instance, wallet_instance=wallet_instance,
no_broadcast=False)
tx.appendOps(op)
tx.appendSigner(account, 'posting')
tx.sign()
# print("txsign",tx)
re = tx.broadcast()
return re
def tx_build(op, steemd_instance, wallet_instance, account):
tx = TransactionBuilder(steemd_instance=steemd_instance, wallet_instance=wallet_instance,
no_broadcast=False)
tx.appendOps(op)
tx.appendSigner(account, 'posting')
tx.sign()
# print("txsign",tx)
# re = tx.broadcast()
return tx
def annoy_commit(account, usk, pk, GID, UID, title="paper_title", body="paper_body", groupID="computer"):
annoy_author = 'nya'
# group signature ------ the title is required: hash the title, then sign the hash with usk
sig = group_signature.sign(title, usk, pk, GID, UID, groupID)
permlink = ''.join(random.choices(string.digits, k=7))
print("permlink is " + permlink)
op = operations.CommitPaper(
**{
"account": account,
"author": annoy_author,
"permlink": permlink,
"title": title,
"body": body,
"json_metadata": "",
"c0": str(sig['c0']),
"c5": str(sig['c5']),
"c6": str(sig['c6']),
"e1": str(sig['e1']),
"e2": str(sig['e2']),
"e3": str(sig['e3']),
"c": str(sig['c']),
"s1": str(sig['s1']),
"s2": str(sig['s2']),
"s3": str(sig['s3'])
}
)
print("commitop", op)
return op, sig, permlink
def open_op(account, sig, userID, permlink):
lam = get_lam(sig)
# E = (pk['n'] ** UID) * lam  # recompute the signature's e3 and check that it matches
op = operations.ApplyOpen(
**{
'account': account,
'author': userID,
'lambda': str(lam),
'permlink': permlink,
'json_metadata': ""
}
)
return op
def annoy_commit_tx(account, usk, pk, GID, UID, steemd_instance, wallet_instance, title="paper_title",
body="paper_body"):
commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID, title="paper_title", body="paper_body",
groupID="computer")
re = tx_build_broad(commitop, steemd_instance, wallet_instance, account)
print("commit-re", re)
return ssig, permlink
def open_tx(account, ssig, userID, permlink, steemd_instance, wallet_instance):
openop = open_op(account, ssig, userID, permlink)
re = tx_build_broad(openop, steemd_instance, wallet_instance, account)
print("open-re", re)
# concurrently generate transactions on a single node
def one_mul_annoy_tx(account, usk, pk, UID, steemd, wallet):
ssiglistone = []
permlinklistone = []
threads = []
for i in range(nodeTX):
t = MyThread(annoy_commit_tx, args=(account, usk, pk, GID, UID, steemd, wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
ssig, permlink = t.get_result()
ssiglistone.append(ssig)
permlinklistone.append(permlink)
return ssiglistone, permlinklistone
def one_mul_open_tx(account, ssiglistone, userID, permlinklistone, steemd, wallet):
threads = []
for i in range(nodeTX):
t = MyThread(open_tx,
args=(account, ssiglistone[i], userID, permlinklistone[i], steemd, wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
def mul_annoy_tx(usk, pk, UID):
ssiglist = []
permlinklist = []
threads = []
for i in range(n):
# t = MyThread(annoy_commit_tx, args=(accountlist[i], usk, pk, GID, UID, clientlist[i].steemd, clientlist[i].wallet))
t = MyThread(one_mul_annoy_tx,
args=(accountlist[i], usk, pk, UID, clientlist[i].steemd, clientlist[i].wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
ssig, permlink = t.get_result()
ssiglist.append(ssig)
permlinklist.append(permlink)
return ssiglist, permlinklist
# multiple nodes, each generating transactions concurrently
def mul_open_tx(ssiglist, permlinklist, userID):
threads = []
for i in range(n):
# t = MyThread(open_tx,
# args=(accountlist[i], ssiglist[i], userID, permlinklist[i], clientlist[i].steemd, clientlist[i].wallet))
t = MyThread(one_mul_open_tx,
args=(
accountlist[i], ssiglist[i], userID, permlinklist[i], clientlist[i].steemd, clientlist[i].wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
# for t in threads:
# t.get_result()
# only build the tx, do not broadcast it
def creat_commit_tx(account, usk, pk, GID, UID, steemd_instance, wallet_instance, title="paper_title",
body="paper_body"):
commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID, title, body, groupID="computer")
commit_tx = tx_build(commitop, steemd_instance, wallet_instance, account)
return ssig, permlink, commit_tx
def creat_num_commit_tx(num, account, usk, pk, GID, UID, steemd_instance, wallet_instance, ttitle="paper_title",
tbody="paper_body"):
ssiglist = []
permlinklist = []
txlist = []
threads = []
for i in range(num):
t = MyThread(creat_commit_tx, args=(account, usk, pk, GID, UID, steemd_instance, wallet_instance, ttitle,
tbody))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
ssig, permlink, commit_tx = t.get_result()
ssiglist.append(ssig)
permlinklist.append(permlink)
txlist.append(commit_tx)
return ssiglist, permlinklist, txlist
def creat_open_tx(account, ssig, userID, permlink, steemd_instance, wallet_instance):
openop = open_op(account, ssig, userID, permlink)
open_tx = tx_build(openop, steemd_instance, wallet_instance, account)
return open_tx
def creat_num_open_tx(num, account, ssiglist, userID, permlinklist, steemd_instance, wallet_instance):
opentxlist = []
threads = []
for i in range(num):
t = MyThread(creat_open_tx,
args=(account, ssiglist[i], userID, permlinklist[i], steemd_instance,
wallet_instance))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
opentx = t.get_result()
opentxlist. | append(op | conditional_block |
|
testone.py | )
b4 = self.group.gen1_0(1)
b5 = self.group.gen1_0(1)
r2 = self.group.random(ZR)
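# Threshold combination: raising each of the k key shares to its Lagrange
# coefficient L[i] and multiplying interpolates the dealers' secret at x = 0;
# r2 then re-randomizes the aggregated key.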
for i in range(k):
b0 = b0 * (usklist[i]['b0'] ** L[i])
b3 = b3 * (usklist[i]['b3'] ** L[i])
b4 = b4 * (usklist[i]['b4'] ** L[i])
b5 = b5 * (usklist[i]['b5'] ** L[i])
b0 = b0 * (pk['u0'] * (pk['u1'] ** GID) * (pk['u2'] ** UID)) ** r2
b3 = b3 * (pk['u3'] ** r2)
b4 = b4 * (pk['u4'] ** r2)
b5 = b5 * (pk['g'] ** r2)
usk = {'b0': b0, 'b3': b3, 'b4': b4, 'b5': b5}
t2 = time()
with open("extracttime.txt", 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
return usk
def LGen(self, n, k):
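# Lagrange coefficients for interpolating at x = 0 over share indices 1..k:
# L[i] = prod over j in 1..k, j != i+1 of j / (j - (i+1)); entries beyond the
# threshold k are never used by uskGen/open.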
L = []
I = self.group.random(ZR)
J = self.group.random(ZR)
for i in range(n):
L.append(self.group.random(ZR))
L[i].set(1)
I.set(i + 1)
for j in range(1, k + 1):
print(j)
J.set(j)
if (i + 1) != j:
L[i] = L[i] * ((J) / (J - I))
return L
def verifyUsk(self, usk, vk, pk, GID, UID):
g = pk['g']
g2 = pk['g2']
u0 = pk['u0']
u1 = pk['u1']
u2 = pk['u2']
u3 = pk['u3']
u4 = pk['u4']
b0 = usk['b0']
b5 = usk['b5']
b3 = usk['b3']
b4 = usk['b4']
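# A share is valid iff e(g, b0) = e(vk, g2) * e(b5, u0 * u1^GID * u2^UID) and
# b3, b4 are consistent with b5 -- this binds the share to (GID, UID).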
return (pair(g, b0) == pair(vk, g2) * pair(b5, u0) * pair(b5, u1 ** GID) * pair(b5, u2 ** UID)
and pair(g, b3) == pair(b5, u3)
and pair(g, b4) == pair(b5, u4))
def sign(self, title, usk, pk, GID, UID, groupID):
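# Sign H(title): re-randomize the member key with (r3, r4) into (c0, c5, c6),
# verifiably encrypt the signer identity as (e1, e2, e3) for later opening,
# and attach the Fiat-Shamir proof of knowledge produced below.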
t1 = time()
m = self.group.hash(title)
b0 = usk['b0']
b3 = usk['b3']
b4 = usk['b4']
b5 = usk['b5']
r4 = self.group.random(ZR)
r3 = self.group.random(ZR)
k = self.group.random(ZR)
c0 = b0 * (b3 ** m) * (b4 ** r4) * (
(pk['u0'] * (pk['u1'] ** GID) * (pk['u2'] ** UID) * (pk['u3'] ** m) * (pk['u4'] ** r4)) ** r3)
c5 = b5 * (pk['g'] ** r3)
c6 = (pk['u2'] ** UID) * (pk['u4'] ** r4)
e1 = pk['g'] ** k
e2 = (pk['u0'] * (pk['u1'] ** GID)) ** k
e3 = (pk['n'] ** UID) * (pair(pk['h1'], pk['g2']) ** k)
# generate the PoK: a Fiat-Shamir proof of knowledge of UID, r4 and k consistent with c6, (e1, e2) and e3
f = pk['u0'] * (pk['u1'] ** GID)
gp = pair(pk['h1'], pk['g2'])
k1 = self.group.random(ZR)
k2 = self.group.random(ZR)
k3 = self.group.random(ZR)
r1 = (pk['u2'] ** k1) * (pk['u4'] ** k2)
r2 = pk['g'] ** k3
r3 = f ** k3
t4 = (pk['n'] ** k1) * (gp ** k3)
hashstr = str(r1) + str(r2) + str(r3) + str(t4)
c = self.group.hash(hashstr)
s1 = k1 + c * UID
s2 = k2 + c * r4
s3 = k3 + c * k
signature = {'c0': c0, 'c5': c5, 'c6': c6, 'e1': e1, 'e2': e2, 'e3': e3, 'c': c, 's1': s1, 's2': s2, 's3': s3}
t2 = time()
with open("gssigntime.txt", 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
print("gs time", t2 - t1)
return signature
def open(self, okliststr, L, k):
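# Combine k opening shares with the Lagrange coefficients; ok1/ok2 rebuilds
# lam = e(h1, g2)^k, and the verifier then checks e3 == n^UID * lam to
# de-anonymize the signer.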
t1 = time()
oklist = []
for ok in okliststr:
oklist.append({'ok1': self.group.fromstr(ok['ok1'], 10, GT), 'ok2': self.group.fromstr(ok['ok2'], 10, GT)})
ok1 = self.group.gen1_0(1)
ok2 = self.group.gen1_0(1)
for i in range(k):
ok1 = ok1 * (oklist[i]['ok1'] ** L[i])
ok2 = ok2 * (oklist[i]['ok2'] ** L[i])
t2 = time()
with open("opentime.txt", 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
print("open time", t2 - t1)
return ok1 / ok2
def get_usk(userID, GID, UID, h1str="", count=0):
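# Fetch a partial user key and verification key from each of the n authorities,
# verify every share, then threshold-combine k of them into the signing key.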
pk = {}
for i in range(n):
vkliststr.append(clientlist[i].get_vk()['vk'])
vklist.append(group_signature.group.fromstr(vkliststr[i], 10, G1))
uskliststr.append(clientlist[i].user_extract(userID))
usklist.append({})
usklist[i]['b0'] = group_signature.group.fromstr(uskliststr[i]['b0'], 10, G2)
usklist[i]['b3'] = group_signature.group.fromstr(uskliststr[i]['b3'], 10, G2)
usklist[i]['b4'] = group_signature.group.fromstr(uskliststr[i]['b4'], 10, G2)
usklist[i]['b5'] = group_signature.group.fromstr(uskliststr[i]['b5'], 10, G1)
print(usklist[i])
if h1str == "" or h1str == "0" or h1str == 0:
h1str = clientlist[i].get_pk()['pk']
print("h1str", h1str)
pk = group_signature.pkGen(h1str)
print("pk---------------\n", pk)
if (group_signature.verifyUsk(usklist[i], vklist[i], pk, GID, UID)):
count = count + 1
else:
print("key is invalide\n\n")
usk = group_signature.uskGen(usklist, pk, GID, UID, L, k)
print("usk---------------\n", usk)
return pk, usk
def get_lam(sig):
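# Collect opening shares (ok1, ok2) from at least k authorities and combine
# them into the blinding factor lam = e(h1, g2)^k hidden in the signature's e3.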
okliststr = []
i = 0
for client in clientlist:
okstr = client.get_ok(str(sig['e1']), str(sig['e2']))
print(okstr)
okliststr.append(okstr)
i = i + 1
if i < k:
print("the number of ok is not enough\n")
return
lam = group_signature.open(okliststr, L, k)
return lam
def tx_build_broad(op, steemd_instance, wallet_instance, account):
tx = TransactionBuilder(steemd_instance=steemd_instance, wallet_instance=wallet_instance,
no_broadcast=False)
tx.appendOps(op)
tx.appendSigner(account, 'posting')
tx.sign()
# print("txsign",tx)
re = tx.broadcast()
return re
def tx_build(op, steemd_instance, wallet_instance, account):
tx = TransactionBuilder(steemd_instance=steemd_instance, wallet_instance=wallet_instance,
no_broadcast=False)
tx.appendOps(op)
tx.appendSigner(account, 'posting')
tx.sign()
# print("txsign",tx)
# re = tx.broadcast()
return tx
| random_line_split |
||
testone.py | 389375981104409894060310698729022957801238449570622103067828518416602275957863668289683360250722835022304456841105526036470008237775051984811323, 862537748555943361001122447731987661405436458862545177179548603003392540530328380518694788420155531238391922289886044667763424887444361610972254938158280]"
u4str = "[8157254219580822599577995921928211211847392705248772673869189421041858895589817404931780741226510985762564598862965174380020566416411083236239871342674775, 4736677719200783513058679582227494204159737596114643136852532046080608159561620208171676599501713934575216178076006396924589443776642926902969084668055006]"
hstr = "[6248393417805371388321299785844751688345516419281230263497475615452026459314582553252281068616984105757749673095320346188725995701858182333525688832492249, 351368339412205819108519989143352052898751906937356995136442397753142226531384069336237369861919799955237545207977716196031001184146017796598836939617335]"
nstr = "[75201312764006187596691102237923705656296213254701583615255122742135170369075831428394751330697143847448434841509551532135632624530360013837581615049543, 3886258599652934715331576083899336629981754505948456216299528998628273512432828729344158706718479567056972375128622026273382126529171409058157562418608963]"
g = self.group.fromstr(gstr, 10, G1)
g2 = self.group.fromstr(g2str, 10, G2)
u0 = self.group.fromstr(u0str, 10, G2)
u1 = self.group.fromstr(u1str, 10, G2)
u2 = self.group.fromstr(u2str, 10, G2)
u3 = self.group.fromstr(u3str, 10, G2)
u4 = self.group.fromstr(u4str, 10, G2)
h = self.group.fromstr(hstr, 10, G1)
n = self.group.fromstr(nstr, 10, GT)
h1 = self.group.fromstr(h1str, 10, G1)
pk = {'g': g, 'g2': g2, 'u0': u0, 'u1': u1, 'u2': u2, 'u3': u3, 'u4': u4, 'h': h, 'n': n, 'h1': h1}
return pk
def uskGen(self, usklist, pk, GID, UID, L, k):
t1 = time()
b0 = self.group.gen1_0(1)
b3 = self.group.gen1_0(1)
b4 = self.group.gen1_0(1)
b5 = self.group.gen1_0(1)
r2 = self.group.random(ZR)
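# Threshold combination: raising each of the k key shares to its Lagrange
# coefficient L[i] and multiplying interpolates the dealers' secret at x = 0;
# r2 then re-randomizes the aggregated key.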
for i in range(k):
b0 = b0 * (usklist[i]['b0'] ** L[i])
b3 = b3 * (usklist[i]['b3'] ** L[i])
b4 = b4 * (usklist[i]['b4'] ** L[i])
b5 = b5 * (usklist[i]['b5'] ** L[i])
b0 = b0 * (pk['u0'] * (pk['u1'] ** GID) * (pk['u2'] ** UID)) ** r2
b3 = b3 * (pk['u3'] ** r2)
b4 = b4 * (pk['u4'] ** r2)
b5 = b5 * (pk['g'] ** r2)
usk = {'b0': b0, 'b3': b3, 'b4': b4, 'b5': b5}
t2 = time()
with open("extracttime.txt", 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
return usk
def LGen(self, n, k):
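# Lagrange coefficients for interpolating at x = 0 over share indices 1..k:
# L[i] = prod over j in 1..k, j != i+1 of j / (j - (i+1)); entries beyond the
# threshold k are never used by uskGen/open.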
L = []
I = self.group.random(ZR)
J = self.group.random(ZR)
for i in range(n):
L.append(self.group.random(ZR))
L[i].set(1)
I.set(i + 1)
for j in range(1, k + 1):
print(j)
J.set(j)
if (i + 1) != j:
L[i] = L[i] * ((J) / (J - I))
return L
def | (self, usk, vk, pk, GID, UID):
g = pk['g']
g2 = pk['g2']
u0 = pk['u0']
u1 = pk['u1']
u2 = pk['u2']
u3 = pk['u3']
u4 = pk['u4']
b0 = usk['b0']
b5 | verifyUsk | identifier_name |
testone.py | ID)) ** k
e3 = (pk['n'] ** UID) * (pair(pk['h1'], pk['g2']) ** k)
# generate the PoK: a Fiat-Shamir proof of knowledge of UID, r4 and k consistent with c6, (e1, e2) and e3
f = pk['u0'] * (pk['u1'] ** GID)
gp = pair(pk['h1'], pk['g2'])
k1 = self.group.random(ZR)
k2 = self.group.random(ZR)
k3 = self.group.random(ZR)
r1 = (pk['u2'] ** k1) * (pk['u4'] ** k2)
r2 = pk['g'] ** k3
r3 = f ** k3
t4 = (pk['n'] ** k1) * (gp ** k3)
hashstr = str(r1) + str(r2) + str(r3) + str(t4)
c = self.group.hash(hashstr)
s1 = k1 + c * UID
s2 = k2 + c * r4
s3 = k3 + c * k
signature = {'c0': c0, 'c5': c5, 'c6': c6, 'e1': e1, 'e2': e2, 'e3': e3, 'c': c, 's1': s1, 's2': s2, 's3': s3}
t2 = time()
with open("gssigntime.txt", 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
print("gs time", t2 - t1)
return signature
def open(self, okliststr, L, k):
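# Combine k opening shares with the Lagrange coefficients; ok1/ok2 rebuilds
# lam = e(h1, g2)^k, and the verifier then checks e3 == n^UID * lam to
# de-anonymize the signer.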
t1 = time()
oklist = []
for ok in okliststr:
oklist.append({'ok1': self.group.fromstr(ok['ok1'], 10, GT), 'ok2': self.group.fromstr(ok['ok2'], 10, GT)})
ok1 = self.group.gen1_0(1)
ok2 = self.group.gen1_0(1)
for i in range(k):
ok1 = ok1 * (oklist[i]['ok1'] ** L[i])
ok2 = ok2 * (oklist[i]['ok2'] ** L[i])
t2 = time()
with open("opentime.txt", 'a') as f:
f.write(str(t2 - t1))
f.write('\n')
print("open time", t2 - t1)
return ok1 / ok2
def get_usk(userID, GID, UID, h1str="", count=0):
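# Fetch a partial user key and verification key from each of the n authorities,
# verify every share, then threshold-combine k of them into the signing key.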
pk = {}
for i in range(n):
vkliststr.append(clientlist[i].get_vk()['vk'])
vklist.append(group_signature.group.fromstr(vkliststr[i], 10, G1))
uskliststr.append(clientlist[i].user_extract(userID))
usklist.append({})
usklist[i]['b0'] = group_signature.group.fromstr(uskliststr[i]['b0'], 10, G2)
usklist[i]['b3'] = group_signature.group.fromstr(uskliststr[i]['b3'], 10, G2)
usklist[i]['b4'] = group_signature.group.fromstr(uskliststr[i]['b4'], 10, G2)
usklist[i]['b5'] = group_signature.group.fromstr(uskliststr[i]['b5'], 10, G1)
print(usklist[i])
if h1str == "" or h1str == "0" or h1str == 0:
h1str = clientlist[i].get_pk()['pk']
print("h1str", h1str)
pk = group_signature.pkGen(h1str)
print("pk---------------\n", pk)
if (group_signature.verifyUsk(usklist[i], vklist[i], pk, GID, UID)):
count = count + 1
else:
print("key is invalide\n\n")
usk = group_signature.uskGen(usklist, pk, GID, UID, L, k)
print("usk---------------\n", usk)
return pk, usk
def get_lam(sig):
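# Collect opening shares (ok1, ok2) from at least k authorities and combine
# them into the blinding factor lam = e(h1, g2)^k hidden in the signature's e3.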
okliststr = []
i = 0
for client in clientlist:
okstr = client.get_ok(str(sig['e1']), str(sig['e2']))
print(okstr)
okliststr.append(okstr)
i = i + 1
if i < k:
print("the number of ok is not enough\n")
return
lam = group_signature.open(okliststr, L, k)
return lam
def tx_build_broad(op, steemd_instance, wallet_instance, account):
tx = TransactionBuilder(steemd_instance=steemd_instance, wallet_instance=wallet_instance,
no_broadcast=False)
tx.appendOps(op)
tx.appendSigner(account, 'posting')
tx.sign()
# print("txsign",tx)
re = tx.broadcast()
return re
def tx_build(op, steemd_instance, wallet_instance, account):
tx = TransactionBuilder(steemd_instance=steemd_instance, wallet_instance=wallet_instance,
no_broadcast=False)
tx.appendOps(op)
tx.appendSigner(account, 'posting')
tx.sign()
# print("txsign",tx)
# re = tx.broadcast()
return tx
def annoy_commit(account, usk, pk, GID, UID, title="paper_title", body="paper_body", groupID="computer"):
annoy_author = 'nya'
# group signature ------ the title is required: hash the title, then sign the hash with usk
sig = group_signature.sign(title, usk, pk, GID, UID, groupID)
permlink = ''.join(random.choices(string.digits, k=7))
print("permlink is " + permlink)
op = operations.CommitPaper(
**{
"account": account,
"author": annoy_author,
"permlink": permlink,
"title": title,
"body": body,
"json_metadata": "",
"c0": str(sig['c0']),
"c5": str(sig['c5']),
"c6": str(sig['c6']),
"e1": str(sig['e1']),
"e2": str(sig['e2']),
"e3": str(sig['e3']),
"c": str(sig['c']),
"s1": str(sig['s1']),
"s2": str(sig['s2']),
"s3": str(sig['s3'])
}
)
print("commitop", op)
return op, sig, permlink
def open_op(account, sig, userID, permlink):
lam = get_lam(sig)
# E = (pk['n'] ** UID) * lam  # recompute the signature's e3 and check that it matches
op = operations.ApplyOpen(
**{
'account': account,
'author': userID,
'lambda': str(lam),
'permlink': permlink,
'json_metadata': ""
}
)
return op
def annoy_commit_tx(account, usk, pk, GID, UID, steemd_instance, wallet_instance, title="paper_title",
body="paper_body"):
commitop, ssig, permlink = annoy_commit(account, usk, pk, GID, UID, title="paper_title", body="paper_body",
groupID="computer")
re = tx_build_broad(commitop, steemd_instance, wallet_instance, account)
print("commit-re", re)
return ssig, permlink
def open_tx(account, ssig, userID, permlink, steemd_instance, wallet_instance):
openop = open_op(account, ssig, userID, permlink)
re = tx_build_broad(openop, steemd_instance, wallet_instance, account)
print("open-re", re)
# concurrently generate transactions on a single node
def one_mul_annoy_tx(account, usk, pk, UID, steemd, wallet):
ssiglistone = []
permlinklistone = []
threads = []
for i in range(nodeTX):
t = MyThread(annoy_commit_tx, args=(account, usk, pk, GID, UID, steemd, wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
ssig, permlink = t.get_result()
ssiglistone.append(ssig)
permlinklistone.append(permlink)
return ssiglistone, permlinklistone
def one_mul_open_tx(account, ssiglistone, userID, permlinklistone, steemd, wallet):
threads = []
for i in range(nodeTX):
t = MyThread(open_tx,
| args=(account, ssiglistone[i], userID, permlinklistone[i], steemd, wallet))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
def mul_annoy_tx(usk, pk, UID):
ssiglist = []
permlinklist = []
threads = | identifier_body |
|
app.go | ) User {
session := getSession(r)
uid, ok := session.Values["user_id"]
if !ok || uid == nil {
return User{}
}
u := User{}
err := db.Get(&u, "SELECT * FROM `users` WHERE `id` = ?", uid)
if err != nil {
return User{}
}
return u
}
func getFlash(w http.ResponseWriter, r *http.Request, key string) string {
session := getSession(r)
value, ok := session.Values[key]
if !ok || value == nil {
return ""
} else {
delete(session.Values, key)
session.Save(r, w)
return value.(string)
}
}
func makePosts(results []Post, csrfToken string, allComments bool) ([]Post, error) {
var posts []Post
for _, p := range results {
err := db.Get(&p.CommentCount, "SELECT COUNT(*) AS `count` FROM `comments` WHERE `post_id` = ?", p.ID)
if err != nil {
return nil, err
}
query := "SELECT * FROM `comments` WHERE `post_id` = ? ORDER BY `created_at` DESC"
if !allComments {
query += " LIMIT 3"
}
var comments []Comment
err = db.Select(&comments, query, p.ID)
if err != nil {
return nil, err
}
for i := 0; i < len(comments); i++ {
err := db.Get(&comments[i].User, "SELECT * FROM `users` WHERE `id` = ?", comments[i].UserID)
if err != nil {
return nil, err
}
}
// reverse
for i, j := 0, len(comments)-1; i < j; i, j = i+1, j-1 {
comments[i], comments[j] = comments[j], comments[i]
}
p.Comments = comments
err = db.Get(&p.User, "SELECT * FROM `users` WHERE `id` = ?", p.UserID)
if err != nil {
return nil, err
}
p.CSRFToken = csrfToken
if p.User.DelFlg == 0 {
posts = append(posts, p)
}
if len(posts) >= postsPerPage {
break
}
}
return posts, nil
}
func imageURL(p Post) string {
ext := ""
if p.Mime == "image/jpeg" {
ext = ".jpg"
} else if p.Mime == "image/png" {
ext = ".png"
} else if p.Mime == "image/gif" {
ext = ".gif"
}
return "/image/" + strconv.Itoa(p.ID) + ext
}
func isLogin(u User) bool {
return u.ID != 0
}
func getCSRFToken(r *http.Request) string {
session := getSession(r)
csrfToken, ok := session.Values["csrf_token"]
if !ok {
return ""
}
return csrfToken.(string)
}
func secureRandomStr(b int) string {
k := make([]byte, b)
if _, err := crand.Read(k); err != nil {
panic(err)
}
return fmt.Sprintf("%x", k)
}
func getTemplPath(filename string) string {
return path.Join("templates", filename)
}
func getInitialize(w http.ResponseWriter, r *http.Request) {
dbInitialize()
w.Wri | p.StatusOK)
}
func getLogin(w http.ResponseWriter, r *http.Request) {
me := getSessionUser(r)
if isLogin(me) {
http.Redirect(w, r, "/", http.StatusFound)
return
}
template.Must(template.ParseFiles(
getTemplPath("layout.html"),
getTemplPath("login.html")),
).Execute(w, struct {
Me User
Flash string
}{me, getFlash(w, r, "notice")})
}
func postLogin(w http.ResponseWriter, r *http.Request) {
if isLogin(getSessionUser(r)) {
http.Redirect(w, r, "/", http.StatusFound)
return
}
u := tryLogin(r.FormValue("account_name"), r.FormValue("password"))
if u != nil {
session := getSession(r)
session.Values["user_id"] = u.ID
session.Values["csrf_token"] = secureRandomStr(16)
session.Save(r, w)
http.Redirect(w, r, "/", http.StatusFound)
} else {
session := getSession(r)
session.Values["notice"] = "アカウント名かパスワードが間違っています"
session.Save(r, w)
http.Redirect(w, r, "/login", http.StatusFound)
}
}
func getRegister(w http.ResponseWriter, r *http.Request) {
if isLogin(getSessionUser(r)) {
http.Redirect(w, r, "/", http.StatusFound)
return
}
template.Must(template.ParseFiles(
getTemplPath("layout.html"),
getTemplPath("register.html")),
).Execute(w, struct {
Me User
Flash string
}{User{}, getFlash(w, r, "notice")})
}
func postRegister(w http.ResponseWriter, r *http.Request) {
if isLogin(getSessionUser(r)) {
http.Redirect(w, r, "/", http.StatusFound)
return
}
accountName, password := r.FormValue("account_name"), r.FormValue("password")
validated := validateUser(accountName, password)
if !validated {
session := getSession(r)
session.Values["notice"] = "アカウント名は3文字以上、パスワードは6文字以上である必要があります"
session.Save(r, w)
http.Redirect(w, r, "/register", http.StatusFound)
return
}
exists := 0
// ユーザーが存在しない場合はエラーになるのでエラーチェックはしない
db.Get(&exists, "SELECT 1 FROM users WHERE `account_name` = ?", accountName)
if exists == 1 {
session := getSession(r)
session.Values["notice"] = "アカウント名がすでに使われています"
session.Save(r, w)
http.Redirect(w, r, "/register", http.StatusFound)
return
}
query := "INSERT INTO `users` (`account_name`, `passhash`) VALUES (?,?)"
result, err := db.Exec(query, accountName, calculatePasshash(accountName, password))
if err != nil {
log.Print(err)
return
}
session := getSession(r)
uid, err := result.LastInsertId()
if err != nil {
log.Print(err)
return
}
session.Values["user_id"] = uid
session.Values["csrf_token"] = secureRandomStr(16)
session.Save(r, w)
http.Redirect(w, r, "/", http.StatusFound)
}
func getLogout(w http.ResponseWriter, r *http.Request) {
session := getSession(r)
delete(session.Values, "user_id")
session.Options = &sessions.Options{MaxAge: -1}
session.Save(r, w)
http.Redirect(w, r, "/", http.StatusFound)
}
func getIndex(w http.ResponseWriter, r *http.Request) {
me := getSessionUser(r)
results := []Post{}
err := db.Select(&results, "SELECT `id`, `user_id`, `body`, `mime`, `created_at` FROM `posts` ORDER BY `created_at` DESC")
if err != nil {
log.Print(err)
return
}
posts, err := makePosts(results, getCSRFToken(r), false)
if err != nil {
log.Print(err)
return
}
fmap := template.FuncMap{
"imageURL": imageURL,
}
template.Must(template.New("layout.html").Funcs(fmap).ParseFiles(
getTemplPath("layout.html"),
getTemplPath("index.html"),
getTemplPath("posts.html"),
getTemplPath("post.html"),
)).Execute(w, struct {
Posts []Post
Me User
CSRFToken string
Flash string
}{posts, me, getCSRFToken(r), getFlash(w, r, "notice")})
}
func getAccountName(w http.ResponseWriter, r *http.Request) {
accountName := chi.URLParam(r, "accountName")
user := User{}
err := db.Get(&user, "SELECT * FROM `users` WHERE `account_name` = ? AND `del_flg` = 0", accountName)
if err != nil {
log.Print(err)
return
}
if user.ID == 0 {
w.WriteHeader(http.StatusNotFound)
return
}
results := []Post{}
err = db.Select(&results, "SELECT `id`, `user_id`, `body`, `mime`, `created_at` FROM `posts` WHERE `user_id` = ? ORDER BY `created_at` DESC", user.ID)
if err != nil {
log.Print(err)
return
}
posts, err := makePosts(results, getCSRFToken(r), false)
if err != nil {
log.Print(err)
return
}
commentCount := 0
err = db.Get(&commentCount, "SELECT COUNT(*) AS count FROM `comments` WHERE `user_id` = ?", user.ID)
if err != nil {
log.Print(err)
return
}
postIDs := []int{}
err = db.Select(&postIDs, "SELECT `id` FROM `posts` WHERE `user_id` = ?", user.ID)
if err != nil | teHeader(htt | identifier_name |
app.go | _id` = ?", p.ID)
if err != nil {
return nil, err
}
query := "SELECT * FROM `comments` WHERE `post_id` = ? ORDER BY `created_at` DESC"
if !allComments {
query += " LIMIT 3"
}
var comments []Comment
err = db.Select(&comments, query, p.ID)
if err != nil {
return nil, err
}
for i := 0; i < len(comments); i++ {
err := db.Get(&comments[i].User, "SELECT * FROM `users` WHERE `id` = ?", comments[i].UserID)
if err != nil {
return nil, err
}
}
// reverse
for i, j := 0, len(comments)-1; i < j; i, j = i+1, j-1 {
comments[i], comments[j] = comments[j], comments[i]
}
p.Comments = comments
err = db.Get(&p.User, "SELECT * FROM `users` WHERE `id` = ?", p.UserID)
if err != nil {
return nil, err
}
p.CSRFToken = csrfToken
if p.User.DelFlg == 0 {
posts = append(posts, p)
}
if len(posts) >= postsPerPage {
break
}
}
return posts, nil
}
func imageURL(p Post) string {
ext := ""
if p.Mime == "image/jpeg" {
ext = ".jpg"
} else if p.Mime == "image/png" {
ext = ".png"
} else if p.Mime == "image/gif" {
ext = ".gif"
}
return "/image/" + strconv.Itoa(p.ID) + ext
}
func isLogin(u User) bool {
return u.ID != 0
}
func getCSRFToken(r *http.Request) string {
session := getSession(r)
csrfToken, ok := session.Values["csrf_token"]
if !ok {
return ""
}
return csrfToken.(string)
}
func secureRandomStr(b int) string {
k := make([]byte, b)
if _, err := crand.Read(k); err != nil {
panic(err)
}
return fmt.Sprintf("%x", k)
}
func getTemplPath(filename string) string {
return path.Join("templates", filename)
}
func getInitialize(w http.ResponseWriter, r *http.Request) {
dbInitialize()
w.WriteHeader(http.StatusOK)
}
func getLogin(w http.ResponseWriter, r *http.Request) {
me := getSessionUser(r)
if isLogin(me) {
http.Redirect(w, r, "/", http.StatusFound)
return
}
template.Must(template.ParseFiles(
getTemplPath("layout.html"),
getTemplPath("login.html")),
).Execute(w, struct {
Me User
Flash string
}{me, getFlash(w, r, "notice")})
}
func postLogin(w http.ResponseWriter, r *http.Request) {
if isLogin(getSessionUser(r)) {
http.Redirect(w, r, "/", http.StatusFound)
return
}
u := tryLogin(r.FormValue("account_name"), r.FormValue("password"))
if u != nil {
session := getSession(r)
session.Values["user_id"] = u.ID
session.Values["csrf_token"] = secureRandomStr(16)
session.Save(r, w)
http.Redirect(w, r, "/", http.StatusFound)
} else {
session := getSession(r)
session.Values["notice"] = "アカウント名かパスワードが間違っています"
session.Save(r, w)
http.Redirect(w, r, "/login", http.StatusFound)
}
}
func getRegister(w http.ResponseWriter, r *http.Request) {
if isLogin(getSessionUser(r)) {
http.Redirect(w, r, "/", http.StatusFound)
return
}
template.Must(template.ParseFiles(
getTemplPath("layout.html"),
getTemplPath("register.html")),
).Execute(w, struct {
Me User
Flash string
}{User{}, getFlash(w, r, "notice")})
}
func postRegister(w http.ResponseWriter, r *http.Request) {
if isLogin(getSessionUser(r)) {
http.Redirect(w, r, "/", http.StatusFound)
return
}
accountName, password := r.FormValue("account_name"), r.FormValue("password")
validated := validateUser(accountName, password)
if !validated {
session := getSession(r)
session.Values["notice"] = "アカウント名は3文字以上、パスワードは6文字以上である必要があります"
session.Save(r, w)
http.Redirect(w, r, "/register", http.StatusFound)
return
}
exists := 0
// ユーザーが存在しない場合はエラーになるのでエラーチェックはしない
db.Get(&exists, "SELECT 1 FROM users WHERE `account_name` = ?", accountName)
if exists == 1 {
session := getSession(r)
session.Values["notice"] = "アカウント名がすでに使われています"
session.Save(r, w)
http.Redirect(w, r, "/register", http.StatusFound)
return
}
query := "INSERT INTO `users` (`account_name`, `passhash`) VALUES (?,?)"
result, err := db.Exec(query, accountName, calculatePasshash(accountName, password))
if err != nil {
log.Print(err)
return
}
session := getSession(r)
uid, err := result.LastInsertId()
if err != nil {
log.Print(err)
return
}
session.Values["user_id"] = uid
session.Values["csrf_token"] = secureRandomStr(16)
session.Save(r, w)
http.Redirect(w, r, "/", http.StatusFound)
}
func getLogout(w http.ResponseWriter, r *http.Request) {
session := getSession(r)
delete(session.Values, "user_id")
session.Options = &sessions.Options{MaxAge: -1}
session.Save(r, w)
http.Redirect(w, r, "/", http.StatusFound)
}
func getIndex(w http.ResponseWriter, r *http.Request) {
me := getSessionUser(r)
results := []Post{}
err := db.Select(&results, "SELECT `id`, `user_id`, `body`, `mime`, `created_at` FROM `posts` ORDER BY `created_at` DESC")
if err != nil {
log.Print(err)
return
}
posts, err := makePosts(results, getCSRFToken(r), false)
if err != nil {
log.Print(err)
return
}
fmap := template.FuncMap{
"imageURL": imageURL,
}
template.Must(template.New("layout.html").Funcs(fmap).ParseFiles(
getTemplPath("layout.html"),
getTemplPath("index.html"),
getTemplPath("posts.html"),
getTemplPath("post.html"),
)).Execute(w, struct {
Posts []Post
Me User
CSRFToken string
Flash string
}{posts, me, getCSRFToken(r), getFlash(w, r, "notice")})
}
func getAccountName(w http.ResponseWriter, r *http.Request) {
accountName := chi.URLParam(r, "accountName")
user := User{}
err := db.Get(&user, "SELECT * FROM `users` WHERE `account_name` = ? AND `del_flg` = 0", accountName)
if err != nil {
log.Print(err)
return
}
if user.ID == 0 {
w.WriteHeader(http.StatusNotFound)
return
}
results := []Post{}
err = db.Select(&results, "SELECT `id`, `user_id`, `body`, `mime`, `created_at` FROM `posts` WHERE `user_id` = ? ORDER BY `created_at` DESC", user.ID)
if err != nil {
log.Print(err)
return
}
posts, err := makePosts(results, getCSRFToken(r), false)
if err != nil {
log.Print(err)
return
}
commentCount := 0
err = db.Get(&commentCount, "SELECT COUNT(*) AS count FROM `comments` WHERE `user_id` = ?", user.ID)
if err != nil {
log.Print(err)
return
}
postIDs := []int{}
err = db.Select(&postIDs, "SELECT `id` FROM `posts` WHERE `user_id` = ?", user.ID)
if err != nil {
log.Print(err)
return
}
postCount := len(postIDs)
commentedCount := 0
if postCount > 0 {
s := []string{}
for range postIDs {
s = append(s, "?")
}
placeholder := strings.Join(s, ", ")
// convert []int -> []interface{}
args := make([]interface{}, len(postIDs))
for i, v := range postIDs {
args[i] = v
}
err = db.Get(&commentedCount, "SELECT COUNT(*) AS count FROM `comments` WHERE `post_id` IN ("+placeholder+")", args...)
if err != nil {
log.Print(err)
return
}
}
me := getSessionUser(r)
fmap := template.FuncMap{
"imageURL": imageURL,
}
template.Must(template.New("layout.html").Funcs(fmap).ParseFiles(
getTemplPath("layout.html"), | random_line_split |
||
app.go | notice"] = "アカウント名かパスワードが間違っています"
session.Save(r, w)
http.Redirect(w, r, "/login", http.StatusFound)
}
}
func getRegister(w http.ResponseWriter, r *http.Request) {
if isLogin(getSessionUser(r)) {
http.Redirect(w, r, "/", http.StatusFound)
return
}
template.Must(template.ParseFiles(
getTemplPath("layout.html"),
getTemplPath("register.html")),
).Execute(w, struct {
Me User
Flash string
}{User{}, getFlash(w, r, "notice")})
}
func postRegister(w http.ResponseWriter, r *http.Request) {
if isLogin(getSessionUser(r)) {
http.Redirect(w, r, "/", http.StatusFound)
return
}
accountName, password := r.FormValue("account_name"), r.FormValue("password")
validated := validateUser(accountName, password)
if !validated {
session := getSession(r)
session.Values["notice"] = "アカウント名は3文字以上、パスワードは6文字以上である必要があります"
session.Save(r, w)
http.Redirect(w, r, "/register", http.StatusFound)
return
}
exists := 0
// ユーザーが存在しない場合はエラーになるのでエラーチェックはしない
db.Get(&exists, "SELECT 1 FROM users WHERE `account_name` = ?", accountName)
if exists == 1 {
session := getSession(r)
session.Values["notice"] = "アカウント名がすでに使われています"
session.Save(r, w)
http.Redirect(w, r, "/register", http.StatusFound)
return
}
query := "INSERT INTO `users` (`account_name`, `passhash`) VALUES (?,?)"
result, err := db.Exec(query, accountName, calculatePasshash(accountName, password))
if err != nil {
log.Print(err)
return
}
session := getSession(r)
uid, err := result.LastInsertId()
if err != nil {
log.Print(err)
return
}
session.Values["user_id"] = uid
session.Values["csrf_token"] = secureRandomStr(16)
session.Save(r, w)
http.Redirect(w, r, "/", http.StatusFound)
}
func getLogout(w http.ResponseWriter, r *http.Request) {
session := getSession(r)
delete(session.Values, "user_id")
session.Options = &sessions.Options{MaxAge: -1}
session.Save(r, w)
http.Redirect(w, r, "/", http.StatusFound)
}
func getIndex(w http.ResponseWriter, r *http.Request) {
me := getSessionUser(r)
results := []Post{}
err := db.Select(&results, "SELECT `id`, `user_id`, `body`, `mime`, `created_at` FROM `posts` ORDER BY `created_at` DESC")
if err != nil {
log.Print(err)
return
}
posts, err := makePosts(results, getCSRFToken(r), false)
if err != nil {
log.Print(err)
return
}
fmap := template.FuncMap{
"imageURL": imageURL,
}
template.Must(template.New("layout.html").Funcs(fmap).ParseFiles(
getTemplPath("layout.html"),
getTemplPath("index.html"),
getTemplPath("posts.html"),
getTemplPath("post.html"),
)).Execute(w, struct {
Posts []Post
Me User
CSRFToken string
Flash string
}{posts, me, getCSRFToken(r), getFlash(w, r, "notice")})
}
func getAccountName(w http.ResponseWriter, r *http.Request) {
accountName := chi.URLParam(r, "accountName")
user := User{}
err := db.Get(&user, "SELECT * FROM `users` WHERE `account_name` = ? AND `del_flg` = 0", accountName)
if err != nil {
log.Print(err)
return
}
if user.ID == 0 {
w.WriteHeader(http.StatusNotFound)
return
}
results := []Post{}
err = db.Select(&results, "SELECT `id`, `user_id`, `body`, `mime`, `created_at` FROM `posts` WHERE `user_id` = ? ORDER BY `created_at` DESC", user.ID)
if err != nil {
log.Print(err)
return
}
posts, err := makePosts(results, getCSRFToken(r), false)
if err != nil {
log.Print(err)
return
}
commentCount := 0
err = db.Get(&commentCount, "SELECT COUNT(*) AS count FROM `comments` WHERE `user_id` = ?", user.ID)
if err != nil {
log.Print(err)
return
}
postIDs := []int{}
err = db.Select(&postIDs, "SELECT `id` FROM `posts` WHERE `user_id` = ?", user.ID)
if err != nil {
log.Print(err)
return
}
postCount := len(postIDs)
commentedCount := 0
if postCount > 0 {
s := []string{}
for range postIDs {
s = append(s, "?")
}
placeholder := strings.Join(s, ", ")
// convert []int -> []interface{}
args := make([]interface{}, len(postIDs))
for i, v := range postIDs {
args[i] = v
}
err = db.Get(&commentedCount, "SELECT COUNT(*) AS count FROM `comments` WHERE `post_id` IN ("+placeholder+")", args...)
if err != nil {
log.Print(err)
return
}
}
me := getSessionUser(r)
fmap := template.FuncMap{
"imageURL": imageURL,
}
template.Must(template.New("layout.html").Funcs(fmap).ParseFiles(
getTemplPath("layout.html"),
getTemplPath("user.html"),
getTemplPath("posts.html"),
getTemplPath("post.html"),
)).Execute(w, struct {
Posts []Post
User User
PostCount int
CommentCount int
CommentedCount int
Me User
}{posts, user, postCount, commentCount, commentedCount, me})
}
func getPosts(w http.ResponseWriter, r *http.Request) {
m, err := url.ParseQuery(r.URL.RawQuery)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
log.Print(err)
return
}
maxCreatedAt := m.Get("max_created_at")
if maxCreatedAt == "" {
return
}
t, err := time.Parse(ISO8601Format, maxCreatedAt)
if err != nil {
log.Print(err)
return
}
results := []Post{}
err = db.Select(&results, "SELECT `id`, `user_id`, `body`, `mime`, `created_at` FROM `posts` WHERE `created_at` <= ? ORDER BY `created_at` DESC", t.Format(ISO8601Format))
if err != nil {
log.Print(err)
return
}
posts, err := makePosts(results, getCSRFToken(r), false)
if err != nil {
log.Print(err)
return
}
if len(posts) == 0 {
w.WriteHeader(http.StatusNotFound)
return
}
fmap := template.FuncMap{
"imageURL": imageURL,
}
template.Must(template.New("posts.html").Funcs(fmap).ParseFiles(
getTemplPath("posts.html"),
getTemplPath("post.html"),
)).Execute(w, posts)
}
func getPostsID(w http.ResponseWriter, r *http.Request) {
pidStr := chi.URLParam(r, "id")
pid, err := strconv.Atoi(pidStr)
if err != nil {
w.WriteHeader(http.StatusNotFound)
return
}
results := []Post{}
err = db.Select(&results, "SELECT * FROM `posts` WHERE `id` = ?", pid)
if err != nil {
log.Print(err)
return
}
posts, err := makePosts(results, getCSRFToken(r), true)
if err != nil {
log.Print(err)
return
}
if len(posts) == 0 {
w.WriteHeader(http.StatusNotFound)
return
}
p := posts[0]
me := getSessionUser(r)
fmap := template.FuncMap{
"imageURL": imageURL,
}
template.Must(template.New("layout.html").Funcs(fmap).ParseFiles(
getTemplPath("layout.html"),
getTemplPath("post_id.html"),
getTemplPath("post.html"),
)).Execute(w, struct {
Post Post
Me User
}{p, me})
}
func postIndex(w http.ResponseWriter, r *http.Request) {
me := getSessionUser(r)
if !isLogin(me) {
http.Redirect(w, r, "/login", http.StatusFound)
return
}
if r.FormValue("csrf_token") != getCSRFToken(r) {
w.WriteHeader(http.StatusUnprocessableEntity)
return
}
file, header, err := r.FormFile("file")
if err != nil {
session := getSession(r)
session.Values["notice"] = "画像が必須です"
session.Save(r, w)
http.Redirect(w, r, "/", http.StatusFound)
| return
}
mime := ""
if file != nil {
// 投稿のContent-Type | conditional_block |
|
app.go | _at` FROM `posts` ORDER BY `created_at` DESC")
if err != nil {
log.Print(err)
return
}
posts, err := makePosts(results, getCSRFToken(r), false)
if err != nil {
log.Print(err)
return
}
fmap := template.FuncMap{
"imageURL": imageURL,
}
template.Must(template.New("layout.html").Funcs(fmap).ParseFiles(
getTemplPath("layout.html"),
getTemplPath("index.html"),
getTemplPath("posts.html"),
getTemplPath("post.html"),
)).Execute(w, struct {
Posts []Post
Me User
CSRFToken string
Flash string
}{posts, me, getCSRFToken(r), getFlash(w, r, "notice")})
}
func getAccountName(w http.ResponseWriter, r *http.Request) {
accountName := chi.URLParam(r, "accountName")
user := User{}
err := db.Get(&user, "SELECT * FROM `users` WHERE `account_name` = ? AND `del_flg` = 0", accountName)
if err != nil {
log.Print(err)
return
}
if user.ID == 0 {
w.WriteHeader(http.StatusNotFound)
return
}
results := []Post{}
err = db.Select(&results, "SELECT `id`, `user_id`, `body`, `mime`, `created_at` FROM `posts` WHERE `user_id` = ? ORDER BY `created_at` DESC", user.ID)
if err != nil {
log.Print(err)
return
}
posts, err := makePosts(results, getCSRFToken(r), false)
if err != nil {
log.Print(err)
return
}
commentCount := 0
err = db.Get(&commentCount, "SELECT COUNT(*) AS count FROM `comments` WHERE `user_id` = ?", user.ID)
if err != nil {
log.Print(err)
return
}
postIDs := []int{}
err = db.Select(&postIDs, "SELECT `id` FROM `posts` WHERE `user_id` = ?", user.ID)
if err != nil {
log.Print(err)
return
}
postCount := len(postIDs)
commentedCount := 0
if postCount > 0 {
s := []string{}
for range postIDs {
s = append(s, "?")
}
placeholder := strings.Join(s, ", ")
// convert []int -> []interface{}
args := make([]interface{}, len(postIDs))
for i, v := range postIDs {
args[i] = v
}
err = db.Get(&commentedCount, "SELECT COUNT(*) AS count FROM `comments` WHERE `post_id` IN ("+placeholder+")", args...)
if err != nil {
log.Print(err)
return
}
}
me := getSessionUser(r)
fmap := template.FuncMap{
"imageURL": imageURL,
}
template.Must(template.New("layout.html").Funcs(fmap).ParseFiles(
getTemplPath("layout.html"),
getTemplPath("user.html"),
getTemplPath("posts.html"),
getTemplPath("post.html"),
)).Execute(w, struct {
Posts []Post
User User
PostCount int
CommentCount int
CommentedCount int
Me User
}{posts, user, postCount, commentCount, commentedCount, me})
}
func getPosts(w http.ResponseWriter, r *http.Request) {
m, err := url.ParseQuery(r.URL.RawQuery)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
log.Print(err)
return
}
maxCreatedAt := m.Get("max_created_at")
if maxCreatedAt == "" {
return
}
t, err := time.Parse(ISO8601Format, maxCreatedAt)
if err != nil {
log.Print(err)
return
}
results := []Post{}
err = db.Select(&results, "SELECT `id`, `user_id`, `body`, `mime`, `created_at` FROM `posts` WHERE `created_at` <= ? ORDER BY `created_at` DESC", t.Format(ISO8601Format))
if err != nil {
log.Print(err)
return
}
posts, err := makePosts(results, getCSRFToken(r), false)
if err != nil {
log.Print(err)
return
}
if len(posts) == 0 {
w.WriteHeader(http.StatusNotFound)
return
}
fmap := template.FuncMap{
"imageURL": imageURL,
}
template.Must(template.New("posts.html").Funcs(fmap).ParseFiles(
getTemplPath("posts.html"),
getTemplPath("post.html"),
)).Execute(w, posts)
}
func getPostsID(w http.ResponseWriter, r *http.Request) {
pidStr := chi.URLParam(r, "id")
pid, err := strconv.Atoi(pidStr)
if err != nil {
w.WriteHeader(http.StatusNotFound)
return
}
results := []Post{}
err = db.Select(&results, "SELECT * FROM `posts` WHERE `id` = ?", pid)
if err != nil {
log.Print(err)
return
}
posts, err := makePosts(results, getCSRFToken(r), true)
if err != nil {
log.Print(err)
return
}
if len(posts) == 0 {
w.WriteHeader(http.StatusNotFound)
return
}
p := posts[0]
me := getSessionUser(r)
fmap := template.FuncMap{
"imageURL": imageURL,
}
template.Must(template.New("layout.html").Funcs(fmap).ParseFiles(
getTemplPath("layout.html"),
getTemplPath("post_id.html"),
getTemplPath("post.html"),
)).Execute(w, struct {
Post Post
Me User
}{p, me})
}
func postIndex(w http.ResponseWriter, r *http.Request) {
me := getSessionUser(r)
if !isLogin(me) {
http.Redirect(w, r, "/login", http.StatusFound)
return
}
if r.FormValue("csrf_token") != getCSRFToken(r) {
w.WriteHeader(http.StatusUnprocessableEntity)
return
}
file, header, err := r.FormFile("file")
if err != nil {
session := getSession(r)
session.Values["notice"] = "画像が必須です"
session.Save(r, w)
http.Redirect(w, r, "/", http.StatusFound)
return
}
mime := ""
if file != nil {
// 投稿のContent-Typeからファイルのタイプを決定する
contentType := header.Header["Content-Type"][0]
if strings.Contains(contentType, "jpeg") {
mime = "image/jpeg"
} else if strings.Contains(contentType, "png") {
mime = "image/png"
} else if strings.Contains(contentType, "gif") {
mime = "image/gif"
} else {
session := getSession(r)
session.Values["notice"] = "投稿できる画像形式はjpgとpngとgifだけです"
session.Save(r, w)
http.Redirect(w, r, "/", http.StatusFound)
return
}
}
filedata, err := io.ReadAll(file)
if err != nil {
log.Print(err)
return
}
if len(filedata) > UploadLimit {
session := getSession(r)
session.Values["notice"] = "ファイルサイズが大きすぎます"
session.Save(r, w)
http.Redirect(w, r, "/", http.StatusFound)
return
}
query := "INSERT INTO `posts` (`user_id`, `mime`, `imgdata`, `body`) VALUES (?,?,?,?)"
result, err := db.Exec(
query,
me.ID,
mime,
filedata,
r.FormValue("body"),
)
if err != nil {
log.Print(err)
return
}
pid, err := result.LastInsertId()
if err != nil {
log.Print(err)
return
}
http.Redirect(w, r, "/posts/"+strconv.FormatInt(pid, 10), http.StatusFound)
}
func getImage(w http.ResponseWriter, r *http.Request) {
pidStr := chi.URLParam(r, "id")
pid, err := strconv.Atoi(pidStr)
if err != nil {
w.WriteHeader(http.StatusNotFound)
return
}
post := Post{}
err = db.Get(&post, "SELECT * FROM `posts` WHERE `id` = ?", pid)
if err != nil {
log.Print(err)
return
}
ext := chi.URLParam(r, "ext")
if ext == "jpg" && post.Mime == "image/jpeg" ||
ext == "png" && post.Mime == "image/png" ||
ext == "gif" && post.Mime == "image/gif" {
w.Header().Set("Content-Type", post.Mim | e)
_, err := w.Write(post.Imgdata)
if err != nil {
log.Print(err)
return
}
return
}
w.WriteHeader(http.StatusNotFound)
}
func postComment(w http.ResponseWriter, r *http.Request) {
me := getSessionUser(r)
if !isLogin(me) {
http.Redirect(w, r, "/login", http.StatusFound)
return
}
if r.FormValue("csrf_token") != getCSRFToken(r) { | identifier_body |
|
shuffle.py | icarpeta/proyectos/Ejercicios_Pyhton/biblioteca/Seattle_Party.flac"},
"King_Kunta":
{"track-number": 3, "artist": "Kendrick Lamar", "album": "To Pimp A Butterfly", "location": "/home/ulises/Micarpeta/proyectos/Ejercicios_Pyhton/biblioteca/King_Kunta.mp3"},
"Gorilaz - Clint Eastwood.mp3":
{"track-number": 3, "artist": "Kendrick Lamar", "album": "To Pimp A Butterfly", "location": "/home/ulises/Música/gorillaz/Gorilaz - Clint Eastwood.mp3"}
}"""
def checkSeleccionaCancionRandom(cancion, libreria):
assert isinstance(cancion, str)
assert isinstance(libreria, dict)
if cancion not in libreria:
return False
else:
return True
def checkIndices(indices):
for i in indices:
if indices.count(i)>1:
return False
"""incidencias=0
for j in range(0,len(indices)):
if i==indices[j]:
incidencias=incidencias+1
if incidencias>1:
return False"""
return True
def checkCancionRepetida(playlist):
for i in playlist:
if playList.count(i)>1:
return False
"""incidencias=0
for j in range(0,len(playlist)):
if i==playlist[j]:
incidencias=incidencias+1
if incidencias>1:
return False"""
return True |
"""def creadorIndicesRandom(libreria):
assert isinstance(libreria,dict),"no es un diccionario!"
indices=[]
indiceRandom=random.randrange(1,len(libreria)+1)
indices.append(indiceRandom)
while len(indices)!=len(libreria):
while indiceRandom in indices:
indiceRandom=random.randrange(1,len(libreria)+1)
indices.append(indiceRandom)
assert isinstance(indices,list),"creadorIndicesRandom no devuelve una lista"
assert indices!=[],"lista indices vacía"
assert checkIndices(indices) ,"indice repetido"
return indices"""
def creadorIndicesRandom2(libreria):
assert isinstance(libreria,dict)," libreria no es un diccionario!"
assert isinstance(random.sample(range(1,len(libreria)+1), len(libreria)),list),"creadorIndicesRandom2 no devuelve una lista"
assert random.sample(range(1,len(libreria)+1), len(libreria))!=[],"lista indices vacía"
assert checkIndices(random.sample(range(1,len(libreria)+1), len(libreria))) ,"indice repetido"
return random.sample(range(1,len(libreria)+1), len(libreria))
def getLocalización(libreria,cancion):
assert isinstance(cancion,str),"canción no es una string"
return libreria[cancion]["location"]
def creadorListaTitulos(libreria, playList):
assert isinstance(libreria,dict),"libreria no es un diccionario!"
assert isinstance(playList,dict),"playList no es un diccionario"
indices=creadorIndicesRandom2(libreria)
i=0
for key in libreria.keys():
playList[indices[i]]=key
i=i+1
assert playList, "La lista(diccionario playList) está vacía"
assert checkCancionRepetida, "Hay canciones repetidas"
return playList
"""def iniciarPlayList(numeroCancion):
# simulare que el diccionario playList es una lista playList[integer]
# donde la clave es un numero entero que incremento cada vez
# que añado una cancion a la playList
claveDiccionarioPlayList = numeroCancion
def appendCancion(cancion, playList):
assert isinstance(playList, dict), "playList no es un diccionario"
# la cancion no debe estar ya en la playList
assert cancion not in list(playList.values())
# closure: claveDiccionarioPlayList recuerda su ultimo valor
# cada vez que invocamos a appendCancion()
# De este modo, incremento la clave del diccionario en uno
# y coloco la cancion en esa "posicion" de la lista que simula
# el diccionario implementado de este modo.
nonlocal claveDiccionarioPlayList
claveDiccionarioPlayList += 1
# asocio el valor titulo de la cancion a la clave integer
playList[claveDiccionarioPlayList] = str(cancion)
return claveDiccionarioPlayList
return appendCancion"""
def imprimirCancionesReproducidas(playList):
assert isinstance(playList, dict)
# Recorro el objeto iterable view keys() del diccionario playList
# Antes lo he ordenado.
for numeroCancion in sorted(playList.keys()):
# muestro la posicion en la que fue elegida la cancion
# y el titulo de la cancion
print(str(numeroCancion) + ": " + str(playList[numeroCancion]))
def lanzarVLC(libreria,playList):
# Las canciones han de estar en un directorio llamado biblioteca
# en el directorio de la aplicacion.
# Han de ser expresamente las incluidas en el diccionario libreria.
# La extensión a este programa es incluir la capa de acceso a datos
# para extraer los titulos de las canciones y las rutas
# a los ficheros del fichero XML playlist.xspf que genera VLC
# o Rhythmbox con las canciones de la biblioteca
import subprocess
import shlex
import os
linuxPathVLC = "/usr/bin/vlc"
lineaComandoVLC = [linuxPathVLC]
separador = " "
for numeroCancion in sorted(playList.keys()):
tituloCancion = playList[numeroCancion]
try:
rutaAccesoFichero = getLocalización(libreria,tituloCancion)
print(tituloCancion,getLocalización(libreria,tituloCancion))
except KeyError:
print("la cancion " + str(tituloCancion) + " no se encuentra en la biblioteca")
else:
# compruebo si la ruta de acceso al fichero cancion es correcto
if os.path.exists(str(rutaAccesoFichero)):
# anhado la ruta de acceso a la cancion
# a la linea de comandos para invocar a VLC
#lineaComandoVLC = lineaComandoVLC + separador + str(rutaAccesoFichero)
lineaComandoVLC.append(str(rutaAccesoFichero))
else:
print("no lo encuentro",os.path.exists(str(rutaAccesoFichero)))
pass
# Popen necesita una lista de string
# Esta libreria optimiza la division de los strings que forman
# la entrada de un comando en argumentos
#args = shlex.split(lineaComandoVLC)
#print("somos los args",args)
try:
# lanzo el subproceso VLC con las opciones adecuada:
# la ruta de acceso a las canciones de la playList
procesoVLC = subprocess.Popen(lineaComandoVLC)
# procesoVLC = subprocess.Popen(["/usr/bin/vlc", "California_Uber_Alles.mp3", "Seattle_Party.flac"])
except OSError:
print("el fichero no existe")
except ValueError:
print("argumentos invalidos")
else:
print("lanzando VLC con lista aleatoria")
def ObtenerLibreriaGrupo():
libreria=dict()
musicaPath="/home/ulises/Música/"
musicaArbol=os.walk(musicaPath)
for path,sub,fileList in musicaArbol:
for grupo in sub:
print(grupo)
break
while True:
print("¿Que quieres escuchar?")
NombreGrupo=input()
if os.path.exists(musicaPath+NombreGrupo):
grupoPath=musicaPath+NombreGrupo
grupoArbol=os.walk(grupoPath)
for path,sub,fileList in grupoArbol:
if len(sub)>0:#nuevo ¿?
for disco in sub:
print(disco)
while True:
print("¿Que disco de "+ NombreGrupo+" quieres escuchar?")
InputUsuarioDisco=input()
nombreDisco=""
for disco in sub:
if InputUsuarioDisco in disco:
nombreDisco=disco
if nombreDisco=="":
print("Introduce el nombre exacto de la carpeta por favor:")
nombreDisco=input()
#print(os.path.exists(musicaPath+NombreGrupo+"/"+nombreDisco),musicaPath+NombreGrupo+"/"+nombreDisco)
| random_line_split |
|
shuffle.py | wood.mp3":
{"track-number": 3, "artist": "Kendrick Lamar", "album": "To Pimp A Butterfly", "location": "/home/ulises/Música/gorillaz/Gorilaz - Clint Eastwood.mp3"}
}"""
def checkSeleccionaCancionRandom(cancion, libreria):
assert isinstance(cancion, str)
assert isinstance(libreria, dict)
if cancion not in libreria:
return False
else:
return True
def checkIndices(indices):
for i in indices:
if indices.count(i)>1:
return False
"""incidencias=0
for j in range(0,len(indices)):
if i==indices[j]:
incidencias=incidencias+1
if incidencias>1:
return False"""
return True
def checkCancionRepetida(playlist):
for i in playlist:
if playList.count(i)>1:
return False
"""incidencias=0
for j in range(0,len(playlist)):
if i==playlist[j]:
incidencias=incidencias+1
if incidencias>1:
return False"""
return True
"""def creadorIndicesRandom(libreria):
assert isinstance(libreria,dict),"no es un diccionario!"
indices=[]
indiceRandom=random.randrange(1,len(libreria)+1)
indices.append(indiceRandom)
while len(indices)!=len(libreria):
while indiceRandom in indices:
indiceRandom=random.randrange(1,len(libreria)+1)
indices.append(indiceRandom)
assert isinstance(indices,list),"creadorIndicesRandom no devuelve una lista"
assert indices!=[],"lista indices vacía"
assert checkIndices(indices) ,"indice repetido"
return indices"""
def creadorIndicesRandom2(libreria):
assert isinstance(libreria,dict)," libreria no es un diccionario!"
assert isinstance(random.sample(range(1,len(libreria)+1), len(libreria)),list),"creadorIndicesRandom2 no devuelve una lista"
assert random.sample(range(1,len(libreria)+1), len(libreria))!=[],"lista indices vacía"
assert checkIndices(random.sample(range(1,len(libreria)+1), len(libreria))) ,"indice repetido"
return random.sample(range(1,len(libreria)+1), len(libreria))
def getLocalización(libreria,cancion):
assert isinstance(cancion,str),"canción no es una string"
return libreria[cancion]["location"]
def creadorListaTitulos(libreria, playList):
assert isinstance(libreria,dict),"libreria no es un diccionario!"
assert isinstance(playList,dict),"playList no es un diccionario"
indices=creadorIndicesRandom2(libreria)
i=0
for key in libreria.keys():
playList[indices[i]]=key
i=i+1
assert playList, "La lista(diccionario playList) está vacía"
assert checkCancionRepetida, "Hay canciones repetidas"
return playList
"""def iniciarPlayList(numeroCancion):
# simulare que el diccionario playList es una lista playList[integer]
# donde la clave es un numero entero que incremento cada vez
# que añado una cancion a la playList
claveDiccionarioPlayList = numeroCancion
def appendCancion(cancion, playList):
assert isinstance(playList, dict), "playList no es un diccionario"
# la cancion no debe estar ya en la playList
assert cancion not in list(playList.values())
# closure: claveDiccionarioPlayList recuerda su ultimo valor
# cada vez que invocamos a appendCancion()
# De este modo, incremento la clave del diccionario en uno
# y coloco la cancion en esa "posicion" de la lista que simula
# el diccionario implementado de este modo.
nonlocal claveDiccionarioPlayList
claveDiccionarioPlayList += 1
# asocio el valor titulo de la cancion a la clave integer
playList[claveDiccionarioPlayList] = str(cancion)
return claveDiccionarioPlayList
return appendCancion"""
def imprimirCancionesReproducidas(playList):
assert isinstance(playList, dict)
# Recorro el objeto iterable view keys() del diccionario playList
# Antes lo he ordenado.
for numeroCancion in sorted(playList.keys()):
# muestro la posicion en la que fue elegida la cancion
# y el titulo de la cancion
print(str(numeroCancion) + ": " + str(playList[numeroCancion]))
def lanzarVLC(libreria,playList):
# Las canciones han de estar en un directorio llamado biblioteca
# en el directorio de la aplicacion.
# Han de ser expresamente las incluidas en el diccionario libreria.
# La extensión a este programa es incluir la capa de acceso a datos
# para extraer los titulos de las canciones y las rutas
# a los ficheros del fichero XML playlist.xspf que genera VLC
# o Rhythmbox con las canciones de la biblioteca
import subprocess
import shlex
import os
linuxPathVLC = "/usr/bin/vlc"
lineaComandoVLC = [linuxPathVLC]
separador = " "
for numeroCancion in sorted(playList.keys()):
tituloCancion = playList[numeroCancion]
try:
rutaAccesoFichero = getLocalización(libreria,tituloCancion)
print(tituloCancion,getLocalización(libreria,tituloCancion))
except KeyError:
print("la cancion " + str(tituloCancion) + " no se encuentra en la biblioteca")
else:
# compruebo si la ruta de acceso al fichero cancion es correcto
if os.path.exists(str(rutaAccesoFichero)):
# anhado la ruta de acceso a la cancion
# a la linea de comandos para invocar a VLC
#lineaComandoVLC = lineaComandoVLC + separador + str(rutaAccesoFichero)
lineaComandoVLC.append(str(rutaAccesoFichero))
else:
print("no lo encuentro",os.path.exists(str(rutaAccesoFichero)))
pass
# Popen necesita una lista de string
# Esta libreria optimiza la division de los strings que forman
# la entrada de un comando en argumentos
#args = shlex.split(lineaComandoVLC)
#print("somos los args",args)
try:
# lanzo el subproceso VLC con las opciones adecuada:
# la ruta de acceso a las canciones de la playList
procesoVLC = subprocess.Popen(lineaComandoVLC)
# procesoVLC = subprocess.Popen(["/usr/bin/vlc", "California_Uber_Alles.mp3", "Seattle_Party.flac"])
except OSError:
print("el fichero no existe")
except ValueError:
print("argumentos invalidos")
else:
print("lanzando VLC con lista aleatoria")
def ObtenerLibreriaGrupo():
libreria=dict()
musicaPath="/home/ulises/Música/"
musicaArbol=os.walk(musicaPath)
for path,sub,fileList in musicaArbol:
for grupo in sub:
print(grupo)
break
while True:
print("¿Que quieres escuchar?")
NombreGrupo=input()
if os.path.exists(musicaPath+NombreGrupo):
grupoPath=musicaPath+NombreGrupo
grupoArbol=os.walk(grupoPath)
for path,sub,fileList in grupoArbol:
if len(sub)>0:#nuevo ¿?
for disco in sub:
print(disco)
while True:
print("¿Que disco de "+ NombreGrupo+" quieres escuchar?")
InputUsuarioDisco=input()
nombreDisco=""
for disco in sub:
if InputUsuarioDisco in disco:
nombreDisco=disco
if nombreDisco=="":
print("Introduce el nombre exacto de la carpeta por favor:")
nombreDisco=input()
#print(os.path.exists(musicaPath+NombreGrupo+"/"+nombreDisco),musicaPath+NombreGrupo+"/"+nombreDisco)
if os.path.exists(grupoPath+"/"+nombreDisco):
discoPath=grupoPath+"/"+nombreDisco
for path,sub,fileList in os.walk(discoPath):
thePath=path
listFilename=fileList
for fileName in listFilename:
print(path,sub,fileName )
if ".mp3" in fileName or ".flac" in fileName:
libreria[fileNa | me]={"location":thePath+'/'+fileName}
| conditional_block |
|
shuffle.py | icarpeta/proyectos/Ejercicios_Pyhton/biblioteca/Seattle_Party.flac"},
"King_Kunta":
{"track-number": 3, "artist": "Kendrick Lamar", "album": "To Pimp A Butterfly", "location": "/home/ulises/Micarpeta/proyectos/Ejercicios_Pyhton/biblioteca/King_Kunta.mp3"},
"Gorilaz - Clint Eastwood.mp3":
{"track-number": 3, "artist": "Kendrick Lamar", "album": "To Pimp A Butterfly", "location": "/home/ulises/Música/gorillaz/Gorilaz - Clint Eastwood.mp3"}
}"""
def checkSeleccionaCancionRandom(cancion, libreria):
assert isinstance(cancion, str)
assert isinstance(libreria, dict)
if cancion not in libreria:
return False
else:
return True
def checkIndices(indices):
for i in indices:
if indices.count(i)>1:
return False
"""incidencias=0
for j in range(0,len(indices)):
if i==indices[j]:
incidencias=incidencias+1
if incidencias>1:
return False"""
return True
def checkCancionRepetida(playlist):
for i in playlist:
if playList.count(i)>1:
return False
"""incidencias=0
for j in range(0,len(playlist)):
if i==playlist[j]:
incidencias=incidencias+1
if incidencias>1:
return False"""
return True
"""def creadorIndicesRandom(libreria):
assert isinstance(libreria,dict),"no es un diccionario!"
indices=[]
indiceRandom=random.randrange(1,len(libreria)+1)
indices.append(indiceRandom)
while len(indices)!=len(libreria):
while indiceRandom in indices:
indiceRandom=random.randrange(1,len(libreria)+1)
indices.append(indiceRandom)
assert isinstance(indices,list),"creadorIndicesRandom no devuelve una lista"
assert indices!=[],"lista indices vacía"
assert checkIndices(indices) ,"indice repetido"
return indices"""
def creadorIndicesRandom2(libreria):
assert isinstance(libreria,dict)," libreria no es un diccionario!"
assert isinstance(random.sample(range(1,len(libreria)+1), len(libreria)),list),"creadorIndicesRandom2 no devuelve una lista"
assert random.sample(range(1,len(libreria)+1), len(libreria))!=[],"lista indices vacía"
assert checkIndices(random.sample(range(1,len(libreria)+1), len(libreria))) ,"indice repetido"
return random.sample(range(1,len(libreria)+1), len(libreria))
def getLocalización(libreria,cancion):
assert isinstance(cancion,str),"canción no es una string"
return libreria[cancion]["location"]
def creadorListaTitulos(libreria, playList):
assert isinstance(libreria,dict),"libreria no es un diccionario!"
assert isinstance(playList,dict),"playList no es un diccionario"
indices=creadorIndicesRandom2(libreria)
i=0
for key in libreria.keys():
playList[indices[i]]=key
i=i+1
assert playList, "La lista(diccionario playList) está vacía"
assert checkCancionRepetida, "Hay canciones repetidas"
return playList
"""def iniciarPlayList(numeroCancion):
# simulare que el diccionario playList es una lista playList[integer]
# donde la clave es un numero entero que incremento cada vez
# que añado una cancion a la playList
claveDiccionarioPlayList = numeroCancion
def appendCancion(cancion, playList):
assert isinstance(playList, dict), "playList no es un diccionario"
# la cancion no debe estar ya en la playList
assert cancion not in list(playList.values())
# closure: claveDiccionarioPlayList recuerda su ultimo valor
# cada vez que invocamos a appendCancion()
# De este modo, incremento la clave del diccionario en uno
# y coloco la cancion en esa "posicion" de la lista que simula
# el diccionario implementado de este modo.
nonlocal claveDiccionarioPlayList
claveDiccionarioPlayList += 1
# asocio el valor titulo de la cancion a la clave integer
playList[claveDiccionarioPlayList] = str(cancion)
return claveDiccionarioPlayList
return appendCancion"""
def imprimirCancionesReproducidas(playList):
assert isinstance(playList, dict)
# Recorro el objeto iterable view keys() del diccionario playList
# Antes lo he ordenado.
for numeroCancion in sorted(playList.keys()):
# muestro la posicion en la que fue elegida la cancion
# y el titulo de la cancion
print(str(numeroCancion) + ": " + str(playList[numeroCancion]))
def lanzarVLC(libreria,playList):
# Las canciones han de estar en un directorio llamado biblioteca
# en el directorio de la aplicacion.
# Han de ser expresamente las incluidas en el diccionario libreria.
# La extensión a este programa es incluir la capa de acceso a datos
# para extraer los titulos de las canciones y las rutas
# a los ficheros del fichero XML playlist.xspf que genera VLC
# o Rhythmbox con las canciones de la biblioteca
import subprocess
import shlex
import os
linuxPathVLC = "/usr/bin/vlc"
lineaComandoVLC = [linuxPathVLC]
separador = " "
for numeroCancion in sorted(playList.keys()):
tituloCancion = playList[numeroCancion]
try:
rutaAccesoFichero = getLocalización(libreria,tituloCancion)
print(tituloCancion,getLocalización(libreria,tituloCancion))
except KeyError:
print("la cancion " + str(tituloCancion) + " no se encuentra en la biblioteca")
else:
# compruebo si la ruta de acceso al fichero cancion es correcto
if os.path.exists(str(rutaAccesoFichero)):
# anhado la ruta de acceso a la cancion
# a la linea de comandos para invocar a VLC
#lineaComandoVLC = lineaComandoVLC + separador + str(rutaAccesoFichero)
lineaComandoVLC.append(str(rutaAccesoFichero))
else:
print("no lo encuentro",os.path.exists(str(rutaAccesoFichero)))
pass
# Popen necesita una lista de string
# Esta libreria optimiza la division de los strings que forman
# la entrada de un comando en argumentos
#args = shlex.split(lineaComandoVLC)
#print("somos los args",args)
try:
# lanzo el subproceso VLC con las opciones adecuada:
# la ruta de acceso a las canciones de la playList
procesoVLC = subprocess.Popen(lineaComandoVLC)
# procesoVLC = subprocess.Popen(["/usr/bin/vlc", "California_Uber_Alles.mp3", "Seattle_Party.flac"])
except OSError:
print("el fichero no existe")
except ValueError:
print("argumentos invalidos")
else:
print("lanzando VLC con lista aleatoria")
def ObtenerLibr | reria=dict()
musicaPath="/home/ulises/Música/"
musicaArbol=os.walk(musicaPath)
for path,sub,fileList in musicaArbol:
for grupo in sub:
print(grupo)
break
while True:
print("¿Que quieres escuchar?")
NombreGrupo=input()
if os.path.exists(musicaPath+NombreGrupo):
grupoPath=musicaPath+NombreGrupo
grupoArbol=os.walk(grupoPath)
for path,sub,fileList in grupoArbol:
if len(sub)>0:#nuevo ¿?
for disco in sub:
print(disco)
while True:
print("¿Que disco de "+ NombreGrupo+" quieres escuchar?")
InputUsuarioDisco=input()
nombreDisco=""
for disco in sub:
if InputUsuarioDisco in disco:
nombreDisco=disco
if nombreDisco=="":
print("Introduce el nombre exacto de la carpeta por favor:")
nombreDisco=input()
#print(os.path.exists(musicaPath+NombreGrupo+"/"+nombreDisco),musicaPath+NombreGrupo+"/"+nombreDisco)
| eriaGrupo():
lib | identifier_name |
shuffle.py | arpeta/proyectos/Ejercicios_Pyhton/biblioteca/Seattle_Party.flac"},
"King_Kunta":
{"track-number": 3, "artist": "Kendrick Lamar", "album": "To Pimp A Butterfly", "location": "/home/ulises/Micarpeta/proyectos/Ejercicios_Pyhton/biblioteca/King_Kunta.mp3"},
"Gorilaz - Clint Eastwood.mp3":
{"track-number": 3, "artist": "Kendrick Lamar", "album": "To Pimp A Butterfly", "location": "/home/ulises/Música/gorillaz/Gorilaz - Clint Eastwood.mp3"}
}"""
def checkSeleccionaCancionRandom(cancion, libreria):
assert isinstance(cancion, str)
assert isinstance(libreria, dict)
if cancion not in libreria:
return False
else:
return True
def checkIndices(indices):
for i in indices:
if indices.count(i)>1:
return False
"""incidencias=0
for j in range(0,len(indices)):
if i==indices[j]:
incidencias=incidencias+1
if incidencias>1:
return False"""
return True
def checkCancionRepetida(playlist):
for i in playlist:
if playList.count(i)>1:
return False
"""incidencias=0
for j in range(0,len(playlist)):
if i==playlist[j]:
incidencias=incidencias+1
if incidencias>1:
return False"""
return True
"""def creadorIndicesRandom(libreria):
assert isinstance(libreria,dict),"no es un diccionario!"
indices=[]
indiceRandom=random.randrange(1,len(libreria)+1)
indices.append(indiceRandom)
while len(indices)!=len(libreria):
while indiceRandom in indices:
indiceRandom=random.randrange(1,len(libreria)+1)
indices.append(indiceRandom)
assert isinstance(indices,list),"creadorIndicesRandom no devuelve una lista"
assert indices!=[],"lista indices vacía"
assert checkIndices(indices) ,"indice repetido"
return indices"""
def creadorIndicesRandom2(libreria):
assert isinstance(libreria,dict)," libreria no es un diccionario!"
assert isinstance(random.sample(range(1,len(libreria)+1), len(libreria)),list),"creadorIndicesRandom2 no devuelve una lista"
assert random.sample(range(1,len(libreria)+1), len(libreria))!=[],"lista indices vacía"
assert checkIndices(random.sample(range(1,len(libreria)+1), len(libreria))) ,"indice repetido"
return random.sample(range(1,len(libreria)+1), len(libreria))
def getLocalización(libreria,cancion):
assert isinstance(cancion,str),"canción no es una string"
return libreria[cancion]["location"]
def creadorListaTitulos(libreria, playList):
assert isinstance(libreria,dict),"libreria no es un diccionario!"
assert isinstance(playList,dict),"playList no es un diccionario"
indices=creadorIndicesRandom2(libreria)
i=0
for key in libreria.keys():
playList[indices[i]]=key
i=i+1
assert playList, "La lista(diccionario playList) está vacía"
assert checkCancionRepetida, "Hay canciones repetidas"
return playList
"""def iniciarPlayList(numeroCancion):
# simulare que el diccionario playList es una lista playList[integer]
# donde la clave es un numero entero que incremento cada vez
# que añado una cancion a la playList
claveDiccionarioPlayList = numeroCancion
def appendCancion(cancion, playList):
assert isinstance(playList, dict), "playList no es un diccionario"
# la cancion no debe estar ya en la playList
assert cancion not in list(playList.values())
# closure: claveDiccionarioPlayList recuerda su ultimo valor
# cada vez que invocamos a appendCancion()
# De este modo, incremento la clave del diccionario en uno
# y coloco la cancion en esa "posicion" de la lista que simula
# el diccionario implementado de este modo.
nonlocal claveDiccionarioPlayList
claveDiccionarioPlayList += 1
# asocio el valor titulo de la cancion a la clave integer
playList[claveDiccionarioPlayList] = str(cancion)
return claveDiccionarioPlayList
return appendCancion"""
def imprimirCancionesReproducidas(playList):
assert isinstance(playList, dict)
# Recorro el objeto iterable view keys() del diccionario playList
# Antes lo he ordenado.
for numeroCancion in sorted(playList.keys()):
# muestro la posicion en la que fue elegida la cancion
# y el titulo de la cancion
print(str(numeroCancion) + ": " + str(playList[numeroCancion]))
def lanzarVLC(libreria,playList):
# Las canciones han de estar en un directorio llamado biblioteca
# en el directorio de la aplicacion.
# Han de ser expresamente las incluidas en el diccionario libreria.
# La extensión a este programa es incluir la capa de acceso a datos
# para extraer los titulos de las canciones y las rutas
# a los ficheros del fichero XML playlist.xspf que genera VLC
# o Rhythmbox con las canciones de la biblioteca
import su | # a la linea de comandos para invocar a VLC
#lineaComandoVLC = lineaComandoVLC + separador + str(rutaAccesoFichero)
lineaComandoVLC.append(str(rutaAccesoFichero))
else:
print("no lo encuentro",os.path.exists(str(rutaAccesoFichero)))
pass
# Popen necesita una lista de string
# Esta libreria optimiza la division de los strings que forman
# la entrada de un comando en argumentos
#args = shlex.split(lineaComandoVLC)
#print("somos los args",args)
try:
# lanzo el subproceso VLC con las opciones adecuada:
# la ruta de acceso a las canciones de la playList
procesoVLC = subprocess.Popen(lineaComandoVLC)
# procesoVLC = subprocess.Popen(["/usr/bin/vlc", "California_Uber_Alles.mp3", "Seattle_Party.flac"])
except OSError:
print("el fichero no existe")
except ValueError:
print("argumentos invalidos")
else:
print("lanzando VLC con lista aleatoria")
def Obten
erLibreriaGrupo():
libreria=dict()
musicaPath="/home/ulises/Música/"
musicaArbol=os.walk(musicaPath)
for path,sub,fileList in musicaArbol:
for grupo in sub:
print(grupo)
break
while True:
print("¿Que quieres escuchar?")
NombreGrupo=input()
if os.path.exists(musicaPath+NombreGrupo):
grupoPath=musicaPath+NombreGrupo
grupoArbol=os.walk(grupoPath)
for path,sub,fileList in grupoArbol:
if len(sub)>0:#nuevo ¿?
for disco in sub:
print(disco)
while True:
print("¿Que disco de "+ NombreGrupo+" quieres escuchar?")
InputUsuarioDisco=input()
nombreDisco=""
for disco in sub:
if InputUsuarioDisco in disco:
nombreDisco=disco
if nombreDisco=="":
print("Introduce el nombre exacto de la carpeta por favor:")
nombreDisco=input()
#print(os.path.exists(musicaPath+NombreGrupo+"/"+nombreDisco),musicaPath+NombreGrupo+"/"+nombreDisco | bprocess
import shlex
import os
linuxPathVLC = "/usr/bin/vlc"
lineaComandoVLC = [linuxPathVLC]
separador = " "
for numeroCancion in sorted(playList.keys()):
tituloCancion = playList[numeroCancion]
try:
rutaAccesoFichero = getLocalización(libreria,tituloCancion)
print(tituloCancion,getLocalización(libreria,tituloCancion))
except KeyError:
print("la cancion " + str(tituloCancion) + " no se encuentra en la biblioteca")
else:
# compruebo si la ruta de acceso al fichero cancion es correcto
if os.path.exists(str(rutaAccesoFichero)):
# anhado la ruta de acceso a la cancion | identifier_body |
GeneticAlgorithm.py | self.parameters['population_size']
self.edge_domain = self.parameters['edge_domain']
self.tournament_size = self.parameters['tournament_size']
self.parents_per_offspring = self.parameters['parents_per_offspring']
self.mutation_probability = self.parameters['mutation_probability']
self.reproductivity = self.parameters['reproductivity']
self.record_of_all_fitnesses_each_generation = []
# generate a list of integers up to the population size
def generate_integers(self):
        self.integer_list = list(range(self.population_size))
def stop_condition(self):
return self.selection_fitness_score != 'STOP' and self.evaluation_nr < self.max_fitness_evaluations
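    # the generational loop in run() ends when either the fitness schedule hits
    # its 'STOP' marker or the evaluation budget is spent; evaluation_nr is
    # advanced in step() as children are evaluated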
def init_run(self):
# initialize population
# make a list of integers to be able to randomize the order of the population without losing the connectedness of individuals and fitness
self.generate_integers()
# set the amount of edges in the neural network
        edges = self.env.get_num_sensors() * self.hidden_neurons + 5 * self.hidden_neurons  # weights from every sensor to every hidden neuron plus from every hidden neuron to the 5 output neurons (bias weights are not counted)
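        # note: a controller that also uses bias terms would instead need
        # (num_sensors + 1) * hidden + (hidden + 1) * 5 weights; whether biases
        # are expected depends on the player controller this genome is fed into,
        # so the bias-free count above is an assumption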
# set the first fitness type to select on
self.fitness_type = 0
self.selection_fitness_score = self.fitness_order[self.fitness_type]
# generate an initial population
self.survived_population = np.random.uniform(self.edge_domain[0], self.edge_domain[1], (self.population_size, edges))
# determine and make an array of the fitnesses of the initial population
self.survived_fitnesses = self.determine_fitness(self.survived_population)
# self.survived_fitnesses = np.random.randint(0, 100, size=(100, 5)) # for testing
# make an empty array to store fitness values
#fitness_record = np.array([0,0,0,0,0])
# save the initial fitness mean, std and max
self.fitness_record = self.save_fitness(self.survived_fitnesses)
# save all fitnesses:
#record_of_all_fitnesses_each_generation = [np.ndarray.tolist(self.survived_fitnesses)]
self.evaluation_nr = 0
    def step(self):
        # one generation: select parents, breed and mutate children, evaluate
        # them, then let the combined population compete for the fixed number
        # of spots in the next generation
        parents = self.select_parents(self.survived_fitnesses, self.survived_population)
        children = self.breed(parents)
        children_fitnesses = self.determine_fitness(children)
        combined_population = np.concatenate((self.survived_population, children))
        combined_fitnesses = np.concatenate((self.survived_fitnesses, children_fitnesses))
        self.survived_fitnesses, self.survived_population = self.live_and_let_die(combined_fitnesses, combined_population)
        self.evaluation_nr += len(children)
def run(self):
self.init_run()
while self.stop_condition():
self.step()
self.record_of_all_fitnesses_each_generation.append(np.ndarray.tolist(self.survived_fitnesses))
#save a record of all fitnesses of all individuals in all generations to a pickle file
pickle_out = open('task_1_GA_' + sys.argv[1] + '/fitness_record_GA_enemy'+sys.argv[1]+'_run'+sys.argv[2]+'.pickle', 'wb')
pickle.dump(self.record_of_all_fitnesses_each_generation, pickle_out)
pickle_out.close()
print('the fitnesses look like\n',self.record_of_all_fitnesses_each_generation)
#save the best solution
        fitnesses = self.survived_fitnesses[:, 0]
        index = np.argmax(fitnesses)
        fittest_individual = self.survived_population[index]
pickle_out = open('task_1_GA_' + sys.argv[1] + '/best_solution_GA_enemy'+ sys.argv[1]+'_run'+sys.argv[2]+'.pickle', 'wb')
pickle.dump(fittest_individual, pickle_out)
pickle_out.close()
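        # a minimal reload sketch for later replay (the file layout is the one
        # written above, shown here for enemy 1, run 0):
        #   with open('task_1_GA_1/best_solution_GA_enemy1_run0.pickle', 'rb') as f:
        #       best_weights = pickle.load(f)  # flat weight vector for the controller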
self.plot_fitness()
return fittest_individual
# perform a tournament to choose the parents that reproduce
def tournament(self, population_fitness, population):
# match up individuals for tournament
reproductive_individuals = []
random.shuffle(self.integer_list) # randomize the integer_list to determine the tournament opponents
for tournament_number in range(int(self.population_size/self.tournament_size)):
fitnesses_tournament = []
for individual_nr in range(self.tournament_size):
shuffled_population_position = tournament_number*self.tournament_size + individual_nr
fitnesses_tournament.append(population_fitness[self.integer_list[shuffled_population_position]][self.selection_fitness_score])
#select winner of tournament
#store population position of winner
fittest_tournee = fitnesses_tournament.index(max(fitnesses_tournament))
            reproductive_individuals.append(population[self.integer_list[tournament_number*self.tournament_size + fittest_tournee]])
return reproductive_individuals
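    # worked example (illustrative numbers, population_size = 6 and
    # tournament_size = 2): a shuffled integer_list such as [4, 1, 5, 0, 2, 3]
    # yields the match-ups (4 vs 1), (5 vs 0) and (2 vs 3); the fitter
    # individual of each pair reproduces, so 3 of the 6 individuals are returned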
# select the parents for the next population
def select_parents(self, population_fitness, population):
if self.parent_selection_type == 'tournament':
parents = self.tournament(population_fitness, population)
        else:
            # only tournament selection is implemented here; fail loudly rather
            # than silently returning an undefined parent set
            raise NotImplementedError('unknown parent_selection_type: ' + str(self.parent_selection_type))
return parents
# create the children from the selected parents
def breed(self, parents):
children = []
for breeding_group in range(int(len(parents)/self.parents_per_offspring)):
picked_parents = parents[breeding_group*self.parents_per_offspring:breeding_group*self.parents_per_offspring+self.parents_per_offspring]
for _ in range(self.reproductivity):
unmutated_child = self.crossover(picked_parents)
mutated_child = self.mutate(unmutated_child)
children.append(mutated_child)
return np.asarray(children)
# crossover the parents to create a child
def crossover(self, parents):
# initiate child as list of zeros of the same length as the information contained in a single parent
child = np.zeros(len(parents[0]))
# go through all genes
for gene_nr in range(len(parents[0])):
if self.crossover_weight == 'random':
# make a list of heritability strengths summing to 1
heritabilities = []
devidable_proportion = 1
for parent_nr in range(len(parents)-1):
inheritance = np.random.rand()*devidable_proportion
# give child proportional part of parent value
heritabilities.append(inheritance)
devidable_proportion -= inheritance
heritabilities.append(devidable_proportion)
random.shuffle(heritabilities) # randomize the heritabilities to prevent a parent from dominating the offsrping values
for parent_nr in range(len(parents)):
child[gene_nr] += parents[parent_nr][gene_nr]*heritabilities[parent_nr]
return child
# mutate the genes of the child
def mutate(self, child):
# go through all genes of the child
for gene_nr in range(len(child)):
# mutate of random number is smaller than mutation probability
if np.random.rand() < self.mutation_probability:
# only accept new values if they are in the accepted domain
mutated_allele = self.edge_domain[0] - 1
while not(self.edge_domain[0] < mutated_allele < self.edge_domain[1]):
mutated_allele = child[gene_nr] + np.random.normal(0, 1)
child[gene_nr] = mutated_allele
return child
# select the individuals to continue to the next generation
def live_and_let_die(self, fitnesses, population):
# reduce population to desired population size
survival_scores = []
if self.survival_mechanism == 'weighted probability':
for individual in fitnesses:
# give each individual a survival score based on their fitness and a random number
# add 1 to make sure not most of them are 0
survival_scores.append(np.random.rand()*(individual[self.selection_fitness_score]+1))
elif self.survival_mechanism == 'replace worst':
for individual in fitnesses:
survival_scores.append(individual[self.selection_fitness_score] + 1)
if self.keep_best_solution:
# change the survival score of the fittest individual to the highest
index_topfit = np.argmax(fitnesses[:,self.selection_fitness_score])
survival_scores[index_topfit] = max(survival_scores) + 1
# determine the fitness value of the ordered population of the individual at the population size
ordered_survival_scores = survival_scores[:]
ordered_survival_scores.sort(reverse=True)
survival_threshold = ordered_survival_scores[self.population_size]
individual_nr = 0
# remove individuals with a too low survival score, also removing their fitness and survival score
while self.population_size < len(population):
if survival_scores[individual_nr] <= survival_threshold:
# remove the individuals and fitnesses fo those who died
population = np.delete(population, individual_nr, 0)
fitnesses = np.delete(fitnesses,individual_nr,0)
del survival_scores[individual_nr]
else:
individual_nr += 1
return fitnesses, population
# return the mean, std and max fitness
def save_fitness(self, fitnesses):
# store in colums the mean, std and max of all the 5 fitness measures in rows
fitnesses_statistics = []
for fitness_definition in range(fitnesses.shape[1]):
mean_fitn = np.mean(fitnesses[:,fitness_definition])
std_fitn = np.std(fitnesses[:,fitness_definition])
max_fitn = max(fitnesses[:,fitness_definition])
fitnesses_statistics.append([mean_fitn, std_fitn, max_fitn])
# add a third dimension to be able to add new time points
fitnesses_statistics = np.array(fitnesses_statistics)
fitnesses_statistics = np.transpose(fitnesses_statistics)
fitnesses_statistics = list | print('Error: no appropriate parent selection method selected') | conditional_block |
GeneticAlgorithm.py | self.parameters['population_size']
self.edge_domain = self.parameters['edge_domain']
self.tournament_size = self.parameters['tournament_size']
self.parents_per_offspring = self.parameters['parents_per_offspring']
self.mutation_probability = self.parameters['mutation_probability']
self.reproductivity = self.parameters['reproductivity']
self.record_of_all_fitnesses_each_generation = []
# generate a list of integers up to the population size
def generate_integers(self):
self.integer_list = []
for integer in range(self.population_size):
self.integer_list.append(integer)
def stop_condition(self):
return self.selection_fitness_score != 'STOP' and self.evaluation_nr < self.max_fitness_evaluations
def init_run(self):
# initialize population
# make a list of integers to be able to randomize the order of the population without losing the connectedness of individuals and fitness
self.generate_integers()
# set the amount of edges in the neural network
edges = self.env.get_num_sensors() * self.hidden_neurons + 5 * self.hidden_neurons # not sure why this should be the right amount of edges
# set the first fitness type to select on
self.fitness_type = 0
self.selection_fitness_score = self.fitness_order[self.fitness_type]
# generate an initial population
self.survived_population = np.random.uniform(self.edge_domain[0], self.edge_domain[1], (self.population_size, edges))
# determine and make an array of the fitnesses of the initial population
self.survived_fitnesses = self.determine_fitness(self.survived_population)
# self.survived_fitnesses = np.random.randint(0, 100, size=(100, 5)) # for testing
# make an empty array to store fitness values
#fitness_record = np.array([0,0,0,0,0])
# save the initial fitness mean, std and max
self.fitness_record = self.save_fitness(self.survived_fitnesses)
# save all fitnesses:
#record_of_all_fitnesses_each_generation = [np.ndarray.tolist(self.survived_fitnesses)]
self.evaluation_nr = 0
def step(self):
parents = self.parent_selection()
children = self.recombination(parents)
self.survivor_selection(children)
def run(self):
| return fittest_individual
# perform a tournament to choose the parents that reproduce
def tournament(self, population_fitness, population):
# match up individuals for tournament
reproductive_individuals = []
random.shuffle(self.integer_list) # randomize the integer_list to determine the tournament opponents
for tournament_number in range(int(self.population_size/self.tournament_size)):
fitnesses_tournament = []
for individual_nr in range(self.tournament_size):
shuffled_population_position = tournament_number*self.tournament_size + individual_nr
fitnesses_tournament.append(population_fitness[self.integer_list[shuffled_population_position]][self.selection_fitness_score])
#select winner of tournament
#store population position of winner
fittest_tournee = fitnesses_tournament.index(max(fitnesses_tournament))
reproductive_individuals.append(population[self.integer_list[tournament_number+fittest_tournee]])
return reproductive_individuals
# select the parents for the next population
def select_parents(self, population_fitness, population):
if self.parent_selection_type == 'tournament':
parents = self.tournament(population_fitness, population)
else:
print('Error: no appropriate parent selection method selected')
return parents
# create the children from the selected parents
def breed(self, parents):
children = []
for breeding_group in range(int(len(parents)/self.parents_per_offspring)):
picked_parents = parents[breeding_group*self.parents_per_offspring:breeding_group*self.parents_per_offspring+self.parents_per_offspring]
for _ in range(self.reproductivity):
unmutated_child = self.crossover(picked_parents)
mutated_child = self.mutate(unmutated_child)
children.append(mutated_child)
return np.asarray(children)
# crossover the parents to create a child
def crossover(self, parents):
# initiate child as list of zeros of the same length as the information contained in a single parent
child = np.zeros(len(parents[0]))
# go through all genes
for gene_nr in range(len(parents[0])):
if self.crossover_weight == 'random':
# make a list of heritability strengths summing to 1
heritabilities = []
devidable_proportion = 1
for parent_nr in range(len(parents)-1):
inheritance = np.random.rand()*devidable_proportion
# give child proportional part of parent value
heritabilities.append(inheritance)
devidable_proportion -= inheritance
heritabilities.append(devidable_proportion)
random.shuffle(heritabilities) # randomize the heritabilities to prevent a parent from dominating the offsrping values
for parent_nr in range(len(parents)):
child[gene_nr] += parents[parent_nr][gene_nr]*heritabilities[parent_nr]
return child
# mutate the genes of the child
def mutate(self, child):
# go through all genes of the child
for gene_nr in range(len(child)):
# mutate of random number is smaller than mutation probability
if np.random.rand() < self.mutation_probability:
# only accept new values if they are in the accepted domain
mutated_allele = self.edge_domain[0] - 1
while not(self.edge_domain[0] < mutated_allele < self.edge_domain[1]):
mutated_allele = child[gene_nr] + np.random.normal(0, 1)
child[gene_nr] = mutated_allele
return child
# select the individuals to continue to the next generation
def live_and_let_die(self, fitnesses, population):
# reduce population to desired population size
survival_scores = []
if self.survival_mechanism == 'weighted probability':
for individual in fitnesses:
# give each individual a survival score based on their fitness and a random number
# add 1 to make sure not most of them are 0
survival_scores.append(np.random.rand()*(individual[self.selection_fitness_score]+1))
elif self.survival_mechanism == 'replace worst':
for individual in fitnesses:
survival_scores.append(individual[self.selection_fitness_score] + 1)
if self.keep_best_solution:
# change the survival score of the fittest individual to the highest
index_topfit = np.argmax(fitnesses[:,self.selection_fitness_score])
survival_scores[index_topfit] = max(survival_scores) + 1
# determine the fitness value of the ordered population of the individual at the population size
ordered_survival_scores = survival_scores[:]
ordered_survival_scores.sort(reverse=True)
survival_threshold = ordered_survival_scores[self.population_size]
individual_nr = 0
# remove individuals with a too low survival score, also removing their fitness and survival score
while self.population_size < len(population):
if survival_scores[individual_nr] <= survival_threshold:
# remove the individuals and fitnesses fo those who died
population = np.delete(population, individual_nr, 0)
fitnesses = np.delete(fitnesses,individual_nr,0)
del survival_scores[individual_nr]
else:
individual_nr += 1
return fitnesses, population
# return the mean, std and max fitness
def save_fitness(self, fitnesses):
# store in colums the mean, std and max of all the 5 fitness measures in rows
fitnesses_statistics = []
for fitness_definition in range(fitnesses.shape[1]):
mean_fitn = np.mean(fitnesses[:,fitness_definition])
std_fitn = np.std(fitnesses[:,fitness_definition])
max_fitn = max(fitnesses[:,fitness_definition])
fitnesses_statistics.append([mean_fitn, std_fitn, max_fitn])
# add a third dimension to be able to add new time points
fitnesses_statistics = np.array(fitnesses_statistics)
fitnesses_statistics = np.transpose(fitnesses_statistics)
fitnesses_statistics = list(f | self.init_run()
while self.stop_condition():
self.step()
self.record_of_all_fitnesses_each_generation.append(np.ndarray.tolist(self.survived_fitnesses))
#save a record of all fitnesses of all individuals in all generations to a pickle file
pickle_out = open('task_1_GA_' + sys.argv[1] + '/fitness_record_GA_enemy'+sys.argv[1]+'_run'+sys.argv[2]+'.pickle', 'wb')
pickle.dump(self.record_of_all_fitnesses_each_generation, pickle_out)
pickle_out.close()
print('the fitnesses look like\n',self.record_of_all_fitnesses_each_generation)
#save the best solution
fitnesses = self.survived_fitnesses[:,0]
index = np.where(fitnesses == np.amax(fitnesses))[0][0]
fittest_individual = self.survived_population[index]
pickle_out = open('task_1_GA_' + sys.argv[1] + '/best_solution_GA_enemy'+ sys.argv[1]+'_run'+sys.argv[2]+'.pickle', 'wb')
pickle.dump(fittest_individual, pickle_out)
pickle_out.close()
self.plot_fitness() | identifier_body |
GeneticAlgorithm.py | = self.parameters['population_size']
self.edge_domain = self.parameters['edge_domain']
self.tournament_size = self.parameters['tournament_size']
self.parents_per_offspring = self.parameters['parents_per_offspring']
self.mutation_probability = self.parameters['mutation_probability']
self.reproductivity = self.parameters['reproductivity']
self.record_of_all_fitnesses_each_generation = []
# generate a list of integers up to the population size
def generate_integers(self):
self.integer_list = []
for integer in range(self.population_size):
self.integer_list.append(integer)
def stop_condition(self):
return self.selection_fitness_score != 'STOP' and self.evaluation_nr < self.max_fitness_evaluations
def init_run(self):
# initialize population
# make a list of integers to be able to randomize the order of the population without losing the connectedness of individuals and fitness
self.generate_integers()
# set the amount of edges in the neural network | self.fitness_type = 0
self.selection_fitness_score = self.fitness_order[self.fitness_type]
# generate an initial population
self.survived_population = np.random.uniform(self.edge_domain[0], self.edge_domain[1], (self.population_size, edges))
# determine and make an array of the fitnesses of the initial population
self.survived_fitnesses = self.determine_fitness(self.survived_population)
# self.survived_fitnesses = np.random.randint(0, 100, size=(100, 5)) # for testing
# make an empty array to store fitness values
#fitness_record = np.array([0,0,0,0,0])
# save the initial fitness mean, std and max
self.fitness_record = self.save_fitness(self.survived_fitnesses)
# save all fitnesses:
#record_of_all_fitnesses_each_generation = [np.ndarray.tolist(self.survived_fitnesses)]
self.evaluation_nr = 0
def step(self):
parents = self.parent_selection()
children = self.recombination(parents)
self.survivor_selection(children)
def run(self):
self.init_run()
while self.stop_condition():
self.step()
self.record_of_all_fitnesses_each_generation.append(np.ndarray.tolist(self.survived_fitnesses))
#save a record of all fitnesses of all individuals in all generations to a pickle file
pickle_out = open('task_1_GA_' + sys.argv[1] + '/fitness_record_GA_enemy'+sys.argv[1]+'_run'+sys.argv[2]+'.pickle', 'wb')
pickle.dump(self.record_of_all_fitnesses_each_generation, pickle_out)
pickle_out.close()
print('the fitnesses look like\n',self.record_of_all_fitnesses_each_generation)
#save the best solution
fitnesses = self.survived_fitnesses[:,0]
index = np.where(fitnesses == np.amax(fitnesses))[0][0]
fittest_individual = self.survived_population[index]
pickle_out = open('task_1_GA_' + sys.argv[1] + '/best_solution_GA_enemy'+ sys.argv[1]+'_run'+sys.argv[2]+'.pickle', 'wb')
pickle.dump(fittest_individual, pickle_out)
pickle_out.close()
self.plot_fitness()
return fittest_individual
# perform a tournament to choose the parents that reproduce
def tournament(self, population_fitness, population):
# match up individuals for tournament
reproductive_individuals = []
random.shuffle(self.integer_list) # randomize the integer_list to determine the tournament opponents
for tournament_number in range(int(self.population_size/self.tournament_size)):
fitnesses_tournament = []
for individual_nr in range(self.tournament_size):
shuffled_population_position = tournament_number*self.tournament_size + individual_nr
fitnesses_tournament.append(population_fitness[self.integer_list[shuffled_population_position]][self.selection_fitness_score])
#select winner of tournament
#store population position of winner
fittest_tournee = fitnesses_tournament.index(max(fitnesses_tournament))
reproductive_individuals.append(population[self.integer_list[tournament_number+fittest_tournee]])
return reproductive_individuals
# select the parents for the next population
def select_parents(self, population_fitness, population):
if self.parent_selection_type == 'tournament':
parents = self.tournament(population_fitness, population)
else:
print('Error: no appropriate parent selection method selected')
return parents
# create the children from the selected parents
def breed(self, parents):
children = []
for breeding_group in range(int(len(parents)/self.parents_per_offspring)):
picked_parents = parents[breeding_group*self.parents_per_offspring:breeding_group*self.parents_per_offspring+self.parents_per_offspring]
for _ in range(self.reproductivity):
unmutated_child = self.crossover(picked_parents)
mutated_child = self.mutate(unmutated_child)
children.append(mutated_child)
return np.asarray(children)
# crossover the parents to create a child
def crossover(self, parents):
# initiate child as list of zeros of the same length as the information contained in a single parent
child = np.zeros(len(parents[0]))
# go through all genes
for gene_nr in range(len(parents[0])):
if self.crossover_weight == 'random':
# make a list of heritability strengths summing to 1
heritabilities = []
devidable_proportion = 1
for parent_nr in range(len(parents)-1):
inheritance = np.random.rand()*devidable_proportion
# give child proportional part of parent value
heritabilities.append(inheritance)
devidable_proportion -= inheritance
heritabilities.append(devidable_proportion)
random.shuffle(heritabilities) # randomize the heritabilities to prevent a parent from dominating the offsrping values
for parent_nr in range(len(parents)):
child[gene_nr] += parents[parent_nr][gene_nr]*heritabilities[parent_nr]
return child
# mutate the genes of the child
def mutate(self, child):
# go through all genes of the child
for gene_nr in range(len(child)):
# mutate of random number is smaller than mutation probability
if np.random.rand() < self.mutation_probability:
# only accept new values if they are in the accepted domain
mutated_allele = self.edge_domain[0] - 1
while not(self.edge_domain[0] < mutated_allele < self.edge_domain[1]):
mutated_allele = child[gene_nr] + np.random.normal(0, 1)
child[gene_nr] = mutated_allele
return child
# select the individuals to continue to the next generation
def live_and_let_die(self, fitnesses, population):
# reduce population to desired population size
survival_scores = []
if self.survival_mechanism == 'weighted probability':
for individual in fitnesses:
# give each individual a survival score based on their fitness and a random number
# add 1 to make sure not most of them are 0
survival_scores.append(np.random.rand()*(individual[self.selection_fitness_score]+1))
elif self.survival_mechanism == 'replace worst':
for individual in fitnesses:
survival_scores.append(individual[self.selection_fitness_score] + 1)
if self.keep_best_solution:
# change the survival score of the fittest individual to the highest
index_topfit = np.argmax(fitnesses[:,self.selection_fitness_score])
survival_scores[index_topfit] = max(survival_scores) + 1
# determine the fitness value of the ordered population of the individual at the population size
ordered_survival_scores = survival_scores[:]
ordered_survival_scores.sort(reverse=True)
survival_threshold = ordered_survival_scores[self.population_size]
individual_nr = 0
# remove individuals with a too low survival score, also removing their fitness and survival score
while self.population_size < len(population):
if survival_scores[individual_nr] <= survival_threshold:
# remove the individuals and fitnesses fo those who died
population = np.delete(population, individual_nr, 0)
fitnesses = np.delete(fitnesses,individual_nr,0)
del survival_scores[individual_nr]
else:
individual_nr += 1
return fitnesses, population
# return the mean, std and max fitness
def save_fitness(self, fitnesses):
# store in colums the mean, std and max of all the 5 fitness measures in rows
fitnesses_statistics = []
for fitness_definition in range(fitnesses.shape[1]):
mean_fitn = np.mean(fitnesses[:,fitness_definition])
std_fitn = np.std(fitnesses[:,fitness_definition])
max_fitn = max(fitnesses[:,fitness_definition])
fitnesses_statistics.append([mean_fitn, std_fitn, max_fitn])
# add a third dimension to be able to add new time points
fitnesses_statistics = np.array(fitnesses_statistics)
fitnesses_statistics = np.transpose(fitnesses_statistics)
fitnesses_statistics = list(fitness | edges = self.env.get_num_sensors() * self.hidden_neurons + 5 * self.hidden_neurons # not sure why this should be the right amount of edges
# set the first fitness type to select on | random_line_split |
GeneticAlgorithm.py | ):
return self.selection_fitness_score != 'STOP' and self.evaluation_nr < self.max_fitness_evaluations
def init_run(self):
# initialize population
# make a list of integers to be able to randomize the order of the population without losing the connectedness of individuals and fitness
self.generate_integers()
# set the amount of edges in the neural network
edges = self.env.get_num_sensors() * self.hidden_neurons + 5 * self.hidden_neurons # not sure why this should be the right amount of edges
# set the first fitness type to select on
self.fitness_type = 0
self.selection_fitness_score = self.fitness_order[self.fitness_type]
# generate an initial population
self.survived_population = np.random.uniform(self.edge_domain[0], self.edge_domain[1], (self.population_size, edges))
# determine and make an array of the fitnesses of the initial population
self.survived_fitnesses = self.determine_fitness(self.survived_population)
# self.survived_fitnesses = np.random.randint(0, 100, size=(100, 5)) # for testing
# make an empty array to store fitness values
#fitness_record = np.array([0,0,0,0,0])
# save the initial fitness mean, std and max
self.fitness_record = self.save_fitness(self.survived_fitnesses)
# save all fitnesses:
#record_of_all_fitnesses_each_generation = [np.ndarray.tolist(self.survived_fitnesses)]
self.evaluation_nr = 0
def step(self):
parents = self.parent_selection()
children = self.recombination(parents)
self.survivor_selection(children)
def run(self):
self.init_run()
while self.stop_condition():
self.step()
self.record_of_all_fitnesses_each_generation.append(np.ndarray.tolist(self.survived_fitnesses))
#save a record of all fitnesses of all individuals in all generations to a pickle file
pickle_out = open('task_1_GA_' + sys.argv[1] + '/fitness_record_GA_enemy'+sys.argv[1]+'_run'+sys.argv[2]+'.pickle', 'wb')
pickle.dump(self.record_of_all_fitnesses_each_generation, pickle_out)
pickle_out.close()
print('the fitnesses look like\n',self.record_of_all_fitnesses_each_generation)
#save the best solution
fitnesses = self.survived_fitnesses[:,0]
index = np.where(fitnesses == np.amax(fitnesses))[0][0]
fittest_individual = self.survived_population[index]
pickle_out = open('task_1_GA_' + sys.argv[1] + '/best_solution_GA_enemy'+ sys.argv[1]+'_run'+sys.argv[2]+'.pickle', 'wb')
pickle.dump(fittest_individual, pickle_out)
pickle_out.close()
self.plot_fitness()
return fittest_individual
# perform a tournament to choose the parents that reproduce
def tournament(self, population_fitness, population):
# match up individuals for tournament
reproductive_individuals = []
random.shuffle(self.integer_list) # randomize the integer_list to determine the tournament opponents
for tournament_number in range(int(self.population_size/self.tournament_size)):
fitnesses_tournament = []
for individual_nr in range(self.tournament_size):
shuffled_population_position = tournament_number*self.tournament_size + individual_nr
fitnesses_tournament.append(population_fitness[self.integer_list[shuffled_population_position]][self.selection_fitness_score])
#select winner of tournament
#store population position of winner
fittest_tournee = fitnesses_tournament.index(max(fitnesses_tournament))
reproductive_individuals.append(population[self.integer_list[tournament_number+fittest_tournee]])
return reproductive_individuals
# select the parents for the next population
def select_parents(self, population_fitness, population):
if self.parent_selection_type == 'tournament':
parents = self.tournament(population_fitness, population)
else:
print('Error: no appropriate parent selection method selected')
return parents
# create the children from the selected parents
def breed(self, parents):
children = []
for breeding_group in range(int(len(parents)/self.parents_per_offspring)):
picked_parents = parents[breeding_group*self.parents_per_offspring:breeding_group*self.parents_per_offspring+self.parents_per_offspring]
for _ in range(self.reproductivity):
unmutated_child = self.crossover(picked_parents)
mutated_child = self.mutate(unmutated_child)
children.append(mutated_child)
return np.asarray(children)
# crossover the parents to create a child
def crossover(self, parents):
# initiate child as list of zeros of the same length as the information contained in a single parent
child = np.zeros(len(parents[0]))
# go through all genes
for gene_nr in range(len(parents[0])):
if self.crossover_weight == 'random':
# make a list of heritability strengths summing to 1
heritabilities = []
devidable_proportion = 1
for parent_nr in range(len(parents)-1):
inheritance = np.random.rand()*devidable_proportion
# give child proportional part of parent value
heritabilities.append(inheritance)
devidable_proportion -= inheritance
heritabilities.append(devidable_proportion)
random.shuffle(heritabilities) # randomize the heritabilities to prevent a parent from dominating the offsrping values
for parent_nr in range(len(parents)):
child[gene_nr] += parents[parent_nr][gene_nr]*heritabilities[parent_nr]
return child
# mutate the genes of the child
def mutate(self, child):
# go through all genes of the child
for gene_nr in range(len(child)):
# mutate of random number is smaller than mutation probability
if np.random.rand() < self.mutation_probability:
# only accept new values if they are in the accepted domain
mutated_allele = self.edge_domain[0] - 1
while not(self.edge_domain[0] < mutated_allele < self.edge_domain[1]):
mutated_allele = child[gene_nr] + np.random.normal(0, 1)
child[gene_nr] = mutated_allele
return child
# select the individuals to continue to the next generation
def live_and_let_die(self, fitnesses, population):
# reduce population to desired population size
survival_scores = []
if self.survival_mechanism == 'weighted probability':
for individual in fitnesses:
# give each individual a survival score based on their fitness and a random number
# add 1 to make sure not most of them are 0
survival_scores.append(np.random.rand()*(individual[self.selection_fitness_score]+1))
elif self.survival_mechanism == 'replace worst':
for individual in fitnesses:
survival_scores.append(individual[self.selection_fitness_score] + 1)
if self.keep_best_solution:
# change the survival score of the fittest individual to the highest
index_topfit = np.argmax(fitnesses[:,self.selection_fitness_score])
survival_scores[index_topfit] = max(survival_scores) + 1
# determine the fitness value of the ordered population of the individual at the population size
ordered_survival_scores = survival_scores[:]
ordered_survival_scores.sort(reverse=True)
survival_threshold = ordered_survival_scores[self.population_size]
individual_nr = 0
# remove individuals with a too low survival score, also removing their fitness and survival score
while self.population_size < len(population):
if survival_scores[individual_nr] <= survival_threshold:
# remove the individuals and fitnesses fo those who died
population = np.delete(population, individual_nr, 0)
fitnesses = np.delete(fitnesses,individual_nr,0)
del survival_scores[individual_nr]
else:
individual_nr += 1
return fitnesses, population
# return the mean, std and max fitness
def save_fitness(self, fitnesses):
# store in colums the mean, std and max of all the 5 fitness measures in rows
fitnesses_statistics = []
for fitness_definition in range(fitnesses.shape[1]):
mean_fitn = np.mean(fitnesses[:,fitness_definition])
std_fitn = np.std(fitnesses[:,fitness_definition])
max_fitn = max(fitnesses[:,fitness_definition])
fitnesses_statistics.append([mean_fitn, std_fitn, max_fitn])
# add a third dimension to be able to add new time points
fitnesses_statistics = np.array(fitnesses_statistics)
fitnesses_statistics = np.transpose(fitnesses_statistics)
fitnesses_statistics = list(fitnesses_statistics)
fitnesses_statistics = [fitnesses_statistics]
fitnesses_statistics = np.array(fitnesses_statistics)
return fitnesses_statistics
def parent_selection(self):
# select the parents
return self.select_parents(self.survived_fitnesses, self.survived_population)
def recombination(self, parents):
# make the children
children = self.breed(parents)
# evaluate the performance of the children
self.fitness_children = self.determine_fitness(children)
return children
def mutation(self, children):
return children
def | survivor_selection | identifier_name |
|
test_error_reporting.py | in Python 3 return repr()
us = u'\xfc' # bytes(us) fails; str(us) fails in Python 2
be = Exception(bs) # unicode(be) fails
ue = Exception(us) # bytes(ue) fails, str(ue) fails in Python 2;
# unicode(ue) fails in Python < 2.6 (issue2517_)
# .. _issue2517: http://bugs.python.org/issue2517
# wrapped test data:
wbs = SafeString(bs)
wus = SafeString(us)
wbe = SafeString(be)
wue = SafeString(ue)
def test_7bit(self):
# wrapping (not required with 7-bit chars) must not change the
# result of conversions:
bs7 = b('foo')
us7 = u'foo'
be7 = Exception(bs7)
ue7 = Exception(us7)
self.assertEqual(str(42), str(SafeString(42)))
self.assertEqual(str(bs7), str(SafeString(bs7)))
self.assertEqual(str(us7), str(SafeString(us7)))
self.assertEqual(str(be7), str(SafeString(be7)))
self.assertEqual(str(ue7), str(SafeString(ue7)))
self.assertEqual(unicode(7), unicode(SafeString(7)))
self.assertEqual(unicode(bs7), unicode(SafeString(bs7)))
self.assertEqual(unicode(us7), unicode(SafeString(us7)))
self.assertEqual(unicode(be7), unicode(SafeString(be7)))
self.assertEqual(unicode(ue7), unicode(SafeString(ue7)))
def test_ustr(self):
"""Test conversion to a unicode-string."""
# unicode(self.bs) fails
self.assertEqual(unicode, type(unicode(self.wbs)))
self.assertEqual(unicode(self.us), unicode(self.wus))
# unicode(self.be) fails
self.assertEqual(unicode, type(unicode(self.wbe)))
# unicode(ue) fails in Python < 2.6 (issue2517_)
self.assertEqual(unicode, type(unicode(self.wue)))
self.assertEqual(self.us, unicode(self.wue))
def test_str(self):
"""Test conversion to a string (bytes in Python 2, unicode in Python 3)."""
self.assertEqual(str(self.bs), str(self.wbs))
self.assertEqual(str(self.be), str(self.be))
# str(us) fails in Python 2
self.assertEqual(str, type(str(self.wus)))
# str(ue) fails in Python 2
self.assertEqual(str, type(str(self.wue)))
class ErrorStringTests(unittest.TestCase):
bs = b('\xfc') # unicode(bs) fails, str(bs) in Python 3 return repr()
us = u'\xfc' # bytes(us) fails; str(us) fails in Python 2
def test_str(self):
self.assertEqual('Exception: spam',
str(ErrorString(Exception('spam'))))
self.assertEqual('IndexError: '+str(self.bs),
str(ErrorString(IndexError(self.bs))))
self.assertEqual('ImportError: %s' % SafeString(self.us),
str(ErrorString(ImportError(self.us))))
def test_unicode(self):
self.assertEqual(u'Exception: spam',
unicode(ErrorString(Exception(u'spam'))))
self.assertEqual(u'IndexError: '+self.us,
unicode(ErrorString(IndexError(self.us))))
self.assertEqual(u'ImportError: %s' % SafeString(self.bs),
unicode(ErrorString(ImportError(self.bs))))
# ErrorOutput tests
# -----------------
# Stub: Buffer with 'strict' auto-conversion of input to byte string:
class BBuf(BytesIO, object): # super class object required by Python <= 2.5
def write(self, data):
if isinstance(data, unicode):
data.encode('ascii', 'strict')
super(BBuf, self).write(data)
# Stub: Buffer expecting unicode string:
class UBuf(StringIO, object): # super class object required by Python <= 2.5
def write(self, data):
# emulate Python 3 handling of stdout, stderr
if isinstance(data, bytes):
raise TypeError('must be unicode, not bytes')
super(UBuf, self).write(data)
class ErrorOutputTests(unittest.TestCase):
def test_defaults(self):
e = ErrorOutput()
self.assertEqual(e.stream, sys.stderr)
def test_bbuf(self):
buf = BBuf() # buffer storing byte string
e = ErrorOutput(buf, encoding='ascii')
# write byte-string as-is
e.write(b('b\xfc'))
self.assertEqual(buf.getvalue(), b('b\xfc'))
# encode unicode data with backslashescape fallback replacement:
e.write(u' u\xfc')
self.assertEqual(buf.getvalue(), b('b\xfc u\\xfc'))
# handle Exceptions with Unicode string args
# unicode(Exception(u'e\xfc')) # fails in Python < 2.6
e.write(AttributeError(u' e\xfc'))
self.assertEqual(buf.getvalue(), b('b\xfc u\\xfc e\\xfc'))
# encode with `encoding` attribute
e.encoding = 'utf8'
e.write(u' u\xfc')
self.assertEqual(buf.getvalue(), b('b\xfc u\\xfc e\\xfc u\xc3\xbc'))
def test_ubuf(self):
buf = UBuf() # buffer only accepting unicode string
# decode of binary strings
e = ErrorOutput(buf, encoding='ascii')
e.write(b('b\xfc'))
self.assertEqual(buf.getvalue(), u'b\ufffd') # use REPLACEMENT CHARACTER
# write Unicode string and Exceptions with Unicode args
e.write(u' u\xfc')
self.assertEqual(buf.getvalue(), u'b\ufffd u\xfc')
e.write(AttributeError(u' e\xfc'))
self.assertEqual(buf.getvalue(), u'b\ufffd u\xfc e\xfc')
# decode with `encoding` attribute
e.encoding = 'latin1'
e.write(b(' b\xfc'))
self.assertEqual(buf.getvalue(), u'b\ufffd u\xfc e\xfc b\xfc')
class SafeStringTests_locale(unittest.TestCase):
"""
Test docutils.SafeString with 'problematic' locales.
The error message in `EnvironmentError` instances comes from the OS
and in some locales (e.g. ru_RU), contains high bit chars.
"""
if testlocale:
locale.setlocale(locale.LC_ALL, testlocale)
# test data:
bs = b('\xfc')
us = u'\xfc'
try:
open(b('\xfc'))
except IOError, e: # in Python 3 the name for the exception instance
bioe = e # is local to the except clause
try:
open(u'\xfc')
except IOError, e:
uioe = e
except UnicodeEncodeError:
try:
open(u'\xfc'.encode(sys.getfilesystemencoding(), 'replace'))
except IOError, e:
uioe = e
try:
os.chdir(b('\xfc'))
except OSError, e:
bose = e
try:
os.chdir(u'\xfc')
except OSError, e:
uose = e
except UnicodeEncodeError:
try:
os.chdir(u'\xfc'.encode(sys.getfilesystemencoding(), 'replace'))
except OSError, e:
uose = e
# wrapped test data:
wbioe = SafeString(bioe)
wuioe = SafeString(uioe)
wbose = SafeString(bose)
wuose = SafeString(uose)
# reset locale
if testlocale:
locale.setlocale(locale.LC_ALL, oldlocale)
def test_ustr(self):
"""Test conversion to a unicode-string."""
# unicode(bioe) fails with e.g. 'ru_RU.utf8' locale
self.assertEqual(unicode, type(unicode(self.wbioe)))
self.assertEqual(unicode, type(unicode(self.wuioe)))
self.assertEqual(unicode, type(unicode(self.wbose)))
self.assertEqual(unicode, type(unicode(self.wuose)))
def test_str(self):
"""Test conversion to a string (bytes in Python 2, unicode in Python 3)."""
self.assertEqual(str(self.bioe), str(self.wbioe))
self.assertEqual(str(self.uioe), str(self.wuioe))
self.assertEqual(str(self.bose), str(self.wbose))
self.assertEqual(str(self.uose), str(self.wuose))
class ErrorReportingTests(unittest.TestCase):
"""
Test cases where error reporting can go wrong.
Do not test the exact output (as this varies with the locale), just
ensure that the correct exception is thrown.
"""
# These tests fail with a 'problematic locale',
# Docutils revision < 7035, and Python 2:
parser = parsers.rst.Parser()
"""Parser shared by all ParserTestCases."""
option_parser = frontend.OptionParser(components=(parsers.rst.Parser,))
settings = option_parser.get_default_values()
settings.report_level = 1
settings.halt_level = 1
settings.warning_stream = ''
document = utils.new_document('test data', settings)
def setUp(self):
if testlocale:
locale.setlocale(locale.LC_ALL, testlocale)
def tearDown(self):
if testlocale:
l | ocale.setlocale(locale.LC_ALL, oldlocale)
| conditional_block |
|
test_error_reporting.py | from io import StringIO, BytesIO
except ImportError: # new in Python 2.6
from StringIO import StringIO
BytesIO = StringIO
import DocutilsTestSupport # must be imported before docutils
from docutils import core, parsers, frontend, utils
from docutils.utils.error_reporting import SafeString, ErrorString, ErrorOutput
from docutils._compat import b, bytes
oldlocale = None
if sys.version_info < (3,0): # problems solved in py3k
try:
import locale # module missing in Jython
oldlocale = locale.getlocale()
# Why does getlocale return the defaultlocale in Python 3.2 ????
# oldlocale = (None, None) # test suite runs without locale
except ImportError:
print ('cannot test error reporting with problematic locales,\n'
'`import locale` failed.')
# locales confirmed to use non-ASCII chars in the IOError message
# for a missing file (https://bugs.gentoo.org/show_bug.cgi?id=349101)
# TODO: add more confirmed problematic locales
problematic_locales = ['cs_CZ', 'cs_CZ.UTF8',
'el_GR', 'el_GR.UTF-8',
# 'fr_FR.UTF-8', # only OSError
'ja_JP.UTF-8',
'ru_RU', 'ru_RU.KOI8-R',
'ru_RU.UTF-8',
'', # default locale: might be non-problematic
]
if oldlocale is not None:
# find a supported problematic locale:
for testlocale in problematic_locales:
try:
locale.setlocale(locale.LC_ALL, testlocale)
except locale.Error:
testlocale = None
else:
break
locale.setlocale(locale.LC_ALL, oldlocale) # reset
else:
testlocale = None
class SafeStringTests(unittest.TestCase):
# the error message in EnvironmentError instances comes from the OS
# and in some locales (e.g. ru_RU), contains high bit chars.
# -> see the test in test_error_reporting.py
# test data:
bs = b('\xfc') # unicode(bs) fails, str(bs) in Python 3 return repr()
us = u'\xfc' # bytes(us) fails; str(us) fails in Python 2
be = Exception(bs) # unicode(be) fails
ue = Exception(us) # bytes(ue) fails, str(ue) fails in Python 2;
# unicode(ue) fails in Python < 2.6 (issue2517_)
# .. _issue2517: http://bugs.python.org/issue2517
# wrapped test data:
wbs = SafeString(bs)
wus = SafeString(us)
wbe = SafeString(be)
wue = SafeString(ue)
def test_7bit(self):
# wrapping (not required with 7-bit chars) must not change the
# result of conversions:
bs7 = b('foo')
us7 = u'foo'
be7 = Exception(bs7)
ue7 = Exception(us7)
self.assertEqual(str(42), str(SafeString(42)))
self.assertEqual(str(bs7), str(SafeString(bs7)))
self.assertEqual(str(us7), str(SafeString(us7)))
self.assertEqual(str(be7), str(SafeString(be7)))
self.assertEqual(str(ue7), str(SafeString(ue7)))
self.assertEqual(unicode(7), unicode(SafeString(7)))
self.assertEqual(unicode(bs7), unicode(SafeString(bs7)))
self.assertEqual(unicode(us7), unicode(SafeString(us7)))
self.assertEqual(unicode(be7), unicode(SafeString(be7)))
self.assertEqual(unicode(ue7), unicode(SafeString(ue7)))
def test_ustr(self):
"""Test conversion to a unicode-string."""
# unicode(self.bs) fails
self.assertEqual(unicode, type(unicode(self.wbs)))
self.assertEqual(unicode(self.us), unicode(self.wus))
# unicode(self.be) fails
self.assertEqual(unicode, type(unicode(self.wbe)))
# unicode(ue) fails in Python < 2.6 (issue2517_)
self.assertEqual(unicode, type(unicode(self.wue)))
self.assertEqual(self.us, unicode(self.wue))
def test_str(self):
"""Test conversion to a string (bytes in Python 2, unicode in Python 3)."""
self.assertEqual(str(self.bs), str(self.wbs))
self.assertEqual(str(self.be), str(self.be))
# str(us) fails in Python 2
self.assertEqual(str, type(str(self.wus)))
# str(ue) fails in Python 2
self.assertEqual(str, type(str(self.wue)))
class ErrorStringTests(unittest.TestCase):
bs = b('\xfc') # unicode(bs) fails, str(bs) in Python 3 return repr()
us = u'\xfc' # bytes(us) fails; str(us) fails in Python 2
def test_str(self):
self.assertEqual('Exception: spam',
str(ErrorString(Exception('spam'))))
self.assertEqual('IndexError: '+str(self.bs),
str(ErrorString(IndexError(self.bs))))
self.assertEqual('ImportError: %s' % SafeString(self.us),
str(ErrorString(ImportError(self.us))))
def test_unicode(self):
self.assertEqual(u'Exception: spam',
unicode(ErrorString(Exception(u'spam'))))
self.assertEqual(u'IndexError: '+self.us,
unicode(ErrorString(IndexError(self.us))))
self.assertEqual(u'ImportError: %s' % SafeString(self.bs),
unicode(ErrorString(ImportError(self.bs))))
# ErrorOutput tests
# -----------------
# Stub: Buffer with 'strict' auto-conversion of input to byte string:
class BBuf(BytesIO, object): # super class object required by Python <= 2.5
def write(self, data):
if isinstance(data, unicode):
data.encode('ascii', 'strict')
super(BBuf, self).write(data)
# Stub: Buffer expecting unicode string:
class UBuf(StringIO, object): # super class object required by Python <= 2.5
def write(self, data):
# emulate Python 3 handling of stdout, stderr
if isinstance(data, bytes):
raise TypeError('must be unicode, not bytes')
super(UBuf, self).write(data)
class ErrorOutputTests(unittest.TestCase):
def test_defaults(self):
e = ErrorOutput()
self.assertEqual(e.stream, sys.stderr)
def test_bbuf(self):
buf = BBuf() # buffer storing byte string
e = ErrorOutput(buf, encoding='ascii')
# write byte-string as-is
e.write(b('b\xfc'))
self.assertEqual(buf.getvalue(), b('b\xfc'))
# encode unicode data with backslashescape fallback replacement:
e.write(u' u\xfc')
self.assertEqual(buf.getvalue(), b('b\xfc u\\xfc'))
# handle Exceptions with Unicode string args
# unicode(Exception(u'e\xfc')) # fails in Python < 2.6
e.write(AttributeError(u' e\xfc'))
self.assertEqual(buf.getvalue(), b('b\xfc u\\xfc e\\xfc'))
# encode with `encoding` attribute
e.encoding = 'utf8'
e.write(u' u\xfc')
self.assertEqual(buf.getvalue(), b('b\xfc u\\xfc e\\xfc u\xc3\xbc'))
def test_ubuf(self):
buf = UBuf() # buffer only accepting unicode string
# decode of binary strings
e = ErrorOutput(buf, encoding='ascii')
e.write(b('b\xfc'))
self.assertEqual(buf.getvalue(), u'b\ufffd') # use REPLACEMENT CHARACTER
# write Unicode string and Exceptions with Unicode args
e.write(u' u\xfc')
self.assertEqual(buf.getvalue(), u'b\ufffd u\xfc')
e.write(AttributeError(u' e\xfc'))
self.assertEqual(buf.getvalue(), u'b\ufffd u\xfc e\xfc')
# decode with `encoding` attribute
e.encoding = 'latin1'
e.write(b(' b\xfc'))
self.assertEqual(buf.getvalue(), u'b\ufffd u\xfc e\xfc b\xfc')
class SafeStringTests_locale(unittest.TestCase):
"""
Test docutils.SafeString with 'problematic' locales.
The error message in `EnvironmentError` instances comes from the OS
and in some locales (e.g. ru_RU), contains high bit chars.
"""
if testlocale:
locale.setlocale(locale.LC_ALL, testlocale)
# test data:
bs = b('\xfc')
us = u'\xfc'
try:
open(b('\xfc'))
except IOError, e: # in Python 3 the name for the exception instance
bioe = e # is local to the except clause
try:
open(u'\xfc')
except IOError, e:
uioe = e
except UnicodeEncodeError:
try:
open(u'\xfc'.encode(sys.getfilesystemencoding(), 'replace'))
except IOError, e:
uioe = e
try:
os.chdir(b('\xfc'))
except OSError, e:
bose = e
|
import unittest
import sys, os
import codecs
try: # from standard library module `io` | random_line_split |
|
test_error_reporting.py |
BytesIO = StringIO
import DocutilsTestSupport # must be imported before docutils
from docutils import core, parsers, frontend, utils
from docutils.utils.error_reporting import SafeString, ErrorString, ErrorOutput
from docutils._compat import b, bytes
oldlocale = None
if sys.version_info < (3,0): # problems solved in py3k
try:
import locale # module missing in Jython
oldlocale = locale.getlocale()
# Why does getlocale return the defaultlocale in Python 3.2 ????
# oldlocale = (None, None) # test suite runs without locale
except ImportError:
print ('cannot test error reporting with problematic locales,\n'
'`import locale` failed.')
# locales confirmed to use non-ASCII chars in the IOError message
# for a missing file (https://bugs.gentoo.org/show_bug.cgi?id=349101)
# TODO: add more confirmed problematic locales
problematic_locales = ['cs_CZ', 'cs_CZ.UTF8',
'el_GR', 'el_GR.UTF-8',
# 'fr_FR.UTF-8', # only OSError
'ja_JP.UTF-8',
'ru_RU', 'ru_RU.KOI8-R',
'ru_RU.UTF-8',
'', # default locale: might be non-problematic
]
if oldlocale is not None:
# find a supported problematic locale:
for testlocale in problematic_locales:
try:
locale.setlocale(locale.LC_ALL, testlocale)
except locale.Error:
testlocale = None
else:
break
locale.setlocale(locale.LC_ALL, oldlocale) # reset
else:
testlocale = None
class SafeStringTests(unittest.TestCase):
# the error message in EnvironmentError instances comes from the OS
# and in some locales (e.g. ru_RU), contains high bit chars.
# -> see the test in test_error_reporting.py
# test data:
bs = b('\xfc') # unicode(bs) fails, str(bs) in Python 3 return repr()
us = u'\xfc' # bytes(us) fails; str(us) fails in Python 2
be = Exception(bs) # unicode(be) fails
ue = Exception(us) # bytes(ue) fails, str(ue) fails in Python 2;
# unicode(ue) fails in Python < 2.6 (issue2517_)
# .. _issue2517: http://bugs.python.org/issue2517
# wrapped test data:
wbs = SafeString(bs)
wus = SafeString(us)
wbe = SafeString(be)
wue = SafeString(ue)
def test_7bit(self):
# wrapping (not required with 7-bit chars) must not change the
# result of conversions:
bs7 = b('foo')
us7 = u'foo'
be7 = Exception(bs7)
ue7 = Exception(us7)
self.assertEqual(str(42), str(SafeString(42)))
self.assertEqual(str(bs7), str(SafeString(bs7)))
self.assertEqual(str(us7), str(SafeString(us7)))
self.assertEqual(str(be7), str(SafeString(be7)))
self.assertEqual(str(ue7), str(SafeString(ue7)))
self.assertEqual(unicode(7), unicode(SafeString(7)))
self.assertEqual(unicode(bs7), unicode(SafeString(bs7)))
self.assertEqual(unicode(us7), unicode(SafeString(us7)))
self.assertEqual(unicode(be7), unicode(SafeString(be7)))
self.assertEqual(unicode(ue7), unicode(SafeString(ue7)))
def test_ustr(self):
"""Test conversion to a unicode-string."""
# unicode(self.bs) fails
self.assertEqual(unicode, type(unicode(self.wbs)))
self.assertEqual(unicode(self.us), unicode(self.wus))
# unicode(self.be) fails
self.assertEqual(unicode, type(unicode(self.wbe)))
# unicode(ue) fails in Python < 2.6 (issue2517_)
self.assertEqual(unicode, type(unicode(self.wue)))
self.assertEqual(self.us, unicode(self.wue))
def test_str(self):
"""Test conversion to a string (bytes in Python 2, unicode in Python 3)."""
self.assertEqual(str(self.bs), str(self.wbs))
self.assertEqual(str(self.be), str(self.be))
# str(us) fails in Python 2
self.assertEqual(str, type(str(self.wus)))
# str(ue) fails in Python 2
self.assertEqual(str, type(str(self.wue)))
class ErrorStringTests(unittest.TestCase):
bs = b('\xfc') # unicode(bs) fails, str(bs) in Python 3 return repr()
us = u'\xfc' # bytes(us) fails; str(us) fails in Python 2
def test_str(self):
self.assertEqual('Exception: spam',
str(ErrorString(Exception('spam'))))
self.assertEqual('IndexError: '+str(self.bs),
str(ErrorString(IndexError(self.bs))))
self.assertEqual('ImportError: %s' % SafeString(self.us),
str(ErrorString(ImportError(self.us))))
def test_unicode(self):
self.assertEqual(u'Exception: spam',
unicode(ErrorString(Exception(u'spam'))))
self.assertEqual(u'IndexError: '+self.us,
unicode(ErrorString(IndexError(self.us))))
self.assertEqual(u'ImportError: %s' % SafeString(self.bs),
unicode(ErrorString(ImportError(self.bs))))
# ErrorOutput tests
# -----------------
# Stub: Buffer with 'strict' auto-conversion of input to byte string:
class BBuf(BytesIO, object): # super class object required by Python <= 2.5
def write(self, data):
if isinstance(data, unicode):
data.encode('ascii', 'strict')
super(BBuf, self).write(data)
# Stub: Buffer expecting unicode string:
class UBuf(StringIO, object): # super class object required by Python <= 2.5
d |
class ErrorOutputTests(unittest.TestCase):
def test_defaults(self):
e = ErrorOutput()
self.assertEqual(e.stream, sys.stderr)
def test_bbuf(self):
buf = BBuf() # buffer storing byte string
e = ErrorOutput(buf, encoding='ascii')
# write byte-string as-is
e.write(b('b\xfc'))
self.assertEqual(buf.getvalue(), b('b\xfc'))
# encode unicode data with backslashescape fallback replacement:
e.write(u' u\xfc')
self.assertEqual(buf.getvalue(), b('b\xfc u\\xfc'))
# handle Exceptions with Unicode string args
# unicode(Exception(u'e\xfc')) # fails in Python < 2.6
e.write(AttributeError(u' e\xfc'))
self.assertEqual(buf.getvalue(), b('b\xfc u\\xfc e\\xfc'))
# encode with `encoding` attribute
e.encoding = 'utf8'
e.write(u' u\xfc')
self.assertEqual(buf.getvalue(), b('b\xfc u\\xfc e\\xfc u\xc3\xbc'))
def test_ubuf(self):
buf = UBuf() # buffer only accepting unicode string
# decode of binary strings
e = ErrorOutput(buf, encoding='ascii')
e.write(b('b\xfc'))
self.assertEqual(buf.getvalue(), u'b\ufffd') # use REPLACEMENT CHARACTER
# write Unicode string and Exceptions with Unicode args
e.write(u' u\xfc')
self.assertEqual(buf.getvalue(), u'b\ufffd u\xfc')
e.write(AttributeError(u' e\xfc'))
self.assertEqual(buf.getvalue(), u'b\ufffd u\xfc e\xfc')
# decode with `encoding` attribute
e.encoding = 'latin1'
e.write(b(' b\xfc'))
self.assertEqual(buf.getvalue(), u'b\ufffd u\xfc e\xfc b\xfc')
class SafeStringTests_locale(unittest.TestCase):
"""
Test docutils.SafeString with 'problematic' locales.
The error message in `EnvironmentError` instances comes from the OS
and in some locales (e.g. ru_RU), contains high bit chars.
"""
if testlocale:
locale.setlocale(locale.LC_ALL, testlocale)
# test data:
bs = b('\xfc')
us = u'\xfc'
try:
open(b('\xfc'))
except IOError, e: # in Python 3 the name for the exception instance
bioe = e # is local to the except clause
try:
open(u'\xfc')
except IOError, e:
uioe = e
except UnicodeEncodeError:
try:
open(u'\xfc'.encode(sys.getfilesystemencoding(), 'replace'))
except IOError, e:
uioe = e
try:
os.chdir(b('\xfc'))
except OSError, e:
bose = e
try:
os.chdir(u'\xfc')
except OSError, e:
uose = e
except UnicodeEncodeError:
try:
os.chdir(u'\xfc'.encode(sys.getfilesystemencoding(), 'replace'))
| ef write(self, data):
# emulate Python 3 handling of stdout, stderr
if isinstance(data, bytes):
raise TypeError('must be unicode, not bytes')
super(UBuf, self).write(data)
| identifier_body |
test_error_reporting.py |
BytesIO = StringIO
import DocutilsTestSupport # must be imported before docutils
from docutils import core, parsers, frontend, utils
from docutils.utils.error_reporting import SafeString, ErrorString, ErrorOutput
from docutils._compat import b, bytes
oldlocale = None
if sys.version_info < (3,0): # problems solved in py3k
try:
import locale # module missing in Jython
oldlocale = locale.getlocale()
# Why does getlocale return the defaultlocale in Python 3.2 ????
# oldlocale = (None, None) # test suite runs without locale
except ImportError:
print ('cannot test error reporting with problematic locales,\n'
'`import locale` failed.')
# locales confirmed to use non-ASCII chars in the IOError message
# for a missing file (https://bugs.gentoo.org/show_bug.cgi?id=349101)
# TODO: add more confirmed problematic locales
problematic_locales = ['cs_CZ', 'cs_CZ.UTF8',
'el_GR', 'el_GR.UTF-8',
# 'fr_FR.UTF-8', # only OSError
'ja_JP.UTF-8',
'ru_RU', 'ru_RU.KOI8-R',
'ru_RU.UTF-8',
'', # default locale: might be non-problematic
]
if oldlocale is not None:
# find a supported problematic locale:
for testlocale in problematic_locales:
try:
locale.setlocale(locale.LC_ALL, testlocale)
except locale.Error:
testlocale = None
else:
break
locale.setlocale(locale.LC_ALL, oldlocale) # reset
else:
testlocale = None
class SafeStringTests(unittest.TestCase):
# the error message in EnvironmentError instances comes from the OS
# and in some locales (e.g. ru_RU), contains high bit chars.
# -> see the test in test_error_reporting.py
# test data:
bs = b('\xfc') # unicode(bs) fails, str(bs) in Python 3 return repr()
us = u'\xfc' # bytes(us) fails; str(us) fails in Python 2
be = Exception(bs) # unicode(be) fails
ue = Exception(us) # bytes(ue) fails, str(ue) fails in Python 2;
# unicode(ue) fails in Python < 2.6 (issue2517_)
# .. _issue2517: http://bugs.python.org/issue2517
# wrapped test data:
wbs = SafeString(bs)
wus = SafeString(us)
wbe = SafeString(be)
wue = SafeString(ue)
def test_7bit(self):
# wrapping (not required with 7-bit chars) must not change the
# result of conversions:
bs7 = b('foo')
us7 = u'foo'
be7 = Exception(bs7)
ue7 = Exception(us7)
self.assertEqual(str(42), str(SafeString(42)))
self.assertEqual(str(bs7), str(SafeString(bs7)))
self.assertEqual(str(us7), str(SafeString(us7)))
self.assertEqual(str(be7), str(SafeString(be7)))
self.assertEqual(str(ue7), str(SafeString(ue7)))
self.assertEqual(unicode(7), unicode(SafeString(7)))
self.assertEqual(unicode(bs7), unicode(SafeString(bs7)))
self.assertEqual(unicode(us7), unicode(SafeString(us7)))
self.assertEqual(unicode(be7), unicode(SafeString(be7)))
self.assertEqual(unicode(ue7), unicode(SafeString(ue7)))
def test_ustr(self):
"""Test conversion to a unicode-string."""
# unicode(self.bs) fails
self.assertEqual(unicode, type(unicode(self.wbs)))
self.assertEqual(unicode(self.us), unicode(self.wus))
# unicode(self.be) fails
self.assertEqual(unicode, type(unicode(self.wbe)))
# unicode(ue) fails in Python < 2.6 (issue2517_)
self.assertEqual(unicode, type(unicode(self.wue)))
self.assertEqual(self.us, unicode(self.wue))
def test_str(self):
"""Test conversion to a string (bytes in Python 2, unicode in Python 3)."""
self.assertEqual(str(self.bs), str(self.wbs))
self.assertEqual(str(self.be), str(self.be))
# str(us) fails in Python 2
self.assertEqual(str, type(str(self.wus)))
# str(ue) fails in Python 2
self.assertEqual(str, type(str(self.wue)))
class ErrorStringTests(unittest.TestCase):
bs = b('\xfc') # unicode(bs) fails, str(bs) in Python 3 return repr()
us = u'\xfc' # bytes(us) fails; str(us) fails in Python 2
def test_str(self):
self.assertEqual('Exception: spam',
str(ErrorString(Exception('spam'))))
self.assertEqual('IndexError: '+str(self.bs),
str(ErrorString(IndexError(self.bs))))
self.assertEqual('ImportError: %s' % SafeString(self.us),
str(ErrorString(ImportError(self.us))))
def test_unicode(self):
self.assertEqual(u'Exception: spam',
unicode(ErrorString(Exception(u'spam'))))
self.assertEqual(u'IndexError: '+self.us,
unicode(ErrorString(IndexError(self.us))))
self.assertEqual(u'ImportError: %s' % SafeString(self.bs),
unicode(ErrorString(ImportError(self.bs))))
# ErrorOutput tests
# -----------------
# Stub: Buffer with 'strict' auto-conversion of input to byte string:
class BBuf(BytesIO, object): # super class object required by Python <= 2.5
def w | self, data):
if isinstance(data, unicode):
data.encode('ascii', 'strict')
super(BBuf, self).write(data)
# Stub: Buffer expecting unicode string:
class UBuf(StringIO, object): # super class object required by Python <= 2.5
def write(self, data):
# emulate Python 3 handling of stdout, stderr
if isinstance(data, bytes):
raise TypeError('must be unicode, not bytes')
super(UBuf, self).write(data)
class ErrorOutputTests(unittest.TestCase):
def test_defaults(self):
e = ErrorOutput()
self.assertEqual(e.stream, sys.stderr)
def test_bbuf(self):
buf = BBuf() # buffer storing byte string
e = ErrorOutput(buf, encoding='ascii')
# write byte-string as-is
e.write(b('b\xfc'))
self.assertEqual(buf.getvalue(), b('b\xfc'))
# encode unicode data with backslashescape fallback replacement:
e.write(u' u\xfc')
self.assertEqual(buf.getvalue(), b('b\xfc u\\xfc'))
# handle Exceptions with Unicode string args
# unicode(Exception(u'e\xfc')) # fails in Python < 2.6
e.write(AttributeError(u' e\xfc'))
self.assertEqual(buf.getvalue(), b('b\xfc u\\xfc e\\xfc'))
# encode with `encoding` attribute
e.encoding = 'utf8'
e.write(u' u\xfc')
self.assertEqual(buf.getvalue(), b('b\xfc u\\xfc e\\xfc u\xc3\xbc'))
def test_ubuf(self):
buf = UBuf() # buffer only accepting unicode string
# decode of binary strings
e = ErrorOutput(buf, encoding='ascii')
e.write(b('b\xfc'))
self.assertEqual(buf.getvalue(), u'b\ufffd') # use REPLACEMENT CHARACTER
# write Unicode string and Exceptions with Unicode args
e.write(u' u\xfc')
self.assertEqual(buf.getvalue(), u'b\ufffd u\xfc')
e.write(AttributeError(u' e\xfc'))
self.assertEqual(buf.getvalue(), u'b\ufffd u\xfc e\xfc')
# decode with `encoding` attribute
e.encoding = 'latin1'
e.write(b(' b\xfc'))
self.assertEqual(buf.getvalue(), u'b\ufffd u\xfc e\xfc b\xfc')
class SafeStringTests_locale(unittest.TestCase):
"""
Test docutils.SafeString with 'problematic' locales.
The error message in `EnvironmentError` instances comes from the OS
and in some locales (e.g. ru_RU), contains high bit chars.
"""
if testlocale:
locale.setlocale(locale.LC_ALL, testlocale)
# test data:
bs = b('\xfc')
us = u'\xfc'
try:
open(b('\xfc'))
except IOError, e: # in Python 3 the name for the exception instance
bioe = e # is local to the except clause
try:
open(u'\xfc')
except IOError, e:
uioe = e
except UnicodeEncodeError:
try:
open(u'\xfc'.encode(sys.getfilesystemencoding(), 'replace'))
except IOError, e:
uioe = e
try:
os.chdir(b('\xfc'))
except OSError, e:
bose = e
try:
os.chdir(u'\xfc')
except OSError, e:
uose = e
except UnicodeEncodeError:
try:
os.chdir(u'\xfc'.encode(sys.getfilesystemencoding(), 'replace'))
| rite( | identifier_name |
clustering.go | .New("missing locker type field")
}
func (a *App) leaderKey() string {
return fmt.Sprintf("gnmic/%s/leader", a.Config.Clustering.ClusterName)
}
func (a *App) inCluster() bool {
if a.Config == nil {
return false
}
return a.Config.Clustering != nil
}
func (a *App) apiServiceRegistration() {
addr, port, _ := net.SplitHostPort(a.Config.APIServer.Address)
p, _ := strconv.Atoi(port)
tags := make([]string, 0, 2+len(a.Config.Clustering.Tags))
tags = append(tags, fmt.Sprintf("cluster-name=%s", a.Config.Clustering.ClusterName))
tags = append(tags, fmt.Sprintf("instance-name=%s", a.Config.Clustering.InstanceName))
if a.Config.APIServer.TLS != nil {
tags = append(tags, "protocol=https")
} else {
tags = append(tags, "protocol=http")
}
tags = append(tags, a.Config.Clustering.Tags...)
serviceReg := &lockers.ServiceRegistration{
ID: a.Config.Clustering.InstanceName + "-api",
Name: fmt.Sprintf("%s-%s", a.Config.Clustering.ClusterName, apiServiceName),
Address: a.Config.Clustering.ServiceAddress,
Port: p,
Tags: tags,
TTL: 5 * time.Second,
}
if serviceReg.Address == "" {
serviceReg.Address = addr
}
var err error
a.Logger.Printf("registering service %+v", serviceReg)
for {
select {
case <-a.ctx.Done():
return
default:
err = a.locker.Register(a.ctx, serviceReg)
if err != nil {
a.Logger.Printf("api service registration failed: %v", err)
time.Sleep(retryTimer)
continue
}
return
}
}
}
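// startCluster runs leader election; the elected instance starts the member
// watcher, the config loader, and the target dispatch loop.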
func (a *App) startCluster() {
if a.locker == nil || a.Config.Clustering == nil {
return
}
// register api service
go a.apiServiceRegistration()
leaderKey := a.leaderKey()
var err error
START:
// acquire leader key lock
for {
a.isLeader = false
err = nil
a.isLeader, err = a.locker.Lock(a.ctx, leaderKey, []byte(a.Config.Clustering.InstanceName))
if err != nil {
a.Logger.Printf("failed to acquire leader lock: %v", err)
time.Sleep(retryTimer)
continue
}
if !a.isLeader {
time.Sleep(retryTimer)
continue
}
a.isLeader = true
a.Logger.Printf("%q became the leader", a.Config.Clustering.InstanceName)
break
}
ctx, cancel := context.WithCancel(a.ctx)
defer cancel()
go func() {
go a.watchMembers(ctx)
a.Logger.Printf("leader waiting %s before dispatching targets", a.Config.Clustering.LeaderWaitTimer)
time.Sleep(a.Config.Clustering.LeaderWaitTimer)
a.Logger.Printf("leader done waiting, starting loader and dispatching targets")
go a.startLoader(ctx)
go a.dispatchTargets(ctx)
}()
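// Hold the leader key; if it is lost or cannot be renewed, demote this
// instance and go back to the election loop.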
doneCh, errCh := a.locker.KeepLock(a.ctx, leaderKey)
select {
case <-doneCh:
a.Logger.Printf("%q lost leader role", a.Config.Clustering.InstanceName)
cancel()
a.isLeader = false
goto START
case err := <-errCh:
a.Logger.Printf("%q failed to maintain the leader key: %v", a.Config.Clustering.InstanceName, err)
cancel()
a.isLeader = false
goto START
case <-a.ctx.Done():
return
}
}
func (a *App) watchMembers(ctx context.Context) {
serviceName := fmt.Sprintf("%s-%s", a.Config.Clustering.ClusterName, apiServiceName)
START:
select {
case <-ctx.Done():
return
default:
membersChan := make(chan []*lockers.Service)
go func() {
for {
select {
case <-ctx.Done():
return
case srvs, ok := <-membersChan:
if !ok {
return
}
a.updateServices(srvs)
}
}
}()
err := a.locker.WatchServices(ctx, serviceName, []string{"cluster-name=" + a.Config.Clustering.ClusterName}, membersChan, a.Config.Clustering.ServicesWatchTimer)
if err != nil {
a.Logger.Printf("failed getting services: %v", err)
time.Sleep(retryTimer)
goto START
}
}
}
func (a *App) updateServices(srvs []*lockers.Service) {
a.configLock.Lock()
defer a.configLock.Unlock()
numNewSrv := len(srvs)
numCurrentSrv := len(a.apiServices)
a.Logger.Printf("received service update with %d service(s)", numNewSrv)
// no new services and no current services, continue
if numNewSrv == 0 && numCurrentSrv == 0 {
return
}
// no new services and having some services, delete all
if numNewSrv == 0 && numCurrentSrv != 0 {
a.Logger.Printf("deleting all services")
a.apiServices = make(map[string]*lockers.Service)
return
}
// no current services, add all new services
if numCurrentSrv == 0 {
for _, s := range srvs {
a.Logger.Printf("adding service id %q", s.ID)
a.apiServices[s.ID] = s
}
return
}
//
newSrvs := make(map[string]*lockers.Service)
for _, s := range srvs {
newSrvs[s.ID] = s
}
// delete removed services
for n := range a.apiServices {
if _, ok := newSrvs[n]; !ok {
a.Logger.Printf("deleting service id %q", n)
delete(a.apiServices, n)
}
}
// add new services
for n, s := range newSrvs {
a.Logger.Printf("adding service id %q", n)
a.apiServices[n] = s
}
}
func (a *App) dispatchTargets(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
default:
if len(a.apiServices) == 0 {
a.Logger.Printf("no services found, waiting...")
time.Sleep(a.Config.Clustering.TargetsWatchTimer)
continue
}
var err error
//a.m.RLock()
dctx, cancel := context.WithTimeout(ctx, a.Config.Clustering.TargetsWatchTimer)
for _, tc := range a.Config.Targets {
err = a.dispatchTarget(dctx, tc)
if err != nil {
a.Logger.Printf("failed to dispatch target %q: %v", tc.Name, err)
}
if err == errNotFound {
// no registered services,
// no need to continue with other targets,
// break from the targets loop
break
}
if err == errNoMoreSuitableServices {
// target has no suitable matching services,
// continue to next target without wait
continue
}
}
//a.m.RUnlock()
cancel()
select {
case <-ctx.Done():
return
default:
time.Sleep(a.Config.Clustering.TargetsWatchTimer)
}
}
}
}
func (a *App) dispatchTarget(ctx context.Context, tc *types.TargetConfig) error {
if a.Config.Debug {
a.Logger.Printf("checking if %q is locked", tc.Name)
}
key := fmt.Sprintf("gnmic/%s/targets/%s", a.Config.Clustering.ClusterName, tc.Name)
locked, err := a.locker.IsLocked(ctx, key)
if err != nil {
return err
}
if a.Config.Debug {
a.Logger.Printf("target %q is locked: %v", tc.Name, locked)
}
if locked {
return nil
}
a.Logger.Printf("dispatching target %q", tc.Name)
denied := make([]string, 0)
SELECTSERVICE:
service, err := a.selectService(tc.Tags, denied...)
if err != nil {
return err
}
if service == nil {
goto SELECTSERVICE
}
a.Logger.Printf("selected service %+v", service)
// assign target to selected service
err = a.assignTarget(ctx, tc, service)
if err != nil {
// add service to denied list and reselect
a.Logger.Printf("failed assigning target %q to service %q: %v", tc.Name, service.ID, err)
denied = append(denied, service.ID)
goto SELECTSERVICE
}
// wait for lock to be acquired
instanceName := ""
for _, tag := range service.Tags {
splitTag := strings.Split(tag, "=")
if len(splitTag) == | intf("starting locker type %q", lockerType)
if initializer, ok := lockers.Lockers[lockerType.(string)]; ok {
lock := initializer()
err := lock.Init(a.ctx, a.Config.Clustering.Locker, lockers.WithLogger(a.Logger))
if err != nil {
return err
}
a.locker = lock
return nil
}
return fmt.Errorf("unknown locker type %q", lockerType)
}
return errors | conditional_block |
|
clustering.go | .ctx)
defer cancel()
go func() {
go a.watchMembers(ctx)
a.Logger.Printf("leader waiting %s before dispatching targets", a.Config.Clustering.LeaderWaitTimer)
time.Sleep(a.Config.Clustering.LeaderWaitTimer)
a.Logger.Printf("leader done waiting, starting loader and dispatching targets")
go a.startLoader(ctx)
go a.dispatchTargets(ctx)
}()
doneCh, errCh := a.locker.KeepLock(a.ctx, leaderKey)
select {
case <-doneCh:
a.Logger.Printf("%q lost leader role", a.Config.Clustering.InstanceName)
cancel()
a.isLeader = false
goto START
case err := <-errCh:
a.Logger.Printf("%q failed to maintain the leader key: %v", a.Config.Clustering.InstanceName, err)
cancel()
a.isLeader = false
goto START
case <-a.ctx.Done():
return
}
}
func (a *App) watchMembers(ctx context.Context) {
serviceName := fmt.Sprintf("%s-%s", a.Config.Clustering.ClusterName, apiServiceName)
START:
select {
case <-ctx.Done():
return
default:
membersChan := make(chan []*lockers.Service)
go func() {
for {
select {
case <-ctx.Done():
return
case srvs, ok := <-membersChan:
if !ok {
return
}
a.updateServices(srvs)
}
}
}()
err := a.locker.WatchServices(ctx, serviceName, []string{"cluster-name=" + a.Config.Clustering.ClusterName}, membersChan, a.Config.Clustering.ServicesWatchTimer)
if err != nil {
a.Logger.Printf("failed getting services: %v", err)
time.Sleep(retryTimer)
goto START
}
}
}
func (a *App) updateServices(srvs []*lockers.Service) {
a.configLock.Lock()
defer a.configLock.Unlock()
numNewSrv := len(srvs)
numCurrentSrv := len(a.apiServices)
a.Logger.Printf("received service update with %d service(s)", numNewSrv)
// no new services and no current services, continue
if numNewSrv == 0 && numCurrentSrv == 0 {
return
}
// no new services and having some services, delete all
if numNewSrv == 0 && numCurrentSrv != 0 {
a.Logger.Printf("deleting all services")
a.apiServices = make(map[string]*lockers.Service)
return
}
// no current services, add all new services
if numCurrentSrv == 0 {
for _, s := range srvs {
a.Logger.Printf("adding service id %q", s.ID)
a.apiServices[s.ID] = s
}
return
}
//
newSrvs := make(map[string]*lockers.Service)
for _, s := range srvs {
newSrvs[s.ID] = s
}
// delete removed services
for n := range a.apiServices {
if _, ok := newSrvs[n]; !ok {
a.Logger.Printf("deleting service id %q", n)
delete(a.apiServices, n)
}
}
// add new services
for n, s := range newSrvs {
a.Logger.Printf("adding service id %q", n)
a.apiServices[n] = s
}
}
func (a *App) dispatchTargets(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
default:
if len(a.apiServices) == 0 {
a.Logger.Printf("no services found, waiting...")
time.Sleep(a.Config.Clustering.TargetsWatchTimer)
continue
}
var err error
//a.m.RLock()
dctx, cancel := context.WithTimeout(ctx, a.Config.Clustering.TargetsWatchTimer)
for _, tc := range a.Config.Targets {
err = a.dispatchTarget(dctx, tc)
if err != nil {
a.Logger.Printf("failed to dispatch target %q: %v", tc.Name, err)
}
if err == errNotFound {
// no registered services,
// no need to continue with other targets,
// break from the targets loop
break
}
if err == errNoMoreSuitableServices {
// target has no suitable matching services,
// continue to next target without wait
continue
}
}
//a.m.RUnlock()
cancel()
select {
case <-ctx.Done():
return
default:
time.Sleep(a.Config.Clustering.TargetsWatchTimer)
}
}
}
}
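// dispatchTarget assigns a single target to one cluster member and waits
// until that member has acquired the target's lock.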
func (a *App) dispatchTarget(ctx context.Context, tc *types.TargetConfig) error {
if a.Config.Debug {
a.Logger.Printf("checking if %q is locked", tc.Name)
}
key := fmt.Sprintf("gnmic/%s/targets/%s", a.Config.Clustering.ClusterName, tc.Name)
locked, err := a.locker.IsLocked(ctx, key)
if err != nil {
return err
}
if a.Config.Debug {
a.Logger.Printf("target %q is locked: %v", tc.Name, locked)
}
if locked {
return nil
}
a.Logger.Printf("dispatching target %q", tc.Name)
denied := make([]string, 0)
SELECTSERVICE:
service, err := a.selectService(tc.Tags, denied...)
if err != nil {
return err
}
if service == nil {
goto SELECTSERVICE
}
a.Logger.Printf("selected service %+v", service)
// assign target to selected service
err = a.assignTarget(ctx, tc, service)
if err != nil {
// add service to denied list and reselect
a.Logger.Printf("failed assigning target %q to service %q: %v", tc.Name, service.ID, err)
denied = append(denied, service.ID)
goto SELECTSERVICE
}
// wait for lock to be acquired
instanceName := ""
for _, tag := range service.Tags {
splitTag := strings.Split(tag, "=")
if len(splitTag) == 2 && splitTag[0] == "instance-name" {
instanceName = splitTag[1]
}
}
a.Logger.Printf("[cluster-leader] waiting for lock %q to be acquired by %q", key, instanceName)
retries := 0
WAIT:
values, err := a.locker.List(ctx, key)
if err != nil {
a.Logger.Printf("failed getting value of %q: %v", key, err)
time.Sleep(lockWaitTime)
goto WAIT
}
if len(values) == 0 {
retries++
if (retries+1)*int(lockWaitTime) >= int(a.Config.Clustering.TargetAssignmentTimeout) {
a.Logger.Printf("[cluster-leader] max retries reached for target %q and service %q, reselecting...", tc.Name, service.ID)
err = a.unassignTarget(ctx, tc.Name, service.ID)
if err != nil {
a.Logger.Printf("failed to unassign target %q from %q", tc.Name, service.ID)
}
goto SELECTSERVICE
}
time.Sleep(lockWaitTime)
goto WAIT
}
if instance, ok := values[key]; ok {
if instance == instanceName {
a.Logger.Printf("[cluster-leader] lock %q acquired by %q", key, instanceName)
return nil
}
}
retries++
if (retries+1)*int(lockWaitTime) >= int(a.Config.Clustering.TargetAssignmentTimeout) {
a.Logger.Printf("[cluster-leader] max retries reached for target %q and service %q, reselecting...", tc.Name, service.ID)
err = a.unassignTarget(ctx, tc.Name, service.ID)
if err != nil {
a.Logger.Printf("failed to unassign target %q from %q", tc.Name, service.ID)
}
goto SELECTSERVICE
}
time.Sleep(lockWaitTime)
goto WAIT
}
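// selectService picks the instance that should own a target: first by best
// tag match, then by lowest current load among the matching instances.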
func (a *App) selectService(tags []string, denied ...string) (*lockers.Service, error) {
numServices := len(a.apiServices)
switch numServices {
case 0:
return nil, errNotFound
case 1:
for _, s := range a.apiServices {
return s, nil
}
default:
// select instance by tags
matchingInstances := make([]string, 0)
tagCount := a.getInstancesTagsMatches(tags)
if len(tagCount) > 0 {
matchingInstances = a.getHighestTagsMatches(tagCount)
a.Logger.Printf("current instances with tags=%v: %+v", tags, matchingInstances)
} else {
for n := range a.apiServices {
matchingInstances = append(matchingInstances, strings.TrimSuffix(n, "-api"))
}
}
if len(matchingInstances) == 1 {
return a.apiServices[fmt.Sprintf("%s-api", matchingInstances[0])], nil
}
// select instance by load
load, err := a.getInstancesLoad(matchingInstances...)
if err != nil {
return nil, err
}
a.Logger.Printf("current instances load: %+v", load)
// if there are no locks in place, return a random service | if len(load) == 0 { | random_line_split |
|
clustering.go | target has no suitable matching services,
// continue to next target without wait
continue
}
}
//a.m.RUnlock()
cancel()
select {
case <-ctx.Done():
return
default:
time.Sleep(a.Config.Clustering.TargetsWatchTimer)
}
}
}
}
func (a *App) dispatchTarget(ctx context.Context, tc *types.TargetConfig) error {
if a.Config.Debug {
a.Logger.Printf("checking if %q is locked", tc.Name)
}
key := fmt.Sprintf("gnmic/%s/targets/%s", a.Config.Clustering.ClusterName, tc.Name)
locked, err := a.locker.IsLocked(ctx, key)
if err != nil {
return err
}
if a.Config.Debug {
a.Logger.Printf("target %q is locked: %v", tc.Name, locked)
}
if locked {
return nil
}
a.Logger.Printf("dispatching target %q", tc.Name)
denied := make([]string, 0)
SELECTSERVICE:
service, err := a.selectService(tc.Tags, denied...)
if err != nil {
return err
}
if service == nil {
goto SELECTSERVICE
}
a.Logger.Printf("selected service %+v", service)
// assign target to selected service
err = a.assignTarget(ctx, tc, service)
if err != nil {
// add service to denied list and reselect
a.Logger.Printf("failed assigning target %q to service %q: %v", tc.Name, service.ID, err)
denied = append(denied, service.ID)
goto SELECTSERVICE
}
// wait for lock to be acquired
instanceName := ""
for _, tag := range service.Tags {
splitTag := strings.Split(tag, "=")
if len(splitTag) == 2 && splitTag[0] == "instance-name" {
instanceName = splitTag[1]
}
}
a.Logger.Printf("[cluster-leader] waiting for lock %q to be acquired by %q", key, instanceName)
retries := 0
WAIT:
values, err := a.locker.List(ctx, key)
if err != nil {
a.Logger.Printf("failed getting value of %q: %v", key, err)
time.Sleep(lockWaitTime)
goto WAIT
}
if len(values) == 0 {
retries++
if (retries+1)*int(lockWaitTime) >= int(a.Config.Clustering.TargetAssignmentTimeout) {
a.Logger.Printf("[cluster-leader] max retries reached for target %q and service %q, reselecting...", tc.Name, service.ID)
err = a.unassignTarget(ctx, tc.Name, service.ID)
if err != nil {
a.Logger.Printf("failed to unassign target %q from %q", tc.Name, service.ID)
}
goto SELECTSERVICE
}
time.Sleep(lockWaitTime)
goto WAIT
}
if instance, ok := values[key]; ok {
if instance == instanceName {
a.Logger.Printf("[cluster-leader] lock %q acquired by %q", key, instanceName)
return nil
}
}
retries++
if (retries+1)*int(lockWaitTime) >= int(a.Config.Clustering.TargetAssignmentTimeout) {
a.Logger.Printf("[cluster-leader] max retries reached for target %q and service %q, reselecting...", tc.Name, service.ID)
err = a.unassignTarget(ctx, tc.Name, service.ID)
if err != nil {
a.Logger.Printf("failed to unassign target %q from %q", tc.Name, service.ID)
}
goto SELECTSERVICE
}
time.Sleep(lockWaitTime)
goto WAIT
}
func (a *App) selectService(tags []string, denied ...string) (*lockers.Service, error) {
numServices := len(a.apiServices)
switch numServices {
case 0:
return nil, errNotFound
case 1:
for _, s := range a.apiServices {
return s, nil
}
default:
// select instance by tags
matchingInstances := make([]string, 0)
tagCount := a.getInstancesTagsMatches(tags)
if len(tagCount) > 0 {
matchingInstances = a.getHighestTagsMatches(tagCount)
a.Logger.Printf("current instances with tags=%v: %+v", tags, matchingInstances)
} else {
for n := range a.apiServices {
matchingInstances = append(matchingInstances, strings.TrimSuffix(n, "-api"))
}
}
if len(matchingInstances) == 1 {
return a.apiServices[fmt.Sprintf("%s-api", matchingInstances[0])], nil
}
// select instance by load
load, err := a.getInstancesLoad(matchingInstances...)
if err != nil {
return nil, err
}
a.Logger.Printf("current instances load: %+v", load)
// if there are no locks in place, return a random service
if len(load) == 0 {
for _, n := range matchingInstances {
a.Logger.Printf("selected service name: %s", n)
return a.apiServices[fmt.Sprintf("%s-api", n)], nil
}
}
for _, d := range denied {
delete(load, strings.TrimSuffix(d, "-api"))
}
a.Logger.Printf("current instances load after filtering: %+v", load)
// all services were denied
if len(load) == 0 {
return nil, errNoMoreSuitableServices
}
ss := a.getLowLoadInstance(load)
a.Logger.Printf("selected service name: %s", ss)
if srv, ok := a.apiServices[fmt.Sprintf("%s-api", ss)]; ok {
return srv, nil
}
return a.apiServices[ss], nil
}
return nil, errNotFound
}
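// getInstancesLoad counts how many target locks each instance currently
// holds; registered instances with no locks are reported with load 0.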
func (a *App) getInstancesLoad(instances ...string) (map[string]int, error) {
// read all current locks held by the cluster
locks, err := a.locker.List(a.ctx, fmt.Sprintf("gnmic/%s/targets", a.Config.Clustering.ClusterName))
if err != nil {
return nil, err
}
if a.Config.Debug {
a.Logger.Println("current locks:", locks)
}
load := make(map[string]int)
// using the read locks, calculate the number of targets each instance has locked
for _, instance := range locks {
if _, ok := load[instance]; !ok {
load[instance] = 0
}
load[instance]++
}
// for instances that are registered but do not have any lock,
// add a "0" load
for _, s := range a.apiServices {
instance := strings.TrimSuffix(s.ID, "-api")
if _, ok := load[instance]; !ok {
load[instance] = 0
}
}
if len(instances) > 0 {
filteredLoad := make(map[string]int)
for _, instance := range instances {
if l, ok := load[instance]; ok {
filteredLoad[instance] = l
} else {
filteredLoad[instance] = 0
}
}
return filteredLoad, nil
}
return load, nil
}
// loop through the current cluster load
// find the instance with the lowest load
func (a *App) getLowLoadInstance(load map[string]int) string {
var ss string
var low = -1
for s, l := range load {
if low < 0 || l < low {
ss = s
low = l
}
}
return ss
}
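// getTargetToInstanceMapping maps each target name (the base name of its
// lock key) to the instance currently holding the lock.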
func (a *App) getTargetToInstanceMapping() (map[string]string, error) {
locks, err := a.locker.List(a.ctx, fmt.Sprintf("gnmic/%s/targets", a.Config.Clustering.ClusterName))
if err != nil {
return nil, err
}
if a.Config.Debug {
a.Logger.Println("current locks:", locks)
}
for k, v := range locks {
delete(locks, k)
locks[filepath.Base(k)] = v
}
return locks, nil
}
func (a *App) getInstancesTagsMatches(tags []string) map[string]int {
maxMatch := make(map[string]int)
numTags := len(tags)
if numTags == 0 {
return maxMatch
}
for name, s := range a.apiServices {
name = strings.TrimSuffix(name, "-api")
maxMatch[name] = 0
for i, tag := range s.Tags {
if i+1 > numTags {
break
}
if tag == tags[i] {
maxMatch[name]++
continue
}
break
}
}
return maxMatch
}
func (a *App) getHighestTagsMatches(tagsCount map[string]int) []string {
var ss = make([]string, 0)
var high = -1
for s, c := range tagsCount {
if high < 0 || c > high {
ss = []string{strings.TrimSuffix(s, "-api")}
high = c
continue
}
if high == c {
ss = append(ss, strings.TrimSuffix(s, "-api"))
}
}
return ss
}
func (a *App) deleteTarget(ct | x context.Co | identifier_name |
|
clustering.go | .Clustering.InstanceName))
if err != nil {
a.Logger.Printf("failed to acquire leader lock: %v", err)
time.Sleep(retryTimer)
continue
}
if !a.isLeader {
time.Sleep(retryTimer)
continue
}
a.isLeader = true
a.Logger.Printf("%q became the leader", a.Config.Clustering.InstanceName)
break
}
ctx, cancel := context.WithCancel(a.ctx)
defer cancel()
go func() {
go a.watchMembers(ctx)
a.Logger.Printf("leader waiting %s before dispatching targets", a.Config.Clustering.LeaderWaitTimer)
time.Sleep(a.Config.Clustering.LeaderWaitTimer)
a.Logger.Printf("leader done waiting, starting loader and dispatching targets")
go a.startLoader(ctx)
go a.dispatchTargets(ctx)
}()
doneCh, errCh := a.locker.KeepLock(a.ctx, leaderKey)
select {
case <-doneCh:
a.Logger.Printf("%q lost leader role", a.Config.Clustering.InstanceName)
cancel()
a.isLeader = false
goto START
case err := <-errCh:
a.Logger.Printf("%q failed to maintain the leader key: %v", a.Config.Clustering.InstanceName, err)
cancel()
a.isLeader = false
goto START
case <-a.ctx.Done():
return
}
}
func (a *App) watchMembers(ctx context.Context) {
serviceName | err := a.locker.WatchServices(ctx, serviceName, []string{"cluster-name=" + a.Config.Clustering.ClusterName}, membersChan, a.Config.Clustering.ServicesWatchTimer)
if err != nil {
a.Logger.Printf("failed getting services: %v", err)
time.Sleep(retryTimer)
goto START
}
}
}
func (a *App) updateServices(srvs []*lockers.Service) {
a.configLock.Lock()
defer a.configLock.Unlock()
numNewSrv := len(srvs)
numCurrentSrv := len(a.apiServices)
a.Logger.Printf("received service update with %d service(s)", numNewSrv)
// no new services and no current services, continue
if numNewSrv == 0 && numCurrentSrv == 0 {
return
}
// no new services and having some services, delete all
if numNewSrv == 0 && numCurrentSrv != 0 {
a.Logger.Printf("deleting all services")
a.apiServices = make(map[string]*lockers.Service)
return
}
// no current services, add all new services
if numCurrentSrv == 0 {
for _, s := range srvs {
a.Logger.Printf("adding service id %q", s.ID)
a.apiServices[s.ID] = s
}
return
}
//
newSrvs := make(map[string]*lockers.Service)
for _, s := range srvs {
newSrvs[s.ID] = s
}
// delete removed services
for n := range a.apiServices {
if _, ok := newSrvs[n]; !ok {
a.Logger.Printf("deleting service id %q", n)
delete(a.apiServices, n)
}
}
// add new services
for n, s := range newSrvs {
a.Logger.Printf("adding service id %q", n)
a.apiServices[n] = s
}
}
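// dispatchTargets periodically walks the configured targets and dispatches
// any target that is not currently locked by a cluster member.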
func (a *App) dispatchTargets(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
default:
if len(a.apiServices) == 0 {
a.Logger.Printf("no services found, waiting...")
time.Sleep(a.Config.Clustering.TargetsWatchTimer)
continue
}
var err error
//a.m.RLock()
dctx, cancel := context.WithTimeout(ctx, a.Config.Clustering.TargetsWatchTimer)
for _, tc := range a.Config.Targets {
err = a.dispatchTarget(dctx, tc)
if err != nil {
a.Logger.Printf("failed to dispatch target %q: %v", tc.Name, err)
}
if err == errNotFound {
// no registered services,
// no need to continue with other targets,
// break from the targets loop
break
}
if err == errNoMoreSuitableServices {
// target has no suitable matching services,
// continue to next target without wait
continue
}
}
//a.m.RUnlock()
cancel()
select {
case <-ctx.Done():
return
default:
time.Sleep(a.Config.Clustering.TargetsWatchTimer)
}
}
}
}
func (a *App) dispatchTarget(ctx context.Context, tc *types.TargetConfig) error {
if a.Config.Debug {
a.Logger.Printf("checking if %q is locked", tc.Name)
}
key := fmt.Sprintf("gnmic/%s/targets/%s", a.Config.Clustering.ClusterName, tc.Name)
locked, err := a.locker.IsLocked(ctx, key)
if err != nil {
return err
}
if a.Config.Debug {
a.Logger.Printf("target %q is locked: %v", tc.Name, locked)
}
if locked {
return nil
}
a.Logger.Printf("dispatching target %q", tc.Name)
denied := make([]string, 0)
SELECTSERVICE:
service, err := a.selectService(tc.Tags, denied...)
if err != nil {
return err
}
if service == nil {
goto SELECTSERVICE
}
a.Logger.Printf("selected service %+v", service)
// assign target to selected service
err = a.assignTarget(ctx, tc, service)
if err != nil {
// add service to denied list and reselect
a.Logger.Printf("failed assigning target %q to service %q: %v", tc.Name, service.ID, err)
denied = append(denied, service.ID)
goto SELECTSERVICE
}
// wait for lock to be acquired
instanceName := ""
for _, tag := range service.Tags {
splitTag := strings.Split(tag, "=")
if len(splitTag) == 2 && splitTag[0] == "instance-name" {
instanceName = splitTag[1]
}
}
a.Logger.Printf("[cluster-leader] waiting for lock %q to be acquired by %q", key, instanceName)
retries := 0
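// Poll the lock until the selected instance shows up as its holder; after
// TargetAssignmentTimeout, unassign the target and reselect another service.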
WAIT:
values, err := a.locker.List(ctx, key)
if err != nil {
a.Logger.Printf("failed getting value of %q: %v", key, err)
time.Sleep(lockWaitTime)
goto WAIT
}
if len(values) == 0 {
retries++
if (retries+1)*int(lockWaitTime) >= int(a.Config.Clustering.TargetAssignmentTimeout) {
a.Logger.Printf("[cluster-leader] max retries reached for target %q and service %q, reselecting...", tc.Name, service.ID)
err = a.unassignTarget(ctx, tc.Name, service.ID)
if err != nil {
a.Logger.Printf("failed to unassign target %q from %q", tc.Name, service.ID)
}
goto SELECTSERVICE
}
time.Sleep(lockWaitTime)
goto WAIT
}
if instance, ok := values[key]; ok {
if instance == instanceName {
a.Logger.Printf("[cluster-leader] lock %q acquired by %q", key, instanceName)
return nil
}
}
retries++
if (retries+1)*int(lockWaitTime) >= int(a.Config.Clustering.TargetAssignmentTimeout) {
a.Logger.Printf("[cluster-leader] max retries reached for target %q and service %q, reselecting...", tc.Name, service.ID)
err = a.unassignTarget(ctx, tc.Name, service.ID)
if err != nil {
a.Logger.Printf("failed to unassign target %q from %q", tc.Name, service.ID)
}
goto SELECTSERVICE
}
time.Sleep(lockWaitTime)
goto WAIT
}
func (a *App) selectService(tags []string, denied ...string) (*lockers.Service, error) {
numServices := len(a.apiServices)
switch numServices {
case 0:
return nil, errNotFound
case 1:
for _, s := range a.apiServices {
return s, nil
}
default:
// select instance by tags
matchingInstances := make([]string, 0)
tagCount := a.getInstancesTagsMatches(tags)
if len(tagCount) > 0 {
matchingInstances = a.getHighestTagsMatches(tagCount)
a.Logger.Printf("current instances with tags=%v: %+v", tags, matchingInstances)
} else {
for n := range a.apiServices {
matchingInstances = append(matchingInstances, strings.TrimSuffix(n, "-api"))
}
}
if len(matchingInstances) == | := fmt.Sprintf("%s-%s", a.Config.Clustering.ClusterName, apiServiceName)
START:
select {
case <-ctx.Done():
return
default:
membersChan := make(chan []*lockers.Service)
go func() {
for {
select {
case <-ctx.Done():
return
case srvs, ok := <-membersChan:
if !ok {
return
}
a.updateServices(srvs)
}
}
}() | identifier_body |
views.py | *
from .feeds import EventFeed
import mijnhercules.settings as settings
from .models import Match, Location
from .forms import MatchPresence
from members.models import Team, Player, MembershipHercules, Pass
# from mijnhercules.forms import *
from members.forms import EditPlayerForm, ArrangeSubstitutesForm, importMatchesForm, importPlayersForm
SITE_ROOT = os.path.dirname(os.path.realpath(manage.__file__))
eu = pytz.utc
#count amount of teams
# @login_required
# def TeamCount():
# t = Team.objects.all()
# return len(t)
def createMatchFeed(request, teamwedstrijd = None):
cal = EventFeed(teamwedstrijd)
return cal.__call__(request)
@login_required
def viewMatch(request, match):
try:
m = Match.objects.get(id=match)
except Match.DoesNotExist:
raise Http404
teams = m.getHercules()
substituteoptions = False
substitutes = {}
for t in teams:
if m.getSubstitutes(t.pk) != 0:
substituteoptions = True
substitutes[t] = m.getSubstitutes(t.pk)
# raise ValueError
return render(request, 'viewmatch.html', {'match':m, 'hercules':teams, 'substitutes':substitutes, 'substituteoptions':substituteoptions})
def | (request, match):
u1 = User.objects.get(username=request.user.username)
teampk = u1.get_profile().team_member.pk
try:
m = Match.objects.get(id=match)
except Match.DoesNotExist:
raise Http404
if request.method == 'POST' and m.isTeam(teampk):
form = ArrangeSubstitutesForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
# m.substitutesneeded = cd['substitutesneeded']
m.setSubstitutes(team = teampk, amountsubsneeded = cd['substitutesneeded'])
m.save()
return render(request, 'player/editplayer_complete.html')
else:
if m.isTeam(teampk):
form = ArrangeSubstitutesForm(initial={'substitutesneeded': m.getSubstitutesNeeded(teampk)})
u1 = User.objects.get(username=request.user.username)
player = u1.get_profile()
if player.gender == 'V':
substituteWilling = Player.women.filter(substitutewilling=True)
elif player.gender == 'M':
substituteWilling = Player.men.filter(substitutewilling=True)
presentplayers = m.getPresentPlayers(player.team_member.pk)
return render(request, 'match.html', {'match':m, 'form': form, 'substitutes':substituteWilling, 'presentplayers':presentplayers})
else:
raise Http404
def readMatch(f):
# with open(f, 'rU') as csvfile:
# data = csv.reader(csvfile, delimiter=';', dialect=csv.excel_tab)
# data.next()
# data = f.read()
# data = data.splitlines()
# dialect = csv.Sniffer().sniff(codecs.EncodedFile(f,"utf-8").read(1024))
f.open()
# check whether headers are indicative of a good csv file:
reader = csv.reader(codecs.EncodedFile(f,"latin-1"), delimiter=';', dialect=csv.excel_tab)
try:
header = reader.next()
assert all(col in header for col in ('Wedstrijdnummer', 'Wedstrijddatum (niet geformatteerd)', 'Aanvangstijd', 'Aanduiding', 'Thuis team', 'Uit team', 'Sport omschrijving', 'Veld', 'Accommodatie naam', 'Plaats'))
except:
# mail_admins("Foute wedstrijd upload", "Probleem met CSV upload", fail_silently=False)
return [], "Foutje: het lijkt geen csv bestand te zijn."
f.close()
# get min and max daterange so cancelled matches can be deleted later on:
f.open()
data = csv.DictReader(codecs.EncodedFile(f,"latin-1"), delimiter=';', dialect=csv.excel_tab)
dates = []
for row in data:
try:
date = eu.localize(datetime.strptime((row['Wedstrijddatum (niet geformatteerd)']+" " + row['Aanvangstijd']), '%d-%m-%y %H:%M'))
except:
date = eu.localize(datetime.strptime((row['Wedstrijddatum (niet geformatteerd)']+" " + row['Aanvangstijd']), '%d-%m-%Y %H:%M'))
dates.append(date)
mindate = min(dates)
maxdate = max(dates)
existingmatches = Match.objects.filter(date__lte=maxdate).filter(date__gte=mindate)
f.close()
# start saving matches
savedmatches = []
f.open()
data = csv.DictReader(codecs.EncodedFile(f,"latin-1"), delimiter=';', dialect=csv.excel_tab)
for row in data:
if "Zaal" in row['Aanduiding']:
# add locations if not yet existent in the db
try:
loca = re.match(r'(.*)\sveld', row['Veld'])
hall = loca.group(1)
loc = Location.objects.get(name=hall)
#print "Existing", loc
except:
loc = re.match(r'(.*)\sveld', row['Veld'])
loc = Location.objects.create(name=loc.group(1))
loc.save()
#add team if not yet existent in the db
try:
t1 = Team.objects.get(number=row['Thuis team'])
except:
t1 = Team.objects.create(number = row['Thuis team'], level = '99')
t1.save()
try:
t2 = Team.objects.get(number=row['Uit team'])
except:
t2 = Team.objects.create(number = row['Uit team'], level = '99')
t2.save()
# get datetime field:
try:
date = eu.localize(datetime.strptime((row['Wedstrijddatum (niet geformatteerd)']+" " + row['Aanvangstijd']), '%d-%m-%y %H:%M'))
except:
date = eu.localize(datetime.strptime((row['Wedstrijddatum (niet geformatteerd)']+" " + row['Aanvangstijd']), '%d-%m-%Y %H:%M'))
#get matches:
try:
m = Match.objects.get(nrid=row['Wedstrijdnummer'])
m.date = date
m.teamhome = t1
m.teamaway = t2
m.location = loc
m.save()
savedmatches.append(m)
#print m
# saveMatch(m, row[1] + row[2], t1, t2, loc)
except:
#print "except match with %s and %s" % (t1, t2)
m = Match(
nrid=row['Wedstrijdnummer'],
date = date,
teamhome = t1,
teamaway = t2,
location = loc)
m.save()
savedmatches.append(m)
# delete cancelled matches:
for e in existingmatches:
if e not in savedmatches:
e.delete()
f.close()
return savedmatches, None
def importMatch(request):
matches = Match.objects.exclude(date__lte=date.today()).order_by('date')
if request.method == 'POST':
form = importMatchesForm(request.POST, request.FILES)
if form.is_valid():
savedmatches, fail = readMatch(request.FILES['matches'])
# request.FILES['matches'].open("rb")
# portfolio = csv.DictReader(request.FILES['uploadFile'].file)
return render(request, 'savematch_success.html', {'savedmatches':savedmatches, 'fail': fail})
else:
form = importMatchesForm()
return render(request, 'savematch.html', {'form': form, 'matches': matches})
def viewMyMatches(request):
u1 = User.objects.get(username=request.user.username)
teampk = u1.get_profile().team_member.pk
matches = Match.objects.get_my_matches(teampk)
presentmatches = {}
for m in matches:
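# 'Aanwezig' / 'Afwezig' are Dutch for present / absent (user-facing strings).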
if m.playerPresent(teampk, u1):
status = 'Aanwezig'
else:
status = 'Afwezig'
presentmatches[m] = MatchPresence(initial = status)
# raise ValueError
return render(request, 'mymatches.html', {'mymatches': matches, 'presentmatches':presentmatches})
def offerSubstitute(request, matchpk, teampk, substitutepk):
match = Match.objects.get(pk=matchpk)
match.addSubstitute(teampk = teampk, player = Player.objects.get(pk=substitutepk))
messages.add_message(request, messages.SUCCESS, 'Je hebt jezelf aangemeld als mogelijke invaller. Goed bezig!!')
# return render(request, 'substitutewilling_confirmation.html')
# redirect_url = reverse(viewMatch, args= | editMatch | identifier_name |
views.py | *
from .feeds import EventFeed
import mijnhercules.settings as settings
from .models import Match, Location
from .forms import MatchPresence
from members.models import Team, Player, MembershipHercules, Pass
# from mijnhercules.forms import *
from members.forms import EditPlayerForm, ArrangeSubstitutesForm, importMatchesForm, importPlayersForm
SITE_ROOT = os.path.dirname(os.path.realpath(manage.__file__))
eu = pytz.utc
#count amount of teams
# @login_required
# def TeamCount():
# t = Team.objects.all()
# return len(t)
def createMatchFeed(request, teamwedstrijd = None):
cal = EventFeed(teamwedstrijd)
return cal.__call__(request)
@login_required
def viewMatch(request, match):
try:
m = Match.objects.get(id=match)
except Match.DoesNotExist:
raise Http404
teams = m.getHercules()
substituteoptions = False
substitutes = {}
for t in teams:
if m.getSubstitutes(t.pk) != 0:
substituteoptions = True
substitutes[t] = m.getSubstitutes(t.pk)
# raise ValueError
return render(request, 'viewmatch.html', {'match':m, 'hercules':teams, 'substitutes':substitutes, 'substituteoptions':substituteoptions})
def editMatch(request, match):
u1 = User.objects.get(username=request.user.username)
teampk = u1.get_profile().team_member.pk
try:
m = Match.objects.get(id=match)
except Match.DoesNotExist:
raise Http404
if request.method == 'POST' and m.isTeam(teampk):
form = ArrangeSubstitutesForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
# m.substitutesneeded = cd['substitutesneeded']
m.setSubstitutes(team = teampk, amountsubsneeded = cd['substitutesneeded'])
m.save()
return render(request, 'player/editplayer_complete.html')
else:
if m.isTeam(teampk):
form = ArrangeSubstitutesForm(initial={'substitutesneeded': m.getSubstitutesNeeded(teampk)})
u1 = User.objects.get(username=request.user.username)
player = u1.get_profile()
if player.gender == 'V':
substituteWilling = Player.women.filter(substitutewilling=True)
elif player.gender == 'M':
substituteWilling = Player.men.filter(substitutewilling=True)
presentplayers = m.getPresentPlayers(player.team_member.pk)
return render(request, 'match.html', {'match':m, 'form': form, 'substitutes':substituteWilling, 'presentplayers':presentplayers})
else:
raise Http404
def readMatch(f):
# with open(f, 'rU') as csvfile:
# data = csv.reader(csvfile, delimiter=';', dialect=csv.excel_tab)
# data.next()
# data = f.read()
# data = data.splitlines()
# dialect = csv.Sniffer().sniff(codecs.EncodedFile(f,"utf-8").read(1024))
f.open()
# check whether headers are indicative of a good csv file:
reader = csv.reader(codecs.EncodedFile(f,"latin-1"), delimiter=';', dialect=csv.excel_tab)
try:
header = reader.next()
assert all(col in header for col in ('Wedstrijdnummer', 'Wedstrijddatum (niet geformatteerd)', 'Aanvangstijd', 'Aanduiding', 'Thuis team', 'Uit team', 'Sport omschrijving', 'Veld', 'Accommodatie naam', 'Plaats'))
except:
# mail_admins("Foute wedstrijd upload", "Probleem met CSV upload", fail_silently=False)
return [], "Foutje: het lijkt geen csv bestand te zijn."
f.close()
# get min and max daterange so cancelled matches can be deleted later on:
f.open()
data = csv.DictReader(codecs.EncodedFile(f,"latin-1"), delimiter=';', dialect=csv.excel_tab)
dates = []
for row in data:
try:
date = eu.localize(datetime.strptime((row['Wedstrijddatum (niet geformatteerd)']+" " + row['Aanvangstijd']), '%d-%m-%y %H:%M'))
except:
date = eu.localize(datetime.strptime((row['Wedstrijddatum (niet geformatteerd)']+" " + row['Aanvangstijd']), '%d-%m-%Y %H:%M'))
dates.append(date)
mindate = min(dates)
maxdate = max(dates)
existingmatches = Match.objects.filter(date__lte=maxdate).filter(date__gte=mindate)
f.close()
# start saving matches
savedmatches = []
f.open()
data = csv.DictReader(codecs.EncodedFile(f,"latin-1"), delimiter=';', dialect=csv.excel_tab)
for row in data:
if "Zaal" in row['Aanduiding']:
# add locations if not yet existent in the db
try:
loca = re.match(r'(.*)\sveld', row['Veld'])
hall = loca.group(1)
loc = Location.objects.get(name=hall)
#print "Existing", loc
except:
loc = re.match(r'(.*)\sveld', row['Veld'])
loc = Location.objects.create(name=loc.group(1))
loc.save()
#add team if not yet existent in the db
try:
t1 = Team.objects.get(number=row['Thuis team'])
except:
t1 = Team.objects.create(number = row['Thuis team'], level = '99')
t1.save()
try:
t2 = Team.objects.get(number=row['Uit team'])
except:
t2 = Team.objects.create(number = row['Uit team'], level = '99')
t2.save()
# get datetime field:
try:
date = eu.localize(datetime.strptime((row['Wedstrijddatum (niet geformatteerd)']+" " + row['Aanvangstijd']), '%d-%m-%y %H:%M'))
except:
date = eu.localize(datetime.strptime((row['Wedstrijddatum (niet geformatteerd)']+" " + row['Aanvangstijd']), '%d-%m-%Y %H:%M'))
#get matches:
try:
m = Match.objects.get(nrid=row['Wedstrijdnummer'])
m.date = date
m.teamhome = t1
m.teamaway = t2
m.location = loc
m.save()
savedmatches.append(m)
#print m
# saveMatch(m, row[1] + row[2], t1, t2, loc)
except:
#print "except match with %s and %s" % (t1, t2)
m = Match(
nrid=row['Wedstrijdnummer'],
date = date,
teamhome = t1,
teamaway = t2,
location = loc)
m.save()
savedmatches.append(m)
# delete cancelled matches:
for e in existingmatches:
if e not in savedmatches:
e.delete()
f.close()
return savedmatches, None
def importMatch(request):
matches = Match.objects.exclude(date__lte=date.today()).order_by('date')
if request.method == 'POST':
form = importMatchesForm(request.POST, request.FILES)
if form.is_valid():
savedmatches, fail = readMatch(request.FILES['matches'])
# request.FILES['matches'].open("rb")
# portfolio = csv.DictReader(request.FILES['uploadFile'].file)
return render(request, 'savematch_success.html', {'savedmatches':savedmatches, 'fail': fail})
else:
|
return render(request, 'savematch.html', {'form': form, 'matches': matches})
def viewMyMatches(request):
u1 = User.objects.get(username=request.user.username)
teampk = u1.get_profile().team_member.pk
matches = Match.objects.get_my_matches(teampk)
presentmatches = {}
for m in matches:
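# 'Aanwezig' / 'Afwezig' are Dutch for present / absent (user-facing strings).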
if m.playerPresent(teampk, u1):
status = 'Aanwezig'
else:
status = 'Afwezig'
presentmatches[m] = MatchPresence(initial = status)
# raise ValueError
return render(request, 'mymatches.html', {'mymatches': matches, 'presentmatches':presentmatches})
def offerSubstitute(request, matchpk, teampk, substitutepk):
match = Match.objects.get(pk=matchpk)
match.addSubstitute(teampk = teampk, player = Player.objects.get(pk=substitutepk))
messages.add_message(request, messages.SUCCESS, 'Je hebt jezelf aangemeld als mogelijke invaller. Goed bezig!!')
# return render(request, 'substitutewilling_confirmation.html')
# redirect_url = reverse(viewMatch, args= | form = importMatchesForm() | conditional_block |
views.py | return cal.__call__(request)
@login_required
def viewMatch(request, match):
try:
m = Match.objects.get(id=match)
except Match.DoesNotExist:
raise Http404
teams = m.getHercules()
substituteoptions = False
substitutes = {}
for t in teams:
if m.getSubstitutes(t.pk) != 0:
substituteoptions = True
substitutes[t] = m.getSubstitutes(t.pk)
# raise ValueError
return render(request, 'viewmatch.html', {'match':m, 'hercules':teams, 'substitutes':substitutes, 'substituteoptions':substituteoptions})
def editMatch(request, match):
u1 = User.objects.get(username=request.user.username)
teampk = u1.get_profile().team_member.pk
try:
m = Match.objects.get(id=match)
except Match.DoesNotExist:
raise Http404
if request.method == 'POST' and m.isTeam(teampk):
form = ArrangeSubstitutesForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
# m.substitutesneeded = cd['substitutesneeded']
m.setSubstitutes(team = teampk, amountsubsneeded = cd['substitutesneeded'])
m.save()
return render(request, 'player/editplayer_complete.html')
else:
if m.isTeam(teampk):
form = ArrangeSubstitutesForm(initial={'substitutesneeded': m.getSubstitutesNeeded(teampk)})
u1 = User.objects.get(username=request.user.username)
player = u1.get_profile()
if player.gender == 'V':
substituteWilling = Player.women.filter(substitutewilling=True)
elif player.gender == 'M':
substituteWilling = Player.men.filter(substitutewilling=True)
presentplayers = m.getPresentPlayers(player.team_member.pk)
return render(request, 'match.html', {'match':m, 'form': form, 'substitutes':substituteWilling, 'presentplayers':presentplayers})
else:
raise Http404
def readMatch(f):
# with open(f, 'rU') as csvfile:
# data = csv.reader(csvfile, delimiter=';', dialect=csv.excel_tab)
# data.next()
# data = f.read()
# data = data.splitlines()
# dialect = csv.Sniffer().sniff(codecs.EncodedFile(f,"utf-8").read(1024))
f.open()
# check whether headers are indicative of a good csv file:
reader = csv.reader(codecs.EncodedFile(f,"latin-1"), delimiter=';', dialect=csv.excel_tab)
try:
header = reader.next()
assert all(col in header for col in ('Wedstrijdnummer', 'Wedstrijddatum (niet geformatteerd)', 'Aanvangstijd', 'Aanduiding', 'Thuis team', 'Uit team', 'Sport omschrijving', 'Veld', 'Accommodatie naam', 'Plaats'))
except:
# mail_admins("Foute wedstrijd upload", "Probleem met CSV upload", fail_silently=False)
return [], "Foutje: het lijkt geen csv bestand te zijn."
f.close()
# get min and max daterange so cancelled matches can be deleted later on:
f.open()
data = csv.DictReader(codecs.EncodedFile(f,"latin-1"), delimiter=';', dialect=csv.excel_tab)
dates = []
for row in data:
try:
date = eu.localize(datetime.strptime((row['Wedstrijddatum (niet geformatteerd)']+" " + row['Aanvangstijd']), '%d-%m-%y %H:%M'))
except:
date = eu.localize(datetime.strptime((row['Wedstrijddatum (niet geformatteerd)']+" " + row['Aanvangstijd']), '%d-%m-%Y %H:%M'))
dates.append(date)
mindate = min(dates)
maxdate = max(dates)
existingmatches = Match.objects.filter(date__lte=maxdate).filter(date__gte=mindate)
f.close()
# start saving matches
savedmatches = []
f.open()
data = csv.DictReader(codecs.EncodedFile(f,"latin-1"), delimiter=';', dialect=csv.excel_tab)
for row in data:
if "Zaal" in row['Aanduiding']:
# add locations if not yet existent in the db
try:
loca = re.match(r'(.*)\sveld', row['Veld'])
hall = loca.group(1)
loc = Location.objects.get(name=hall)
#print "Existing", loc
except:
loc = re.match(r'(.*)\sveld', row['Veld'])
loc = Location.objects.create(name=loc.group(1))
loc.save()
#add team if not yet existent in the db
try:
t1 = Team.objects.get(number=row['Thuis team'])
except:
t1 = Team.objects.create(number = row['Thuis team'], level = '99')
t1.save()
try:
t2 = Team.objects.get(number=row['Uit team'])
except:
t2 = Team.objects.create(number = row['Uit team'], level = '99')
t2.save()
# get datetime field:
try:
date = eu.localize(datetime.strptime((row['Wedstrijddatum (niet geformatteerd)']+" " + row['Aanvangstijd']), '%d-%m-%y %H:%M'))
except:
date = eu.localize(datetime.strptime((row['Wedstrijddatum (niet geformatteerd)']+" " + row['Aanvangstijd']), '%d-%m-%Y %H:%M'))
#get matches:
try:
m = Match.objects.get(nrid=row['Wedstrijdnummer'])
m.date = date
m.teamhome = t1
m.teamaway = t2
m.location = loc
m.save()
savedmatches.append(m)
#print m
# saveMatch(m, row[1] + row[2], t1, t2, loc)
except:
#print "except match with %s and %s" % (t1, t2)
m = Match(
nrid=row['Wedstrijdnummer'],
date = date,
teamhome = t1,
teamaway = t2,
location = loc)
m.save()
savedmatches.append(m)
# delete cancelled matches:
for e in existingmatches:
if e not in savedmatches:
e.delete()
f.close()
return savedmatches, None
def importMatch(request):
matches = Match.objects.exclude(date__lte=date.today()).order_by('date')
if request.method == 'POST':
form = importMatchesForm(request.POST, request.FILES)
if form.is_valid():
savedmatches, fail = readMatch(request.FILES['matches'])
# request.FILES['matches'].open("rb")
# portfolio = csv.DictReader(request.FILES['uploadFile'].file)
return render(request, 'savematch_success.html', {'savedmatches':savedmatches, 'fail': fail})
else:
form = importMatchesForm()
return render(request, 'savematch.html', {'form': form, 'matches': matches})
def viewMyMatches(request):
u1 = User.objects.get(username=request.user.username)
teampk = u1.get_profile().team_member.pk
matches = Match.objects.get_my_matches(teampk)
presentmatches = {}
for m in matches:
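# 'Aanwezig' / 'Afwezig' are Dutch for present / absent (user-facing strings).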
if m.playerPresent(teampk, u1):
status = 'Aanwezig'
else:
status = 'Afwezig'
presentmatches[m] = MatchPresence(initial = status)
# raise ValueError
return render(request, 'mymatches.html', {'mymatches': matches, 'presentmatches':presentmatches})
def offerSubstitute(request, matchpk, teampk, substitutepk):
match = Match.objects.get(pk=matchpk)
match.addSubstitute(teampk = teampk, player = Player.objects.get(pk=substitutepk))
messages.add_message(request, messages.SUCCESS, 'Je hebt jezelf aangemeld als mogelijke invaller. Goed bezig!!')
# return render(request, 'substitutewilling_confirmation.html')
# redirect_url = reverse(viewMatch, args=matchpk,)
return redirect(reverse(viewMatch, args=(matchpk,)))
def cancelSubstituteOffer(request, matchpk, teampk, substitutepk):
| match = Match.objects.get(pk=matchpk)
match.removeSubstitute(teampk=teampk, player =Player.objects.get(pk=substitutepk))
# return render(request, 'substitutewilling_cancellation.html')
messages.add_message(request, messages.SUCCESS, 'Je afmelding als mogelijke invaller is doorgegeven.')
# return render(request, 'substitutewilling_confirmation.html')
# redirect_url = reverse(viewMatch, args=matchpk,)
return redirect(reverse(viewMatch, args=(matchpk,))) | identifier_body |
|
views.py | import *
from .feeds import EventFeed
import mijnhercules.settings as settings
from .models import Match, Location
from .forms import MatchPresence
from members.models import Team, Player, MembershipHercules, Pass
# from mijnhercules.forms import *
from members.forms import EditPlayerForm, ArrangeSubstitutesForm, importMatchesForm, importPlayersForm
SITE_ROOT = os.path.dirname(os.path.realpath(manage.__file__))
eu = pytz.utc
#count amount of teams
# @login_required
# def TeamCount():
# t = Team.objects.all()
# return len(t)
def createMatchFeed(request, teamwedstrijd = None):
cal = EventFeed(teamwedstrijd)
return cal.__call__(request)
@login_required
def viewMatch(request, match):
try:
m = Match.objects.get(id=match)
except Match.DoesNotExist:
raise Http404
teams = m.getHercules()
substituteoptions = False
substitutes = {}
for t in teams:
if m.getSubstitutes(t.pk) != 0:
substituteoptions = True
substitutes[t] = m.getSubstitutes(t.pk)
# raise ValueError
return render(request, 'viewmatch.html', {'match':m, 'hercules':teams, 'substitutes':substitutes, 'substituteoptions':substituteoptions})
def editMatch(request, match):
u1 = User.objects.get(username=request.user.username)
teampk = u1.get_profile().team_member.pk
try:
m = Match.objects.get(id=match)
except Match.DoesNotExist:
raise Http404 | if form.is_valid():
cd = form.cleaned_data
# m.substitutesneeded = cd['substitutesneeded']
m.setSubstitutes(team = teampk, amountsubsneeded = cd['substitutesneeded'])
m.save()
return render(request, 'player/editplayer_complete.html')
else:
if m.isTeam(teampk):
form = ArrangeSubstitutesForm(initial={'substitutesneeded': m.getSubstitutesNeeded(teampk)})
u1 = User.objects.get(username=request.user.username)
player = u1.get_profile()
if player.gender == 'V':
substituteWilling = Player.women.filter(substitutewilling=True)
elif player.gender == 'M':
substituteWilling = Player.men.filter(substitutewilling=True)
presentplayers = m.getPresentPlayers(player.team_member.pk)
return render(request, 'match.html', {'match':m, 'form': form, 'substitutes':substituteWilling, 'presentplayers':presentplayers})
else:
raise Http404
def readMatch(f):
# with open(f, 'rU') as csvfile:
# data = csv.reader(csvfile, delimiter=';', dialect=csv.excel_tab)
# data.next()
# data = f.read()
# data = data.splitlines()
# dialect = csv.Sniffer().sniff(codecs.EncodedFile(f,"utf-8").read(1024))
f.open()
# check whether headers are indicative of a good csv file:
reader = csv.reader(codecs.EncodedFile(f,"latin-1"), delimiter=';', dialect=csv.excel_tab)
try:
header = reader.next()
assert all(col in header for col in ('Wedstrijdnummer', 'Wedstrijddatum (niet geformatteerd)', 'Aanvangstijd', 'Aanduiding', 'Thuis team', 'Uit team', 'Sport omschrijving', 'Veld', 'Accommodatie naam', 'Plaats'))
except:
# mail_admins("Foute wedstrijd upload", "Probleem met CSV upload", fail_silently=False)
return [], "Foutje: het lijkt geen csv bestand te zijn."
f.close()
# get min and max daterange so cancelled matches can be deleted later on:
f.open()
data = csv.DictReader(codecs.EncodedFile(f,"latin-1"), delimiter=';', dialect=csv.excel_tab)
dates = []
for row in data:
try:
date = eu.localize(datetime.strptime((row['Wedstrijddatum (niet geformatteerd)']+" " + row['Aanvangstijd']), '%d-%m-%y %H:%M'))
except:
date = eu.localize(datetime.strptime((row['Wedstrijddatum (niet geformatteerd)']+" " + row['Aanvangstijd']), '%d-%m-%Y %H:%M'))
dates.append(date)
mindate = min(dates)
maxdate = max(dates)
existingmatches = Match.objects.filter(date__lte=maxdate).filter(date__gte=mindate)
f.close()
# start saving matches
savedmatches = []
f.open()
data = csv.DictReader(codecs.EncodedFile(f,"latin-1"), delimiter=';', dialect=csv.excel_tab)
for row in data:
if "Zaal" in row['Aanduiding']:
# add locations if not yet existent in the db
try:
loca = re.match(r'(.*)\sveld', row['Veld'])
hall = loca.group(1)
loc = Location.objects.get(name=hall)
#print "Existing", loc
except:
loc = re.match(r'(.*)\sveld', row['Veld'])
loc = Location.objects.create(name=loc.group(1))
loc.save()
#add team if not yet existent in the db
try:
t1 = Team.objects.get(number=row['Thuis team'])
except:
t1 = Team.objects.create(number = row['Thuis team'], level = '99')
t1.save()
try:
t2 = Team.objects.get(number=row['Uit team'])
except:
t2 = Team.objects.create(number = row['Uit team'], level = '99')
t2.save()
# get datetime field:
try:
date = eu.localize(datetime.strptime((row['Wedstrijddatum (niet geformatteerd)']+" " + row['Aanvangstijd']), '%d-%m-%y %H:%M'))
except:
date = eu.localize(datetime.strptime((row['Wedstrijddatum (niet geformatteerd)']+" " + row['Aanvangstijd']), '%d-%m-%Y %H:%M'))
#get matches:
try:
m = Match.objects.get(nrid=row['Wedstrijdnummer'])
m.date = date
m.teamhome = t1
m.teamaway = t2
m.location = loc
m.save()
savedmatches.append(m)
#print m
# saveMatch(m, row[1] + row[2], t1, t2, loc)
except:
#print "except match with %s and %s" % (t1, t2)
m = Match(
nrid=row['Wedstrijdnummer'],
date = date,
teamhome = t1,
teamaway = t2,
location = loc)
m.save()
savedmatches.append(m)
# delete cancelled matches:
for e in existingmatches:
if e not in savedmatches:
e.delete()
f.close()
return savedmatches, None
def importMatch(request):
matches = Match.objects.exclude(date__lte=date.today()).order_by('date')
if request.method == 'POST':
form = importMatchesForm(request.POST, request.FILES)
if form.is_valid():
savedmatches, fail = readMatch(request.FILES['matches'])
# request.FILES['matches'].open("rb")
# portfolio = csv.DictReader(request.FILES['uploadFile'].file)
return render(request, 'savematch_success.html', {'savedmatches':savedmatches, 'fail': fail})
else:
form = importMatchesForm()
return render(request, 'savematch.html', {'form': form, 'matches': matches})
def viewMyMatches(request):
u1 = User.objects.get(username=request.user.username)
teampk = u1.get_profile().team_member.pk
matches = Match.objects.get_my_matches(teampk)
presentmatches = {}
for m in matches:
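# 'Aanwezig' / 'Afwezig' are Dutch for present / absent (user-facing strings).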
if m.playerPresent(teampk, u1):
status = 'Aanwezig'
else:
status = 'Afwezig'
presentmatches[m] = MatchPresence(initial = status)
# raise ValueError
return render(request, 'mymatches.html', {'mymatches': matches, 'presentmatches':presentmatches})
def offerSubstitute(request, matchpk, teampk, substitutepk):
match = Match.objects.get(pk=matchpk)
match.addSubstitute(teampk = teampk, player = Player.objects.get(pk=substitutepk))
messages.add_message(request, messages.SUCCESS, 'Je hebt jezelf aangemeld als mogelijke invaller. Goed bezig!!')
# return render(request, 'substitutewilling_confirmation.html')
# redirect_url = reverse(viewMatch, args=match | if request.method == 'POST' and m.isTeam(teampk):
form = ArrangeSubstitutesForm(request.POST) | random_line_split |
hotflip.py | to compute is") and a version
# where the full prefix is discovered by HotFlip without any assistance.
preprefix_ids = [self.tokenizer.bos_token_id] if self.tokenizer.bos_token_id else []
if preprefix:
preprefix_ids.extend(self.tokenizer.encode(preprefix))
self.preprefix_ids = torch.tensor(preprefix_ids, dtype=int).to(device)
self.prefix_ids = None
self._set_prefix_ids(
self.init_discrete_prefix(num_tokens=self._num_tokens)
)
print(f"preprefix: '{preprefix}'")
# disable grads to model
for p in self.model.parameters(): p.requires_grad = False
# track data specific to HotFlip
self._epoch = 0
self._data = []
self._loss_for_prefix = {}
#
self.prefix_before_input = args.prefix_before_input
def check_early_stop(self) -> bool:
|
def _set_prefix_ids(self, new_ids: torch.Tensor) -> None:
assert new_ids.ndim == 1, "cannot set prefix with more than 1 dim (need list of IDs)"
# Track steps since new prefix to enable early stopping
if (self.prefix_ids is not None) and (self.prefix_ids == new_ids).all():
self._steps_since_new_prefix += 1
else:
self._steps_since_new_prefix = 0
self.prefix_ids = new_ids.to(device)
self.prefix_embedding = nn.Parameter(
self.token_embedding.to(device).forward(self.prefix_ids), requires_grad=True
)
# track prefixes we've tried
self._tested_prefix_ids[(tuple(new_ids.flatten().tolist()), self._swap_token_idx)] += 1
def pre_epoch(self) -> None:
# Print closest tokens at the beginning of each epoch.
if VERBOSE:
print("*" * 30)
print(f"Epoch {epoch}. Closest tokens to '{prefix_str}':")
word_distances = ((self.token_embedding.weight - self.prefix_embedding.reshape(1, emb_dim))**2).sum(1)
assert word_distances.shape == (50_257,)
topk_closest_words = word_distances.topk(k=TOP_K, largest=False)
for _id, _dist in zip(topk_closest_words.indices.cpu().tolist(), topk_closest_words.values.cpu().tolist()):
print(f'\t{self.id_to_word[_id]} ({_id}): {_dist:.3f}')
print("*" * 30)
@property
def _prefix_token_grad(self) -> torch.Tensor:
"""Gradient of the prefix tokens wrt the token embedding matrix."""
return torch.einsum('nd,vd->nv', self.prefix_embedding.grad, self.token_embedding.weight)
def compute_loss_and_call_backward(
self,
x_tokenized: transformers.BatchEncoding,
y_tokenized: transformers.BatchEncoding,
possible_answer_mask: torch.Tensor,
full_text_tokenized: Optional[transformers.BatchEncoding] = None
) -> Tuple[torch.Tensor, int]:
"""Computes loss using `self.loss_func`.
Returns:
loss (float torch.Tensor): the loss
num_correct (int): number of examples where prediction was correct
"""
original_input_ids = x_tokenized.input_ids
next_token_ids = y_tokenized.input_ids # only compute loss over next token
_input_ids, loss, n_correct = self._compute_loss_with_set_prefix(
original_input_ids=original_input_ids,
next_token_ids=next_token_ids, # only compute loss over next token
possible_answer_mask=possible_answer_mask
)
loss.backward()
# self._set_prefix_ids(best_prefix)
return loss, n_correct
def post_epoch(self, dataloader: torch.utils.data.DataLoader, possible_answer_mask: torch.Tensor) -> None:
#
# Get candidate IDs for every position.
#
token_idx = self._swap_token_idx
token_grads = self._prefix_token_grad
top_tokens_per_position = (
token_grads.topk(k=self._num_candidates_per_prefix_token, dim=1, largest=False).indices
)
assert top_tokens_per_position.shape == (self._num_tokens, self._num_candidates_per_prefix_token)
top_swap_tokens = top_tokens_per_position[token_idx, :]
#
# Get most likely tokens.
#
prefix_until_swap_ids = torch.cat(
(self.preprefix_ids.to(device), self.prefix_ids[:token_idx].to(device)), dim=0
)[None].to(device)
with torch.no_grad():
all_preprefix_logits = self.model(prefix_until_swap_ids)
swap_token_logits = all_preprefix_logits.logits[:, -1, :]
rvocab = {v: k for k,v in self.tokenizer.vocab.items()}
# dist_sum = (swap_token_logits.log_softmax(dim=1) * .7 + (-1 * token_grads).log_softmax(dim=1))
# for v in (swap_token_logits.log_softmax(dim=1) * .7 + (-1 * token_grads).log_softmax(dim=1)).topk(10).indices.flatten(): print(rvocab[v.item()])
alpha = 0.0 # TODO argparse for this alpha
print(f"HotFlip alpha = {alpha}")
token_losses = (
(swap_token_logits.log_softmax(dim=1) * alpha + (-1 * token_grads).log_softmax(dim=1))
)
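# Scoring sketch (comment only): each candidate token t at the swap
# position is scored as alpha * log p_LM(t | tokens before the swap)
# plus log_softmax(-grad)[t], i.e. a fluency term from the model's own
# next-token distribution blended with a first-order estimate of how
# much swapping in t would decrease the loss. With alpha = 0.0, only
# the gradient term is active.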
top_swap_tokens = token_losses.argsort(descending=True).flatten()
# if we've already tried this (prefix, swap_token_idx) combo, then let's try the next n candidates.
_n = self._tested_prefix_ids[tuple(self.prefix_ids.flatten().tolist()), token_idx] - 1
assert _n >= 0, "something went wrong"
top_swap_tokens = top_swap_tokens[(_n * self._num_candidates_per_prefix_token) : (_n+1) * self._num_candidates_per_prefix_token]
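# Windowing sketch (comment only): _n counts how many times this exact
# (prefix, swap position) pair has been visited before, so visit 0
# evaluates candidates [0, k), visit 1 evaluates [k, 2k), and so on,
# with k = self._num_candidates_per_prefix_token.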
#
# Evaluate candidates.
#
all_candidate_losses = torch.zeros(self._num_candidates_per_prefix_token, dtype=float).to(device)
all_n_correct = torch.zeros(self._num_candidates_per_prefix_token, dtype=int).to(device)
best_loss = self._min_loss
mask = torch.nn.functional.one_hot(
torch.tensor(token_idx), num_classes=self._num_tokens
).bool().to(device)
# Evaluate each prefix.
for batch in tqdm.tqdm(dataloader, desc='evaluating HotFlip candidates', colour='red', leave=False):
# Loop in this order so we only tokenize each thing once.
x_text, y_text = self.prepare_batch(batch=batch)
input_ids = self.tokenizer(x_text, return_tensors='pt', padding='longest')['input_ids'].to(device)
next_token_ids = self.tokenizer(y_text, return_tensors='pt', padding='longest')['input_ids'].to(device)
# only evaluate on single next-token
next_token_ids = next_token_ids[:, 0]
for candidate_idx in range(self._num_candidates_per_prefix_token):
new_token_id = top_swap_tokens[candidate_idx]
prefix_ids = torch.where(
mask, new_token_id, self.prefix_ids.to(device)
).to(device)
with torch.no_grad():
_input_ids, loss, n_correct = (
self._compute_loss_with_set_prefix(
original_input_ids=input_ids,
next_token_ids=next_token_ids,
possible_answer_mask=possible_answer_mask,
prefix_ids=prefix_ids
)
)
all_candidate_losses[candidate_idx] += loss
all_n_correct[candidate_idx] += n_correct
##################################################################################################################
hotflip_out_path = os.path.join(self.args.save_dir_unique, 'hotflip_grads_data.p')
for _i in range(self._num_candidates_per_prefix_token):
token_id = top_swap_tokens[_i].item()
# rank, prefix, token_id, token_grad, loss_with_this_token, n_correct_with_this_token
self._data.append(
(_i, self.prefix_ids.tolist(), token_id, token_grads.flatten()[token_id].item(), all_candidate_losses[_i].item(), all_n_correct[_i].item())
)
pickle.dump(self._data, open(hotflip_out_path, 'wb'))
##################################################################################################################
#
# Collect losses for all prefixes. Then set prefix to best one we haven't seen before.
#
for candidate_idx in range(self._num_candidates_per_prefix_token):
new_token_id = top_swap_tokens[candidate_idx]
prefix_ids = tuple(
torch.where(
mask, new_token_id, self.prefix_ids.to(device)
).tolist()
)
self._loss_for_prefix[prefix_ids] = (
all_candidate_losses[candidate_idx].item(),
all_n_correct[candidate_idx].item()
)
# next prefix is the one we know about with the min loss that we haven't tried
# so far.
best_prefix_ids = min(self._loss_for_prefix, key=lambda p: self._loss_for_prefix.get(p)[0])
best_loss, best_n_correct = self._loss_for_prefix[best_prefix_ids]
# if loss < self._min_loss:
# self._min_loss = loss
| """Allow prefix models to stop early."""
if self.args.early_stopping_steps == -1:
return False
return self._steps_since_new_prefix >= self.args.early_stopping_steps | identifier_body |
hotflip.py | model: transformers.PreTrainedModel,
tokenizer: transformers.PreTrainedTokenizer,
preprefix: str = ''
):
super().__init__(
args=args, loss_func=loss_func, model=model, tokenizer=tokenizer, preprefix=preprefix
)
# HotFlip-specific parameters.
self._min_loss = float('inf')
self._num_tokens = args.num_learned_tokens # TODO argparse for n_tokens
self._num_candidates_per_prefix_token = args.hotflip_num_candidates # TODO argparse for this too
self._swap_token_idx = 0
self._tested_prefix_ids = collections.defaultdict(lambda: 0)
# Support both a version with a preprefix ("The function to compute is") and a version
# where the full prefix is discovered by HotFlip without any assistance.
preprefix_ids = [self.tokenizer.bos_token_id] if self.tokenizer.bos_token_id else []
if preprefix:
preprefix_ids.extend(self.tokenizer.encode(preprefix))
self.preprefix_ids = torch.tensor(preprefix_ids, dtype=int).to(device)
self.prefix_ids = None
self._set_prefix_ids(
self.init_discrete_prefix(num_tokens=self._num_tokens)
)
print(f"preprefix: '{preprefix}'")
# disable grads to model
for p in self.model.parameters(): p.requires_grad = False
# track data specific to HotFlip
self._epoch = 0
self._data = []
self._loss_for_prefix = {}
#
self.prefix_before_input = args.prefix_before_input
def check_early_stop(self) -> bool:
"""Allow prefix models to stop early."""
if self.args.early_stopping_steps == -1:
return False
return self._steps_since_new_prefix >= self.args.early_stopping_steps
def _set_prefix_ids(self, new_ids: torch.Tensor) -> None:
assert new_ids.ndim == 1, "cannot set prefix with more than 1 dim (need list of IDs)"
# Track steps since new prefix to enable early stopping
if (self.prefix_ids is not None) and (self.prefix_ids == new_ids).all():
self._steps_since_new_prefix += 1
else:
self._steps_since_new_prefix = 0
self.prefix_ids = new_ids.to(device)
self.prefix_embedding = nn.Parameter(
self.token_embedding.to(device).forward(self.prefix_ids), requires_grad=True
)
# track prefixes we've tried
self._tested_prefix_ids[(tuple(new_ids.flatten().tolist()), self._swap_token_idx)] += 1
def pre_epoch(self) -> None:
# Print closest tokens at the beginning of each epoch.
if VERBOSE:
print("*" * 30)
print(f"Epoch {epoch}. Closest tokens to '{prefix_str}':")
word_distances = ((self.token_embedding.weight - self.prefix_embedding.reshape(1, emb_dim))**2).sum(1)
assert word_distances.shape == (50_257,)
topk_closest_words = word_distances.topk(k=TOP_K, largest=False)
for _id, _dist in zip(topk_closest_words.indices.cpu().tolist(), topk_closest_words.values.cpu().tolist()):
print(f'\t{self.id_to_word[_id]} ({_id}): {_dist:.3f}')
print("*" * 30)
@property
def _prefix_token_grad(self) -> torch.Tensor:
"""Gradient of the prefix tokens wrt the token embedding matrix."""
return torch.einsum('nd,vd->nv', self.prefix_embedding.grad, self.token_embedding.weight)
def compute_loss_and_call_backward(
self,
x_tokenized: transformers.BatchEncoding,
y_tokenized: transformers.BatchEncoding,
possible_answer_mask: torch.Tensor,
full_text_tokenized: Optional[transformers.BatchEncoding] = None
) -> Tuple[torch.Tensor, int]:
"""Computes loss using `self.loss_func`.
Returns:
loss (float torch.Tensor) -- the loss
num_correct (int): number of examples where prediction was correct
"""
original_input_ids = x_tokenized.input_ids
next_token_ids = y_tokenized.input_ids # only compute loss over next token
_input_ids, loss, n_correct = self._compute_loss_with_set_prefix(
original_input_ids=original_input_ids,
next_token_ids=next_token_ids, # only compute loss over next token
possible_answer_mask=possible_answer_mask
)
loss.backward()
# self._set_prefix_ids(best_prefix)
return loss, n_correct
def post_epoch(self, dataloader: torch.utils.data.DataLoader, possible_answer_mask: torch.Tensor) -> None:
#
# Get candidate IDs for every position.
#
token_idx = self._swap_token_idx
token_grads = self._prefix_token_grad
top_tokens_per_position = (
token_grads.topk(k=self._num_candidates_per_prefix_token, dim=1, largest=False).indices
)
assert top_tokens_per_position.shape == (self._num_tokens, self._num_candidates_per_prefix_token)
top_swap_tokens = top_tokens_per_position[token_idx, :]
#
# Get most likely tokens.
#
prefix_until_swap_ids = torch.cat(
(self.preprefix_ids.to(device), self.prefix_ids[:token_idx].to(device)), dim=0
)[None].to(device)
with torch.no_grad():
all_preprefix_logits = self.model(prefix_until_swap_ids)
swap_token_logits = all_preprefix_logits.logits[:, -1, :]
rvocab = {v: k for k,v in self.tokenizer.vocab.items()}
# dist_sum = (swap_token_logits.log_softmax(dim=1) * .7 + (-1 * token_grads).log_softmax(dim=1))
# for v in (swap_token_logits.log_softmax(dim=1) * .7 + (-1 * token_grads).log_softmax(dim=1)).topk(10).indices.flatten(): print(rvocab[v.item()])
alpha = 0.0 # TODO argparse for this alpha
print(f"HotFlip alpha = {alpha}")
token_losses = (
(swap_token_logits.log_softmax(dim=1) * alpha + (-1 * token_grads).log_softmax(dim=1))
)
top_swap_tokens = token_losses.argsort(descending=True).flatten()
# if we've already tried this (prefix, swap_token_idx) combo, then let's try the next n candidates.
_n = self._tested_prefix_ids[tuple(self.prefix_ids.flatten().tolist()), token_idx] - 1
assert _n >= 0, "something went wrong"
top_swap_tokens = top_swap_tokens[(_n * self._num_candidates_per_prefix_token) : (_n+1) * self._num_candidates_per_prefix_token]
#
# Evaluate candidates.
#
all_candidate_losses = torch.zeros(self._num_candidates_per_prefix_token, dtype=float).to(device)
all_n_correct = torch.zeros(self._num_candidates_per_prefix_token, dtype=int).to(device)
best_loss = self._min_loss
mask = torch.nn.functional.one_hot(
torch.tensor(token_idx), num_classes=self._num_tokens
).bool().to(device)
# Evaluate each prefix.
for batch in tqdm.tqdm(dataloader, desc='evaluating HotFlip candidates', colour='red', leave=False):
# Loop in this order so we only tokenize each thing once.
x_text, y_text = self.prepare_batch(batch=batch)
input_ids = self.tokenizer(x_text, return_tensors='pt', padding='longest')['input_ids'].to(device)
next_token_ids = self.tokenizer(y_text, return_tensors='pt', padding='longest')['input_ids'].to(device)
# only evaluate on single next-token
next_token_ids = next_token_ids[:, 0]
for candidate_idx in range(self._num_candidates_per_prefix_token):
new_token_id = top_swap_tokens[candidate_idx]
prefix_ids = torch.where(
mask, new_token_id, self.prefix_ids.to(device)
).to(device)
with torch.no_grad():
_input_ids, loss, n_correct = (
self._compute_loss_with_set_prefix(
original_input_ids=input_ids,
next_token_ids=next_token_ids,
possible_answer_mask=possible_answer_mask,
prefix_ids=prefix_ids
)
)
all_candidate_losses[candidate_idx] += loss
all_n_correct[candidate_idx] += n_correct
##################################################################################################################
hotflip_out_path = os.path.join(self.args.save_dir_unique, 'hotflip_grads_data.p')
for _i in range(self._num_candidates_per_prefix_token):
token_id = top_swap_tokens[_i].item()
# rank, prefix, token_id, token_grad, loss_with_this_token, n_correct_with_this_token
self._data.append(
(_i, self.prefix_ids.tolist(), token_id, token_grads.flatten()[token_id].item(), all_candidate_losses[_i].item(), all_n_correct[_i].item())
)
pickle.dump(self._data, open(hotflip_out_path, 'wb'))
##################################################################################################################
#
# Collect losses for all prefixes. Then set prefix to best one we haven't seen before.
#
for candidate_idx in range(self._num_candidates_per_prefix_token):
new_token | args: argparse.Namespace,
loss_func: PrefixLoss, | random_line_split |
|
hotflip.py | to compute is") and a version
# where the full prefix is discovered by HotFlip without any assistance.
preprefix_ids = [self.tokenizer.bos_token_id] if self.tokenizer.bos_token_id else []
if preprefix:
preprefix_ids.extend(self.tokenizer.encode(preprefix))
self.preprefix_ids = torch.tensor(preprefix_ids, dtype=int).to(device)
self.prefix_ids = None
self._set_prefix_ids(
self.init_discrete_prefix(num_tokens=self._num_tokens)
)
print(f"preprefix: '{preprefix}'")
# disable grads to model
for p in self.model.parameters(): p.requires_grad = False
# track data specific to HotFlip
self._epoch = 0
self._data = []
self._loss_for_prefix = {}
#
self.prefix_before_input = args.prefix_before_input
def check_early_stop(self) -> bool:
"""Allow prefix models to stop early."""
if self.args.early_stopping_steps == -1:
return False
return self._steps_since_new_prefix >= self.args.early_stopping_steps
def _set_prefix_ids(self, new_ids: torch.Tensor) -> None:
assert new_ids.ndim == 1, "cannot set prefix with more than 1 dim (need list of IDs)"
# Track steps since new prefix to enable early stopping
if (self.prefix_ids is not None) and (self.prefix_ids == new_ids).all():
self._steps_since_new_prefix += 1
else:
self._steps_since_new_prefix = 0
self.prefix_ids = new_ids.to(device)
self.prefix_embedding = nn.Parameter(
self.token_embedding.to(device).forward(self.prefix_ids), requires_grad=True
)
# track prefixes we've tried
self._tested_prefix_ids[(tuple(new_ids.flatten().tolist()), self._swap_token_idx)] += 1
def pre_epoch(self) -> None:
# Print closest tokens at the beginning of each epoch.
if VERBOSE:
|
@property
def _prefix_token_grad(self) -> torch.Tensor:
"""Gradient of the prefix tokens wrt the token embedding matrix."""
return torch.einsum('nd,vd->nv', self.prefix_embedding.grad, self.token_embedding.weight)
def compute_loss_and_call_backward(
self,
x_tokenized: transformers.BatchEncoding,
y_tokenized: transformers.BatchEncoding,
possible_answer_mask: torch.Tensor,
full_text_tokenized: Optional[transformers.BatchEncoding] = None
) -> Tuple[torch.Tensor, int]:
"""Computes loss using `self.loss_func`.
Returns:
loss (float torch.Tensor) -- the loss
num_correct (int): number of examples where prediction was correct
"""
original_input_ids = x_tokenized.input_ids
next_token_ids = y_tokenized.input_ids # only compute loss over next token
_input_ids, loss, n_correct = self._compute_loss_with_set_prefix(
original_input_ids=original_input_ids,
next_token_ids=next_token_ids, # only compute loss over next token
possible_answer_mask=possible_answer_mask
)
loss.backward()
# self._set_prefix_ids(best_prefix)
return loss, n_correct
def post_epoch(self, dataloader: torch.utils.data.DataLoader, possible_answer_mask: torch.Tensor) -> None:
#
# Get candidate IDs for every position.
#
token_idx = self._swap_token_idx
token_grads = self._prefix_token_grad
top_tokens_per_position = (
token_grads.topk(k=self._num_candidates_per_prefix_token, dim=1, largest=False).indices
)
assert top_tokens_per_position.shape == (self._num_tokens, self._num_candidates_per_prefix_token)
top_swap_tokens = top_tokens_per_position[token_idx, :]
#
# Get most likely tokens.
#
prefix_until_swap_ids = torch.cat(
(self.preprefix_ids.to(device), self.prefix_ids[:token_idx].to(device)), dim=0
)[None].to(device)
with torch.no_grad():
all_preprefix_logits = self.model(prefix_until_swap_ids)
swap_token_logits = all_preprefix_logits.logits[:, -1, :]
rvocab = {v: k for k,v in self.tokenizer.vocab.items()}
# dist_sum = (swap_token_logits.log_softmax(dim=1) * .7 + (-1 * token_grads).log_softmax(dim=1))
# for v in (swap_token_logits.log_softmax(dim=1) * .7 + (-1 * token_grads).log_softmax(dim=1)).topk(10).indices.flatten(): print(rvocab[v.item()])
alpha = 0.0 # TODO argparse for this alpha
print(f"HotFlip alpha = {alpha}")
token_losses = (
(swap_token_logits.log_softmax(dim=1) * alpha + (-1 * token_grads).log_softmax(dim=1))
)
top_swap_tokens = token_losses.argsort(descending=True).flatten()
# if we've already tried this (prefix, swap_token_idx) combo, then let's try the next n candidates.
_n = self._tested_prefix_ids[tuple(self.prefix_ids.flatten().tolist()), token_idx] - 1
assert _n >= 0, "something went wrong"
top_swap_tokens = top_swap_tokens[(_n * self._num_candidates_per_prefix_token) : (_n+1) * self._num_candidates_per_prefix_token]
#
# Evaluate candidates.
#
all_candidate_losses = torch.zeros(self._num_candidates_per_prefix_token, dtype=float).to(device)
all_n_correct = torch.zeros(self._num_candidates_per_prefix_token, dtype=int).to(device)
best_loss = self._min_loss
mask = torch.nn.functional.one_hot(
torch.tensor(token_idx), num_classes=self._num_tokens
).bool().to(device)
# Evaluate each prefix.
for batch in tqdm.tqdm(dataloader, desc='evaluating HotFlip candidates', colour='red', leave=False):
# Loop in this order so we only tokenize each thing once.
x_text, y_text = self.prepare_batch(batch=batch)
input_ids = self.tokenizer(x_text, return_tensors='pt', padding='longest')['input_ids'].to(device)
next_token_ids = self.tokenizer(y_text, return_tensors='pt', padding='longest')['input_ids'].to(device)
# only evaluate on single next-token
next_token_ids = next_token_ids[:, 0]
for candidate_idx in range(self._num_candidates_per_prefix_token):
new_token_id = top_swap_tokens[candidate_idx]
prefix_ids = torch.where(
mask, new_token_id, self.prefix_ids.to(device)
).to(device)
with torch.no_grad():
_input_ids, loss, n_correct = (
self._compute_loss_with_set_prefix(
original_input_ids=input_ids,
next_token_ids=next_token_ids,
possible_answer_mask=possible_answer_mask,
prefix_ids=prefix_ids
)
)
all_candidate_losses[candidate_idx] += loss
all_n_correct[candidate_idx] += n_correct
##################################################################################################################
hotflip_out_path = os.path.join(self.args.save_dir_unique, 'hotflip_grads_data.p')
for _i in range(self._num_candidates_per_prefix_token):
token_id = top_swap_tokens[_i].item()
# rank, prefix, token_id, token_grad, loss_with_this_token, n_correct_with_this_token
self._data.append(
(_i, self.prefix_ids.tolist(), token_id, token_grads.flatten()[token_id].item(), all_candidate_losses[_i].item(), all_n_correct[_i].item())
)
pickle.dump(self._data, open(hotflip_out_path, 'wb'))
##################################################################################################################
#
# Collect losses for all prefixes. Then set prefix to best one we haven't seen before.
#
for candidate_idx in range(self._num_candidates_per_prefix_token):
new_token_id = top_swap_tokens[candidate_idx]
prefix_ids = tuple(
torch.where(
mask, new_token_id, self.prefix_ids.to(device)
).tolist()
)
self._loss_for_prefix[prefix_ids] = (
all_candidate_losses[candidate_idx].item(),
all_n_correct[candidate_idx].item()
)
# next prefix is the one we know about with the min loss that we haven't tried
# so far.
best_prefix_ids = min(self._loss_for_prefix, key=lambda p: self._loss_for_prefix.get(p)[0])
best_loss, best_n_correct = self._loss_for_prefix[best_prefix_ids]
# if loss < self._min_loss:
# self._min_loss = loss
| print("*" * 30)
print(f"Epoch {epoch}. Closest tokens to '{prefix_str}':")
word_distances = ((self.token_embedding.weight - self.prefix_embedding.reshape(1, emb_dim))**2).sum(1)
assert word_distances.shape == (50_257,)
topk_closest_words = word_distances.topk(k=TOP_K, largest=False)
for _id, _dist in zip(topk_closest_words.indices.cpu().tolist(), topk_closest_words.values.cpu().tolist()):
print(f'\t{self.id_to_word[_id]} ({_id}): {_dist:.3f}')
print("*" * 30) | conditional_block |
hotflip.py | compute is") and a version
# where the full prefix is discovered by HotFlip without any assistance.
preprefix_ids = [self.tokenizer.bos_token_id] if self.tokenizer.bos_token_id else []
if preprefix:
preprefix_ids.extend(self.tokenizer.encode(preprefix))
self.preprefix_ids = torch.tensor(preprefix_ids, dtype=int).to(device)
self.prefix_ids = None
self._set_prefix_ids(
self.init_discrete_prefix(num_tokens=self._num_tokens)
)
print(f"preprefix: '{preprefix}'")
# disable grads to model
for p in self.model.parameters(): p.requires_grad = False
# track data specific to HotFlip
self._epoch = 0
self._data = []
self._loss_for_prefix = {}
#
self.prefix_before_input = args.prefix_before_input
def | (self) -> bool:
"""Allow prefix models to stop early."""
if self.args.early_stopping_steps == -1:
return False
return self._steps_since_new_prefix >= self.args.early_stopping_steps
def _set_prefix_ids(self, new_ids: torch.Tensor) -> None:
assert new_ids.ndim == 1, "cannot set prefix with more than 1 dim (need list of IDs)"
# Track steps since new prefix to enable early stopping
if (self.prefix_ids is not None) and (self.prefix_ids == new_ids).all():
self._steps_since_new_prefix += 1
else:
self._steps_since_new_prefix = 0
self.prefix_ids = new_ids.to(device)
self.prefix_embedding = nn.Parameter(
self.token_embedding.to(device).forward(self.prefix_ids), requires_grad=True
)
# track prefixes we've tried
self._tested_prefix_ids[(tuple(new_ids.flatten().tolist()), self._swap_token_idx)] += 1
def pre_epoch(self) -> None:
# Print closest tokens at the beginning of each epoch.
if VERBOSE:
print("*" * 30)
print(f"Epoch {epoch}. Closest tokens to '{prefix_str}':")
word_distances = ((self.token_embedding.weight - self.prefix_embedding.reshape(1, emb_dim))**2).sum(1)
assert word_distances.shape == (50_257,)
topk_closest_words = word_distances.topk(k=TOP_K, largest=False)
for _id, _dist in zip(topk_closest_words.indices.cpu().tolist(), topk_closest_words.values.cpu().tolist()):
print(f'\t{self.id_to_word[_id]} ({_id}): {_dist:.3f}')
print("*" * 30)
@property
def _prefix_token_grad(self) -> torch.Tensor:
"""Gradient of the prefix tokens wrt the token embedding matrix."""
return torch.einsum('nd,vd->nv', self.prefix_embedding.grad, self.token_embedding.weight)
def compute_loss_and_call_backward(
self,
x_tokenized: transformers.BatchEncoding,
y_tokenized: transformers.BatchEncoding,
possible_answer_mask: torch.Tensor,
full_text_tokenized: Optional[transformers.BatchEncoding] = None
) -> Tuple[torch.Tensor, int]:
"""Computes loss using `self.loss_func`.
Returns:
loss (float torch.Tensor) -- the loss
num_correct (int): number of examples where prediction was correct
"""
original_input_ids = x_tokenized.input_ids
next_token_ids = y_tokenized.input_ids # only compute loss over next token
_input_ids, loss, n_correct = self._compute_loss_with_set_prefix(
original_input_ids=original_input_ids,
next_token_ids=next_token_ids, # only compute loss over next token
possible_answer_mask=possible_answer_mask
)
loss.backward()
# self._set_prefix_ids(best_prefix)
return loss, n_correct
def post_epoch(self, dataloader: torch.utils.data.DataLoader, possible_answer_mask: torch.Tensor) -> None:
#
# Get candidate IDs for every position.
#
token_idx = self._swap_token_idx
token_grads = self._prefix_token_grad
top_tokens_per_position = (
token_grads.topk(k=self._num_candidates_per_prefix_token, dim=1, largest=False).indices
)
assert top_tokens_per_position.shape == (self._num_tokens, self._num_candidates_per_prefix_token)
top_swap_tokens = top_tokens_per_position[token_idx, :]
#
# Get most likely tokens.
#
prefix_until_swap_ids = torch.cat(
(self.preprefix_ids.to(device), self.prefix_ids[:token_idx].to(device)), dim=0
)[None].to(device)
with torch.no_grad():
all_preprefix_logits = self.model(prefix_until_swap_ids)
swap_token_logits = all_preprefix_logits.logits[:, -1, :]
rvocab = {v: k for k,v in self.tokenizer.vocab.items()}
# dist_sum = (swap_token_logits.log_softmax(dim=1) * .7 + (-1 * token_grads).log_softmax(dim=1))
# for v in (swap_token_logits.log_softmax(dim=1) * .7 + (-1 * token_grads).log_softmax(dim=1)).topk(10).indices.flatten(): print(rvocab[v.item()])
alpha = 0.0 # TODO argparse for this alpha
print(f"HotFlip alpha = {alpha}")
token_losses = (
(swap_token_logits.log_softmax(dim=1) * alpha + (-1 * token_grads).log_softmax(dim=1))
)
top_swap_tokens = token_losses.argsort(descending=True).flatten()
# if we've already tried this (prefix, swap_token_idx) combo, then let's try the next n candidates.
_n = self._tested_prefix_ids[tuple(self.prefix_ids.flatten().tolist()), token_idx] - 1
assert _n >= 0, "something went wrong"
top_swap_tokens = top_swap_tokens[(_n * self._num_candidates_per_prefix_token) : (_n+1) * self._num_candidates_per_prefix_token]
#
# Evaluate candidates.
#
all_candidate_losses = torch.zeros(self._num_candidates_per_prefix_token, dtype=float).to(device)
all_n_correct = torch.zeros(self._num_candidates_per_prefix_token, dtype=int).to(device)
best_loss = self._min_loss
mask = torch.nn.functional.one_hot(
torch.tensor(token_idx), num_classes=self._num_tokens
).bool().to(device)
# Evaluate each prefix.
for batch in tqdm.tqdm(dataloader, desc='evaluating HotFlip candidates', colour='red', leave=False):
# Loop in this order so we only tokenize each thing once.
x_text, y_text = self.prepare_batch(batch=batch)
input_ids = self.tokenizer(x_text, return_tensors='pt', padding='longest')['input_ids'].to(device)
next_token_ids = self.tokenizer(y_text, return_tensors='pt', padding='longest')['input_ids'].to(device)
# only evaluate on single next-token
next_token_ids = next_token_ids[:, 0]
for candidate_idx in range(self._num_candidates_per_prefix_token):
new_token_id = top_swap_tokens[candidate_idx]
prefix_ids = torch.where(
mask, new_token_id, self.prefix_ids.to(device)
).to(device)
with torch.no_grad():
_input_ids, loss, n_correct = (
self._compute_loss_with_set_prefix(
original_input_ids=input_ids,
next_token_ids=next_token_ids,
possible_answer_mask=possible_answer_mask,
prefix_ids=prefix_ids
)
)
all_candidate_losses[candidate_idx] += loss
all_n_correct[candidate_idx] += n_correct
##################################################################################################################
hotflip_out_path = os.path.join(self.args.save_dir_unique, 'hotflip_grads_data.p')
for _i in range(self._num_candidates_per_prefix_token):
token_id = top_swap_tokens[_i].item()
# rank, prefix, token_id, token_grad, loss_with_this_token, n_correct_with_this_token
self._data.append(
(_i, self.prefix_ids.tolist(), token_id, token_grads.flatten()[token_id].item(), all_candidate_losses[_i].item(), all_n_correct[_i].item())
)
pickle.dump(self._data, open(hotflip_out_path, 'wb'))
##################################################################################################################
#
# Collect losses for all prefixes. Then set prefix to best one we haven't seen before.
#
for candidate_idx in range(self._num_candidates_per_prefix_token):
new_token_id = top_swap_tokens[candidate_idx]
prefix_ids = tuple(
torch.where(
mask, new_token_id, self.prefix_ids.to(device)
).tolist()
)
self._loss_for_prefix[prefix_ids] = (
all_candidate_losses[candidate_idx].item(),
all_n_correct[candidate_idx].item()
)
# next prefix is the one we know about with the min loss that we haven't tried
# so far.
best_prefix_ids = min(self._loss_for_prefix, key=lambda p: self._loss_for_prefix.get(p)[0])
best_loss, best_n_correct = self._loss_for_prefix[best_prefix_ids]
# if loss < self._min_loss:
# self._min_loss = loss
| check_early_stop | identifier_name |
cartesian.rs | a>(collections: &'a [C]) -> Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
{
// We start with fresh iterators and prime `next_item` with the first item
// from each (or `None` overall if any collection is empty).
let mut iterators = collections.iter().map(<&C>::into_iter).collect::<Vec<_>>();
let next_item = iterators.iter_mut().map(Iterator::next).collect();
Product {
collections,
iterators,
next_item,
}
}
/// Iterator returned by [`product()`].
///
/// [`product()`]: ./fn.product.html
pub struct Product<'a, C: 'a, T: 'a>
where
&'a C: IntoIterator<Item = &'a T>,
{
/// The underlying collections that we iterate over.
collections: &'a [C],
/// Our own set of sub-iterators, taken from `collections`.
iterators: Vec<<&'a C as IntoIterator>::IntoIter>,
/// The next item to yield.
next_item: Option<Vec<&'a T>>,
}
impl<'a, C, T> Iterator for Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
{
type Item = Vec<&'a T>;
fn next(&mut self) -> Option<Self::Item> {
let result = self.next_item.clone();
self.advance();
result
}
/// Calculate bounds on the number of remaining elements.
///
/// This is calculated the same way as [`Product::len()`], but uses
/// a helper type to deal with the return type of `size_hint()`.
/// See there for information on why the used formula is corrected.
///
/// [`Product::len()`]: #method.len
fn size_hint(&self) -> (usize, Option<usize>) {
if self.next_item.is_none() {
return (0, Some(0));
}
let SizeHint(lower, upper) = SizeHint(1, Some(1))
+ self
.iterators
.iter()
.enumerate()
.map(|(i, iterator)| {
SizeHint::from(iterator)
* self.collections[i + 1..]
.iter()
.map(|c| SizeHint::from(&c.into_iter()))
.product()
})
.sum();
(lower, upper)
}
}
impl<'a, C, T> ExactSizeIterator for Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
<&'a C as IntoIterator>::IntoIter: ExactSizeIterator,
{
/// Calculates the exact number of remaining elements.
///
/// The length consists of the following contributions:
///
/// - 1 for the `next_item` to be yielded;
/// - `X` for each currently active iterator, where X is the
/// product of the iterator's length and the sizes of all
/// *collections* to the right of it in the product.
///
/// Example
/// -------
///
/// Assume the Cartesian product `[1, 2, 3]×[1, 2]×[1, 2, 3]`. Upon
/// construction, the `Product` type creates three iterators `A`,
/// `B`, and `C` – one iterator for each array. It also extracts
/// one item from each to form `next_item`. Hence, `next_item`
/// contributes `1` to the total length. The three iterators
/// contribute as follows:
///
/// - A: 2 items left × collection of size 2 × collection of size
/// 3 = 12;
/// - B: 1 item left × collection of size 3 = 3;
/// - C: 2 items left = 2.
///
/// Thus, we end up with a total length of `1+12+3+2=18`. This is
/// the same length we get when multiplying the size of all passed
/// collections. (`3*2*3=18`) However, our (complicated) formula
/// also works when the iterator has already yielded some elements.
fn len(&self) -> usize {
if self.next_item.is_none() {
return 0;
}
1 + self
.iterators
.iter()
.enumerate()
.map(|(i, iterator)| {
iterator.len()
* self.collections[i + 1..]
.iter()
.map(|c| c.into_iter().len())
.product::<usize>()
})
.sum::<usize>()
}
}
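// A minimal sketch (hypothetical test module, not part of the original
// file) exercising the worked example from the doc comment above:
// [1, 2, 3]×[1, 2]×[1, 2, 3] reports a length of 3*2*3 = 18 upon
// construction, shrinking by one with every yielded item.
#[cfg(test)]
mod len_sketch {
    use super::product;

    #[test]
    fn len_matches_worked_example() {
        let collections = [vec![1, 2, 3], vec![1, 2], vec![1, 2, 3]];
        let mut iter = product(&collections);
        assert_eq!(iter.len(), 18);
        iter.next();
        assert_eq!(iter.len(), 17);
    }
}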
impl<'a, C, T> ::std::iter::FusedIterator for Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
<&'a C as IntoIterator>::IntoIter: ExactSizeIterator,
{}
impl<'a, C, T> Product<'a, C, T>
where
&'a C: IntoIterator<Item = &'a T>,
{
/// Advances the iterators and updates `self.next_item`.
///
/// This loop works like incrementing a number digit by digit. We
/// go over each iterator and its corresponding "digit" in
/// `next_item` in lockstep, starting at the back.
///
/// If we can advance the iterator, we update the "digit" and are
/// done. If the iterator is exhausted, we have to go from "9" to
/// "10": we restart the iterator, grab the first element, and move
/// on to the next digit.
///
/// The `break` expressions are to be understood literally: our
/// scheme can break in two ways.
/// 1. The very first iterator (`i==0`) is exhausted.
/// 2. A freshly restarted iterator is empty. (should never happen!)
/// In both cases, we want to exhaust `self` immediately. We do so
/// by breaking out of the loop, falling through to the very last
/// line, and manually set `self.next_item` to `None`.
///
/// Note that there is a so-called nullary case, when
/// `cartesian::product()` is called with an empty slice. While
/// this use-case is debatable, the mathematically correct way to
/// deal with it is to yield some empty vector once and then
/// nothing.
///
/// Luckily, we already handle this correctly! Because of the way
/// `Iterator::collect()` works when collecting into an
/// `Option<Vec<_>>`, `next_item` is initialized to some empty
/// vector, so this will be the first thing we yield. Then, when
/// `self.advance()` is called, we fall through the `while` loop and
/// immediately exhaust this iterator, yielding nothing more.
fn advance(&mut self) {
if let Some(ref mut next_item) = self.next_item {
let mut i = self.iterators.len();
while i > 0 {
i -= 1;
// Grab the next item from the current sub-iterator.
if let Some(elt) = self.iterators[i].next() {
next_item[i] = elt;
// If that works, we're done!
return;
} else if i == 0 {
// Last sub-iterator is exhausted, so we're
// exhausted, too.
break;
}
// The current sub-iterator is empty, start anew.
self.iterators[i] = self.collections[i].into_iter();
if let Some(elt) = self.iterators[i].next() {
next_item[i] = elt;
// Roll over to the next sub-iterator.
} else {
// Should never happen: The freshly restarted
// sub-iterator is already empty.
break;
}
}
}
// Exhaust this iterator if the above loop `break`s.
self.next_item = None;
}
}
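// Sketch of the nullary case described above (hypothetical test module,
// assuming `product()` as defined in this file): an empty slice of
// collections yields exactly one empty vector, then nothing.
#[cfg(test)]
mod nullary_sketch {
    use super::product;

    #[test]
    fn nullary_product_yields_one_empty_vec() {
        let collections: [Vec<i32>; 0] = [];
        let mut iter = product(&collections);
        assert_eq!(iter.next(), Some(vec![]));
        assert_eq!(iter.next(), None);
    }
}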
#[derive(Debug)]
struct SizeHint(usize, Option<usize>);
impl SizeHint {
fn into_inner(self) -> (usize, Option<usize>) {
(self.0, self.1)
}
}
impl<'a, I: Iterator> From<&'a I> for SizeHint {
fn from(iter: &'a I) -> Self {
let (lower, upper) = iter.size_hint();
SizeHint(lower, upper)
}
}
impl ::std::ops::Add for SizeHint {
type Output = Self;
fn add(self, other: Self) -> Self {
let lower = self.0 + other.0;
let upper = match (self.1, other.1) {
(Some(left), Some(right)) => Some(left + right),
_ => None,
};
SizeHint(lower, upper)
}
}
impl ::std::ops::Mul for SizeHint {
type Output = Self;
fn mul(self, other: Self) -> Self {
let lower = self.0 * other.0;
let upper = match (self.1, other.1) {
(Some(left), Some(right)) => Some(left * right),
_ => None,
};
SizeHint(lower, upper)
}
}
impl ::std::iter::Sum for SizeHint {
fn sum<I: I | ter | identifier_name |