@book{xie2015,
title = {Dynamic Documents with \textsf{R} and knitr},
author = {Yihui Xie},
year = 2015,
publisher = {Chapman and Hall/CRC},
address = {Boca Raton, Florida},
edition = {2nd}
}
@book{stinerock2018statistics,
 title = {Statistics with \textsf{R}: A Beginner's Guide},
author = {Stinerock, Robert},
year = 2018,
publisher = {SAGE},
address = {London, UK},
edition = {1st}
}
@inproceedings{bjork2008global,
title = {Global annual volume of peer reviewed scholarly articles and the share available via different Open Access options.},
author = {Bj{\"o}rk, Bo-Christer and Roos, Annikki and Lauri, Mari},
year = 2008,
booktitle = {Proceedings ELPUB 2008 Conference on Electronic Publishing},
pages = {178--186}
}
@article{ioannidis2012science,
title = {Why science is not necessarily self-correcting},
author = {Ioannidis, John PA},
year = 2012,
journal = {Perspectives on Psychological Science},
publisher = {Sage Publications Sage CA: Los Angeles, CA},
volume = 7,
number = 6,
pages = {645--654}
}
@article{meehl1978theoretical,
title = {Theoretical risks and tabular asterisks: {Sir Karl}, {Sir Ronald}, and the slow progress of soft psychology.},
author = {Meehl, Paul E},
year = 1978,
journal = {Journal of Consulting and Clinical Psychology},
publisher = {American Psychological Association},
volume = 46,
number = 4,
pages = 806
}
@article{glass1976primary,
title = {Primary, secondary, and meta-analysis of research},
author = {Glass, Gene V},
year = 1976,
journal = {Educational Researcher},
publisher = {Sage Publications Sage CA: Los Angeles, CA},
volume = 5,
number = 10,
pages = {3--8}
}
@book{lipsey2001practical,
title = {Practical meta-analysis.},
author = {Lipsey, Mark W and Wilson, David B},
year = 2001,
publisher = {SAGE}
}
@book{cuijpers2016meta,
 title = {Meta-analyses in mental health research. A practical guide},
 author = {Cuijpers, Pim},
 year = 2016,
 publisher = {Pim Cuijpers Uitgeverij},
 address = {Amsterdam, the Netherlands}
}
@article{riley2010meta,
title = {Meta-analysis of individual participant data: Rationale, conduct, and reporting},
author = {Riley, Richard D and Lambert, Paul C and Abo-Zaid, Ghada},
year = 2010,
journal = {BMJ},
publisher = {British Medical Journal Publishing Group},
volume = 340,
pages = {c221}
}
@article{riley2007evidence,
title = {Evidence synthesis combining individual patient data and aggregate data: A systematic review identified current practice and possible methods},
author = {Riley, Richard D and Simmonds, Mark C and Look, Maxime P},
year = 2007,
journal = {Journal of Clinical Epidemiology},
publisher = {Elsevier},
volume = 60,
number = 5,
pages = {431.e1--431.e12}
}
@article{o2007historical,
title = {An historical perspective on meta-analysis: Dealing quantitatively with varying study results},
author = {O'Rourke, Keith},
year = 2007,
journal = {Journal of the Royal Society of Medicine},
publisher = {SAGE Publications Sage UK: London, England},
volume = 100,
number = 12,
pages = {579--582}
}
@book{fisher19351he,
title = {The Design of Experiments},
author = {Fisher, Ronald A},
year = 1935,
 publisher = {Oliver \& Boyd},
 address = {Edinburgh, UK}
}
@article{shannon2016statistical,
title = {A statistical note on {Karl Pearson’s} 1904 meta-analysis},
author = {Shannon, Harry},
year = 2016,
journal = {Journal of the Royal Society of Medicine},
publisher = {SAGE Publications Sage UK: London, England},
volume = 109,
number = 8,
pages = {310--311}
}
@book{smith1980benefits,
title = {The benefits of psychotherapy},
author = {Smith, Mary Lee and Glass, Gene V and Miller, Thomas I},
year = 1980,
publisher = {Johns Hopkins University Press}
}
@article{smith1977meta,
title = {Meta-analysis of psychotherapy outcome studies.},
author = {Smith, Mary L and Glass, Gene V},
year = 1977,
journal = {American Psychologist},
publisher = {American Psychological Association},
volume = 32,
number = 9,
pages = 752
}
@article{eysenck1978exercise,
title = {An exercise in mega-silliness},
author = {Eysenck, Hans J},
year = 1978,
journal = {American Psychologist},
publisher = {American Psychological Association},
volume = 33,
number = 5,
 pages = 517
}
@article{cuijpers2019eysenck,
title = {Was {Eysenck} right after all? A reassessment of the effects of psychotherapy for adult depression},
 author = {Cuijpers, Pim and Karyotaki, Eirini and Reijnders, Mirjam and Ebert, David D},
year = 2019,
journal = {Epidemiology and Psychiatric Sciences},
publisher = {Cambridge University Press},
volume = 28,
number = 1,
pages = {21--30}
}
@book{hunter2004methods,
title = {Methods of meta-analysis: Correcting error and bias in research findings},
author = {Hunter, John E and Schmidt, Frank L},
year = 2004,
publisher = {Sage}
}
@article{schmidt1977development,
title = {Development of a general solution to the problem of validity generalization.},
author = {Schmidt, Frank L and Hunter, John E},
year = 1977,
journal = {Journal of Applied Psychology},
publisher = {American Psychological Association},
volume = 62,
number = 5,
pages = 529
}
@article{elwood2006first,
title = {The first randomized trial of aspirin for heart attack and the advent of systematic overviews of trials},
author = {Elwood, Peter},
year = 2006,
journal = {Journal of the Royal Society of Medicine},
publisher = {SAGE Publications Sage UK: London, England},
volume = 99,
number = 11,
pages = {586--588}
}
@article{peto1980aspirin,
title = {Aspirin after myocardial infarction},
author = {Peto, R and Parish, S},
year = 1980,
journal = {The Lancet},
volume = 1,
number = 8179,
pages = {1172--1173}
}
@article{dersimonian1986meta,
title = {Meta-analysis in clinical trials},
author = {DerSimonian, Rebecca and Laird, Nan},
year = 1986,
journal = {Controlled Clinical Trials},
publisher = {Elsevier},
volume = 7,
number = 3,
pages = {177--188}
}
@book{higgins2019cochrane,
title = {Cochrane Handbook for Systematic Reviews of Interventions},
author = {Higgins, Julian and Thomas, James and Chandler, Jacqueline and Cumpston, Miranda and Li, Tianjing and Page, Matthew J and Welch, Vivian A},
year = 2019,
publisher = {John Wiley \& Sons}
}
@article{sterne2019rob,
 title = {{RoB 2}: A revised tool for assessing risk of bias in randomised trials},
author = {Sterne, Jonathan and Savovi{\'c}, Jelena and Page, Matthew J and Elbers, Roy G and Blencowe, Natalie S and Boutron, Isabelle and Cates, Christopher J and Cheng, Hung-Yuan and Corbett, Mark S and Eldridge, Sandra M and Emberson, Jonathan R and Hern{\'a}n, Miguel A and Hopewell, Sally and Hr{\'o}bjartsson, Asbj{\o}rn and Junqueira, Daniela R and J{\"u}ni, Peter and Kirkham, Jamie J and Lasserson, Toby and Li, Tianjing and McAleenan, Alexandra and Reeves, Barnaby C and Shepperd, Sasha and Shrier, Ian and Stewart, Lesley A and Tilling, Kate and White, Ian R and Whiting, Penny F and Higgins, Julian},
year = 2019,
journal = {BMJ},
volume = 366,
 pages = {l4898}
}
@article{patsopoulos2005relative,
title = {Relative citation impact of various study designs in the health sciences},
author = {Patsopoulos, Nikolaos A and Analatos, Apostolos A and Ioannidis, John PA},
year = 2005,
journal = {JAMA},
publisher = {American Medical Association},
volume = 293,
number = 19,
pages = {2362--2366}
}
@article{ioannidis2016mass,
title = {The mass production of redundant, misleading, and conflicted systematic reviews and meta-analyses},
author = {Ioannidis, John PA},
year = 2016,
journal = {The Milbank Quarterly},
publisher = {Wiley Online Library},
volume = 94,
number = 3,
pages = {485--514}
}
@article{ebrahim2016meta,
title = {Meta-analyses with industry involvement are massively published and report no caveats for antidepressants},
author = {Ebrahim, Shanil and Bance, Sheena and Athale, Abha and Malachowski, Cindy and Ioannidis, John PA},
year = 2016,
journal = {Journal of Clinical Epidemiology},
publisher = {Elsevier},
volume = 70,
pages = {155--163}
}
@article{kirsch2002emperor,
title = {The emperor's new drugs: An analysis of antidepressant medication data submitted to the US Food and Drug Administration.},
author = {Kirsch, Irving and Moore, Thomas J and Scoboria, Alan and Nicholls, Sarah S},
year = 2002,
journal = {Prevention \& Treatment},
publisher = {American Psychological Association},
volume = 5,
number = 1,
pages = {23a}
}
@misc{lakens2017examining,
title = {Examining the reproducibility of meta-analyses in psychology: A preliminary report},
 author = {Lakens, Daniel and Page-Gould, Elizabeth and van Assen, Marcel ALM and Spellman, Bobbie and Sch{\"o}nbrodt, Felix and Hasselman, Fred and Corker, Katherine S and Grange, James A and Sharples, Amanda and Cavender, Corinne and Augusteijn, Hilde and Gerger, Heike and Locher, Cosima and Miller, Ian and Anvari, Farid and Scheel, Anne},
year = 2017,
publisher = {MetaArXiv},
howpublished = {\url{https://osf.io/xfbjf/}}
}
@article{cuijpers2019role,
title = {The role of common factors in psychotherapy outcomes},
author = {Cuijpers, Pim and Reijnders, Mirjam and Huibers, Marcus JH},
year = 2019,
journal = {Annual Review of Clinical Psychology},
publisher = {Annual Reviews},
volume = 15,
pages = {207--231}
}
@book{wampold2013great,
title = {The great psychotherapy debate: Models, methods, and findings},
author = {Wampold, Bruce E},
year = 2013,
publisher = {Routledge}
}
@book{borenstein2011introduction,
title = {Introduction to meta-analysis},
author = {Borenstein, Michael and Hedges, Larry V and Higgins, Julian PT and Rothstein, Hannah R},
year = 2011,
publisher = {John Wiley \& Sons}
}
@article{greco2013meta,
 title = {Meta-analysis: Pitfalls and hints},
author = {Greco, T and Zangrillo, A and Biondi-Zoccai, G and Landoni, G},
year = 2013,
journal = {Heart, Lung and Vessels},
publisher = {Edizioni Medico Scientifiche},
volume = 5,
number = 4,
pages = 219
}
@article{sharpe1997apples,
title = {Of apples and oranges, file drawers and garbage: Why validity issues in meta-analysis will not go away},
author = {Sharpe, Donald},
year = 1997,
journal = {Clinical Psychology Review},
publisher = {Elsevier},
volume = 17,
number = 8,
pages = {881--901}
}
@article{fanelli2012negative,
title = {Negative results are disappearing from most disciplines and countries},
author = {Fanelli, Daniele},
year = 2012,
journal = {Scientometrics},
 publisher = {Akad{\'e}miai Kiad{\'o}, co-published with Springer Science \& Business Media},
volume = 90,
number = 3,
pages = {891--904}
}
@article{wicherts2016degrees,
title = {Degrees of freedom in planning, running, analyzing, and reporting psychological studies: A checklist to avoid {$p$}-hacking},
author = {Wicherts, Jelte M and Veldkamp, Coosje LS and Augusteijn, Hilde EM and Bakker, Marjan and Van Aert, Robbie and Van Assen, Marcel ALM},
year = 2016,
journal = {Frontiers in Psychology},
publisher = {Frontiers},
volume = 7,
pages = 1832
}
@article{silberzahn2018many,
title = {Many analysts, one data set: Making transparent how variations in analytic choices affect results},
author = {Silberzahn, Raphael and Uhlmann, Eric Luis and Martin, Daniel P and Anselmi, Pasquale and Aust, Frederik and Awtrey, Eli and Bahn{\'\i}k, {\v{S}}t{\v{e}}p{\'a}n and Bai, Feng and Bannard, Colin and Bonnier, Evelina and Carlsson, R and Cheung, F and Christensen, G and Clay, R and Craig, M A and Dalla Rosa, A and Dam, L and Evans, M H and Flores Cervantes, I and Fong, N and Gamez-Djokic, M and Glenz, A and Gordon-McKeon, S and Heaton, T J and Hederos, K and Heene, M and Hofelich Mohr, A J and H{\"o}gden, F and Hui, K and Johannesson, M and Kalodimos, J and Kaszubowski, E and Kennedy, D M and Lei, R and Lindsay, T A and Liverani, S and Madan, C R and Molden, D and Molleman, E and Morey, R D and Mulder, L B and Nijstad, B R and Pope, N G and Pope, B and Prenoveau, J M and Rink, F and Robusto, E and Roderique, H and Sandberg, A and Schl{\"u}ter, E and Sch{\"o}nbrodt, F D and Sherman, M F and Sommer, S A and Sotak, K and Spain, S and Sp{\"o}rlein, C and Stafford, T and Stefanutti, L and Tauber, S and Ullrich, J and Vianello, M and Wagenmakers, E J and Witkowiak, M and Yoon, S and Nosek, B A},
year = 2018,
journal = {Advances in Methods and Practices in Psychological Science},
publisher = {Sage Publications Sage CA: Los Angeles, CA},
volume = 1,
number = 3,
pages = {337--356}
}
@article{pigott2020methodological,
title = {Methodological guidance paper: High-quality meta-analysis in a systematic review},
author = {Pigott, Terri D and Polanin, Joshua R},
year = 2020,
journal = {Review of Educational Research},
publisher = {SAGE Publications Sage CA: Los Angeles, CA},
volume = 90,
number = 1,
pages = {24--46}
}
@article{hamberg2008gender,
title = {Gender bias in medicine},
author = {Hamberg, Katarina},
year = 2008,
journal = {Women’s Health},
publisher = {Sage Publications Sage UK: London, England},
volume = 4,
number = 3,
pages = {237--243}
}
@article{nielsen2017one,
title = {One and a half million medical papers reveal a link between author gender and attention to gender and sex analysis},
author = {Nielsen, Mathias Wullum and Andersen, Jens Peter and Schiebinger, Londa and Schneider, Jesper W},
year = 2017,
journal = {Nature Human Behaviour},
publisher = {Nature Publishing Group},
volume = 1,
number = 11,
pages = {791--796}
}
@article{kim2009status,
title = {Status of women in cardiovascular clinical trials},
author = {Kim, Esther SH and Menon, Venu},
year = 2009,
 journal = {Arteriosclerosis, Thrombosis, and Vascular Biology},
 publisher = {American Heart Association},
volume = 29,
number = 3,
pages = {279--283}
}
@article{mosca2013fifteen,
title = {Fifteen-year trends in awareness of heart disease in women: Results of a 2012 {American Heart Association} national survey},
author = {Mosca, Lori and Hammond, Gmerice and Mochari-Greenberger, Heidi and Towfighi, Amytis and Albert, Michelle A},
year = 2013,
journal = {Circulation},
 publisher = {American Heart Association},
volume = 127,
number = 11,
pages = {1254--1263}
}
@article{adler2014osteoporosis,
title = {Osteoporosis in men: A review},
author = {Adler, Robert A},
year = 2014,
journal = {Bone Research},
publisher = {Nature Publishing Group},
volume = 2,
pages = 14001
}
@article{cummings2013conceiving,
title = {Conceiving the research question and developing the study plan},
author = {Cummings, Steven R and Browner, Warren S and Hulley, Stephen B},
year = 2013,
journal = {Designing Clinical Research},
volume = 4,
pages = {14--22}
}
@article{appelbaum2018journal,
title = {Journal article reporting standards for quantitative research in psychology: The {APA} Publications and Communications Board task force report.},
author = {Appelbaum, Mark and Cooper, Harris and Kline, Rex B and Mayo-Wilson, Evan and Nezu, Arthur M and Rao, Stephen M},
year = 2018,
journal = {American Psychologist},
publisher = {American Psychological Association},
volume = 73,
number = 1,
pages = 3
}
@article{moher2009preferred,
title = {Preferred reporting items for systematic reviews and meta-analyses: The {PRISMA} statement},
 author = {Moher, David and Liberati, Alessandro and Tetzlaff, Jennifer and Altman, Douglas G and {PRISMA Group} and others},
year = 2009,
journal = {PLoS Medicine},
publisher = {Public Library of Science},
volume = 6,
number = 7
}
@article{mattos2015systematic,
title = {Systematic review and meta-analysis: What are the implications in the clinical practice?},
author = {Mattos, Claudia Trindade and Ruellas, Ant{\^o}nio Carlos de Oliveira},
year = 2015,
journal = {Dental Press Journal of Orthodontics},
publisher = {SciELO Brasil},
volume = 20,
number = 1,
pages = {17--19}
}
@article{henrich2010most,
title = {Most people are not {WEIRD}},
author = {Henrich, Joseph and Heine, Steven J and Norenzayan, Ara},
year = 2010,
journal = {Nature},
publisher = {Nature Publishing Group},
volume = 466,
number = 7302,
pages = 29
}
@article{schopfel2018electronic,
 title = {Are electronic theses and dissertations (still) grey literature in the digital age? A {FAIR} debate},
author = {Schöpfel, Joachim and Rasuli, Behrooz},
year = 2018,
journal = {The Electronic Library},
publisher = {Emerald Publishing Limited},
volume = 36,
number = 2,
pages = {208--219}
}
@article{saruhanjan2020psychological,
title = {Psychological interventions to improve sleep in college students: A meta-analysis of randomized controlled trials},
author = {Saruhanjan, Karina and Zarski, Anna-Carlotta and Bauer, Tobias and Baumeister, Harald and Cuijpers, Pim and Spiegelhalder, Kai and Auerbach, Randy P and Kessler, Ronald C and Bruffaerts, Ronny and Karyotaki, Eirini and Berking, Matthias and Ebert, David D},
year = 2020,
journal = {Journal of Sleep Research},
publisher = {Wiley Online Library},
pages = {e13097}
}
@article{tipton2019history,
title = {A history of meta-regression: Technical, conceptual, and practical developments between 1974 and 2018},
author = {Tipton, Elizabeth and Pustejovsky, James E and Ahmadi, Hedyeh},
year = 2019,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 10,
number = 2,
pages = {161--179}
}
@article{moher2015preferred,
title = {Preferred reporting items for systematic review and meta-analysis protocols ({PRISMA-P}) 2015 statement},
 author = {Moher, David and Shamseer, Larissa and Clarke, Mike and Ghersi, Davina and Liberati, Alessandro and Petticrew, Mark and Shekelle, Paul and Stewart, Lesley A and {PRISMA-P Group}},
year = 2015,
journal = {Systematic Reviews},
publisher = {Springer},
volume = 4,
number = 1,
pages = 1
}
@article{valstad2016relationship,
title = {The relationship between central and peripheral oxytocin concentrations: A systematic review and meta-analysis protocol},
author = {Valstad, Mathias and Alvares, Gail A and Andreassen, Ole A and Westlye, Lars T and Quintana, Daniel S},
year = 2016,
journal = {Systematic Reviews},
publisher = {BioMed Central},
volume = 5,
number = 1,
pages = {1--7}
}
@article{buscher2019effectiveness,
title = {The effectiveness of internet-based self-help interventions to reduce suicidal ideation: Protocol for a systematic review and meta-analysis},
author = {B{\"u}scher, Rebekka and Torok, Michelle and Sander, Lasse},
year = 2019,
journal = {JMIR Research Protocols},
publisher = {JMIR Publications Inc., Toronto, Canada},
volume = 8,
number = 7,
pages = {e14174}
}
@article{quintana2015pre,
title = {From pre-registration to publication: A non-technical primer for conducting a meta-analysis to synthesize correlational data},
author = {Quintana, Daniel S},
year = 2015,
journal = {Frontiers in Psychology},
publisher = {Frontiers},
volume = 6,
pages = 1549
}
@article{bramer2018systematic,
title = {A systematic approach to searching: An efficient and complete method to develop literature searches},
author = {Bramer, Wichor M and de Jonge, Gerdien B and Rethlefsen, Melissa L and Mast, Frans and Kleijnen, Jos},
year = 2018,
journal = {Journal of the Medical Library Association: JMLA},
publisher = {Medical Library Association},
volume = 106,
number = 4,
pages = 531
}
@article{dissemination2009systematic,
title = {Systematic reviews: {CRD's} guidance for undertaking reviews in healthcare},
author = {Tacconelli, Evelina},
year = 2009,
journal = {The Lancet Infectious Diseases},
publisher = {Elsevier},
volume = 10,
number = 4
}
@misc{methods2016methodological,
title = {Methodological expectations of {Campbell Collaboration} intervention reviews ({MECCIR}): Conduct standards},
author = {{Campbell Collaboration}},
year = 2016,
journal = {Campbell Policies and Guidelines Series},
volume = 3,
howpublished = {\url{https://onlinelibrary.wiley.com/page/journal/18911803/homepage/author-guidelines}}
}
@article{sanderson2007tools,
title = {Tools for assessing quality and susceptibility to bias in observational studies in epidemiology: A systematic review and annotated bibliography},
author = {Sanderson, Simon and Tatt, Iain D and Higgins, Julian},
year = 2007,
journal = {International Journal of Epidemiology},
publisher = {Oxford University Press},
volume = 36,
number = 3,
pages = {666--676}
}
@article{higgins2011cochrane,
title = {The {Cochrane Collaboration’s} tool for assessing risk of bias in randomised trials},
author = {Higgins, Julian and Altman, Douglas G and G{\o}tzsche, Peter C and J{\"u}ni, Peter and Moher, David and Oxman, Andrew D and Savovi{\'c}, Jelena and Schulz, Kenneth F and Weeks, Laura and Sterne, Jonathan AC},
year = 2011,
journal = {BMJ},
publisher = {British Medical Journal Publishing Group},
volume = 343,
pages = {d5928}
}
@article{sterne2016robins,
title = {{ROBINS-I}: A tool for assessing risk of bias in non-randomised studies of interventions},
author = {Sterne, Jonathan and Hern{\'a}n, Miguel A and Reeves, Barnaby C and Savovi{\'c}, Jelena and Berkman, Nancy D and Viswanathan, Meera and Henry, David and Altman, Douglas G and Ansari, Mohammed T and Boutron, Isabelle and Carpenter, James R and Chan, An-Wen and Churchill, Rachel and Deeks, Jonathan J and Hr{\'o}bjartsson, Asbj{\o}rn and Kirkham, Jamie and J{\"u}ni, Peter and Loke, Yoon K and Pigott, Theresa D and Ramsay, Craig R and Regidor, Deborah and Rothstein, Hannah R and Sandhu, Lakhbir and Santaguida, Pasqualina L and Sch{\"u}nemann, Holger J and Shea, Beverly and Shrier, Ian and Tugwell, Peter and Turner, Lucy and Valentine, Jeffrey C and Waddington, Hugh and Waters, Elizabeth and Wells, George A and Whiting, Penny F and Higgins, Julian P T},
year = 2016,
journal = {BMJ},
volume = 355,
pages = {i4919}
}
@article{jorgensen2016evaluation,
title = {Evaluation of the {Cochrane} tool for assessing risk of bias in randomized clinical trials: Overview of published comments and analysis of user practice in {Cochrane} and {non-Cochrane} reviews},
author = {J{\o}rgensen, Lars and Paludan-M{\"u}ller, Asger S and Laursen, David RT and Savovi{\'c}, Jelena and Boutron, Isabelle and Sterne, Jonathan AC and Higgins, Julian PT and Hr{\'o}bjartsson, Asbj{\o}rn},
year = 2016,
journal = {Systematic Reviews},
publisher = {Springer},
volume = 5,
number = 1,
pages = 80
}
@article{hohn2019primary,
title = {Primary study quality in psychological meta-analyses: An empirical assessment of recent practice},
author = {Hohn, Richard E and Slaney, Kathleen L and Tafreshi, Donna},
year = 2019,
journal = {Frontiers in Psychology},
publisher = {Frontiers},
volume = 9,
pages = 2667
}
@book{grolemund2014hands,
title = {Hands-on programming with \textsf{R}: Write your own functions and simulations},
author = {Grolemund, Garrett},
year = 2014,
publisher = {O'Reilly}
}
@article{tidyverse,
title = {Welcome to the {tidyverse}},
author = {Hadley Wickham and Mara Averick and Jennifer Bryan and Winston Chang and Lucy D'Agostino McGowan and Romain François and Garrett Grolemund and Alex Hayes and Lionel Henry and Jim Hester and Max Kuhn and Thomas Lin Pedersen and Evan Miller and Stephan Milton Bache and Kirill Müller and Jeroen Ooms and David Robinson and Dana Paige Seidel and Vitalie Spinu and Kohske Takahashi and Davis Vaughan and Claus Wilke and Kara Woo and Hiroaki Yutani},
year = 2019,
journal = {Journal of Open Source Software},
volume = 4,
number = 43,
pages = 1686
}
@article{meta,
title = {How to perform a meta-analysis with \textsf{R}: A practical tutorial},
author = {Balduzzi, Sara and R{\"u}cker, Gerta and Schwarzer, Guido},
year = 2019,
journal = {Evidence-Based Mental Health},
publisher = {Royal College of Psychiatrists},
volume = 22,
number = 4,
pages = {153--160}
}
@article{urviecht,
title = {Conducting meta-analyses in \textsf{R} with the {metafor} package},
author = {Wolfgang Viechtbauer},
year = 2010,
journal = {Journal of Statistical Software},
volume = 36,
number = 3,
pages = {1--48}
}
@article{vance2009data,
title = {Data analysts captivated by \textsf{R}’s power},
author = {Vance, Ashlee},
year = 2009,
journal = {New York Times},
 url = {https://www.nytimes.com/2009/01/07/technology/business-computing/07program.html}
}
@misc{openxlsx,
title = {openxlsx: Read, Write and Edit xlsx Files. \textsf{R} package version 4.1.5},
author = {Philipp Schauberger and Alexander Walker},
year = 2020,
 howpublished = {\url{https://CRAN.R-project.org/package=openxlsx}}
}
@book{wickham2016r,
 title = {\textsf{R} for data science: Import, tidy, transform, visualize, and model data},
author = {Wickham, Hadley and Grolemund, Garrett},
year = 2016,
publisher = {O'Reilly}
}
@book{schwarzer2015meta,
title = {Meta-analysis with \textsf{R}},
author = {Schwarzer, Guido and Carpenter, James R and R{\"u}cker, Gerta},
year = 2015,
publisher = {Springer}
}
@book{hedges2014statistical,
title = {Statistical methods for meta-analysis},
author = {Hedges, Larry and Olkin, Ingram},
year = 2014,
publisher = {Academic Press}
}
@book{aronow2019foundations,
title = {Foundations of agnostic statistics},
author = {Aronow, Peter M and Miller, Benjamin T},
year = 2019,
publisher = {Cambridge University Press}
}
@book{cohen1988statistical,
title = {Statistical power analysis for the behavioral sciences},
 author = {Cohen, Jacob},
 year = 1988,
 publisher = {Lawrence Erlbaum Associates}
}
@book{cooper2019handbook,
title = {The handbook of research synthesis and meta-analysis},
author = {Cooper, Harris and Hedges, Larry V and Valentine, Jeffrey C},
year = 2019,
publisher = {Russell Sage Foundation}
}
@article{bonett2019point,
title = {Point-biserial correlation: Interval estimation, hypothesis testing, meta-analysis, and sample size determination},
author = {Bonett, Douglas G},
year = 2020,
journal = {British Journal of Mathematical and Statistical Psychology},
publisher = {Wiley Online Library},
volume = 73,
pages = {113--144}
}
@article{cuijpers2014threshold,
title = {What is the threshold for a clinically relevant effect? The case of major depressive disorders},
author = {Cuijpers, Pim and Turner, Erick H and Koole, Sander L and Van Dijke, Annemiek and Smit, Filip},
year = 2014,
journal = {Depression and Anxiety},
publisher = {Wiley Online Library},
volume = 31,
number = 5,
pages = {374--378}
}
@misc{esc,
title = {esc: Effect Size Computation for Meta Analysis (Version 0.5.1)},
author = {Daniel L{\"u}decke},
year = 2019,
doi = {10.5281/zenodo.1249218},
 howpublished = {\url{https://CRAN.R-project.org/package=esc}}
}
@article{becker1988synthesizing,
title = {Synthesizing standardized mean-change measures},
author = {Becker, Betsy Jane},
year = 1988,
journal = {British Journal of Mathematical and Statistical Psychology},
publisher = {Wiley Online Library},
volume = 41,
number = 2,
pages = {257--278}
}
@article{cuijpers2017pre,
title = {Pre-post effect sizes should be avoided in meta-analyses},
author = {Cuijpers, Pim and Weitz, E and Cristea, IA and Twisk, J},
year = 2017,
journal = {Epidemiology and Psychiatric Sciences},
publisher = {Cambridge University Press},
volume = 26,
number = 4,
pages = {364--368}
}
@article{gart1967bias,
title = {On the bias of various estimators of the logit and its variance with application to quantal bioassay},
author = {Gart, John J and Zweifel, James R},
year = 1967,
journal = {Biometrika},
publisher = {JSTOR},
pages = {181--187}
}
@article{j2004add,
title = {What to add to nothing? Use and avoidance of continuity corrections in meta-analysis of sparse data},
author = {Sweeting, Michael J and Sutton, Alexander J and Lambert, Paul C},
year = 2004,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 23,
number = 9,
pages = {1351--1375}
}
@article{efthimiou2018practical,
title = {Practical guide to the meta-analysis of rare events},
author = {Efthimiou, Orestis},
year = 2018,
journal = {Evidence-Based Mental Health},
publisher = {Royal College of Psychiatrists},
volume = 21,
number = 2,
pages = {72--76}
}
@article{zhang1998whats,
 title = {What's the relative risk? {A} method of correcting the odds ratio in cohort studies of common outcomes},
author = {Zhang, Jun and Yu, Kai F.},
year = 1998,
month = 11,
journal = {JAMA},
volume = 280,
number = 19,
pages = {1690--1691}
}
@article{panageas2007you,
title = {When you look matters: The effect of assessment schedule on progression-free survival},
author = {Panageas, Katherine S and Ben-Porat, Leah and Dickler, Maura N and Chapman, Paul B and Schrag, Deborah},
year = 2007,
journal = {Journal of the National Cancer Institute},
publisher = {Oxford University Press},
volume = 99,
number = 6,
pages = {428--432}
}
@article{hedges1981distribution,
title = {Distribution theory for {Glass's} estimator of effect size and related estimators},
author = {Hedges, Larry V},
year = 1981,
journal = {Journal of Educational Statistics},
publisher = {Sage Publications Sage CA: Thousand Oaks, CA},
volume = 6,
number = 2,
pages = {107--128}
}
@article{spearman1904reprinted,
title = {The proof and measurement of association between two things},
author = {Spearman, C},
year = 1904,
journal = {The American Journal of Psychology},
volume = 15,
number = 1,
pages = {72--101}
}
@article{hough1994comparison,
title = {Comparison of the {Glass} and {Hunter-Schmidt} meta-analytic techniques},
author = {Hough, Susan L and Hall, Bruce W},
year = 1994,
journal = {The Journal of Educational Research},
publisher = {Taylor \& Francis},
volume = 87,
number = 5,
pages = {292--296}
}
@article{psychmeta,
title = {{psychmeta}: An \textsf{R} Package for Psychometric Meta-Analysis},
author = {Jeffrey A. Dahlke and Brenton M. Wiernik},
year = 2019,
journal = {Applied Psychological Measurement},
volume = 43,
number = 5,
pages = {415--416}
}
@article{breiman2001statistical,
title = {Statistical modeling: The two cultures (with comments and a rejoinder by the author)},
author = {Breiman, Leo},
year = 2001,
journal = {Statistical Science},
publisher = {Institute of Mathematical Statistics},
volume = 16,
number = 3,
pages = {199--231}
}
@article{hedges1998fixed,
 title = {Fixed- and random-effects models in meta-analysis.},
author = {Hedges, Larry V and Vevea, Jack L},
year = 1998,
journal = {Psychological Methods},
publisher = {American Psychological Association},
volume = 3,
number = 4,
pages = 486
}
@article{thompson2001multilevel,
title = {Multilevel models for meta-analysis, and their application to absolute risk differences},
author = {Thompson, Simon G and Turner, Rebecca M and Warn, David E},
year = 2001,
journal = {Statistical Methods in Medical Research},
publisher = {Sage Publications Sage CA: Thousand Oaks, CA},
volume = 10,
number = 6,
pages = {375--392}
}
@article{poole1999random,
title = {Random-effects meta-analyses are not always conservative},
author = {Poole, Charles and Greenland, Sander},
year = 1999,
journal = {American Journal of Epidemiology},
publisher = {Oxford University Press},
volume = 150,
number = 5,
pages = {469--475}
}
@article{furukawa2003low,
title = {Low dosage tricyclic antidepressants for depression},
author = {Furukawa, Toshi A and McGuire, Hugh and Barbui, Corrado},
year = 2003,
journal = {Cochrane Database of Systematic Reviews},
publisher = {John Wiley \& Sons, Ltd},
number = 3
}
@article{viechtbauer2005bias,
title = {Bias and efficiency of meta-analytic variance estimators in the random-effects model},
author = {Viechtbauer, Wolfgang},
year = 2005,
journal = {Journal of Educational and Behavioral Statistics},
publisher = {Sage Publications Sage CA: Los Angeles, CA},
volume = 30,
number = 3,
pages = {261--293}
}
@article{paule1982consensus,
title = {Consensus values and weighting factors},
author = {Paule, Robert C and Mandel, John},
year = 1982,
journal = {Journal of Research of the National Bureau of Standards},
volume = 87,
number = 5,
pages = {377--385}
}
@article{sidik2005simple,
title = {Simple heterogeneity variance estimation for meta-analysis},
author = {Sidik, Kurex and Jonkman, Jeffrey N},
year = 2005,
journal = {Journal of the Royal Statistical Society: Series C (Applied Statistics)},
publisher = {Wiley Online Library},
volume = 54,
number = 2,
pages = {367--384}
}
@article{sidik2019note,
title = {A note on the empirical {Bayes} heterogeneity variance estimator in meta-analysis},
author = {Sidik, Kurex and Jonkman, Jeffrey N},
year = 2019,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 38,
number = 20,
pages = {3804--3816}
}
@article{hartung1999alternative,
title = {An alternative method for meta-analysis},
author = {Hartung, Joachim},
year = 1999,
journal = {Biometrical Journal: Journal of Mathematical Methods in Biosciences},
publisher = {Wiley Online Library},
volume = 41,
number = 8,
pages = {901--916}
}
@article{hartung2001refined,
title = {A refined method for the meta-analysis of controlled clinical trials with binary outcome},
author = {Hartung, Joachim and Knapp, Guido},
year = 2001,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 20,
number = 24,
pages = {3875--3889}
}
@article{hartung2001tests,
title = {On tests of the overall treatment effect in meta-analysis with normally distributed responses},
author = {Hartung, Joachim and Knapp, Guido},
year = 2001,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 20,
number = 12,
pages = {1771--1782}
}
@article{follmann1999valid,
title = {Valid inference in random effects meta-analysis},
author = {Follmann, Dean A and Proschan, Michael A},
year = 1999,
journal = {Biometrics},
publisher = {Wiley Online Library},
volume = 55,
number = 3,
pages = {732--737}
}
@article{makambi2004effect,
title = {The effect of the heterogeneity variance estimator on some tests of treatment efficacy},
author = {Makambi, Kepher H},
year = 2004,
journal = {Journal of Biopharmaceutical Statistics},
publisher = {Taylor \& Francis},
volume = 14,
number = 2,
pages = {439--449}
}
@article{sidik2007comparison,
title = {A comparison of heterogeneity variance estimators in combining results of studies},
author = {Sidik, Kurex and Jonkman, Jeffrey N},
year = 2007,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 26,
number = 9,
pages = {1964--1981}
}
@article{veroniki2016methods,
title = {Methods to estimate the between-study variance and its uncertainty in meta-analysis},
author = {Veroniki, Areti Angeliki and Jackson, Dan and Viechtbauer, Wolfgang and Bender, Ralf and Bowden, Jack and Knapp, Guido and Kuss, Oliver and Higgins, Julian PT and Langan, Dean and Salanti, Georgia},
year = 2016,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 7,
number = 1,
pages = {55--79}
}
@article{knapp2003improved,
title = {Improved tests for a random effects meta-regression with a single covariate},
author = {Knapp, Guido and Hartung, Joachim},
year = 2003,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 22,
number = 17,
pages = {2693--2710}
}
@article{wiksten2016hartung,
 title = {{Hartung-Knapp} method is not always conservative compared with fixed-effect meta-analysis},
author = {Wiksten, Anna and R{\"u}cker, Gerta and Schwarzer, Guido},
year = 2016,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 35,
number = 15,
pages = {2503--2515}
}
@article{inthout2014hartung,
title = {The {Hartung-Knapp-Sidik-Jonkman} method for random effects meta-analysis is straightforward and considerably outperforms the standard {DerSimonian-Laird} method},
author = {IntHout, Joanna and Ioannidis, John PA and Borm, George F},
year = 2014,
journal = {BMC Medical Research Methodology},
publisher = {BioMed Central},
volume = 14,
number = 1,
pages = 25
}
@article{sidik2002simple,
title = {A simple confidence interval for meta-analysis},
author = {Sidik, Kurex and Jonkman, Jeffrey N},
year = 2002,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 21,
number = 21,
pages = {3153--3159}
}
@article{langan2019comparison,
title = {A comparison of heterogeneity variance estimators in simulated random-effects meta-analyses},
author = {Langan, Dean and Higgins, Julian PT and Jackson, Dan and Bowden, Jack and Veroniki, Areti Angeliki and Kontopantelis, Evangelos and Viechtbauer, Wolfgang and Simmonds, Mark},
year = 2019,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 10,
number = 1,
pages = {83--98}
}
@article{bakbergenuly2020methods,
title = {Methods for estimating between-study variance and overall effect in meta-analysis of odds ratios},
author = {Bakbergenuly, Ilyas and Hoaglin, David C and Kulinskaya, Elena},
year = 2020,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 11,
number = 3,
pages = {426--442}
}
@article{mantel1959statistical,
title = {Statistical aspects of the analysis of data from retrospective studies of disease},
author = {Mantel, Nathan and Haenszel, William},
year = 1959,
journal = {Journal of the National Cancer Institute},
publisher = {Oxford University Press},
volume = 22,
number = 4,
pages = {719--748}
}
@article{robins1986general,
title = {A general estimator for the variance of the {Mantel-Haenszel} odds ratio},
author = {James Robins and Sander Greenland and Norman E. Breslow},
year = 1986,
journal = {American Journal of Epidemiology},
pages = {719--723}
}
@article{yusuf1985beta,
title = {Beta blockade during and after myocardial infarction: An overview of the randomized trials},
author = {Yusuf, Salim and Peto, Richard and Lewis, John and Collins, Rory and Sleight, Peter},
year = 1985,
journal = {Progress in Cardiovascular Diseases},
publisher = {Elsevier},
volume = 27,
number = 5,
pages = {335--371}
}
@article{bradburn2007much,
title = {Much ado about nothing: A comparison of the performance of meta-analytical methods with rare events},
author = {Bradburn, Michael J and Deeks, Jonathan J and Berlin, Jesse A and Russell Localio, A},
year = 2007,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 26,
number = 1,
pages = {53--77}
}
@article{cuijpers2002excess,
title = {Excess mortality in depression: A meta-analysis of community studies},
author = {Cuijpers, Pim and Smit, Filip},
year = 2002,
journal = {Journal of Affective Disorders},
publisher = {Elsevier},
volume = 72,
number = 3,
pages = {227--236}
}
@article{harrer2020prevention,
title = {Prevention of eating disorders at universities: A systematic review and meta-analysis},
author = {Harrer, Mathias and Adam, Sophia H and Messner, Eva-Maria and Baumeister, Harald and Cuijpers, Pim and Bruffaerts, Ronny and Auerbach, Randy P and Kessler, Ronald C and Jacobi, Corinna and Taylor, Craig Barr and Ebert, David D},
year = 2020,
journal = {International Journal of Eating Disorders},
publisher = {Wiley Online Library},
volume = 53,
number = 3,
pages = {823--833}
}
@article{ngamaba2017strongly,
title = {How strongly related are health status and subjective well-being? {Systematic} review and meta-analysis},
author = {Ngamaba, Kayonda Hubert and Panagioti, Maria and Armitage, Christopher J},
year = 2017,
journal = {The European Journal of Public Health},
publisher = {Oxford University Press},
volume = 27,
number = 5,
pages = {879--885}
}
@article{furukawa2020translating,
 title = {Translating the {BDI} and {BDI-II} into the {HAMD} and vice versa with equipercentile linking},
author = {Furukawa, Toshi A and Reijnders, Mirjam and Kishimoto, Sanae and Sakata, Masatsugu and DeRubeis, Robert J and Dimidjian, Sona and Dozois, David JA and Hegerl, Ulrich and Hollon, Steven D and Jarrett, Robin B and Lesp{\'{e}}rance, Fran{\c c}ois and Segal, Zindel V and Mohr, David C and Simons, Anne D and Quilty, Lena C and Reynolds, Charles F and Gentili, Claudio and Leucht, Stefan and Engel, Rolf and Cuijpers, Pim},
year = 2020,
journal = {Epidemiology and Psychiatric Sciences},
publisher = {Cambridge University Press},
volume = 29
}
@article{beck1996beck,
 title = {{Beck Depression Inventory--II}},
author = {Beck, Aaron T and Steer, Robert A and Brown, Gregory},
year = 1996,
journal = {Psychological Assessment}
}
@article{schwarzer2019seriously,
title = {Seriously misleading results using inverse of {Freeman-Tukey} double arcsine transformation in meta-analysis of single proportions},
author = {Schwarzer, Guido and Chemaitelly, Hiam and Abu-Raddad, Laith J and R{\"u}cker, Gerta},
year = 2019,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 10,
number = 3,
pages = {476--483}
}
@article{bakbergenuly2018meta,
title = {Meta-analysis of binary outcomes via generalized linear mixed models: A simulation study},
author = {Bakbergenuly, Ilyas and Kulinskaya, Elena},
year = 2018,
journal = {BMC Medical Research Methodology},
publisher = {BioMed Central},
volume = 18,
number = 1,
pages = 70
}
@article{stijnen2010random,
title = {Random effects meta-analysis of event outcome in the framework of the generalized linear mixed model with applications in sparse data},
author = {Stijnen, Theo and Hamza, Taye H and {\"O}zdemir, Pinar},
year = 2010,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 29,
number = 29,
pages = {3046--3067}
}
@article{jordan2017past,
title = {Past-year prevalence of prescription opioid misuse among those 11 to 30 years of age in the {United States}: A systematic review and meta-analysis},
author = {Jordan, Ashly E and Blackburn, Natalie A and Des Jarlais, Don C and Hagan, Holly},
year = 2017,
journal = {Journal of Substance Abuse Treatment},
publisher = {Elsevier},
volume = 77,
pages = {31--37}
}
@article{rucker2008undue,
title = {Undue reliance on {$I^2$} in assessing heterogeneity may mislead},
author = {R{\"u}cker, Gerta and Schwarzer, Guido and Carpenter, James R and Schumacher, Martin},
year = 2008,
journal = {BMC Medical Research Methodology},
publisher = {Springer},
volume = 8,
number = 1,
pages = 79
}
@article{cochran1954some,
title = {Some methods for strengthening the common {$\chi^2$} tests},
author = {Cochran, William G},
year = 1954,
journal = {Biometrics},
publisher = {JSTOR},
volume = 10,
number = 4,
pages = {417--451}
}
@article{hoaglin2016misunderstandings,
title = {Misunderstandings about {$Q$} and ‘{Cochran's} {$Q$} test' in meta-analysis},
author = {Hoaglin, David C},
year = 2016,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 35,
number = 4,
pages = {485--495}
}
@article{higgins2002quantifying,
title = {Quantifying heterogeneity in a meta-analysis},
author = {Higgins, Julian PT and Thompson, Simon G},
year = 2002,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 21,
number = 11,
pages = {1539--1558}
}
@article{viechtbauer2007confidence,
title = {Confidence intervals for the amount of heterogeneity in meta-analysis},
author = {Viechtbauer, Wolfgang},
year = 2007,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 26,
number = 1,
pages = {37--52}
}
@article{jackson2013confidence,
title = {Confidence intervals for the between-study variance in random effects meta-analysis using generalised Cochran heterogeneity statistics},
author = {Jackson, Dan},
year = 2013,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 4,
number = 3,
pages = {220--229}
}
@article{borenstein2017basics,
title = {Basics of meta-analysis: {$I^2$} is not an absolute measure of heterogeneity},
author = {Borenstein, Michael and Higgins, Julian PT and Hedges, Larry V and Rothstein, Hannah R},
year = 2017,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 8,
number = 1,
pages = {5--18}
}
@article{inthout2016plea,
title = {Plea for routinely presenting prediction intervals in meta-analysis},
author = {IntHout, Joanna and Ioannidis, John PA and Rovers, Maroeska M and Goeman, Jelle J},
year = 2016,
journal = {BMJ Open},
publisher = {British Medical Journal Publishing Group},
volume = 6,
number = 7
}
@article{viechtbauer2010outlier,
title = {Outlier and influence diagnostics for meta-analysis},
author = {Viechtbauer, Wolfgang and Cheung, Mike},
year = 2010,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 1,
number = 2,
pages = {112--125}
}
@article{baujat2002graphical,
title = {A graphical method for exploring heterogeneity in meta-analyses: Application to a meta-analysis of 65 trials},
author = {Baujat, Bertrand and Mah{\'e}, C{\'e}dric and Pignon, Jean-Pierre and Hill, Catherine},
year = 2002,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 21,
number = 18,
pages = {2641--2652}
}
@article{olkin2012gosh,
title = {{GOSH}--a graphical display of study heterogeneity},
author = {Olkin, Ingram and Dahabreh, Issa J and Trikalinos, Thomas A},
year = 2012,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 3,
number = 3,
pages = {214--223}
}
@article{hartigan1979algorithm,
title = {Algorithm {AS} 136: A {$k$}-means clustering algorithm},
author = {Hartigan, John A and Wong, Manchek A},
year = 1979,
journal = {Journal of the Royal Statistical Society. Series C (Applied Statistics)},
 publisher = {Wiley, Royal Statistical Society},
volume = 28,
number = 1,
pages = {100--108}
}
@article{schubert2017dbscan,
title = {{DBSCAN} revisited, revisited: Why and how you should (still) use {DBSCAN}},
author = {Schubert, Erich and Sander, J{\"o}rg and Ester, Martin and Kriegel, Hans Peter and Xu, Xiaowei},
year = 2017,
journal = {ACM Transactions on Database Systems (TODS)},
publisher = {ACM New York, NY, USA},
volume = 42,
number = 3,
pages = {1--21}
}
@article{fraley2002model,
title = {Model-based clustering, discriminant analysis, and density estimation},
author = {Fraley, Chris and Raftery, Adrian E},
year = 2002,
journal = {Journal of the American Statistical Association},
publisher = {Taylor \& Francis},
volume = 97,
number = 458,
pages = {611--631}
}
@article{rucker2020beyond,
title = {Beyond the forest plot: The drapery plot},
author = {R{\"u}cker, Gerta and Schwarzer, Guido},
year = 2021,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 12,
number = 1,
pages = {13--19}
}
@article{infanger2019p,
title = {{$P$} value functions: An underused method to present research results and to promote quantitative reasoning},
author = {Infanger, Denis and Schmidt-Trucks{\"a}ss, Arno},
year = 2019,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 38,
number = 21,
pages = {4189--4197}
}
@article{wellek2017critical,
title = {A critical evaluation of the current “{$p$}-value controversy”},
author = {Wellek, Stefan},
year = 2017,
journal = {Biometrical Journal},
publisher = {Wiley Online Library},
volume = 59,
number = 5,
pages = {854--872}
}
@article{nuzzo2014statistical,
title = {Statistical errors: {$P$} values, the 'gold standard' of statistical validity, are not as reliable as many scientists assume},
 author = {Nuzzo, Regina},
year = 2014,
journal = {Nature},
publisher = {Nature Publishing Group},
volume = 506,
number = 7487,
pages = {150--153}
}
@article{borenstein2013meta,
title = {Meta-analysis and subgroups},
author = {Borenstein, Michael and Higgins, Julian PT},
year = 2013,
journal = {Prevention Science},
publisher = {Springer},
volume = 14,
number = 2,
pages = {134--143}
}
@article{hedges2004power,
title = {The power of statistical tests for moderators in meta-analysis.},
author = {Hedges, Larry V and Pigott, Therese D},
year = 2004,
journal = {Psychological Methods},
publisher = {American Psychological Association},
volume = 9,
number = 4,
pages = 426
}
@article{thompson2002should,
title = {How should meta-regression analyses be undertaken and interpreted?},
author = {Thompson, Simon G and Higgins, Julian PT},
year = 2002,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 21,
number = 11,
pages = {1559--1573}
}
@article{piantadosi1988ecological,
title = {The ecological fallacy},
author = {Piantadosi, Steven and Byar, David P and Green, Sylvan B},
year = 1988,
journal = {American Journal of Epidemiology},
publisher = {Oxford University Press},
volume = 127,
number = 5,
pages = {893--904}
}
@article{higgins2004controlling,
title = {Controlling the risk of spurious findings from meta-regression},
 author = {Higgins, Julian PT and Thompson, Simon G},
year = 2004,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 23,
number = 11,
pages = {1663--1682}
}
@article{iniesta2016machine,
title = {Machine learning, statistical learning and the future of biological research in psychiatry},
author = {Iniesta, R and Stahl, D and McGuffin, P},
year = 2016,
journal = {Psychological Medicine},
publisher = {Cambridge University Press},
volume = 46,
number = 12,
pages = {2455--2465}
}
@article{gigerenzer2004mindless,
title = {Mindless statistics},
 author = {Gigerenzer, Gerd},
year = 2004,
journal = {The Journal of Socio-Economics},
publisher = {Elsevier},
volume = 33,
number = 5,
pages = {587--606}
}
@article{higgins2002statistical,
 title = {Statistical heterogeneity in systematic reviews of clinical trials: A critical appraisal of guidelines and practice},
 author = {Higgins, Julian and Thompson, Simon and Deeks, Jonathan and Altman, Douglas},
year = 2002,
journal = {Journal of Health Services Research Policy},
publisher = {SAGE Publications Sage UK: London, England},
volume = 7,
number = 1,
pages = {51--61}
}
@article{mansfiled1982detecting,
title = {Detecting multicollinearity},
author = {Mansfield, Edward R and Helms, Billy P},
year = 1982,
journal = {The American Statistician},
publisher = {Taylor \& Francis},
volume = 36,
number = {3a},
pages = {158--160}
}
@article{berlin1994advantages,
title = {Advantages and limitations of metaanalytic regressions of clinical trials data.},
author = {Berlin, Jesse A and Antman, Elliott M},
year = 1994,
journal = {The Online Journal of Current Clinical Trials}
}
@article{chatfield1995model,
title = {Model uncertainty, data mining and statistical inference},
author = {Chatfield, Chris},
year = 1995,
journal = {Journal of the Royal Statistical Society: Series A (Statistics in Society)},
publisher = {Wiley Online Library},
volume = 158,
number = 3,
pages = {419--444}
}
@article{whittingham2006we,
title = {Why do we still use stepwise modelling in ecology and behaviour?},
author = {Whittingham, Mark J and Stephens, Philip A and Bradbury, Richard B and Freckleton, Robert P},
year = 2006,
journal = {Journal of Animal Ecology},
publisher = {Wiley Online Library},
volume = 75,
number = 5,
pages = {1182--1189}
}
@misc{perfanalytics,
title = {{PerformanceAnalytics}: Econometric Tools for Performance and Risk Analysis. \textsf{R} package version 2.0.4},
author = {Brian G. Peterson and Peter Carl},
year = 2020,
howpublished = {\url{https://CRAN.R-project.org/package=PerformanceAnalytics}}
}
@book{good2013permutation,
title = {Permutation tests: A practical guide to resampling methods for testing hypotheses},
author = {Good, Phillip},
year = 2013,
 publisher = {Springer Science \& Business Media}
}
@article{viechtbauer2015comparison,
title = {A comparison of procedures to test for moderators in mixed-effects meta-regression models.},
author = {Viechtbauer, Wolfgang and L{\'o}pez-L{\'o}pez, Jos{\'e} Antonio and S{\'a}nchez-Meca, Julio and Mar{\'\i}n-Mart{\'\i}nez, Fulgencio},
year = 2015,
journal = {Psychological Methods},
publisher = {American Psychological Association},
volume = 20,
number = 3,
pages = 360
}
@book{kirschemperorbook,
title = {The emperor's new drugs: Exploding the antidepressant myth},
author = {Irving Kirsch},
year = 2010,
publisher = {Basic Books}
}
@book{duval2005publication,
 title = {Publication bias in meta-analysis: Prevention, assessment and adjustments},
 author = {Rothstein, Hannah R and Sutton, Alexander J and Borenstein, Michael},
 year = 2005,
 publisher = {John Wiley \& Sons}
}
@article{schmucker2014extent,
title = {Extent of non-publication in cohorts of studies approved by research ethics committees or included in trial registries},
 author = {Schmucker, Christine and Schell, Lisa K and Portalupi, Susan and Oeller, Patrick and Cabrera, Laura and Bassler, Dirk and Schwarzer, Guido and Scherer, Roberta W and Antes, Gerd and von Elm, Erik and Meerpohl, Joerg J},
year = 2014,
journal = {PLOS ONE},
publisher = {Public Library of Science},
volume = 9,
number = 12,
pages = {e114023}
}
@article{scherer2018full,
title = {Full publication of results initially presented in abstracts},
author = {Scherer, Roberta W and Meerpohl, Joerg J and Pfeifer, Nadine and Schmucker, Christine and Schwarzer, Guido and von Elm, Erik},
year = 2018,
journal = {Cochrane Database of Systematic Reviews},
publisher = {John Wiley \& Sons},
volume = 1,
number = 11
}
@article{chan2014increasing,
 title = {Increasing value and reducing waste: Addressing inaccessible research},
author = {Chan, An-Wen and Song, Fujian and Vickers, Andrew and Jefferson, Tom and Dickersin, Kay and G{\o}tzsche, Peter C and Krumholz, Harlan M and Ghersi, Davina and Van Der Worp, H Bart},
year = 2014,
journal = {The Lancet},
publisher = {Elsevier},
volume = 383,
number = 9913,
pages = {257--266}
}
@article{dechartres2018association,
 title = {Association between publication characteristics and treatment effect estimates: A meta-epidemiologic study},
author = {Dechartres, Agnes and Atal, Ignacio and Riveros, Carolina and Meerpohl, Joerg and Ravaud, Philippe},
year = 2018,
journal = {Annals of Internal Medicine},
publisher = {American College of Physicians},
volume = 169,
number = 6,
pages = {385--393}
}
@article{page2020investigating,
 title = {Investigating and dealing with publication bias and other reporting biases in meta-analyses of health research: A review},
author = {Page, Matthew J and Sterne, Jonathan AC and Higgins, Julian PT and Egger, Matthias},
year = 2020,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library}
}
@article{simonsohn2020specification,
title = {Specification curve analysis},
author = {Simonsohn, Uri and Simmons, Joseph P and Nelson, Leif D},
year = 2020,
journal = {Nature Human Behaviour},
publisher = {Nature Publishing Group},
volume = 4,
number = 11,
pages = {1208--1214}
}
@article{kerr1998harking,
title = {{HARKing}: Hypothesizing after the results are known},
author = {Kerr, Norbert L},
year = 1998,
journal = {Personality and Social Psychology Review},
publisher = {Sage Publications Sage CA: Los Angeles, CA},
volume = 2,
number = 3,
pages = {196--217}
}
@article{mahood2014searching,
 title = {Searching for grey literature for systematic reviews: Challenges and benefits},
author = {Mahood, Quenby and Van Eerd, Dwayne and Irvin, Emma},
year = 2014,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 5,
number = 3,
pages = {221--234}
}
@article{mcauley2000does,
title = {Does the inclusion of grey literature influence estimates of intervention effectiveness reported in meta-analyses?},
author = {McAuley, Laura and Tugwell, Peter and Moher, David and others},
year = 2000,
journal = {The Lancet},
publisher = {Elsevier},
volume = 356,
number = 9237,
pages = {1228--1231}
}
@article{sterne2000publication,
 title = {Publication and related bias in meta-analysis: Power of statistical tests and prevalence in the literature},
author = {Sterne, Jonathan and Gavaghan, David and Egger, Matthias},
year = 2000,
journal = {Journal of Clinical Epidemiology},
publisher = {Elsevier},
volume = 53,
number = 11,
pages = {1119--1129}
}
@article{peters2008contour,
title = {Contour-enhanced meta-analysis funnel plots help distinguish publication bias from other causes of asymmetry},
author = {Peters, Jaime L and Sutton, Alex J and Jones, David R and Abrams, Keith R and Rushton, Lesley},
year = 2008,
journal = {Journal of Clinical Epidemiology},
publisher = {Elsevier},
volume = 61,
number = 10,
pages = {991--996}
}
@article{egger1997bias,
title = {Bias in meta-analysis detected by a simple, graphical test},
author = {Egger, Matthias and Smith, George Davey and Schneider, Martin and Minder, Christoph},
year = 1997,
journal = {BMJ},
publisher = {British Medical Journal Publishing Group},
volume = 315,
number = 7109,
pages = {629--634}
}
@article{pustejovsky2019testing,
title = {Testing for funnel plot asymmetry of standardized mean differences},
author = {Pustejovsky, James E and Rodgers, Melissa A},
year = 2019,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 10,
number = 1,
pages = {57--71}
}
@article{peters2006comparison,
title = {Comparison of two methods to detect publication bias in meta-analysis},
author = {Peters, Jaime L and Sutton, Alex J and Jones, David R and Abrams, Keith R and Rushton, Lesley},
year = 2006,
journal = {JAMA},
publisher = {American Medical Association},
volume = 295,
number = 6,
pages = {676--680}
}
@article{sterne2011recommendations,
title = {Recommendations for examining and interpreting funnel plot asymmetry in meta-analyses of randomised controlled trials},
author = {Sterne, Jonathan and Sutton, Alex J and Ioannidis, John P A and Terrin, Norma and Jones, David R and Lau, Joseph and Carpenter, James and R{\"u}cker, Gerta and Harbord, Roger M and Schmid, Christopher H and Tetzlaff, Jennifer and Deeks, Jonathan J and Peters, Jaime and Macaskill, Petra and Schwarzer, Guido and Duval, Sue and Altman, Douglas G and Moher, David and Higgins, Julian P T},
year = 2011,
journal = {BMJ},
volume = 343,
elocation-id = {d4002}
}
@article{duval2000trim,
title = {Trim and fill: a simple funnel-plot--based method of testing and adjusting for publication bias in meta-analysis},
author = {Duval, Sue and Tweedie, Richard},
year = 2000,
journal = {Biometrics},
publisher = {Wiley Online Library},
volume = 56,
number = 2,
pages = {455--463}
}
@article{peters2007performance,
title = {Performance of the trim and fill method in the presence of publication bias and between-study heterogeneity},
author = {Peters, Jaime L and Sutton, Alex J and Jones, David R and Abrams, Keith R and Rushton, Lesley},
year = 2007,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 26,
number = 25,
pages = {4544--4562}
}
@article{terrin2003adjusting,
title = {Adjusting for publication bias in the presence of heterogeneity},
author = {Terrin, Norma and Schmid, Christopher H and Lau, Joseph and Olkin, Ingram},
year = 2003,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 22,
number = 13,
pages = {2113--2126}
}
@article{simonsohn2014p,
title = {P-curve: a key to the file-drawer.},
author = {Simonsohn, Uri and Nelson, Leif D and Simmons, Joseph P},
year = 2014,
journal = {Journal of Experimental Psychology: General},
publisher = {American Psychological Association},
volume = 143,
number = 2,
pages = 534
}
@article{stanley2014meta,
title = {Meta-regression approximations to reduce publication selection bias},
author = {Stanley, Tom D and Doucouliagos, Hristos},
year = 2014,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 5,
number = 1,
pages = {60--78}
}
@article{stanley2008meta,
title = {Meta-regression methods for detecting and estimating empirical effects in the presence of publication selection},
author = {Stanley, Tom D},
year = 2008,
journal = {Oxford Bulletin of Economics and Statistics},
publisher = {Wiley Online Library},
volume = 70,
number = 1,
pages = {103--127}
}
@article{carter2019correcting,
title = {Correcting for bias in psychology: A comparison of meta-analytic methods},
author = {Carter, Evan C and Sch{\"o}nbrodt, Felix D and Gervais, Will M and Hilgard, Joseph},
year = 2019,
journal = {Advances in Methods and Practices in Psychological Science},
publisher = {Sage Publications Sage CA: Los Angeles, CA},
volume = 2,
number = 2,
pages = {115--144}
}
@article{stanley2017limitations,
title = {Limitations of {PET-PEESE} and other meta-analysis methods},
author = {Stanley, Tom D},
year = 2017,
journal = {Social Psychological and Personality Science},
publisher = {Sage Publications Sage CA: Los Angeles, CA},
volume = 8,
number = 5,
pages = {581--591}
}
@article{rucker2011treatment,
title = {Treatment-effect estimates adjusted for small-study effects via a limit meta-analysis},
author = {R{\"u}cker, Gerta and Schwarzer, Guido and Carpenter, James R and Binder, Harald and Schumacher, Martin},
year = 2011,
journal = {Biostatistics},
publisher = {Oxford University Press},
volume = 12,
number = 1,
pages = {122--142}
}
@misc{metasens,
title = {metasens: Advanced Statistical Methods to Model and Adjust for Bias in Meta-Analysis. \textsf{R} package version 0.5-0},
author = {Guido Schwarzer and James R. Carpenter and Gerta Rücker},
year = 2020,
url = {https://CRAN.R-project.org/package=metasens},
howpublished = {\url{https://CRAN.R-project.org/package=metasens}}
}
@article{simonsohn2014es,
title = {P-curve and effect size: Correcting for publication bias using only significant results},
author = {Simonsohn, Uri and Nelson, Leif D and Simmons, Joseph P},
year = 2014,
journal = {Perspectives on Psychological Science},
publisher = {Sage Publications Sage CA: Los Angeles, CA},
volume = 9,
number = 6,
pages = {666--681}
}
@article{simonsohn2015better,
title = {Better P-curves: Making P-curve analysis more robust to errors, fraud, and ambitious {$p$}-hacking, a Reply to {Ulrich} and {Miller} (2015)},
author = {Simonsohn, Uri and Simmons, Joseph P and Nelson, Leif D},
year = 2015,
journal = {Journal of Experimental Psychology: General},
publisher = {American Psychological Association},
volume = 144,
number = 6,
pages = {1146--1152}
}
@article{ioannidis2005most,
title = {Why most published research findings are false},
author = {Ioannidis, John PA},
year = 2005,
journal = {PLOS Medicine},
publisher = {Public Library of Science},
volume = 2,
number = 8,
pages = {e124}
}
@article{open2015estimating,
title = {Estimating the reproducibility of psychological science},
author = {{Open Science Collaboration} and others},
year = 2015,
journal = {Science},
publisher = {American Association for the Advancement of Science},
volume = 349,
number = 6251
}
@article{mcnutt2014reproducibility,
title = {Reproducibility},
author = {McNutt, Marcia},
year = 2014,
journal = {Science},
publisher = {American Association for the Advancement of Science},
volume = 343,
number = 6168,
pages = 229
}
@manual{stringr,
title = {stringr: Simple, Consistent Wrappers for Common String Operations},
author = {Hadley Wickham},
year = 2019,
url = {https://CRAN.R-project.org/package=stringr},
note = {\textsf{R} package version 1.4.0}
}
@manual{poibin,
title = {poibin: The Poisson Binomial Distribution},
author = {Yili Hong and {\textsf{R} Core Team}},
year = 2020,
url = {https://CRAN.R-project.org/package=poibin},
note = {\textsf{R} package version 1.5}
}
@article{aert2016conducting,
title = {Conducting Meta-Analyses Based on {$p$} Values: Reservations and Recommendations for Applying P-Uniform and P-Curve.},
author = {van Aert, Robbie CM and Wicherts, Jelte M and van Assen, Marcel ALM},
year = 2016,
journal = {Perspectives on Psychological Science},
volume = 11,
number = 5
}
@article{hedges1992modeling,
title = {Modeling publication selection effects in meta-analysis},
author = {Hedges, Larry V},
year = 1992,
journal = {Statistical Science},
publisher = {JSTOR},
volume = 7,
number = 2,
pages = {246--255}
}
@article{iyengar1988selection,
title = {Selection models and the file drawer problem},
author = {Iyengar, Satish and Greenhouse, Joel B},
year = 1988,
journal = {Statistical Science},
publisher = {JSTOR},
volume = 3,
number = 1,
pages = {109--117}
}
@article{hedges1996estimating,
title = {Estimating effect size under publication bias: Small sample properties and robustness of a random effects selection model},
author = {Hedges, Larry V and Vevea, Jack L},
year = 1996,
journal = {Journal of Educational and Behavioral Statistics},
publisher = {Sage Publications Sage CA: Los Angeles, CA},
volume = 21,
number = 4,
pages = {299--332}
}
@article{mcshane2016adjusting,
title = {Adjusting for publication bias in meta-analysis: An evaluation of selection methods and some cautionary notes},
author = {McShane, Blakeley B and B{\"o}ckenholt, Ulf and Hansen, Karsten T},
year = 2016,
journal = {Perspectives on Psychological Science},
publisher = {Sage Publications Sage CA: Los Angeles, CA},
volume = 11,
number = 5,
pages = {730--749}
}
@article{hedges1984estimation,
title = {Estimation of effect size under nonrandom sampling: The effects of censoring studies yielding statistically insignificant mean differences},
author = {Hedges, Larry V},
year = 1984,
journal = {Journal of Educational Statistics},
publisher = {Sage Publications Sage CA: Thousand Oaks, CA},
volume = 9,
number = 1,
pages = {61--85}
}
@article{vevea2005publication,
title = {Publication bias in research synthesis: sensitivity analysis using a priori weight functions.},
author = {Vevea, Jack L and Woods, Carol M},
year = 2005,
journal = {Psychological Methods},
publisher = {American Psychological Association},
volume = 10,
number = 4,
pages = 428
}
@article{friese2019ego,
title = {Is ego depletion real? {An} analysis of arguments},
author = {Friese, Malte and Loschelder, David D and Gieseler, Karolin and Frankenbach, Julius and Inzlicht, Michael},
year = 2019,
journal = {Personality and Social Psychology Review},
publisher = {Sage Publications Sage CA: Los Angeles, CA},
volume = 23,
number = 2,
pages = {107--131}
}
@article{pastor2018multilevel,
title = {On the multilevel nature of meta-analysis: a tutorial, comparison of software programs, and discussion of analytic choices},
author = {Pastor, Dena A and Lazowski, Rory A},
year = 2018,
journal = {Multivariate Behavioral Research},
publisher = {Taylor \& Francis},
volume = 53,
number = 1,
pages = {74--89}
}
@article{cheung2014modeling,
title = {Modeling dependent effect sizes with three-level meta-analyses: a structural equation modeling approach.},
author = {Cheung, Mike},
year = 2014,
journal = {Psychological Methods},
publisher = {American Psychological Association},
volume = 19,
number = 2,
pages = 211
}
@article{assink2016fitting,
title = {Fitting three-level meta-analytic models in \textsf{R}: A step-by-step tutorial},
author = {Assink, Mark and Wibbelink, Carlijn JM and others},
year = 2016,
journal = {The Quantitative Methods for Psychology},
volume = 12,
number = 3,
pages = {154--174}
}
@article{moller2015strong,
title = {Strong effects of ionizing radiation from {Chernobyl} on mutation rates},
author = {M{\o}ller, Anders Pape and Mousseau, Timothy A},
year = 2015,
journal = {Scientific Reports},
publisher = {Nature Publishing Group},
volume = 5,
pages = 8363
}
@article{mehta2005people,
title = {People are variables too: Multilevel structural equations modeling.},
author = {Mehta, Paras D and Neale, Michael C},
year = 2005,
journal = {Psychological Methods},
publisher = {American Psychological Association},
volume = 10,
number = 3,
pages = 259
}
@article{bauer2003estimating,
title = {Estimating multilevel linear models as structural equation models},
author = {Bauer, Daniel J},
year = 2003,
journal = {Journal of Educational and Behavioral Statistics},
publisher = {Sage Publications Sage CA: Los Angeles, CA},
volume = 28,
number = 2,
pages = {135--167}
}
@book{cheung2015meta,
title = {Meta-analysis: A structural equation modeling approach},
author = {Cheung, Mike},
year = 2015,
publisher = {John Wiley \& Sons}
}
@book{kline2015principles,
title = {Principles and practice of structural equation modeling},
author = {Kline, Rex B},
year = 2015,
publisher = {Guilford}
}
@article{joreskog2006lisrel,
title = {{LISREL} 8.80. {Chicago}: {Scientific Software International}},
author = {J{\"o}reskog, KG and S{\"o}rbom, Dag},
year = 2006,
journal = {Computer software}
}
@misc{muthen2012mplus,
title = {{MPlus}: statistical analysis with latent variables--User's guide},
author = {Muth{\'e}n, Linda K and Muth{\'e}n, Bengt O},
year = 2012,
publisher = {Citeseer}
}
@article{mcardle1984some,
title = {Some algebraic properties of the reticular action model for moment structures},
author = {McArdle, J Jack and McDonald, Roderick P},
year = 1984,
journal = {British Journal of Mathematical and Statistical Psychology},
publisher = {Wiley Online Library},
volume = 37,
number = 2,
pages = {234--251}
}
@article{cheung2008model,
title = {A model for integrating fixed-, random-, and mixed-effects meta-analyses into structural equation modeling.},
author = {Cheung, Mike},
year = 2008,
journal = {Psychological Methods},
publisher = {American Psychological Association},
volume = 13,
number = 3,
pages = 182
}
@article{tang2016testing,
title = {Testing {IB} theories with meta-analytic structural equation modeling},
author = {Tang, Ryan W and Cheung, Mike},
year = 2016,
journal = {Review of International Business and Strategy},
publisher = {Emerald Group Publishing Limited},
volume = 26,
number = 4,
pages = {472--492}
}
@article{cheung2009two,
title = {A two-stage approach to synthesizing covariance matrices in meta-analytic structural equation modeling},
author = {Cheung, Mike and Chan, Wai},
year = 2009,
journal = {Structural Equation Modeling: A Multidisciplinary Journal},
publisher = {Taylor \& Francis},
volume = 16,
number = 1,
pages = {28--53}
}
@article{metasem,
title = {metaSEM: An \textsf{R} package for meta-analysis using structural equation modeling},
author = {Cheung, Mike},
year = 2015,
journal = {Frontiers in Psychology},
publisher = {Frontiers},
volume = 5,
pages = 1521
}
@article{diciccio1996bootstrap,
title = {Bootstrap confidence intervals},
author = {DiCiccio, Thomas J and Efron, Bradley},
year = 1996,
journal = {Statistical Science},
publisher = {JSTOR},
pages = {189--212}
}
@article{koffel2009two,
title = {The two-factor structure of sleep complaints and its relation to depression and anxiety.},
author = {Koffel, Erin and Watson, David},
year = 2009,
journal = {Journal of Abnormal Psychology},
publisher = {American Psychological Association},
volume = 118,
number = 1,
pages = 183
}
@book{thompson2004exploratory,
title = {Exploratory and confirmatory factor analysis},
author = {Thompson, Bruce},
year = 2004,
publisher = {American Psychological Association}
}
@article{mvmeta,
title = {Multivariate meta-analysis for non-linear and other multi-parameter associations},
author = {A. Gasparrini and B. Armstrong and M. G. Kenward},
year = 2012,
journal = {Statistics in Medicine},
volume = 31,
number = 29,
pages = {3821--3839}
}
@article{dias2013evidence,
title = {Evidence synthesis for decision making 2: a generalized linear modeling framework for pairwise and network meta-analysis of randomized controlled trials},
author = {Dias, Sofia and Sutton, Alex J and Ades, AE and Welton, Nicky J},
year = 2013,
journal = {Medical Decision Making},
publisher = {Sage Publications Sage CA: Los Angeles, CA},
volume = 33,
number = 5,
pages = {607--617}
}
@article{salanti2014evaluating,
title = {Evaluating the quality of evidence from a network meta-analysis},
author = {Salanti, Georgia and Del Giovane, Cinzia and Chaimani, Anna and Caldwell, Deborah M and Higgins, Julian PT},
year = 2014,
journal = {PLOS ONE},
publisher = {Public Library of Science},
volume = 9,
number = 7,
pages = {e99682}
}
@article{efthimiou2016getreal,
title = {{GetReal} in network meta-analysis: a review of the methodology},
author = {Efthimiou, Orestis and Debray, Thomas PA and van Valkenhoef, Gert and Trelle, Sven and Panayidou, Klea and Moons, Karel GM and Reitsma, Johannes B and Shang, Aijing and Salanti, Georgia and GetReal Methods Review Group},
year = 2016,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 7,
number = 3,
pages = {236--263}
}
@book{dias2018network,
title = {Network meta-analysis for decision-making},
author = {Dias, Sofia and Ades, Anthony E and Welton, Nicky J and Jansen, Jeroen P and Sutton, Alexander J},
year = 2018,
publisher = {John Wiley \& Sons}
}
@article{edwards2009indirect,
title = {Indirect comparisons of treatments based on systematic reviews of randomised controlled trials},
author = {Edwards, SJ and Clarke, MJ and Wordsworth, S and Borrill, J},
year = 2009,
journal = {International Journal of Clinical Practice},
publisher = {Wiley Online Library},
volume = 63,
number = 6,
pages = {841--854}
}
@article{ioannidis2006indirect,
title = {Indirect comparisons: the mesh and mess of clinical trials},
author = {Ioannidis, John PA},
year = 2006,
journal = {The Lancet},
publisher = {Elsevier},
volume = 368,
number = 9546,
pages = {1470--1472}
}
@article{song2009methodological,
title = {Methodological problems in the use of indirect comparisons for evaluating healthcare interventions: survey of published systematic reviews},
author = {Song, Fujian and Loke, Yoon K and Walsh, Tanya and Glenny, Anne-Marie and Eastwood, Alison J and Altman, Douglas G},
year = 2009,
journal = {BMJ},
publisher = {British Medical Journal Publishing Group},
volume = 338,
pages = {b1147}
}
@article{lu2009modeling,
title = {Modeling between-trial variance structure in mixed treatment comparisons},
author = {Lu, Guobing and Ades, AE},
year = 2009,
journal = {Biostatistics},
publisher = {Oxford University Press},
volume = 10,
number = 4,
pages = {792--805}
}
@article{cipriani2013conceptual,
title = {Conceptual and technical challenges in network meta-analysis},
author = {Cipriani, Andrea and Higgins, Julian PT and Geddes, John R and Salanti, Georgia},
year = 2013,
journal = {Annals of Internal Medicine},
publisher = {American College of Physicians},
volume = 159,
number = 2,
pages = {130--137}
}
@article{krahn2013graphical,
title = {A graphical tool for locating inconsistency in network meta-analyses},
author = {Krahn, Ulrike and Binder, Harald and K{\"o}nig, Jochem},
year = 2013,
journal = {BMC Medical Research Methodology},
publisher = {Springer},
volume = 13,
number = 1,
pages = 35
}
@article{dias2010checking,
title = {Checking consistency in mixed treatment comparison meta-analysis},
author = {Dias, Sofia and Welton, Nicky J and Caldwell, DM and Ades, Anthony E},
year = 2010,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 29,
number = {7-8},
pages = {932--944}
}
@article{shim2019network,
title = {Network meta-analysis: application and practice using \textsf{R} software},
author = {Shim, Sung Ryul and Kim, Seong-Jang and Lee, Jonghoo and Rücker, Gerta},
year = 2019,
journal = {Epidemiology and Health},
publisher = {Korean Society of Epidemiology},
volume = 41
}
@manual{nemeta,
title = {netmeta: Network Meta-Analysis using Frequentist Methods},
author = {Gerta Rücker and Ulrike Krahn and Jochem K{\"o}nig and Orestis Efthimiou and Guido Schwarzer},
year = 2020,
url = {https://CRAN.R-project.org/package=netmeta},
howpublished = {\url{https://CRAN.R-project.org/package=netmeta}},
note = {\textsf{R} package version 1.2-1}
}
@article{rucker2012network,
title = {Network meta-analysis, electrical networks and graph theory},
author = {R{\"u}cker, Gerta},
year = 2012,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 3,
number = 4,
pages = {312--324}
}
@article{jackson2013matrix,
title = {A matrix-based method of moments for fitting the multivariate random effects model for meta-analysis and meta-regression},
author = {Jackson, Dan and White, Ian R and Riley, Richard D},
year = 2013,
journal = {Biometrical Journal},
publisher = {Wiley Online Library},
volume = 55,
number = 2,
pages = {231--245}
}
@article{cuijpers2019effectiveness,
title = {Effectiveness and acceptability of cognitive behavior therapy delivery formats in adults with depression: A network meta-analysis},
author = {Cuijpers, P and Noma, H and Karyotaki, E and Cipriani, A and Furukawa, T},
year = 2019,
journal = {JAMA Psychiatry},
publisher = {American Medical Association},
volume = 76,
number = 7,
pages = {700--707}
}
@article{mbuagbaw2017approaches,
title = {Approaches to interpreting and choosing the best treatments in network meta-analyses},
author = {Mbuagbaw, L and Rochwerg, B and Jaeschke, R and Heels-Ansdell, D and Alhazzani, W and Thabane, L and Guyatt, Gordon H},
year = 2017,
journal = {Systematic Reviews},
publisher = {BioMed Central},
volume = 6,
number = 1,
pages = {1--5}
}
@article{higgins2012consistency,
title = {Consistency and inconsistency in network meta-analysis: concepts and models for multi-arm studies},
author = {Higgins, J and Jackson, D and Barrett, JK and Lu, G and Ades, AE and White, IR},
year = 2012,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 3,
number = 2,
pages = {98--110}
}
@article{van2012automating,
title = {Automating network meta-analysis},
author = {van Valkenhoef, Gert and Lu, Guobing and de Brock, Bert and Hillege, Hans and Ades, AE and Welton, Nicky J},
year = 2012,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 3,
number = 4,
pages = {285--299}
}
@article{marsman2017bayesian,
title = {A {Bayesian} bird's eye view of ‘{Replications} of important results in social psychology’},
author = {Marsman, Maarten and Sch{\"o}nbrodt, Felix D and Morey, Richard D and Yao, Yuling and Gelman, Andrew and Wagenmakers, Eric-Jan},
year = 2017,
journal = {Royal Society Open Science},
publisher = {The Royal Society Publishing},
volume = 4,
number = 1,
pages = 160426
}
@book{mcgrayne2011theory,
title = {The theory that would not die: {How} {Bayes}' rule cracked the enigma code, hunted down Russian submarines, and emerged triumphant from two centuries of controversy},
author = {McGrayne, Sharon Bertsch},
year = 2011,
publisher = {Yale University Press}
}
@article{etz2018introduction,
title = {Introduction to the concept of likelihood and its applications},
author = {Etz, Alexander},
year = 2018,
journal = {Advances in Methods and Practices in Psychological Science},
publisher = {Sage Publications Sage CA: Los Angeles, CA},
volume = 1,
number = 1,
pages = {60--69}
}
@article{bellhouse2004reverend,
title = {The {Reverend Thomas Bayes, FRS}: a biography to celebrate the tercentenary of his birth},
author = {Bellhouse, David R and others},
year = 2004,
journal = {Statistical Science},
publisher = {Institute of Mathematical Statistics},
volume = 19,
number = 1,
pages = {3--43}
}
@misc{rjags,
title = {rjags: {Bayesian} Graphical Models using {MCMC}. \textsf{R} package version 4-10},
author = {Martyn Plummer},
year = 2019,
url = {https://CRAN.R-project.org/package=rjags},
howpublished = {\url{https://CRAN.R-project.org/package=rjags}}
}
@article{igraph,
title = {The igraph software package for complex network research},
author = {Gabor Csardi and Tamas Nepusz},
year = 2006,
journal = {InterJournal},
volume = {Complex Systems},
pages = 1695,
url = {https://igraph.org}
}
@article{konig2013visualizing,
title = {Visualizing the flow of evidence in network meta-analysis and characterizing mixed treatment comparisons},
author = {K{\"o}nig, Jochem and Krahn, Ulrike and Binder, Harald},
year = 2013,
journal = {Statistics in Medicine},
publisher = {Wiley Online Library},
volume = 32,
number = 30,
pages = {5414--5429}
}
@article{salanti2011graphical,
title = {Graphical methods and numerical summaries for presenting results from multiple-treatment meta-analysis: an overview and tutorial},
author = {Salanti, Georgia and Ades, AE and Ioannidis, John PA},
year = 2011,
journal = {Journal of Clinical Epidemiology},
publisher = {Elsevier},
volume = 64,
number = 2,
pages = {163--171}
}
@misc{extradistr,
title = {extraDistr: Additional Univariate and Multivariate Distributions. \textsf{R} package version 1.9.1},
author = {Tymoteusz Wo\l{}od\'{z}ko},
year = 2020,
howpublished = {\url{https://CRAN.R-project.org/package=extraDistr}}
}
@article{rover2017bayesian,
title = {{Bayesian} Random-Effects Meta-Analysis Using the {bayesmeta} \textsf{R} Package},
author = {R{\"o}ver, Christian},
year = 2017,
journal = {ArXiv Preprint 1711.08683}
}
@article{higgins2009re,
title = {A re-evaluation of random-effects meta-analysis},
author = {Higgins, Julian and Thompson, Simon G and Spiegelhalter, David J},
year = 2009,
journal = {Journal of the Royal Statistical Society: Series A (Statistics in Society)},
publisher = {Wiley Online Library},
volume = 172,
number = 1,
pages = {137--159}
}
@misc{williams2018bayesian,
title = {{Bayesian} meta-analysis with weakly informative prior distributions},
author = {Williams, Donald R and Rast, Philippe and B{\"u}rkner, Paul-Christian},
year = 2018,
publisher = {PsyArXiv},
howpublished = {\url{https://psyarxiv.com/7tbrm/}}
}
@article{hoffman2014no,
title = {The {No-U-Turn} sampler: adaptively setting path lengths in {Hamiltonian} Monte Carlo},
author = {Hoffman, Matthew D and Gelman, Andrew},
year = 2014,
journal = {Journal of Machine Learning Research},
volume = 15,
number = 1,
pages = {1593--1623}
}
@article{mcneish2016using,
title = {On using {Bayesian} methods to address small sample problems},
author = {McNeish, Daniel},
year = 2016,
journal = {Structural Equation Modeling: A Multidisciplinary Journal},
publisher = {Taylor \& Francis},
volume = 23,
number = 5,
pages = {750--773}
}
@article{chung2013nondegenerate,
title = {A nondegenerate penalized likelihood estimator for variance parameters in multilevel models},
author = {Chung, Yeojin and Rabe-Hesketh, Sophia and Dorie, Vincent and Gelman, Andrew and Liu, Jingchen},
year = 2013,
journal = {Psychometrika},
publisher = {Springer},
volume = 78,
number = 4,
pages = {685--709}
}
@article{burknerJSS,
title = {brms: An \textsf{R} Package for {Bayesian} Multilevel Models Using {Stan}},
author = {Paul-Christian Bürkner},
year = 2017,
journal = {Journal of Statistical Software, Articles},
volume = 80,
number = 1,
pages = {1--28}
}
@article{burkner2017advanced,
title = {Advanced {Bayesian} multilevel modeling with the \textsf{R} package brms},
author = {B{\"u}rkner, Paul-Christian},
year = 2017,
journal = {ArXiv Preprint 1705.11123}
}
@misc{tidybayes,
title = {{tidybayes}: Tidy Data and Geoms for {Bayesian} Models. \textsf{R} package version 2.1.1},
author = {Matthew Kay},
year = 2020,
doi = {10.5281/zenodo.1308151},
url = {http://mjskay.github.io/tidybayes},
howpublished = {\url{http://mjskay.github.io/tidybayes}}
}
@article{hoenig2001abuse,
title = {The abuse of power: the pervasive fallacy of power calculations for data analysis},
author = {Hoenig, John M and Heisey, Dennis M},
year = 2001,
journal = {The American Statistician},
publisher = {Taylor \& Francis},
volume = 55,
number = 1,
pages = {19--24}
}
@article{hedges2001power,
title = {The power of statistical tests in meta-analysis.},
author = {Hedges, Larry V and Pigott, Therese D},
year = 2001,
journal = {Psychological Methods},
publisher = {American Psychological Association},
volume = 6,
number = 3,
pages = 203
}
@article{mcguinness2020risk,
title = {{Risk-Of-Bias VISualization} (robvis): An \textsf{R} package and Shiny web app for visualizing risk-of-bias assessments},
author = {McGuinness, Luke A and Higgins, Julian PT},
year = 2020,
journal = {Research Synthesis Methods},
publisher = {Wiley Online Library},
volume = 12,
number = 1
}
@misc{robvis,
title = {robvis: An \textsf{R} package and web application for visualising risk-of-bias assessments},
author = {Luke A McGuinness},
year = 2019,
url = {https://github.com/mcguinlu/robvis},
howpublished = {\url{https://github.com/mcguinlu/robvis}}
}
@article{whiting2011quadas,
title = {{QUADAS-2}: A revised tool for the quality assessment of diagnostic accuracy studies},
author = {Whiting, Penny F and Rutjes, Anne WS and Westwood, Marie E and Mallett, Susan and Deeks, Jonathan J and Reitsma, Johannes B and Leeflang, Mariska MG and Sterne, Jonathan AC and Bossuyt, Patrick MM},
year = 2011,
journal = {Annals of Internal Medicine},
publisher = {American College of Physicians},
volume = 155,
number = 8,
pages = {529--536}
}
@book{xie2018r,
title = {\textsf{R} {Markdown}: The definitive guide},
author = {Xie, Yihui and Allaire, Joseph J and Grolemund, Garrett},
year = 2018,
publisher = {Chapman and Hall/CRC Press}
}
@article{osfr,
title = {{osfr}: An \textsf{R} Interface to the {Open Science Framework}},
author = {Aaron R. Wolen and Chris H.J. Hartgerink and Ryan Hafen and Brian G. Richards and Courtney K. Soderberg and Timothy P. York},
year = 2020,
journal = {Journal of Open Source Software},
volume = 5,
number = 46,
pages = 2071
}
@article{hutton2010misleading,
title = {Misleading statistics},
author = {Hutton, Jane L},
year = 2010,
journal = {Pharmaceutical Medicine},
publisher = {Springer},
volume = 24,
number = 3,
pages = {145--149}
}
@article{christensen2006number,
title = {{Number-Needed-to-Treat} ({NNT})--Needs Treatment with Care},
author = {Christensen, Palle Mark and Kristiansen, Ivar S{\o}nb{\o}},
year = 2006,
journal = {Basic \& Clinical Pharmacology \& Toxicology},
publisher = {Wiley Online Library},
volume = 99,
number = 1,
pages = {12--16}
}
@article{mendes2017number,
title = {{Number needed to treat} ({NNT}) in clinical literature: an appraisal},
author = {Mendes, Diogo and Alves, Carlos and Batel-Marques, Francisco},
year = 2017,
journal = {BMC Medicine},
publisher = {Springer},
volume = 15,
number = 1,
pages = 112
}
@article{furukawa2011obtain,
title = {How to obtain {NNT} from {Cohen's} {$d$}: comparison of two methods},
author = {Furukawa, Toshi A and Leucht, Stefan},
year = 2011,
journal = {PLOS ONE},
publisher = {Public Library of Science},
volume = 6,
number = 4,
pages = {e19070}
}
@article{altman2011obtain,
title = {How to obtain the confidence interval from a {$P$} value},
author = {Altman, Douglas G and Bland, J Martin},
year = 2011,
journal = {BMJ},
publisher = {British Medical Journal Publishing Group},
volume = 343,
pages = {d2090}
}
@article{cohen1992power,
title = {A power primer.},
author = {Cohen, Jacob},
year = 1992,
journal = {Psychological Bulletin},
publisher = {American Psychological Association},
volume = 112,
number = 1,
pages = 155
}
@article{rosnow1996computing,
title = {Computing contrasts, effect sizes, and counternulls on other people's published data: General procedures for research consumers.},
author = {Rosnow, Ralph L and Rosenthal, Robert},
year = 1996,
journal = {Psychological Methods},
publisher = {American Psychological Association},
volume = 1,
number = 4,
pages = 331
}
@article{rosnow2000contrasts,
title = {Contrasts and correlations in effect-size estimation},
author = {Rosnow, Ralph L and Rosenthal, Robert and Rubin, Donald B},
year = 2000,
journal = {Psychological Science},
publisher = {SAGE Publications Sage CA: Los Angeles, CA},
volume = 11,
number = 6,
pages = {446--453}
}
@misc{aaron1998equating,
title = {Equating {$r$}-based and {$d$}-based effect size indices: problems with a commonly recommended formula},
author = {Aaron, Bruce and Kromrey, Jeffrey D and Ferron, John},
year = 1998,
publisher = {ERIC},
url = {https://files.eric.ed.gov/fulltext/ED433353.pdf},
howpublished = {\url{https://files.eric.ed.gov/fulltext/ED433353.pdf}}
}
@manual{thalheimer2002calculate,
title = {How to calculate effect sizes from published research: A simplified methodology},
author = {Thalheimer, Will and Cook, Samantha},
year = 2002,
publisher = {Work-Learning Research},
howpublished = {\url{http://coshima.davidrjfikis.com/EPRS8530/Effect_Sizes_pdf4.pdf}},
volume = 1,
pages = {1--9},
url = {http://coshima.davidrjfikis.com/EPRS8530/Effect\_Sizes\_pdf4.pdf}
}
@article{kraemer2006size,
title = {Size of treatment effects and their importance to clinical research and practice},
author = {Kraemer, Helena Chmura and Kupfer, David J},
year = 2006,
journal = {Biological Psychiatry},
publisher = {Elsevier},
volume = 59,
number = 11,
pages = {990--996}
}
@misc{semplot,
title = {semPlot: Path Diagrams and Visual Analysis of Various SEM Packages' Output. \textsf{R} package version 1.1.2},
author = {Sacha Epskamp},
year = 2019,
howpublished = {\url{https://CRAN.R-project.org/package=semPlot}}
}
@article{sterni2019rob,
title = {{RoB 2}: a revised tool for assessing risk of bias in randomised trials},
author = {Sterne, Jonathan and Savovi{\'c}, Jelena and Page, Matthew J and Elbers, Roy G and Blencowe, Natalie S and Boutron, Isabelle and Cates, Christopher J and Cheng, Hung-Yuan and Corbett, Mark S and Eldridge, Sandra M and others},
year = 2019,
journal = {BMJ},
publisher = {British Medical Journal Publishing Group},
volume = 366
}
@article{viechtbauer2010conducting,
title = {Conducting meta-analyses in R with the metafor package},
author = {Viechtbauer, Wolfgang},
year = 2010,
journal = {Journal of Statistical Software},
volume = 36,
number = 3,
pages = {1--48}
}
@incollection{rmsea,
author = {Browne, MW and Cudeck, R},
title = {Alternative ways of assessing model fit},
editor = {Bollen, KA and Long, JS},
booktitle = {Testing structural equation models},
publisher = {Sage Publications},
year = 1993
}
@article{alexander1989statistical,
title = {Statistical and empirical examination of the chi-square test for homogeneity of correlations in meta-analysis},
author = {Alexander, Ralph A and Scozzaro, Michael J and Borodkin, Lawrence J},
journal = {Psychological Bulletin},
volume = 106,
number = 2,
pages = 329,
year = 1989,
publisher = {American Psychological Association}
}
@article{olkin1995correlations,
title = {Correlations redux},
author = {Olkin, Ingram and Finn, Jeremy D},
journal = {Psychological Bulletin},
volume = {118},
number = {1},
pages = {155},
year = {1995},
publisher = {American Psychological Association}
}
@book{rothman2008modern,
title = {Modern epidemiology},
author = {Rothman, Kenneth J and Greenland, Sander and Lash, Timothy L},
year = {2008},
publisher = {Lippincott Williams \& Wilkins}
}
@article{rucker2015ranking,
title = {Ranking treatments in frequentist network meta-analysis works without resampling methods},
author = {R{\"u}cker, Gerta and Schwarzer, Guido},
journal = {BMC Medical Research Methodology},
volume = {15},
number = {58},
year = {2015},
publisher = {BioMed Central}
}
@incollection{raudenbush2009pi,
author = {Raudenbush, SW},
title = {Analyzing effect sizes: Random effects models},
editor = {Cooper, H and Hedges, LV and Valentine, JC},
booktitle = {The handbook of research synthesis and meta-analysis (2nd Ed.)},
publisher = {Russell Sage Foundation},
year = 2009
}
@article{tamhane2016prevalence,
title = {Prevalence odds ratio versus prevalence ratio: choice comes with consequences},
author = {Tamhane, Ashutosh R and Westfall, Andrew O and Burkholder, Greer A and Cutter, Gary R},
journal = {Statistics in Medicine},
volume = {35},
number = {30},
pages = {5730--5735},
year = {2016},
publisher = {Wiley Online Library}
}
@article{kraemer2009events,
title = {Events per person-time (incidence rate): A misleading statistic?},
author = {Kraemer, Helena Chmura},
journal = {Statistics in Medicine},
volume = {28},
number = {6},
pages = {1028--1039},
year = {2009},
publisher = {Wiley Online Library}
}
@article{bender2019limitations,
title = {Limitations of the incidence density ratio as approximation of the hazard ratio},
author = {Bender, Ralf and Beckmann, Lars},
journal = {Trials},
volume = {20},
number = {1},
pages = {1--8},
year = {2019},
publisher = {BioMed Central}
}
@article{parmar1998extracting,
title = {Extracting summary statistics to perform meta-analyses of the published literature for survival endpoints},
author = {Parmar, Mahesh KB and Torri, Valter and Stewart, Lesley},
journal = {Statistics in Medicine},
volume = {17},
number = {24},
pages = {2815--2834},
year = {1998},
publisher = {Wiley Online Library}
}
@article{page2021prisma,
title = {{PRISMA} 2020 explanation and elaboration: updated guidance and exemplars for reporting systematic reviews},
author = {Page, Matthew J and Moher, David and Bossuyt, Patrick M and Boutron, Isabelle and Hoffmann, Tammy C and Mulrow, Cynthia D and Shamseer, Larissa and Tetzlaff, Jennifer M and Akl, Elie A and Brennan, Sue E and others},
journal = {BMJ},
volume = {372},
elocation-id = {n71},
URL = {https://www.bmj.com/content/372/bmj.n71},
year = {2021},
publisher = {British Medical Journal Publishing Group}
}
@incollection{betancourt2015hamiltonian,
author = {Betancourt, Michael and Girolami, Mark},
title = {Hamiltonian Monte Carlo for hierarchical Models},
editor = {Upadhyay, SK and Singh, U and Dey, DK and Loganathan, A},
booktitle = {Current Trends in {Bayesian} Methodology with Applications},
publisher = {Chapman \& Hall/CRC Press},
year = {2015}
}
@book{lunn2012bugs,
title = {The {BUGS} book: A practical introduction to {Bayesian} analysis},
author = {Lunn, David and Jackson, Chris and Best, Nicky and Thomas, Andrew and Spiegelhalter, David},
year = {2012},
publisher = {Chapman \& Hall/CRC Press}
}
@book{harrer2021doing,
title = {Doing Meta-Analysis With {R}: A Hands-On Guide},
author = {Harrer, Mathias and Cuijpers, Pim and Furukawa, Toshi A and Ebert, David D},
year = {2021},
publisher = {Chapman \& Hall/CRC Press},
address = {Boca Raton, FL and London},
isbn = {9780367610074},
edition = {1st}
}
@misc{prisma2020package,
title = {{PRISMA2020}: {R} package and {ShinyApp} for producing {PRISMA} 2020 compliant flow diagrams (Version 0.0.1)},
author = {Haddaway, Neil R and McGuinness, Luke A},
year = 2020,
howpublished = {\url{http://doi.org/10.5281/zenodo.4287835}}
}
@misc{vuorre2016bayesian,
author = {Vuorre, Matti},
title = {Sometimes I {R}: {Bayesian} Meta-Analysis with {R}, {Stan} \& brms},
url = {https://mvuorre.github.io/posts/2016-09-29-bayesian-meta-analysis/},
year = {2016}
}
@book{montgomery,
title = {Design and Analysis of Experiments},
author = {Montgomery, Douglas C},
year = {2013},
publisher = {Wiley},
address = {Hoboken, NJ},
edition = {8th}
}
@article{cousineau2020approximating,
title={Approximating the distribution of {Cohen’s} {$d_p$} in within-subject designs},
author={Cousineau, Denis},
journal={The Quantitative Methods for Psychology},
volume={16},
pages={418--421},
year={2020}
}
@article{cousineau2021ci,
title={A study of confidence intervals for {Cohen's} {$d_p$} in within-subject designs with new proposals},
author={Denis Cousineau and Jean-Christophe Goulet-Pelletier},
year={2021},
journal={The Quantitative Methods for Psychology},
volume={17},
pages={51--75}
}
@article{viechtbauer2007approximate,
title={Approximate confidence intervals for standardized effect sizes in the two-independent and two-dependent samples design},
author={Viechtbauer, Wolfgang},
year={2007},
journal={Journal of Educational and Behavioral Statistics},
volume={32},
pages={39--60},
publisher={Sage Publications Sage CA: Thousand Oaks, CA}
}
@article{cohen1994earth,
title={The earth is round ({$p<.05$}).},
author={Cohen, Jacob},
journal={American Psychologist},
volume={49},
number={12},
pages={997},
year={1994},
publisher={American Psychological Association}
}
@article{pustejovsky2021meta,
title={Meta-analysis with {R}obust {V}ariance {E}stimation: Expanding the range of working models},
author={Pustejovsky, James E and Tipton, Elizabeth},
journal={Prevention Science},
pages={1--14},
year={2021},
publisher={Springer}
}
@article{hedges2010robust,
title={Robust variance estimation in meta-regression with dependent effect size estimates},
author={Hedges, Larry and Tipton, Elizabeth and Johnson, Matthew},
journal={Research Synthesis Methods},
volume={1},
number={1},
pages={39--65},
year={2010},
publisher={Wiley Online Library}
}
@article{tipton2015small,
title={Small-sample adjustments for tests of moderators and model fit using robust variance estimation in meta-regression},
author={Tipton, Elizabeth and Pustejovsky, James E},
journal={Journal of Educational and Behavioral Statistics},
volume={40},
number={6},
pages={604--634},
year={2015},
publisher={Sage Publications Sage CA: Los Angeles, CA}
}
@article{tipton2015small2,
title={Small sample adjustments for robust variance estimation with meta-regression.},
author={Tipton, Elizabeth},
journal={Psychological Methods},
volume={20},
number={3},
pages={375},
year={2015},
publisher={American Psychological Association}
}
@Manual{clubSandwich,
title = {clubSandwich: Cluster-Robust (Sandwich) Variance Estimators with Small-Sample Corrections},
author = {James Pustejovsky},
year = {2022},
note = {R package version 0.5.5},
url = {https://CRAN.R-project.org/package=clubSandwich},
}
@Manual{robumeta,
title = {robumeta: Robust Variance Meta-Regression},
author = {Zachary Fisher and Elizabeth Tipton and Hou Zhipeng},
year = {2017},
note = {R package version 2.0},
url = {https://CRAN.R-project.org/package=robumeta},
}
@article{joshi2021clusterwild,
title={Cluster Wild Bootstrapping to Handle Dependent Effect Sizes in Meta-Analysis with a Small Number of Studies},
author={Joshi, Megha and Pustejovsky, James E and Beretvas, S Natasha},
journal={Research Synthesis Methods},
year={2021},
publisher={Wiley Online Library}
}
@Manual{wildmeta,
title = {wildmeta: Cluster Wild Bootstrapping for Meta-Analysis},
author = {Megha Joshi and James Pustejovsky},
year = {2022},
note = {R package version 0.1.0},
url = {https://CRAN.R-project.org/package=wildmeta},
}
@book{riley2021individual,
title = {Individual Participant Data Meta-Analysis: A Handbook for Healthcare Research},
author = {Richard D. Riley and Jayne F. Tierney and Lesley A. Stewart},
year = 2021,
publisher = {Wiley},
address = {Hoboken, New Jersey},
edition = {1st}
}
@article{stanley2022beyond,
title={Beyond Random Effects: When Small-Study Findings Are More Heterogeneous},
author={Tom Stanley and Hristos Doucouliagos and John Ioannidis},
journal={Advances in Methods and Practices in Psychological Science},
volume={5},
number={4},
year={2022}
}
@article{shadish2015meta,
title={The Meta-Analytic Big Bang},
author={Shadish, William R and Lecy, Jesse D},
journal={Research Synthesis Methods},
volume={6},
number={3},
pages={246--264},
year={2015},
publisher={Wiley Online Library}
}
@article{wang2021methodological,
title={The methodological quality of individual participant data meta-analysis on intervention effects: systematic review},
author={Wang, Huan and Chen, Yancong and Lin, Yali and Abesig, Julius and Wu, Irene XY and Tam, Wilson},
journal={{BMJ}},
volume={373},
year={2021},
publisher={British Medical Journal Publishing Group}
}
@Manual{R-base,
title = {R: A Language and Environment for Statistical Computing},
author = {{R Core Team}},
organization = {R Foundation for Statistical Computing},
address = {Vienna, Austria},
year = {2022},
url = {https://www.R-project.org/},
}
@Manual{R-bookdown,
title = {bookdown: Authoring Books and Technical Documents with R Markdown},
author = {Yihui Xie},
year = {2022},
note = {R package version 0.26},
url = {https://CRAN.R-project.org/package=bookdown},
}
@Manual{R-knitr,
title = {knitr: A General-Purpose Package for Dynamic Report Generation in R},
author = {Yihui Xie},
year = {2022},
note = {R package version 1.38},
url = {https://yihui.org/knitr/},
}
@Manual{R-rmarkdown,
title = {rmarkdown: Dynamic Documents for R},
author = {JJ Allaire and Yihui Xie and Jonathan McPherson and Javier Luraschi and Kevin Ushey and Aron Atkins and Hadley Wickham and Joe Cheng and Winston Chang and Richard Iannone},
year = {2022},
note = {R package version 2.19},
url = {https://CRAN.R-project.org/package=rmarkdown},
}
@Book{bookdown2016,
title = {bookdown: Authoring Books and Technical Documents with {R} Markdown},
author = {Yihui Xie},
publisher = {Chapman and Hall/CRC},
address = {Boca Raton, Florida},
year = {2016},
note = {ISBN 978-1138700109},
url = {https://bookdown.org/yihui/bookdown},
}
@Book{knitr2015,
title = {Dynamic Documents with {R} and knitr},
author = {Yihui Xie},
publisher = {Chapman and Hall/CRC},
address = {Boca Raton, Florida},
year = {2015},
edition = {2nd},
note = {ISBN 978-1498716963},
url = {https://yihui.org/knitr/},
}
@InCollection{knitr2014,
booktitle = {Implementing Reproducible Computational Research},
editor = {Victoria Stodden and Friedrich Leisch and Roger D. Peng},
title = {knitr: A Comprehensive Tool for Reproducible Research in {R}},
author = {Yihui Xie},
publisher = {Chapman and Hall/CRC},
year = {2014},
note = {ISBN 978-1466561595},
url = {http://www.crcpress.com/product/isbn/9781466561595},
}
@Book{rmarkdown2018,
title = {R Markdown: The Definitive Guide},
author = {Yihui Xie and J.J. Allaire and Garrett Grolemund},
publisher = {Chapman and Hall/CRC},
address = {Boca Raton, Florida},
year = {2018},
note = {ISBN 9781138359338},
url = {https://bookdown.org/yihui/rmarkdown},
}
@Book{rmarkdown2020,
title = {R Markdown Cookbook},
author = {Yihui Xie and Christophe Dervieux and Emily Riederer},
publisher = {Chapman and Hall/CRC},
address = {Boca Raton, Florida},
year = {2020},
note = {ISBN 9780367563837},
url = {https://bookdown.org/yihui/rmarkdown-cookbook},
}
# Between-Study Heterogeneity {#heterogeneity}
---
<img src="_figs/heterogeneity.jpg" />
<br></br>
<span class="firstcharacter">B</span>
y now, we have already learned how to pool effect sizes in a meta-analysis. As we have seen, the aim of both the fixed- and random-effects model is to synthesize the effects of many different studies into one single number. This, however, only makes sense if we are not comparing apples and oranges. For example, it could be that while the overall effect we calculate in the meta-analysis is small, there are still a few outliers with very high effect sizes. Such information is lost in the aggregate effect, and we do not know if all studies yielded small effect sizes, or if there were exceptions.
\index{Heterogeneity}
\index{Random-Effects Model}
The extent to which true effect sizes vary within a meta-analysis is called **between-study heterogeneity**. We already mentioned this concept briefly in the last chapter in connection with the random-effects model. The random-effects model assumes that between-study heterogeneity causes the true effect sizes of studies to differ. It therefore includes an estimate of $\tau^2$, which quantifies this variance in true effects. This allows us to calculate the pooled effect, defined as the mean of the true effect size distribution.
The random-effects model always allows us to calculate a pooled effect size, even if the studies are very heterogeneous. Yet, it does not tell us if this pooled effect can be **interpreted** in a meaningful way. There are many scenarios in which the pooled effect alone is not a good representation of the data in our meta-analysis.
Imagine a case where the heterogeneity is very high, meaning that the true effect sizes (e.g. of some treatment) range from highly positive to negative. If the pooled effect of such a meta-analysis is positive, this alone does not reveal that some studies had a true **negative** effect. The fact that the treatment had an adverse effect in some studies is lost.
High heterogeneity can also be caused by the fact that there are two or more **subgroups** of studies in our data that have a different true effect. Such information can be very valuable for researchers, because it might allow us to find certain contexts in which effects are lower or higher. Yet, if we look at the pooled effect in isolation, this detail will likely be missed. In extreme cases, very high heterogeneity can mean that the studies have **nothing in common**, and that it makes no sense to interpret the pooled effect at all.
Therefore, meta-analysts must always take into account the variation in the analyzed studies. Every good meta-analysis should not only report an overall effect but also state how trustworthy this estimate is. An essential part of this is to quantify and analyze the between-study heterogeneity.
In this chapter, we will have a closer look at different ways to measure heterogeneity, and how they can be interpreted. We will also cover a few tools which allow us to detect studies that contribute to the heterogeneity in our data. Lastly, we discuss ways to address large amounts of heterogeneity in "real-world" meta-analyses.
<br></br>
## Measures of Heterogeneity {#het-measures}
---
Before we start discussing heterogeneity measures, we should first clarify that heterogeneity can mean different things. Rücker and colleagues [-@rucker2008undue], for example, differentiate between **baseline** or **design-related** heterogeneity, and **statistical** heterogeneity.
* **Baseline** or **design-related** heterogeneity arises when the populations or research designs differ across studies. We have discussed this type of heterogeneity when we talked about the "Apples and Oranges" problem (Chapter \@ref(pitfalls)), and ways to define the research question (Chapter \@ref(research-question)). Design-related heterogeneity can be reduced **a priori** by setting up a suitable PICO that determines which types of populations and designs are eligible for the meta-analysis.
* **Statistical** heterogeneity, on the other hand, is a quantifiable property, influenced by the spread and precision of the effect size estimates included in a meta-analysis. Baseline heterogeneity **can** lead to statistical heterogeneity (for example if effects differ between included populations) but does not have to. It is also possible for a meta-analysis to display high statistical heterogeneity, even if the included studies themselves are virtually identical. In this guide (and most other meta-analysis texts) the term "between-study heterogeneity" only refers to **statistical** heterogeneity.
\index{Cochran's \textit{Q}}
<br></br>
### Cochran's $Q$ {#cochran-q}
---
Based on the random-effects model, we know that there are two sources of variation causing observed effects to differ from study to study. There is the sampling error $\epsilon_k$, and the error caused by between-study heterogeneity, $\zeta_k$ (Chapter \@ref(rem)). When we want to quantify between-study heterogeneity, the difficulty is to identify how much of the variation can be attributed to the sampling error, and how much to true effect size differences.
Traditionally, meta-analysts have used **Cochran's** $Q$ [@cochran1954some] to distinguish studies' sampling error from actual between-study heterogeneity. Cochran's $Q$ is defined as a **weighted sum of squares** (_WSS_). It uses the deviation of each study's observed effect $\hat\theta_k$ from the summary effect $\hat\theta$, weighted by the inverse of the study's variance, $w_k$:
\begin{equation}
Q = \sum^K_{k=1}w_k(\hat\theta_k-\hat\theta)^2
(\#eq:het1)
\end{equation}
\index{Inverse-Variance Weighting}
Let us take a closer look at the formula. First of all, we see that it uses the same type of inverse-variance weighting that is also applied to pool effect sizes. The mean $\hat\theta$ in the formula is the pooled effect according to the fixed-effect model. The amount by which individual effects deviate from the summary effect (the **residuals**) is squared (so that the value is always positive), weighted, and then summed. The resulting value is Cochran's $Q$.
Because of the weighting by $w_k$, the value of $Q$ does not only depend on how much $\hat\theta_k$'s deviate from $\hat\theta$, but also on the precision of studies. If the standard error of an effect size is very low (and thus the precision very high), even small deviations from the summary effect will be given a higher weight, leading to higher values of $Q$.
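To make the formula more tangible, here is a minimal sketch of how $Q$ could be computed "by hand" in _R_. The effect sizes and standard errors below are made up purely for illustration:
```{r, eval=F}
# Hypothetical toy data (not from a real meta-analysis):
# four observed effect sizes and their standard errors.
theta_k <- c(0.35, 0.51, 0.12, 0.64)
se_k <- c(0.08, 0.11, 0.09, 0.15)

w_k <- 1/se_k^2                        # inverse-variance weights
theta_fe <- sum(w_k*theta_k)/sum(w_k)  # fixed-effect pooled estimate
Q <- sum(w_k*(theta_k - theta_fe)^2)   # weighted sum of squares
Q
```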
The value of $Q$ can be used to check if there is **excess variation** in our data, meaning more variation than can be expected from sampling error alone. If this is the case, we can assume that the rest of the variation is due to between-study heterogeneity. We will illustrate this with a little simulation.
\index{Sampling Error}
In our simulation, we want to inspect how $Q$ behaves under two different scenarios: when there is no between-study heterogeneity, and when heterogeneity exists. Let us begin with the no-heterogeneity case. This implies that $\zeta_k=0$, and that the residuals $\hat\theta_k-\hat\theta$ are only a product of the sampling error $\epsilon_k$. We can use the `rnorm` function to simulate deviates from some mean effect size $\hat\theta$ (assuming that they follow a normal distribution). Because they are centered around $\hat\theta$, we can expect the mean of these "residuals" to be zero ($\mu$ = 0). For this example, let us assume that the population standard deviation is $\sigma=$ 1, which leads to a **standard** normal distribution.
Normal distributions are usually denoted with $\mathcal{N}$, and we can symbolize that the residuals are draws from a normal distribution with $\mu=$ 0 and $\sigma=$ 1 like this:
\begin{equation}
\hat\theta_k-\hat\theta \sim \mathcal{N}(0,1)
(\#eq:het2)
\end{equation}
Let us try this out in _R_, and draw $K$=40 effect size residuals $\hat\theta_k-\hat\theta$ using `rnorm`.
```{r, eval=F}
set.seed(123) # needed to reproduce results
rnorm(n = 40, mean = 0, sd = 1)
```
```
## [1] -0.56048 -0.23018 1.55871 0.07051 0.12929
## [6] 1.71506 0.46092 -1.26506 -0.68685 -0.44566
## [...]
```
Because the standard normal distribution is the default for `rnorm`, we could have also used the simpler code `rnorm(40)`.
Now, let us simulate that we repeat this process of drawing $n=$ 40 samples many, many times. We can achieve this using the `replicate` function, which we tell to repeat the `rnorm` call ten thousand times. We save the resulting values in an object called `error_fixed`.
```{r}
set.seed(123)
error_fixed <- replicate(n = 10000, rnorm(40))
```
We continue with a second scenario, in which we assume that **between-study heterogeneity** ($\zeta_k$ errors) exists in addition to the sampling error $\epsilon_k$. We can simulate this by adding a second call to `rnorm`, representing the variance in true effect sizes. In this example, we also assume that the true effect sizes follow a standard normal distribution.
We can simulate the residuals of ten thousand meta-analyses with $K$=40 studies and substantial between-study heterogeneity using this code:
```{r}
set.seed(123)
error_random <- replicate(n = 10000, rnorm(40) + rnorm(40))
```
Now that we have simulated $\hat\theta_k-\hat\theta$ residuals for meta-analyses **with** and **without** heterogeneity, let us do the same for values of $Q$. For this simulation, we can simplify the formula of $Q$ a little by assuming that the variance, and thus the weight $w_k$ of every study, is **one**, which makes $w_k$ drop out of the equation. This means that we only have to use our calls to `rnorm` from before, square and sum the result, and replicate this process ten thousand times.
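With every $w_k$ fixed at one, the formula from before reduces to $Q = \sum^K_{k=1}(\hat\theta_k-\hat\theta)^2$, which is simply the sum of the squared simulated residuals.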
Here is the code for that:
```{r}
set.seed(123)
Q_fixed <- replicate(10000, sum(rnorm(40)^2))
Q_random <- replicate(10000, sum((rnorm(40) + rnorm(40))^2))
```
An important property of $Q$ is that it is assumed to (approximately) follow a $\chi^2$ distribution. A $\chi^2$ distribution, like the weighted sum of squares, can only take positive values. It is defined by its **degrees of freedom**, or d.f.; $\chi^2$ distributions are right-skewed for small d.f., but get closer and closer to a normal distribution as the degrees of freedom become larger. At the same time, the degrees of freedom are also the **expected value**, or mean, of the respective $\chi^2$ distribution.
It is assumed that $Q$ will approximately follow a $\chi^2$ distribution with $K-1$ degrees of freedom (with $K$ being the number of studies in our meta-analysis)--**if** effect size differences are **only** caused by sampling error. This means that the mean of a $\chi^2$ distribution with $K-1$ degrees of freedom tells us the value of $Q$ we can expect through sampling error alone.
This explanation was very abstract, so let us have a look at the distribution of our simulated values to make this more concrete. In the following code, we use the `hist` function to plot a histogram of the effect size "residuals" and $Q$ values. We also add a line to each plot, showing the idealized distribution.
Such distributions can be generated using the `dnorm` function for normal distributions, and `dchisq` for $\chi^2$ distributions, with `df` specifying the degrees of freedom.
```{r, eval=F}
# Histogram of the residuals (theta_k - theta)
# - We produce a histogram for both the simulated values in
# error_fixed and error_random
# - `lines` is used to add a normal distribution in blue.
hist(error_fixed,
xlab = expression(hat(theta[k])~-~hat(theta)), prob = TRUE,
breaks = 100, ylim = c(0, .45), xlim = c(-4,4),
main = "No Heterogeneity")
lines(seq(-4, 4, 0.01), dnorm(seq(-4, 4, 0.01)),
col = "blue", lwd = 2)
hist(error_random,
xlab = expression(hat(theta[k])~-~hat(theta)), prob = TRUE,
breaks = 100,ylim = c(0, .45), xlim = c(-4,4),
main = "Heterogeneity")
lines(seq(-4, 4, 0.01), dnorm(seq(-4, 4, 0.01)),
col = "blue", lwd = 2)
# Histogram of simulated Q-values
# - We produce a histogram for both the simulated values in
# Q_fixed and Q_random
# - `lines` is used to add a chi-squared distribution in blue.
# First, we calculate the degrees of freedom (k-1)
# remember: k=40 studies were used for each simulation
df <- 40-1
hist(Q_fixed, xlab = expression(italic("Q")), prob = TRUE,
breaks = 100, ylim = c(0, .06),xlim = c(0,160),
main = "No Heterogeneity")
lines(seq(0, 100, 0.01), dchisq(seq(0, 100, 0.01), df = df),
col = "blue", lwd = 2)
hist(Q_random, xlab = expression(italic("Q")), prob = TRUE,
breaks = 100, ylim = c(0, .06), xlim = c(0,160),
main = "Heterogeneity")
lines(seq(0, 100, 0.01), dchisq(seq(0, 100, 0.01), df = df),
col = "blue", lwd = 2)
```
These are the plots that _R_ draws for us:
```{r, out.width="50%", echo=FALSE, collapse=TRUE, message=F, warning=F}
par(bg="#FFFEFA")
hist(error_fixed,
xlab = expression(hat(theta[k])~-~hat(theta)), prob = TRUE,
breaks = 100, ylim = c(0, .45), xlim = c(-4,4),
main = "No Heterogeneity")
lines(seq(-4, 4, 0.01), dnorm(seq(-4, 4, 0.01)), col = "#014d64", lwd = 2)
hist(error_random,
xlab = expression(hat(theta[k])~-~hat(theta)), prob = TRUE,
breaks = 100,ylim = c(0, .45), xlim = c(-4,4),
main = "Heterogeneity")
lines(seq(-4, 4, 0.01), dnorm(seq(-4, 4, 0.01)), col = "#014d64", lwd = 2)
df = 40-1
hist(Q_fixed, xlab = expression(italic("Q")), prob = TRUE,
breaks = 100, ylim = c(0, .06),xlim = c(0,160),
main = "No Heterogeneity")
lines(seq(0, 100, 0.01), dchisq(seq(0, 100, 0.01), df = df),
col = "#014d64", lwd = 2)
hist(Q_random, xlab = expression(italic("Q")), prob = TRUE,
breaks = 100, ylim = c(0, .06), xlim = c(0,160),
main = "Heterogeneity")
lines(seq(0, 100, 0.01), dchisq(seq(0, 100, 0.01), df = df),
col = "#014d64", lwd = 2)
```
If you find the code we used to generate the plots difficult to understand, do not worry. We only used it for this simulation, and these are not plots one would produce as part of an actual meta-analysis.
Let us go through what we see in the four histograms. In the first row, we see the distribution of effect size "residuals", with and without heterogeneity. The no-heterogeneity data, as we can see, closely follows the line of the standard normal distribution we included in the plot. This is quite logical since the data was generated by `rnorm` assuming this exact distribution. The data in which we added extra heterogeneity does not follow the standard normal distribution. The dispersion of data is larger, resulting in a distribution with heavier tails.
\index{Cochran's \textit{Q}}
Now, let us explore how this relates to the distribution of $Q$ values in the second row. When there is no heterogeneity, the values of $Q$ follow a characteristic, right-skewed $\chi^2$ distribution. In the plot, the solid line shows the shape of a $\chi^2$ distribution with 39 degrees of freedom (since d.f. = $K-1$, and $K$ = 40 was used in each simulation). We see that the simulated data follows this curve pretty well. This is no great surprise. We have learned that $Q$ follows a $\chi^2$ distribution with $K-1$ degrees of freedom when there is no heterogeneity. This is exactly the case in our simulated data: variation exists only due to sampling error.
The distribution looks entirely different for our example **with** heterogeneity. The simulated data do not seem to follow the expected distribution at all. Values are shifted visibly to the right; the mean of the distribution is approximately twice as high. We can conclude that, when there is substantial between-study heterogeneity, the values of $Q$ are considerably higher than the value of $K-1$ we expect under the assumption of no heterogeneity. This comes as no surprise, since we added extra variation to our data to simulate the presence of between-study heterogeneity.
\index{meta Package}
This was a somewhat lengthy explanation, yet it may have helped us to better understand how we can exploit the statistical properties of $Q$. Cochran's $Q$ can be used to **test** if the variation in a meta-analysis significantly exceeds the amount we would expect under the null hypothesis of no heterogeneity.
This **test of heterogeneity** is commonly used in meta-analyses, and if you go back to Chapter \@ref(pooling-es), you will see that **{meta}** also provides us with it by default. It is often referred to as **Cochran's** $Q$ **test**, but this is actually a misnomer. Cochran himself never intended $Q$ to be used in this way [@hoaglin2016misunderstandings].
\index{DerSimonian-Laird Estimator}
\index{I$^2$, Higgins \& Thompson's}
Cochran's $Q$ is a very important statistic, mostly because other common ways to quantify heterogeneity, such as Higgins and Thompson's $I^2$ statistic and $H^2$, are based on it. We will get to these measures in the next sections. Cochran's $Q$ is also used by some heterogeneity variance estimators to calculate $\tau^2$, most famously by the DerSimonian-Laird estimator^[The DerSimonian-Laird method estimates the heterogeneity variance using $\hat\tau^2 = \dfrac{Q-(K-1)}{\sum_{k=1}^{K}w_k-\frac{\sum_{k=1}^Kw^2_k}{\sum_{k=1}^Kw_k}}$, with $\hat\tau^2 := 0$ when $Q<(K-1)$; see also Chapters \@ref(rem) and \@ref(tau-estimators).].
```{block, type='boximportant'}
**Problems With $Q$ & the $Q$-Test**
\vspace{2mm}
Although $Q$ is commonly used and reported in meta-analyses, it has several flaws. Hoaglin [-@hoaglin2016misunderstandings], for example, argues that the assumption of $Q$ following a $\chi^2$ distribution with $K-1$ degrees of freedom does not reflect $Q$'s actual behavior in meta-analysis, and that related procedures such as the DerSimonian-Laird method may therefore be biased.
\vspace{2mm}
A more practical concern is that $Q$ increases both when the number of studies $K$ increases, and when the precision of the studies (i.e. their sample size) increases. Therefore, $Q$ and whether it is significant depend heavily on the size of your meta-analysis, and thus its statistical power.
It follows that we should not rely solely on the significance of a $Q$-test when assessing heterogeneity. Sometimes, meta-analysts decide whether to apply a fixed-effect or random-effects model based on the significance of the $Q$-test. For the reasons stated here, this approach is strongly discouraged.
```
<br></br>
### Higgins & Thompson's $I^2$ Statistic {#i-squared}
---
\index{I$^2$, Higgins \& Thompson's}
The $I^2$ statistic [@higgins2002quantifying] is another way to quantify between-study heterogeneity, and directly based on Cochran's $Q$. It is defined as the percentage of variability in the effect sizes that is not caused by sampling error. $I^2$ draws on the assumption that $Q$ follows a $\chi^2$ distribution with $K-1$ degrees of freedom under the null hypothesis of no heterogeneity. It quantifies, in percent, how much the **observed** value of $Q$ **exceeds** the **expected** $Q$ value when there is no heterogeneity (i.e. $K-1$).
The formula of $I^2$ looks like this:
\begin{equation}
I^2 = \frac{Q-(K-1)}{Q}
(\#eq:het3)
\end{equation}
where $K$ is the total number of studies. The value of $I^2$ cannot be lower than 0%, so if $Q$ happens to be smaller than $K-1$, we simply use $0$ instead of a negative value^[Please note that, while $I^2$ is often calculated using the formula above, **not all software** follows this definition. In **{metafor}**, for example, its value is derived using the estimated between-study heterogeneity $\tau^2$ (see [here](https://www.metafor-project.org/doku.php/tips:i2_multilevel_multivariate)), so that: $$I^2 = \frac{\hat{\tau}^2}{\hat{\tau}^2 + \tilde{v}};$$ where: $$\tilde{v} = \frac{(K-1) \sum w_k}{(\sum w_k)^2 - \sum w_k^2}$$ is the "average" or "typical" sampling variance calculated from the weights $w_k$ of all studies $k$ in our sample (see Chapter \@ref(fem)). In contrast to the formula shown before, this way to calculate $I^2$ makes it easier to see that **$I^2$ is a _relative_ measure of heterogeneity**. It depends on the "typical" within-study variance $\tilde{v}$, which can vary from meta-analysis to meta-analysis. We elaborate on this point in Chapter \@ref(het-measure-which). Please also note that these different formulas can sometimes explain why meta-analysis tools can return **divergent values** of $I^2$, even though the same data was pooled.].
We can use our simulated values of $Q$ from before to illustrate how $I^2$ is calculated. First, let us randomly pick the tenth simulated value in `Q_fixed`, where we assumed no heterogeneity. Then, we use the formula above to calculate $I^2$.
```{r}
# Display the value of the 10th simulation of Q
Q_fixed[10]
# Define k
k <- 40
# Calculate I^2
(Q_fixed[10] - (k-1))/Q_fixed[10]
```
Since the result is negative, we truncate it at zero, resulting in $I^2$ = 0%. This value tells us that zero percent of the variation in effect sizes is due to between-study heterogeneity. This is in line with the settings used for our simulation.
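In _R_, this truncation can be written down explicitly using `max`:
```{r}
# Truncate negative values at zero
max(0, (Q_fixed[10] - (k-1))/Q_fixed[10])
```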
Now, we do the same with the tenth simulated value in `Q_random`.
```{r}
(Q_random[10] - (k-1))/Q_random[10]
```
We see that the $I^2$ value of this simulation is approximately 50%, meaning that about half of the variation is due to between-study heterogeneity. This is also in line with our expectations since the variation in this example is based, in equal parts, on the simulated sampling error and between-study heterogeneity.
It is common to use the $I^2$ statistic to report the between-study heterogeneity in meta-analyses, and $I^2$ is included by default in the output we get from **{meta}**. The popularity of this statistic may be associated with the fact that there is a "rule of thumb" on how we can interpret it [@higgins2002quantifying]:
* $I^2$ = 25%: low heterogeneity
* $I^2$ = 50%: moderate heterogeneity
* $I^2$ = 75%: substantial heterogeneity.
<br></br>
### The $H^2$ Statistic
---
The $H^2$ statistic [@higgins2002quantifying] is also derived from Cochran's $Q$, and similar to $I^2$. It describes the ratio of the observed variation, measured by $Q$, and the expected variance due to sampling error:
\begin{equation}
H^2 = \frac{Q}{K-1}
(\#eq:het4)
\end{equation}
The computation of $H^2$ is a little more elegant than that of $I^2$ because we do not have to artificially correct its value when $Q$ is smaller than $K-1$. When there is no between-study heterogeneity, $H^2$ equals one (or is smaller). Values greater than one indicate the presence of between-study heterogeneity.
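To make this concrete, we can plug the two simulated $Q$ values from the $I^2$ example into this formula (again with $K$ = 40 studies, so $K-1$ = 39):
```{r}
# H^2 = Q/(K-1) for the simulated Q values from before
Q_fixed[10]/(k-1)
Q_random[10]/(k-1)
```
As expected, the simulation without heterogeneity yields an $H^2$ of one or below, while the simulation with added heterogeneity yields a value of roughly two.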
Compared to $I^2$, it is far less common to find this statistic reported in published meta-analyses. However, $H^2$ is also included by default in the output of **{meta}**'s meta-analysis functions.
<br></br>
### Heterogeneity Variance $\tau^2$ & Standard Deviation $\tau$ {#tau}
---
We already discussed the heterogeneity variance $\tau^2$ in detail in Chapter \@ref(rem). As we mentioned there, $\tau^2$ quantifies the **variance** of the true effect sizes underlying our data. When we take the square root of $\tau^2$, we obtain $\tau$, which is the **standard deviation** of the true effect sizes.
A great asset of $\tau$ is that it is expressed on the same scale as the effect size metric. This means that we can interpret it in the same way as one would interpret, for example, the mean and standard deviation of the sample's age in a primary study. The value of $\tau$ tells us something about the **range** of the true effect sizes.
We can, for example, calculate the 95% confidence interval of the true effect sizes by multiplying $\tau$ by 1.96, and then adding the result to, and subtracting it from, the pooled effect size. We can try this out using the `m.gen` meta-analysis we calculated in Chapter \@ref(pre-calculated-es).
Let us have a look again what the pooled effect and $\tau$ estimate in this meta-analysis were:
```{r, echo=F, message=F, warning=F}
library(meta)
library(dmetar)
data(ThirdWave)
m.gen <- metagen(TE = TE,
seTE = seTE,
studlab = Author,
data = ThirdWave,
sm = "SMD",
comb.fixed = FALSE,
comb.random = TRUE,
method.tau = "REML",
hakn = TRUE,
title = "Third Wave Psychotherapies")
```
```{r}
# Pooled effect
m.gen$TE.random
# Estimate of tau
m.gen$tau
```
We see that $g=$ 0.58 and $\tau=$ 0.29. Based on this data, we can calculate the lower and upper bound of the 95% true effect size confidence interval: 0.58 $-$ 1.96 $\times$ 0.29 = 0.01 and 0.58 $+$ 1.96 $\times$ 0.29 = 1.15.
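The same calculation can also be written directly in _R_, using the values stored in `m.gen` (a quick sketch):
```{r}
# 95% range of the true effect sizes: g - 1.96*tau and g + 1.96*tau
m.gen$TE.random + c(-1, 1) * 1.96 * m.gen$tau
```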
```{block2, type='boxinfo'}
**"What's the Uncertainty of Our Uncertainty?": Calculation of Confidence Intervals Around $\tau^2$**
\vspace{2mm}
Methods to quantify the uncertainty of our between-study heterogeneity variance estimate (i.e. the confidence intervals around $\tau^2$) remain a field of ongoing investigation. Several approaches are possible, and their adequateness depends on the type of $\tau^2$ estimator (Chapter \@ref(tau-estimators)).
\vspace{4mm}
The **{meta}** package follows the recommendations of Veroniki [-@veroniki2016methods] and uses the $Q$**-Profile** method [@viechtbauer2007confidence] for most estimators.
\vspace{4mm}
The $Q$-Profile method is based on an altered $Q$ version, the **generalized** $Q$**-statistic** $Q_{\text{gen}}$. While the standard version of $Q$ uses the pooled effect based on the fixed-effect model, $Q_{\text{gen}}$ is based on the random-effects model. It uses the overall effect according to the random-effects model, $\hat\mu$, to calculate the deviates, as well as weights based on the random-effects model:
\begin{equation}
Q_{\text{gen}} = \sum_{k=1}^{K} w^*_k (\hat\theta_k-\hat\mu)^2
(\#eq:het5)
\end{equation}
Where $w^*_k$ is the random-effects weight (see Chapter \@ref(tau-estimators)):
\begin{equation}
w^*_k = \frac{1}{s^2_k+\tau^2}
(\#eq:het6)
\end{equation}
$Q_{\text{gen}}$ has also been shown to follow a $\chi^2$ distribution with $K-1$ degrees of freedom. We can think of the generalized $Q$ statistic as a function $Q_{\text{gen}}(\tau^2)$ which returns different values of $Q_{\text{gen}}$ for higher or lower values of $\tau^2$. The results of this function have a $\chi^2$ distribution.
\vspace{4mm}
Since the $\chi^2$ distribution follows a clearly predictable pattern, it is easy to determine confidence intervals with, for example, 95% coverage. We only have to get the value of $\chi^2$ for the 2.5\textsuperscript{th} and 97.5\textsuperscript{th} percentile, based on its $K-1$ degrees of freedom. In _R_, this can be easily done using the **quantile function** `qchisq`, for example: `qchisq(0.975, df=5)`.
\vspace{4mm}
The $Q$-Profile method exploits this relationship to calculate confidence intervals around $\tau^2$ using an iterative process (so-called "profiling"). In this approach, $Q_{\text{gen}}(\widetilde{\tau}^2)$ is calculated repeatedly while increasing the value of $\tau^2$, until the expected value of the lower and upper bound of the confidence interval based on the $\chi^2$ distribution is reached.
\vspace{4mm}
The $Q$-Profile method can be specified in **{meta}** functions through the argument `method.tau.ci = "QP"`. This is the default setting, meaning that we do not have to add this argument manually. The only exception is when we use the DerSimonian-Laird estimator (`method.tau = "DL"`). In this case, a different method, the one by Jackson [-@jackson2013confidence], is used automatically (we can do this manually by specifying `method.tau.ci = "J"`).
Usually, there is no need to deviate from **{meta}**'s default behavior, but it is helpful to report which method was used to calculate the confidence intervals around $\tau^2$ in your meta-analysis, so that others can retrace your steps.
```
<br></br>
## Which Measure Should I Use? {#het-measure-which}
---
When we assess and report heterogeneity in a meta-analysis, we need a measure which is robust, and not too heavily influenced by statistical power. Cochran's $Q$ increases both when the number of studies increases, and when the precision (i.e. the sample size of a study) increases.
Therefore, $Q$ and whether it is significant highly depends on the size of your meta-analysis, and thus its statistical power. We should therefore not only rely on $Q$, and particularly the $Q$-test, when assessing between-study heterogeneity.
$I^2$, on the other hand, is not sensitive to changes in the number of studies in the analysis. It is relatively easy to interpret, and many researchers understand what it means. Generally, it is not a bad idea to include $I^2$ as a heterogeneity measure in our meta-analysis report, especially if we also provide a confidence interval for this statistic so that others can assess how precise the estimate is.
\index{Sampling Error}
\index{I$^2$, Higgins \& Thompson's}
\index{Heterogeneity}
However, despite its common use in the literature, $I^2$ is not a perfect measure for heterogeneity either. It is not an absolute measure of heterogeneity, and its value still heavily depends on the precision of the included studies [@borenstein2017basics; @rucker2008undue]. As said before, $I^2$ is simply the percentage of variability not caused by sampling error $\epsilon$. If our studies become increasingly large, the sampling error tends to zero, while at the same time, $I^2$ tends to 100%--simply because the studies have a greater sample size.
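A small numerical sketch may help to see this. It uses the alternative $I^2$ formula based on $\tau^2$ and the "typical" sampling variance $\tilde{v}$ (see the footnote in Chapter \@ref(i-squared)); all numbers are made up for illustration:
```{r}
# Same between-study variance, but different study precision
tau2 <- 0.1
v.imprecise <- 0.3   # "typical" sampling variance of small studies
v.precise <- 0.01    # "typical" sampling variance of very large studies

tau2 / (tau2 + v.imprecise)  # I^2 = 25%
tau2 / (tau2 + v.precise)    # I^2 = 91%
```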
**Only** relying on $I^2$ is therefore not a good option either. Since $H^2$ behaves similarly to $I^2$, the same caveats also apply to this statistic.
The values of $\tau^2$ and $\tau$, on the other hand, are insensitive to the number of studies, **and** their precision. They do not systematically increase as the number of studies and their size increases. Yet, it is often hard to interpret how relevant $\tau^2$ is from a practical standpoint. Imagine, for example, that we found that the variance of true effect sizes in our meta-analysis was $\tau^2=$ 0.08. It is often difficult for ourselves, and others, to determine if this amount of variance is meaningful or not.
\index{Prediction Interval}
**Prediction intervals** (PIs) are a good way to overcome this limitation [@inthout2016plea]. Prediction intervals give us a range into which we can expect the effects of future studies to fall based on present evidence.
Say that our prediction interval lies completely on the "positive" side favoring the intervention. This means that, despite varying effects, the intervention is expected to be beneficial in the future across the contexts we studied. If the prediction interval includes zero, we can be less sure about this, although it should be noted that broad prediction intervals are quite common.
To calculate prediction intervals around the overall effect $\hat\mu$, we use both the estimated between-study heterogeneity variance $\hat\tau^2$, as well as the standard error of the pooled effect, $SE_{\hat\mu}$. We sum the squared standard error and $\hat\tau^2$ value, and then take the square root of the result. This leaves us with the standard deviation of the prediction interval, $SD_{\text{PI}}$. A $t$ distribution with $K-1$ degrees of freedom is assumed for the prediction range, which is why we multiply $SD_{\text{PI}}$ with the 97.5\textsuperscript{th} percentile value of $t_{K-1}$, and then add and subtract the result from $\hat\mu$. This gives us the 95% prediction interval of our pooled effect.
The formula for 95% prediction intervals looks like this:
\begin{align}
\hat\mu &\pm t_{K-1, 0.975}\sqrt{SE_{\hat\mu}^2+\hat\tau^2} \notag \\
\hat\mu &\pm t_{K-1, 0.975}SD_{\text{PI}} (\#eq:het7)
\end{align}
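As a rough check, we could also compute this interval "by hand" from the values stored in `m.gen`. This is only a sketch following the formula above; the degrees of freedom that **{meta}** itself uses may differ slightly, so the numbers need not match its output exactly.
```{r, eval=F}
# Manual 95% prediction interval, following the formula above
m.gen$TE.random + c(-1, 1) * qt(0.975, df = m.gen$k - 1) *
  sqrt(m.gen$seTE.random^2 + m.gen$tau^2)
```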
All of **{meta}**'s functions can provide us with a prediction interval around the pooled effect, but they do not do so by default. When running a meta-analysis, we have to add the argument `prediction = TRUE` so that prediction intervals appear in the output.
In sum, it is advisable not to rely on one measure alone when characterizing the heterogeneity of a meta-analysis. At the very least, it is recommended to report $I^2$ (with its confidence interval) as well as prediction intervals, and to interpret the results accordingly.
<br></br>
## Assessing Heterogeneity in _R_ {#het-R}
---
Let us see how we can use the things we learned about heterogeneity measures in practice. As an illustration, let us examine the heterogeneity of our `m.gen` meta-analysis object a little closer (we generated this object in Chapter \@ref(pre-calculated-es)).
Because the default output of `metagen` objects does not include prediction intervals, we have to update it first. We simply use the `update.meta` function, and tell it that we want `prediction` intervals to be printed out additionally.
```{r}
m.gen <- update.meta(m.gen, prediction = TRUE)
```
Now we can reinspect the results:
```{r, eval=F}
summary(m.gen)
```
```
## Review: Third Wave Psychotherapies
##
## [...]
##
## Number of studies combined: k = 18
##
## SMD 95%-CI t p-value
## Random effects model (HK) 0.5771 [ 0.3782; 0.7760] 6.12 < 0.0001
## Prediction interval [-0.0572; 1.2115]
##
## Quantifying heterogeneity:
## tau^2 = 0.0820 [0.0295; 0.3533]; tau = 0.2863 [0.1717; 0.5944];
## I^2 = 62.6% [37.9%; 77.5%]; H = 1.64 [1.27; 2.11]
##
## Test of heterogeneity:
## Q d.f. p-value
## 45.50 17 0.0002
##
## Details on meta-analytical method:
## - Inverse variance method
## - Restricted maximum-likelihood estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model (df = 17)
## - Prediction interval based on t-distribution (df = 16)
```
In the output, we see results for all heterogeneity measures we defined before. Let us begin with the `Quantifying heterogeneity` section. Here, we see that $\tau^2=$ 0.08. The confidence interval around $\tau^2$ (0.03 - 0.35) does not contain zero, indicating that some between-study heterogeneity exists in our data. The value of $\tau$ is 0.29, meaning that the true effect sizes have an estimated standard deviation of $SD=$ 0.29, expressed on the scale of the effect size metric (here, Hedges' $g$).
A look at the second line reveals that $I^2=$ 63% and that $H$ (the square root of $H^2$) is 1.64. This means that more than half of the variation in our data is estimated to stem from true effect size differences. Using Higgins and Thompson's "rule of thumb", we can characterize this amount of heterogeneity as moderate to large.
Directly under the pooled effect, we see the prediction interval. It ranges from $g=$ -0.06 to 1.21. This means that it is possible that some future studies will find a negative treatment effect based on present evidence. However, the interval is quite broad, meaning that very high effects are possible as well.
\index{Cochran's \textit{Q}}
Lastly, we are also presented with $Q$ and the `Test of heterogeneity`. We see that $Q$ = 45.5. This is considerably more than what we would expect based on the $K-1=$ 17 degrees of freedom in this analysis. Consequently, the heterogeneity test is significant ($p<$ 0.001). However, as we mentioned before, we should not base our assessment on the $Q$-test alone, given its known deficiencies.
```{block, type='boxreport'}
**Reporting the Amount of Heterogeneity In Your Meta-Analysis**
\vspace{4mm}
Here is how we could report the amount of heterogeneity we found in our example:
> _"The between-study heterogeneity variance was estimated at $\hat\tau^2$ = 0.08 (95%CI: 0.03-0.35), with an $I^2$ value of 63% (95%CI: 38-78%). The prediction interval ranged from $g$ = -0.06 to 1.21, indicating that negative intervention effects cannot be ruled out for future studies."_
```
So, what do we make of these results? Overall, our indicators tell us that moderate to substantial heterogeneity is present in our data. The effects in our meta-analysis are not completely heterogeneous, but there are clearly some differences in the true effect sizes between studies.
It may therefore be a good idea to explore what causes this heterogeneity. It is possible that there are one or two studies that do not really "fit in", because they have a much higher effect size. This could have inflated the heterogeneity in our analysis, and even worse: it may have led to an **overestimation** of the true effect.
On the other hand, it is also possible that our pooled effect is influenced heavily by one study with a very large sample size reporting an unexpectedly small effect size. This could mean that the pooled effect **underestimates** the true benefits of the treatment.
To address these concerns, we will now turn to procedures which allow us to assess the robustness of our pooled results: **outlier** and **influence analyses**.
\index{I$^2$, Higgins \& Thompson's}
```{block2, type='boxinfo'}
**The $I^2$ > 50% "Guideline"**
\vspace{4mm}
There are no iron-clad rules determining when exactly further analyses of the between-study heterogeneity are warranted. An approach that is sometimes used in practice is to check for outliers and influential cases when $I^2$ is greater than 50%. When this threshold is reached, we can assume at least moderate heterogeneity, and that (more than) half of the variation is due to true effect size differences.
\vspace{4mm}
This "rule of thumb" is somewhat arbitrary, and, knowing the problems of $I^2$ we discussed, in no way perfect. However, it can still be helpful from a practical perspective, because we can specify **a priori**, and in a consistent way, when we will try to get a more robust version of the pooled effect in our meta-analysis.
\vspace{4mm}
What should be avoided at all costs is to remove outlying and/or influential cases without a stringent rationale, just because we like the results. Such outcomes will be heavily biased by our "researcher agenda" (see Chapter \@ref(pitfalls)), even if we did not consciously try to bend the results into a "favorable" direction.
```
<br></br>
## Outliers & Influential Cases {#outliers}
---
\index{Outlier}
\index{Influential Case}
As mentioned before, between-study heterogeneity can be caused by one or more studies with extreme effect sizes that do not quite "fit in". This may distort our pooled effect estimate, and it is a good idea to reinspect the pooled effect after such **outliers** have been removed from the analysis.
On the other hand, we also want to know if the pooled effect estimate we found is robust, meaning that it does not depend heavily on one single study. Therefore, we also want to know whether there are studies which heavily push the effect of our analysis into one direction. Such studies are called **influential cases**, and we will devote some time to this topic later in this chapter.
<br></br>
### Basic Outlier Removal {#basic-outlier}
---
There are several ways to define the effect of a study as "outlying" [@viechtbauer2010outlier]. An easy, and somewhat "brute force" approach, is to view a study as an outlier if its confidence interval does not overlap with the confidence interval of the pooled effect. The effect size of an outlier is so **extreme** that it differs significantly from the overall effect. To detect such outliers, we can search for all studies:
* for which the **upper bound** of the 95% confidence interval is **lower** than the **lower bound** of the pooled effect confidence interval (i.e. extremely **small** effects)
* for which the **lower bound** of the 95% confidence interval is **higher** than the **upper bound** of the pooled effect confidence interval (i.e. extremely **large** effects).
The idea behind this method is quite straightforward. Studies with a high sampling error are expected to deviate substantially from the pooled effect. However, because the confidence intervals of such studies will also be wide, it becomes more likely that they overlap with that of the pooled effect.
Yet, if a study has a **low** standard error and **still** (unexpectedly) deviates substantially from the pooled effect, there is a good chance that the confidence intervals will not overlap, and that the study is classified as an outlier.
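Before we turn to a convenience function for this task, here is a minimal sketch of how this rule could be applied "by hand" to our `m.gen` object, using the study-level and pooled confidence interval bounds stored in it:
```{r, eval=F}
# Names of studies whose 95% CI does not overlap with the pooled effect's CI
m.gen$studlab[m.gen$upper < m.gen$lower.random |
              m.gen$lower > m.gen$upper.random]
```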
\index{dmetar Package}
The **{dmetar}** package contains a function called `find.outliers`, which implements this simple outlier removal algorithm. It searches for outlying studies in a **{meta}** object, removes them, and then recalculates the results.
```{block, type='boxdmetar'}
**The "find.outliers" Function**
\vspace{4mm}
The `find.outliers` function is included in the **{dmetar}** package. Once **{dmetar}** is installed and loaded on your computer, the function is ready to be used. If you did **not** install **{dmetar}**, follow these instructions:
\vspace{2mm}
1. Access the source code of the function [online](https://raw.githubusercontent.com/MathiasHarrer/dmetar/master/R/find.outliers.R).
2. Let _R_ "learn" the function by copying and pasting the source code in its entirety into the console (bottom left pane of R Studio), and then hit "Enter".
3. Make sure that the **{meta}** and **{metafor}** packages are installed and loaded.
```
The `find.outliers` function only needs an object created by a **{meta}** meta-analysis function as input. Let us see what results we get for our `m.gen` object.
```{r, eval=F}
find.outliers(m.gen)
```
```
## Identified outliers (random-effects model)
## ------------------------------------------
## "DanitzOrsillo", "Shapiro et al."
##
## Results with outliers removed
## -----------------------------
## Number of studies combined: k = 16
##
## SMD 95%-CI t p-value
## Random effects model 0.4528 [0.3257; 0.5800] 7.59 < 0.0001
## Prediction interval [0.1693; 0.7363]
##
## Quantifying heterogeneity:
## tau^2 = 0.0139 [0.0000; 0.1032]; tau = 0.1180 [0.0000; 0.3213];
## I^2 = 24.8% [0.0%; 58.7%]; H = 1.15 [1.00; 1.56]
##
## Test of heterogeneity:
## Q d.f. p-value
## 19.95 15 0.1739
##
## [...]
```
\index{Weight}
We see that the `find.outliers` function has detected two outliers, “DanitzOrsillo” and “Shapiro et al.”. The function has also automatically rerun our analysis while excluding the identified studies. In the column displaying the random-effects weight of each study, `%W(random)`, we see that the weight of the outlying studies has been set to zero, thus removing them from the analysis.
\index{I$^2$, Higgins \& Thompson's}
Based on the output, we see that the $I^2$ heterogeneity shrinks considerably when the two studies are excluded, from $I^2=$ 63% to 25%. The confidence interval around $\tau^2$ now also includes zero, and the $Q$-test of heterogeneity is no longer significant. Consequently, the prediction interval of our estimate has also narrowed. Now, it only contains positive values, providing much more certainty about the robustness of the pooled effect across future studies.
<br></br>
### Influence Analysis {#influence-analysis}
---
\index{Influential Case}
We have now learned a basic way to detect and remove outliers in meta-analyses. However, it is not only extreme effect sizes which can cause concerns regarding the robustness of the pooled effect. Some studies, even if their effect size is not particularly high or low, can still exert a very high **influence** on our overall results.
For example, it could be that we find an overall effect in our meta-analysis, but that its significance depends on a single large study. This would mean that the pooled effect is not statistically significant anymore once the influential study is removed. Such information is very important if we want to communicate to the public how robust our results are.
Outlying and influential studies have an overlapping but slightly different meaning. Outliers are defined through the magnitude of their effect but do not necessarily need to have a substantial impact on the results of our meta-analysis. It is perfectly possible that removal of an outlier as defined before neither changes the average effect size, nor the heterogeneity in our data substantially.
Influential cases, on the other hand, are those studies which--by definition--have a large impact on the pooled effect or heterogeneity, regardless of how high or low the effect is. This does not mean, of course, that a study with an extreme effect size cannot be an influential case. In fact, outliers are often also influential, as our example in the last chapter illustrated. But they do not **have** to be.
\index{Leave-One-Out Method}
There are several techniques to identify influential studies, and they are a little more sophisticated than the basic outlier removal we discussed previously. They are based on the **leave-one-out** method. In this approach, we recalculate the results of our meta-analysis $K$ times, each time **leaving out** one study.
Based on this data, we can calculate different **influence diagnostics**. Influence diagnostics allow us to detect the studies which influence the overall estimate of our meta-analysis the most, and let us assess if this large influence distorts our pooled effect [@viechtbauer2010outlier].
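As an aside, **{meta}** itself already offers a simple leave-one-out analysis through its `metainf` function, which reruns the pooling with each study omitted once (a minimal sketch):
```{r, eval=F}
# Leave-one-out results: each row shows the pooled effect with one study removed
metainf(m.gen)
```
The influence diagnostics we cover below go beyond this basic output.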
\index{dmetar Package}
The **{dmetar}** package contains a function called `InfluenceAnalysis`, which allows us to calculate these various influence diagnostics using one function. The function can be used for any type of meta-analysis object created by **{meta}** functions.
```{block, type='boxdmetar'}
**The "InfluenceAnalysis" function**
\vspace{4mm}
The `InfluenceAnalysis` function is included in the **{dmetar}** package. Once **{dmetar}** is installed and loaded on your computer, the function is ready to be used. If you did **not** install **{dmetar}**, follow these instructions:
\vspace{2mm}
1. Access the source code of the function [online](https://raw.githubusercontent.com/MathiasHarrer/dmetar/master/R/influence.analysis.R).
2. Let _R_ "learn" the function by copying and pasting the source code in its entirety into the console (bottom left pane of R Studio), and then hit "Enter".
3. Make sure that the **{meta}**, **{metafor}**, **{ggplot2}** and **{gridExtra}** packages are installed and loaded.
```
Using the `InfluenceAnalysis` function is relatively straightforward. We only have to specify the name of the meta-analysis object for which we want to conduct the influence analysis. Here, we again use the `m.gen` object.
Because `InfluenceAnalysis` uses the fixed-effect model by default, we also have to set `random = TRUE`, so that the random-effects model will be used. The function can also take other arguments, which primarily control the type of plots generated by the function. Those arguments are detailed in the function documentation.
We save the results of the function in an object called `m.gen.inf`.
```{r, eval=F}
m.gen.inf <- InfluenceAnalysis(m.gen, random = TRUE)
```
```{r, echo=F}
load("data/m_gen_inf.rda")
```
The `InfluenceAnalysis` function creates four influence diagnostic plots: a **Baujat** plot, **influence diagnostics** according to Viechtbauer and Cheung [-@viechtbauer2010outlier], and the leave-one-out meta-analysis results, sorted by effect size and $I^2$ value. We can open each of these plots individually using the `plot` function. Let us go through them one after another.
<br></br>
#### Baujat Plot {#baujat}
---
A Baujat plot can be printed using the `plot` function and by specifying `"baujat"` in the second argument:
```{r, eval=F}
plot(m.gen.inf, "baujat")
```
```{r, out.width='70%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/baujat_col_sep.png')
```
Baujat plots [@baujat2002graphical] are diagnostic plots to detect studies which overly contribute to the heterogeneity in a meta-analysis. The plot shows the contribution of each study to the overall **heterogeneity** (as measured by Cochran's $Q$) on the **horizontal** axis, and its **influence** on the **pooled effect size** on the **vertical** axis.
This "influence" value is determined through the leave-one-out method, and expresses the standardized difference of the overall effect when the study is included in the meta-analysis, versus when it is not included.
Studies on the right side of the plot can be regarded as potentially relevant cases since they contribute heavily to the overall heterogeneity in our meta-analysis. Studies in the upper right corner of the plot may be particularly influential since they have a large impact on both the estimated heterogeneity, and the pooled effect.
As you may have recognized, the two studies we find on the right side of the plot are the ones we already detected before ("DanitzOrsillo" and "Shapiro et al."). These studies do not have a large impact on the overall results (presumably because they have a small sample size), but they do add substantially to the heterogeneity we find in the meta-analysis.
<br></br>
#### Influence Diagnostics {#inf-diags}
---
The next plot contains several influence diagnostics for each of our studies. These can be plotted using this code:
```{r, eval=F}
plot(m.gen.inf, "influence")
```
```{r, out.width='70%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/influence_col_sep.png')
```
We see that the plot displays, for each study, the value of different influence measures. These measures are used to characterize which studies fit well into our meta-analysis model, and which do not. To understand what the diagnostics mean, let us briefly go through them from left to right, top to bottom.
<br></br>
##### Externally Standardized Residuals
---
\index{Leave-One-Out Method}
The first plot displays the externally standardized residual of each study. As the name says, these residuals are the deviation of each observed effect size $\hat\theta_k$ from the pooled effect size. The residuals are standardized, and we use an "external" estimate of the pooled effect (calculated without the study) to obtain the deviations.
The "external" pooled effect $\hat\mu_{\setminus k}$ is obtained by calculating the overall effect without study $k$, along with the principles of the leave-one-out method. The resulting residual is then standardized by (1) the **variance** of the external effect (i.e. the squared standard error of $\hat\mu_{\setminus k}$), (2) the $\tau^2$ estimate of the external pooled effect, and (3) the variance of $k$.
\begin{equation}
t_{k} = \frac{\hat\theta_{k}-\hat\mu_{\setminus k}}{\sqrt{\mathrm{Var}(\hat\mu_{\setminus k})+\hat\tau^2_{\setminus k}+s^2_k}}
(\#eq:het8)
\end{equation}
\index{Sampling Error}
Assuming that a study $k$ fits well into the meta-analysis, the three terms in the denominator capture the sources of variability which determine how much an effect size differs from the average effect. These sources of variability are the sampling error of $k$, the variance of true effect sizes, and the imprecision in our pooled effect size estimate.
If a study does **not** fit into the overall population, we can assume that the residual will be **larger** than expected from the three variance terms alone. This leads to higher values of $t_k$, which indicate that the study is an influential case that does not "fit in".
<br></br>
##### $\mathrm{DFFITS}$ Value
---
The computation of the $\mathrm{DFFITS}$ metric is similar to the one of the externally standardized residuals. The pattern of DFFITS and $t_k$ values is therefore often comparable across studies. This is the formula:
\begin{equation}
\mathrm{DFFITS}_k = \dfrac{\hat\mu-\hat\mu_{\setminus k}}{\sqrt{\dfrac{w_k^{(*)}}{\sum^{K}_{k=1}w_k^{(*)}}(s^2_k+\hat\tau^2_{\setminus k})}}
\end{equation}
For the computation, we also need $w_k^{(*)}$, the (random-effects) weight of study $k$ (Chapter \@ref(fem)), which is divided by the sum of weights to express the study weight in percent.
In general, the $\mathrm{DFFITS}$ value indicates how much the pooled effect changes when a study $k$ is removed, expressed in standard deviations. Again, higher values indicate that a study may be an influential case because its impact on the average effect is larger.
<br></br>
##### Cook's Distance
---
\index{Cook's Distance}
The Cook's distance value $D_k$ of a study can be calculated by a formula very similar to the one of the $\mathrm{DFFITS}$ value, with the largest difference being that for $D_k$, the difference of the pooled effect with and without $k$ is **squared**.
This results in $D_k$ only taking positive values. The pattern across studies, however, is often similar to the $\mathrm{DFFITS}$ value. Here is the formula:
\begin{equation}
D_k = \frac{(\hat\mu-\hat\mu_{\setminus k})^2}{\sqrt{s^2_k+\hat\tau^2}}.
(\#eq:het9)
\end{equation}
\vspace{1mm}
<br></br>
##### Covariance Ratio
---
The covariance ratio of a study $k$ can be calculated by dividing the variance of the pooled effect (i.e. its squared standard error) without $k$ by the variance of the initial average effect.
\begin{equation}
\mathrm{CovRatio}_k = \frac{\mathrm{Var}(\hat\mu_{\setminus k})}{\mathrm{Var}(\hat\mu)}
(\#eq:het10)
\end{equation}
A $\mathrm{CovRatio}_k$ value below 1 indicates that removing study $k$ results in a more precise estimate of the pooled effect size $\hat\mu$.
<br></br>
##### Leave-One-Out $\tau^2$ and $Q$ Values
---
\index{Cochran's \textit{Q}}
The values in this row are quite easy to interpret: they simply display the estimated heterogeneity, as measured by $\tau^2$ and Cochran's $Q$, when study $k$ is removed. Lower values of $Q$, and particularly of $\tau^2$, are desirable, since this indicates lower heterogeneity.
<br></br>
##### Hat Value and Study Weight
---
In the last row, we see the study weight and hat value of each study. We already covered the calculation and meaning of study weights extensively in Chapter \@ref(fem), so this measure does not need much more explanation. The hat value, on the other hand, is simply another metric that is equivalent to the study weight. The pattern of the hat values and weights will therefore be identical in our influence analyses.
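If you prefer to inspect these diagnostics as raw numbers instead of plots, they can also be obtained directly from **{metafor}**, whose `influence` function is used by `InfluenceAnalysis` internally (see the footnote below). This is only a sketch; `m.rma.tmp` is a hypothetical name for a quick `rma` model fitted to the same data:
```{r, eval=F}
library(metafor)

# Refit the meta-analysis with {metafor}, then compute the influence diagnostics
m.rma.tmp <- rma(yi = m.gen$TE, sei = m.gen$seTE, method = "REML")
influence(m.rma.tmp)
```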
All of these metrics provide us with a value which, if extreme, indicates that the study is an influential case, and may negatively affect the robustness of our pooled result. However, it is less clear when this point is reached. There is no strict rule which $\mathrm{DFFITS}$, Cook's distance or standardized residual value is **too** high. It is always necessary to evaluate the results of the influence analysis in the context of the research question to determine if it is indicated to remove a study.
Yet, there are a few helpful "rules of thumb" which can guide our decision. The `InfluenceAnalysis` function regards a study as an influential case if one of these conditions is fulfilled^[These conditions are derived from the "rules of thumb" used by the [`influence.rma.uni`](https://www.rdocumentation.org/packages/metafor/versions/2.4-0/topics/influence.rma.uni) function in **{metafor}**. `InfluenceAnalysis` applies this function "under the hood".]:
\begin{equation}
\mathrm{DFFITS}_k > 3\sqrt{\frac{1}{k-1}}
(\#eq:het11)
\end{equation}
\vspace{1mm}
\begin{equation}
D_k > 0.45
(\#eq:het12)
\end{equation}
\vspace{1mm}
\begin{equation}
\mathrm{hat}_k > 3\frac{1}{k}.
(\#eq:het13)
\end{equation}
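For our `m.gen` meta-analysis with $K$ = 18 studies, the first and third cut-off can be calculated directly (the Cook's distance threshold is the fixed value 0.45):
```{r}
# Influence cut-offs for K = 18 studies
3 * sqrt(1/(18 - 1))  # DFFITS
3 * (1/18)            # hat value
```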
Studies determined to be influential are displayed in red in the plot generated by the `InfluenceAnalysis` function.
In our example, this is only the case for "Dan", the "DanitzOrsillo" study. Yet, while only this study was defined as influential, there are actually **two** spikes in most plots. We could also decide to define “Sha” (Shapiro et al.) as an influential case because the values of this study are very extreme too.
So, we found that the studies “DanitzOrsillo” and “Shapiro et al.” might be influential. This is an interesting finding, as we selected the same studies based on the Baujat plot, and when only looking at statistical outliers.
This further corroborates that the two studies could have distorted our pooled effect estimate, and may have caused part of the between-study heterogeneity we found in our initial meta-analysis.
<br></br>
#### Leave-One-Out Meta-Analysis Results {#loo-ma}
---
\index{Leave-One-Out Method}
\index{Forest Plot}
Lastly, we can also plot the overall effect and $I^2$ heterogeneity of all meta-analyses that were conducted using the leave-one-out method. We can print two **forest plots** (a type of plot we will get to know better in Chapter \@ref(forest-R)), one sorted by the pooled effect size, and the other by the $I^2$ value of the leave-one-out meta-analyses. The code to produce the plots looks like this:
```{r, eval=F}
plot(m.gen.inf, "es")
plot(m.gen.inf, "i2")
```
```{r, out.width='100%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/forestesi2_col_sep.png')
```
In these two forest plots, we see the recalculated pooled effects, with one study omitted each time. In both plots, there is a shaded area with a dashed line in its center. This represents the 95% confidence interval of the original pooled effect size, and the estimated pooled effect itself.
The first plot is ordered by effect size (low to high). Here, we see how the overall effect estimate changes when different studies are removed. Since the two outlying and influential studies "DanitzOrsillo" and "Shapiro et al." have very high effect sizes, we find that the overall effect is smallest when they are removed.
The second plot is ordered by heterogeneity (low to high), as measured by $I^2$. This plot illustrates that the lowest $I^2$ heterogeneity is reached by omitting the studies "DanitzOrsillo" and "Shapiro et al.". This corroborates our finding that these two studies were the main "culprits" for the between-study heterogeneity we found in the meta-analysis.
All in all, the results of our outlier and influence analysis in this example point into the same direction. There are two studies which are likely influential outliers. These two studies may distort the effect size estimate, as well as its precision. We should therefore also conduct and report the results of a sensitivity analysis in which both studies are excluded.
<br></br>
### GOSH Plot Analysis {#gosh}
---
In the previous chapter, we explored the robustness of our meta-analysis using influence analyses based on the leave-one-out method. Another way to explore patterns of heterogeneity in our data is provided by so-called **Graphic Display of Heterogeneity** (GOSH) plots [@olkin2012gosh]. For these plots, we fit the same meta-analysis model to **all possible subsets** of our included studies. In contrast to the leave-one-out method, we therefore not only fit $K$ models, but one model for each of the $2^{K}-1$ possible study combinations.
This means that creating GOSH plots can become quite computationally expensive when the total number of studies is large. The _R_ implementation we cover here therefore only fits a maximum of 1 million randomly selected models.
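To get a feeling for how quickly the number of subsets grows, we can count them for a few values of $K$:
```{r}
# Number of possible (non-empty) study subsets
2^18 - 1   # K = 18 studies, as in m.gen
2^30 - 1   # K = 30 studies: already more than one billion
```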
Once the models are calculated, we can plot them, displaying the pooled effect size on the x-axis and the between-study heterogeneity on the y-axis. This allows us to look for specific patterns, for example clusters with different effect sizes and amounts of heterogeneity.
A GOSH plot with several distinct clusters indicates that there might be more than one effect size "population" in our data, warranting a subgroup analysis. If the effect sizes in our sample are homogeneous, on the other hand, the GOSH plot displays a roughly symmetric, homogeneous distribution.
\index{metafor Package}
To generate GOSH plots, we can use the `gosh` function in the **{metafor}** package. If you have not installed the package yet, do so now and then load it from the library.
```{r, message=F}
library(metafor)
```
Let us generate a GOSH plot for our `m.gen` meta-analysis object. To do that, we have to "transform" this object created by the **{meta}** package into a **{metafor}** meta-analysis object first, because only those can be used by the `gosh` function.
The function used to perform a meta-analysis in **{metafor}** is called `rma`. It is not very complicated to translate a **{meta}** object into an `rma` meta-analysis. We only have to provide the function with the effect size (`TE`), standard error (`seTE`), and between-study heterogeneity estimator (`method.tau`) stored in `m.gen`. We can specify that the Knapp-Hartung adjustment should be used by adding the argument `test = "knha"`.
We save the newly generated **{metafor}**-based meta-analysis under the name `m.rma`.
```{r, eval=F}
m.rma <- rma(yi = m.gen$TE,
sei = m.gen$seTE,
method = m.gen$method.tau,
test = "knha")
```
Please note that if you used the fixed-effect model in **{meta}**, it is not possible to simply copy `method.tau` to your `rma` call. Instead, you have to set the `method` argument in `rma` to `"FE"`.
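For illustration, such a fixed-effect translation might look like this (a sketch with a hypothetical object name; only needed if your **{meta}** analysis used the fixed-effect model):
```{r, eval=F}
m.rma.fe <- rma(yi = m.gen$TE,
                sei = m.gen$seTE,
                method = "FE")
```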
We can then use the `m.rma` object to generate the GOSH plot. Depending on the number of studies in your analysis, this can take some time, even up to a few hours. We save the results as `res.gosh`.
```{r, eval=F}
res.gosh <- gosh(m.rma)
```
```{r, echo=F, message=F}
load("data/res_gosh.rda")
```
We can then display the plot by plugging the `res.gosh` object into the `plot` function. The additional `alpha` argument controls how transparent the dots in the plot are, with 1 indicating that they are completely opaque. Because there are many, many data points in the graph, it makes sense to use a small alpha value to make it clearer where the values "pile up".
```{r, fig.align="center", fig.width = 3, fig.height=3, eval=F}
plot(res.gosh, alpha = 0.01)
```
```{r, out.width='50%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/gosh0_sep.png')
```
We see an interesting pattern in our data: while most values are concentrated in a cluster with relatively high effects and high heterogeneity, the distribution of $I^2$ values is heavily right-skewed and bi-modal. There seem to be some study combinations for which the estimated heterogeneity is much lower, but where the pooled effect size is also smaller, resulting in a shape with a "comet-like" tail.
\index{Machine Learning}
\index{dmetar Package}
Having seen the effect size$-$heterogeneity pattern in our data, the really important question is: which studies cause this shape? To answer this question, we can use the `gosh.diagnostics` function.
This function uses three clustering or **unsupervised machine learning** algorithms to detect clusters in the GOSH plot data. Based on the identified clusters, the function automatically determines which studies contribute most to each cluster. If we find, for example, that one or several studies are over-represented in a cluster with high heterogeneity, this indicates that these studies, alone or in combination, may **cause** the high heterogeneity.
```{block, type='boxdmetar'}
**The "gosh.diagnostics" function**
\vspace{4mm}
The `gosh.diagnostics` function is included in the **{dmetar}** package. Once **{dmetar}** is installed and loaded on your computer, the function is ready to be used. If you did **not** install **{dmetar}**, follow these instructions:
\vspace{2mm}
1. Access the source code of the function [online](https://raw.githubusercontent.com/MathiasHarrer/dmetar/master/R/gosh.diagnostics.R).
2. Let _R_ "learn" the function by copying and pasting the source code in its entirety into the console (bottom left pane of R Studio), and then hit "Enter".
3. Make sure that the **{gridExtra}**, **{ggplot2}**, **{fpc}** and **{mclust}** package are installed and loaded.
```
\index{Gaussian Mixture Model}
\index{DBSCAN}
\index{K-Means}
The `gosh.diagnostics` function uses three cluster algorithms to detect patterns in our data: the $k$-means algorithm [@hartigan1979algorithm], **density reachability and connectivity clustering**, or DBSCAN [@schubert2017dbscan], and **Gaussian mixture models** [@fraley2002model].
It is possible to tune some of the parameters of these algorithms. In the arguments `km.params`, `db.params` and `gmm.params`, we can add a list element which contains specifications controlling the behavior of each algorithm. In our example, we will tweak a few details of the $k$-means and DBSCAN algorithm. We specify that the $k$-means algorithm should search for two clusters ("centers") in our data. In `db.params`, we change the `eps`, or $\epsilon$ value used by DBSCAN. We also specify the `MinPts` value, which determines the minimum number of points needed for each cluster.
You can learn more about the parameters of the algorithms in the `gosh.diagnostics` documentation. There is no clear rule when which parameter specification works best, so it can be helpful to tweak details about each algorithm several times and see how this affects the results.
The code for our `gosh.diagnostics` call looks like this:
```{r, eval=F}
res.gosh.diag <- gosh.diagnostics(res.gosh,
km.params = list(centers = 2),
db.params = list(eps = 0.08,
MinPts = 50))
res.gosh.diag
```
```{r, echo=F}
load("data/res_gosh_diag.rda")
res.gosh.diag
```
In the output, we see the number of clusters that each algorithm has detected. Because each approach uses a different mathematical strategy to segment the data, it is normal that the number of clusters is not identical.
In the `Identification of potential outliers` section, we see that the procedure was able to identify three studies with a large impact on the cluster make-up: study 3, study 4 and study 16.
We can also plot the `gosh.diagnostics` object to inspect the results a little closer.
```{r, eval=F}
plot(res.gosh.diag)
```
```{r, out.width='100%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/gosh1_col_sep.png')
```
```{r, out.width='80%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/gosh2_col_sep.png')
```
\index{Cook's Distance}
This produces several plots. The first three plots display the clustering solution found by each algorithm and the amount of cluster imbalance pertaining to each study in each cluster. Based on this information, a Cook's distance value is calculated for each study, which is used to determine if a study might have a large impact on the detected cluster (and may therefore be an influential case).
The other plots show a GOSH plot again, but there are now shaded points which represent the analyses in which a selected study was included. For example, we see that nearly all results in which study 3 was included are part of a cluster with high heterogeneity values and higher effect sizes. Results in which study 4 was included vary in their heterogeneity, but generally show a somewhat **smaller** average effect. Results in which study 16 was included are similar to the ones found for study 3, but a little more dispersed.
Let us see what happens if we rerun the meta-analysis while removing the three studies that the `gosh.diagnostics` function has identified.
```{r, eval=F}
update.meta(m.gen, exclude = c(3, 4, 16)) %>%
summary()
```
```
## Review: Third Wave Psychotherapies
## SMD 95%-CI %W(random) exclude
## Call et al. 0.7091 [ 0.1979; 1.2203] 4.6
## Cavanagh et al. 0.3549 [-0.0300; 0.7397] 8.1
## DanitzOrsillo 1.7912 [ 1.1139; 2.4685] 0.0 *
## de Vibe et al. 0.1825 [-0.0484; 0.4133] 0.0 *
## Frazier et al. 0.4219 [ 0.1380; 0.7057] 14.8
## Frogeli et al. 0.6300 [ 0.2458; 1.0142] 8.1
## Gallego et al. 0.7249 [ 0.2846; 1.1652] 6.2
## Hazlett-Stevens & Oren 0.5287 [ 0.1162; 0.9412] 7.0
## Hintz et al. 0.2840 [-0.0453; 0.6133] 11.0
## Kang et al. 1.2751 [ 0.6142; 1.9360] 2.7
## Kuhlmann et al. 0.1036 [-0.2781; 0.4853] 8.2
## Lever Taylor et al. 0.3884 [-0.0639; 0.8407] 5.8
## Phang et al. 0.5407 [ 0.0619; 1.0196] 5.2
## Rasanen et al. 0.4262 [-0.0794; 0.9317] 4.7
## Ratanasiripong 0.5154 [-0.1731; 1.2039] 2.5
## Shapiro et al. 1.4797 [ 0.8618; 2.0977] 0.0 *
## Song & Lindquist 0.6126 [ 0.1683; 1.0569] 6.1
## Warnecke et al. 0.6000 [ 0.1120; 1.0880] 5.0
##
## Number of studies combined: k = 15
##
## SMD 95%-CI t p-value
## Random effects model 0.4819 [0.3595; 0.6043] 8.44 < 0.0001
## Prediction interval [0.3586; 0.6053]
##
## Quantifying heterogeneity:
## tau^2 < 0.0001 [0.0000; 0.0955]; tau = 0.0012 [0.0000; 0.3091];
## I^2 = 4.6% [0.0%; 55.7%]; H = 1.02 [1.00; 1.50]
##
## Test of heterogeneity:
## Q d.f. p-value
## 14.67 14 0.4011
## [...]
```
\index{Weight}
We see that studies number 3 and 16 are "DanitzOrsillo" and "Shapiro et al.". These two studies were also found to be influential in previous analyses. Study 4 is the one by "de Vibe". This study does not have a particularly extreme effect size, but its narrow confidence interval indicates that it has a **high weight**, despite its observed effect size being smaller than the average. This could explain why this study is also influential.
We see that removing the three studies has a large impact on the estimated heterogeneity. The value of $\tau^2$ nearly drops to zero, and the $I^2$ value is also very low, indicating that only 4.6% of the variability in effect sizes is due to true effect size differences. The pooled effect of $g$ = 0.48 is somewhat smaller than our initial estimate of $g=$ 0.58, but still within the same order of magnitude.
Overall, this indicates that the average effect we initially calculated is not **too** heavily biased by outliers and influential studies.
```{block2, type='boxreport'}
**Reporting the Results of Influence Analyses**
\vspace{2mm}
Let us assume we determined that "DanitzOrsillo", "de Vibe et al." and "Shapiro et al." are influential studies in our meta-analysis. In this case, it makes sense to also report the results of a sensitivity analysis in which these studies are excluded.
\vspace{2mm}
To make it easy for readers to see the changes associated with removing the influential studies, we can create a table in which both the original results, as well as the results of the sensitivity analysis are displayed. This table should at least include the pooled effect, its confidence interval and $p$-value, as well as a few measures of heterogeneity, such as prediction intervals and the $I^2$ statistic (as well as the confidence interval thereof).
\vspace{2mm}
It is also important to specify which studies were removed as influential cases, so that others understand on which data the new results are based. Below is an example of what such a table could look like for our `m.gen` meta-analysis from before:
<font size="2">
Analysis | $g$ | 95%CI | $p$ | 95%PI | $I^2$ | 95%CI |
-------------------------------------- | ------ | ---------- | ------- | ----------- | ------ | ------ |
Main Analysis | 0.58 | 0.38-0.78 | <0.001 | -0.06-1.22 | 63% | 39-78 |
Infl. Cases Removed<sup>1</sup> | 0.48 | 0.36-0.60 | <0.001 | 0.36-0.61 | 5% | 0-56 |
<sup>1</sup>Removed as outliers: DanitzOrsillo, de Vibe, Shapiro.
</font>
This type of table is very convenient because we can also add further rows with results of other sensitivity analyses. For example, if we conduct an analysis in which only studies with a low risk of bias (Chapter \@ref(data-extraction)) were considered, we could report the results in a third row.
```
$$\tag*{$\blacksquare$}$$
<br></br>
## Questions & Answers
```{block, type='boxquestion'}
**Test your knowledge!**
\vspace{4mm}
1. Why is it important to examine the between-study heterogeneity of a meta-analysis?
\vspace{-2mm}
2. Can you name the two types of heterogeneity? Which one is relevant in the context of calculating a meta-analysis?
\vspace{-2mm}
3. Why is the **significance** of Cochran's $Q$ test not a sufficient measure of between-study heterogeneity?
\vspace{-2mm}
4. What are the advantages of using prediction intervals to express the amount of heterogeneity in a meta-analysis?
\vspace{-2mm}
5. What is the difference between statistical outliers and influential studies?
\vspace{-2mm}
6. For what can GOSH plots be used?
\vspace{4mm}
**Answers to these questions are listed in [Appendix A](https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/qanda.html#qanda5) at the end of this book.**
```
<br></br>
## Summary
* In meta-analyses, we do not only have to pay attention to the pooled effect size, but also to the **heterogeneity** of the data on which this average effect is based. The overall effect does not capture that the true effects in some studies may differ substantially from our point estimate.
* Cochran's $Q$ is commonly used to quantify the variability in our data. Because we know that $Q$ follows a $\chi^2$ distribution, this measure allows us to detect if more variation is present than what can be expected based on sampling error alone. This **excess variability** represents true differences in the effect sizes of studies.
* A statistical test of $Q$, however, heavily depends on the type of data at hand. We should not only rely on $Q$ to assess the amount of heterogeneity. There are other measures, such as $I^2$, $\tau$ or prediction intervals, which may be used additionally.
* The average effect in a meta-analysis can be biased when there are **outliers** in our data. Outliers do not always have a large impact on the results of a meta-analysis. But when they do, we speak of **influential cases**.
* There are various methods to identify outlying and influential cases. If such studies are detected, it is advisable to recalculate our meta-analysis without them to see if this changes the interpretation of our results.
# Meta-Regression {#metareg}
---
<img src="_figs/airplanes.jpg" />
<br></br>
\index{Subgroup Analysis}
<span class="firstcharacter">I</span>
n the last chapter, we added subgroup analyses as a new method to our meta-analytic "toolbox". As we learned, subgroup analyses shift the focus of our analyses away from finding one overall effect. Instead, they allow us to investigate patterns of heterogeneity in our data, and what causes them.
\index{Meta-Regression}
We also mentioned that subgroup analyses are a special form of **meta-regression**. It is very likely that you have heard the term "regression" before. Regression analysis is one of the most common statistical methods and is used across various disciplines. In its simplest form, a regression model tries to use the value of some variable $x$ to predict the value of another variable $y$. Usually, regression models are based on data comprising individual persons or specimens, for which both the value of $x$ and the value of $y$ are measured.
\index{Sampling Error}
In meta-regression, this logic is applied to **entire studies**. The variable $x$ represents a characteristic of the studies, for example the year in which a study was conducted. Based on this information, a meta-regression model tries to predict $y$, the study's effect size. The fact that effect sizes are used as predicted variables, however, adds some complexity.
In Chapter \@ref(what-is-es), we already learned that observed effect sizes $\hat\theta$ can be more or less **precise** estimators of a study's true effect, depending on their standard error. In "normal" meta-analyses, we take this into account by giving studies a smaller or larger weight. In meta-regression, we also have to make sure that the model pays more attention to studies with a lower sampling error, since we can assume that their estimates are closer to the "truth".
\index{Mixed-Effects Model}
Meta-regression achieves this by assuming a **mixed-effects model**. This model accounts for the fact that observed studies deviate from the true overall effect due to sampling error and between-study heterogeneity. More importantly, however, it also uses one or more variables $x$ to predict differences in the true effect sizes. We already mentioned in the last chapter that subgroup analysis is also based on a mixed-effects model. In this chapter, we will delve a little deeper, and discuss why subgroup analysis and meta-regression are inherently related.
Meta-regression, although it has its own limitations, can be a very powerful tool in meta-analyses. It is also very versatile: **multiple meta-regression**, for example, allows us to include not only one, but several predictor variables, along with their interaction. In the second part of this chapter, we will therefore also have a look at multiple meta-regression, and how we can conduct one using _R_.
<br></br>
## The Meta-Regression Model {#the-metareg-model}
---
In the past, you may have already performed a regression using primary study data, where participants are the unit of analysis. In meta-analyses, the individual data of each participant is usually not available, and we can only resort to aggregated results. This is why we have to perform meta-regression with predictors on a **study level**.
It also means that, while we conduct analyses on samples much larger than usual for primary studies, it is still possible that we do not have enough data points for a meta-regression to be useful. In Chapter \@ref(limits-subgroup), we already covered that subgroup analyses often make no sense when $K<$ 10. Borenstein and colleagues [-@borenstein2011introduction, chapter 20] mention that this guideline may also be applied to meta-regression models, but that it should not be seen as an iron-clad rule.
In a conventional regression, we want to estimate the value $y_i$ of person $i$ using a **predictor** (or **covariate**) $x_i$ with a regression coefficient $\beta$. A standard regression equation, therefore, looks like this:
\begin{equation}
\hat{y_i} = \beta_0 + \beta_1x_i
(\#eq:mr1)
\end{equation}
In meta-regression, the variable $y$ we want to predict is the observed effect size $\hat\theta_k$ of study $k$. The formula for a **meta-regression** looks similar to the one of a normal regression model:
\begin{equation}
\hat\theta_k = \theta + \beta x_{k} + \epsilon_k+\zeta_k
(\#eq:mr2)
\end{equation}
Note that this formula contains two extra terms, $\epsilon_k$ and $\zeta_k$. The same terms can also be found in the equation for the random-effects-model (Chapter \@ref(rem)), and signify two types of independent errors. The first one, $\epsilon_k$, is the sampling error through which the effect size of a study deviates from its true effect.
The second error, $\zeta_k$, denotes that even the true effect size of the study is only sampled from an overarching distribution of effect sizes. This means that between-study heterogeneity exists in our data, which is captured by the heterogeneity variance $\tau^2$.
\index{Random-Effects Model}
\index{Fixed-Effect Model}
\index{Mixed-Effects Model}
Since the equation above includes a **fixed** effect (the $\beta$ coefficient) as well as a **random** effect ($\zeta_k$), the model used in meta-regression is often called a **mixed-effects model**. Conceptually, this model is identical to the mixed-effects model we described in Chapter \@ref(comparing-the-subgroup-effects), where we explained how subgroup analyses work.
<br></br>
### Meta-Regression With a Categorical Predictor
---
\index{Dummy Variable}
Indeed, as mentioned before, subgroup analysis is nothing else than a meta-regression with a categorical predictor. Such categorical variables can be included through **dummy-coding**, e.g.:
\begin{equation}
D_g=\begin{cases}
0: & \text{Subgroup A}\\
1: & \text{Subgroup B.}
\end{cases}
(\#eq:mr3)
\end{equation}
To specify a subgroup analysis in the form of a meta-regression, we simply have to replace the covariate $x_k$ with $D_g$:
\vspace{2mm}
\begin{equation}
\hat\theta_k = \theta + \beta D_g +\epsilon_k+\zeta_k.
(\#eq:mr4)
\end{equation}
\vspace{2mm}
To understand this formula, we have to read it from the left to the right. The goal of the meta-regression model, like every statistical model, is to explain how the observed data was generated. In our case, this is the observed effect size $\hat\theta_k$ of some study $k$ in our meta-analysis. The formula above works like a recipe, telling us which ingredients are needed to produce the observed effect.
First, we take $\theta$, which serves as the **intercept** in our regression model. The value of $\theta$ is identical to the true overall effect size of subgroup A. To see why this is the case, we need to look at the next "ingredient", the term $\beta D_g$. The value of $\beta$ in this term represents the effect size difference $\theta_{\Delta}$ between subgroup A and subgroup B. The value of $\beta$ is multiplied with $D_g$, which can be either 0 or 1, depending on whether the study is part of subgroup A ($D_g = 0$) or subgroup B ($D_g = 1$).
Because multiplying with zero gives zero, the $\beta D_g$ term completely falls out of the equation when we are dealing with a study in subgroup A. When $D_g=1$, on the other hand, we multiply by 1, meaning that $\beta$ remains in the equation and is added to $\theta$, which provides us with the overall effect size in subgroup B. Essentially, the dummy predictor is a way to integrate **two** formulas into **one**. We can easily see this when we write down the formula individually for each subgroup:
\vspace{2mm}
\begin{equation}
D_g=\begin{cases}
0: & \text{$\hat\theta_k = \theta_A + \epsilon_k+\zeta_k$}\\
1: & \text{$\hat\theta_k = \theta_A + \theta_{\Delta} +\epsilon_k+\zeta_k$}
\end{cases}
(\#eq:mr5)
\end{equation}
\vspace{2mm}
Written this way, it becomes clearer that our formula actually contains two models, one for subgroup A, and one for subgroup B. The main difference between the models is that the effect of the second subgroup is "shifted" up or down, depending on the value of $\beta$ (which we denote as $\theta_{\Delta}$ in the formula above).
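To make this more concrete, here is a minimal numeric sketch, using made-up values for $\theta$ and $\beta$, which shows how the dummy variable switches between the two subgroup formulas:
```{r, eval=F}
theta <- 0.30    # assumed true overall effect in subgroup A (the intercept)
beta  <- 0.20    # assumed effect size difference between subgroup B and subgroup A
D     <- c(0, 1) # dummy variable: 0 = subgroup A, 1 = subgroup B

theta + beta * D # predicted true effects: 0.30 (subgroup A) and 0.50 (subgroup B)
```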
```{r subgroups2, message = F, out.width = '70%', echo = F, fig.align='center', fig.cap='Meta-regression with a categorical predictor (subgroup analysis).'}
library(OpenImageR)
knitr::include_graphics('images/subgroups2_sep.png')
```
This should make it clear that subgroup analyses work just like a normal regression: they use some variable $x$ to predict the value of $y$, which, in our case, is the effect size of a study. The special thing is that $\beta x_k$ is not continuous--it is a fixed value we add to the prediction, depending on whether a study belongs to a certain subgroup or not. This fixed value of $\beta$ is the estimated difference in effect sizes between two subgroups.
<br></br>
### Meta-Regression With a Continuous Predictor {#metareg-continuous}
---
\index{Weight}
When people speak of a "meta-regression", however, they usually think of models in which a **continuous** variable was used as the predictor. This brings us back to the generic meta-regression formula shown in Equation 8.2. Here, the regression terms we discussed before are also used, but they serve a slightly different purpose. The term $\theta$ again stands for the intercept, but now represents the predicted effect size when $x = 0$.
To the intercept, the term $\beta x_k$ is added. This part produces a **regression slope**: the continuous variable $x$ is multiplied with the **regression weight** $\beta$, thus lowering or elevating the predicted effect for different values of the covariate.
The aim of the meta-regression model is to find values of $\theta$ and $\beta$ which minimize the difference between the **predicted** effect size, and the **true** effect size of studies (see Figure \@ref(fig:subgroups3)).
```{r subgroups3, message = F, out.width = '70%', echo = F, fig.align='center', fig.cap='Meta-regression with a continuous predictor and four studies.'}
library(OpenImageR)
knitr::include_graphics('images/subgroups3_sep.png')
```
Looking closely at the meta-regression formula, we see that it contains two types of terms. Some terms include a subscript $k$, while others do not. A subscript $k$ indicates that a value **varies** from study to study. When a term does not include a subscript $k$, this means that it stays the same for all studies.
```{r, message = F, out.width = '45%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/metareg_form_sep.png')
```
In a meta-regression, both $\theta$ and $\beta$ are invariable, or fixed. This tells us something important about what a meta-regression does: based on the variation in a predictor variable and the observed effects, it tries to "distill" a **fixed pattern** underlying our data, in the form of a **regression line**. If the meta-regression model fits the data well, the estimated parameters $\theta$ and $\beta$ can be used to predict the effect size of a study the model has **never seen before** (provided we know $x$).
Taking into account both the sampling error $\epsilon_k$ and between-study heterogeneity $\zeta_k$, meta-regression thus tries to find a model that **generalizes** well; not only to the observed effect sizes but to the "universe" of all possible studies of interest.
<br></br>
### Assessing the Model Fit {#metareg-model-fit}
---
An important detail about meta-regression models is that they can be seen as an extension of the "normal" random-effects model we use to pool effect sizes. The random-effects model is nothing but a meta-regression model **without a slope term**. Since it contains no slope, the random-effects model simply predicts the **same value** for each study: the estimate of the pooled effect size $\mu$, which is equivalent to the intercept.
\index{Ordinary Least Squares (OLS)}
\index{Weighted Least Squares (WLS)}
In the first step, the calculation of a meta-regression therefore closely resembles that of a random-effects meta-analysis, in that the between-study heterogeneity $\tau^2$ is estimated using one of the methods we described in Chapter \@ref(tau-estimators) (e.g. the DerSimonian-Laird or REML method). In the next step, the fixed coefficients $\theta$ and $\beta$ are estimated. Normal linear regression models use the **ordinary least squares** (OLS) method to find the regression line that fits the data best. In meta-regression, a modified method called **weighted least squares** (WLS) is used, which makes sure that studies with a smaller standard error are given a higher weight.
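The following lines show a purely conceptual sketch of this weighting logic, using `lm` with inverse-variance weights and made-up data. This is **not** how the WLS estimation is implemented in meta-analysis software; it only illustrates that studies with smaller standard errors pull the regression line more strongly towards their effect sizes.
```{r, eval=F}
yi   <- c(0.3, 0.5, 0.8, 0.4)      # hypothetical observed effect sizes
sei  <- c(0.10, 0.20, 0.30, 0.15)  # hypothetical standard errors
xi   <- c(1, 2, 3, 4)              # hypothetical predictor values
tau2 <- 0.05                       # assumed between-study heterogeneity estimate

# Weighted least squares: weight = inverse of (sampling variance + tau^2)
lm(yi ~ xi, weights = 1/(sei^2 + tau2))
```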
Once the optimal solution is found, we can check if the newly added regression term explains parts of the effect size heterogeneity. If the meta-regression model fits the data well, the true effect sizes should deviate less from the regression line compared to the pooled effect $\hat\mu$. If this is the case, the predictor $x$ **explains** some of the heterogeneity variance in our meta-analysis.
```{r, message = F, out.width = '100%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/rem_mem_sep.png')
```
The fit of the meta-regression model can therefore be assessed by checking how much of the heterogeneity variance it explains. The predictors included in the mixed-effects model should minimize the amount of the **residual**, or unexplained, heterogeneity variance, which we denote with $\tau^2_{\text{unexplained}}$.
In regression analyses, the $R^2$ index is commonly used to quantify the percentage of variation explained by the model. An analogous index, $R^2_{*}$, can also be calculated for meta-regression. We add an asterisk here to indicate that the $R^2$ in meta-regression is slightly different to the one used in conventional regressions, because we deal with **true effect sizes** instead of observed data points. The formula for $R^2_*$ looks like this:
\begin{equation}
R^2_* = 1- \frac{\hat\tau^2_{\text{unexplained}}}{\hat\tau^2_{\text{(total)}}}
(\#eq:mr6)
\end{equation}
$R^2_*$ uses the amount of residual heterogeneity variance that even the meta-regression slope cannot explain, and puts it in relation to the total heterogeneity that we initially found in our meta-analysis. Subtracting this fraction from 1 leaves us with the percentage of between-study heterogeneity explained by the predictor.
There is also another way to formulate $R^2_*$. We can say that it expresses how much the mixed-effects model has **reduced** the heterogeneity variance compared to the initial random-effects pooling model, in percent. This results in the following formula:
\begin{equation}
R^2_* = \frac{\hat\tau^2_{\text{REM}}-\hat\tau^2_{\text{MEM}}}{\hat\tau^2_{\text{REM}}}
(\#eq:mr7)
\end{equation}
In this formula, $\hat\tau^2_{\text{REM}}$ represents the amount of between-study heterogeneity found in the random-effects pooling model, and $\hat\tau^2_{\text{MEM}}$ represents the (residual) variance in the mixed-effects meta-regression model (i.e. the "prediction error" with respect to the true effect sizes).
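Using made-up heterogeneity estimates, we can quickly verify that both formulas lead to the same value of $R^2_*$:
```{r, eval=F}
tau2_total       <- 0.08   # assumed tau^2 of the random-effects model
tau2_unexplained <- 0.02   # assumed residual tau^2 of the mixed-effects model

1 - (tau2_unexplained / tau2_total)            # first formula: 0.75 (i.e. 75%)
(tau2_total - tau2_unexplained) / tau2_total   # second formula: also 0.75
```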
\index{Wald-Type Test}
Usually, we are not only interested in the amount of heterogeneity explained by the regression model, but also in whether the regression weight of our predictor $x$ is significant. If this is the case, we can be quite confident that $x$ has an influence on the effect sizes of studies. Both in conventional regression and in meta-regression, the significance of a regression weight is commonly assessed through a **Wald-type** test. This involves calculating the test statistic $z$ by dividing the estimate of $\beta$ by its standard error:
\begin{equation}
z = \frac{\hat\beta}{SE_{\hat\beta}}
(\#eq:mr8)
\end{equation}
Under the null hypothesis that $\beta = 0$, this $z$-statistic follows a standard normal distribution. This allows us to calculate a corresponding $p$-value, which determines if the predictor is significant or not.
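Using made-up numbers, this Wald-type test can be reproduced in a few lines of code:
```{r, eval=F}
b_hat <- 0.02    # assumed estimate of the regression weight
se_b  <- 0.007   # assumed standard error of the estimate

z <- b_hat / se_b                       # z-statistic
2 * pnorm(abs(z), lower.tail = FALSE)   # two-sided p-value under the null hypothesis
```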
However, a test based on the $z$-statistic is not the only way to assess the significance of predictors. Like in normal meta-analysis models, we can also use the Knapp-Hartung adjustment, which results in a test statistic based on the $t$-distribution (see Chapter \@ref(knapp-hartung)). As we learned previously, it is often advisable to use the Knapp-Hartung method, because it reduces the risk of false positives.
<br></br>
## Meta-Regression in _R_ {#metareg-R}
---
The **{meta}** package contains a function called `metareg`, which allows us to conduct a meta-regression. The `metareg` function only requires a **{meta}** meta-analysis object and the name of a covariate as input.
In this example, we will use our `m.gen` meta-analysis object again, which is based on the `ThirdWave` data set (see Chapter \@ref(pre-calculated-es)). Using meta-regression, we want to examine if the **publication year** of a study can be used to predict its effect size. By default, the `ThirdWave` data set does not contain a variable in which the publication year is stored, so we have to create a new `numeric` variable which contains this information. We simply concatenate the publication years of all studies, in the same order in which they appear in the `ThirdWave` data set. We save this variable under the name `year`^[The publication years we use in this example are made up, and only used for illustration purposes.].
```{r}
year <- c(2014, 1998, 2010, 1999, 2005, 2014,
2019, 2010, 1982, 2020, 1978, 2001,
2018, 2002, 2009, 2011, 2011, 2013)
```
Now, we have all the information we need to run a meta-regression. In the `metareg` function, we specify the name of our meta-analysis object `m.gen` as the first argument, and the name of our predictor, `year`, as the second argument. We give the results the name `m.gen.reg`.
```{r}
m.gen.reg <- metareg(m.gen, ~year)
```
Now, let us have a look at the results:
```{r, eval=F}
m.gen.reg
```
```
## Mixed-Effects Model (k = 18; tau^2 estimator: REML)
##
## tau^2 (estimated amount of residual heterogeneity): 0.019 (SE = 0.023)
## tau (square root of estimated tau^2 value): 0.1371
## I^2 (residual heterogeneity / unaccounted variability): 29.26%
## H^2 (unaccounted variability / sampling variability): 1.41
## R^2 (amount of heterogeneity accounted for): 77.08%
##
## Test for Residual Heterogeneity:
## QE(df = 16) = 27.8273, p-val = 0.0332
##
## Test of Moderators (coefficient 2):
## F(df1 = 1, df2 = 16) = 9.3755, p-val = 0.0075
##
## Model Results:
##
## estimate se tval pval ci.lb ci.ub
## intrcpt -36.15 11.98 -3.01 0.008 -61.551 -10.758 **
## year 0.01 0.00 3.06 0.007 0.005 0.031 **
##
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
```
\index{I$^2$, Higgins \& Thompson's}
Let us go through what we can see here. In the first line, the output tells us that a mixed-effects model has been fitted to the data, just as intended. The next few lines provide details on the amount of heterogeneity explained by the model. We see that the estimate of the residual heterogeneity variance, the variance that is not explained by the predictor, is $\hat\tau^2_{\text{unexplained}}=$ 0.019.
The output also provides us with an $I^2$ equivalent, which tells us that after inclusion of the predictor, 29.26% of the variability in our data can be attributed to the remaining between-study heterogeneity. In the normal random-effects meta-analysis model, we found that the $I^2$ heterogeneity was 63%, which means that the predictor was able to "explain away" a substantial amount of the differences in true effect sizes.
In the last line, we see the value of $R^2_*$, which in our example is 77%. This means that 77% of the difference in true effect sizes can be explained by the publication year, a value that is quite substantial.
The next section contains a `Test for Residual Heterogeneity`, which is essentially the $Q$-test we already got to know previously (see Chapter \@ref(cochran-q)). Now, however, we test if the heterogeneity not explained by the predictor is significant. We see that this is the case, with $p$ = 0.03. However, we know the limitations of the $Q$-test (Chapter \@ref(cochran-q)), and should therefore not rely too heavily on this result.
The next part shows the `Test of Moderators`. We see that this test is also significant ($p$ = 0.0075). This means that our predictor, the publication year, does indeed influence the studies' effect size.
The last section provides more details on the estimated regression coefficients. The first line shows the results for the intercept (`intrcpt`). This is the expected effect size (in our case: Hedges' $g$) when our predictor publication year is zero. In our example, this represents a scenario which is, arguably, a little contrived: it shows the predicted effect of a study conducted in the year 0, which is $\hat{g}=$ -36.15. This serves as yet another reminder that good statistical models do not have to be a perfect representation of reality; they just have to be **useful**.
The coefficient we are primarily interested in is the one in the second row. We see that the model's estimate of the regression weight for `year` is 0.01. This means that for every additional year, the effect size $g$ of a study is expected to rise by 0.01. Therefore, we can say that the effect sizes of studies have increased over time. The 95% confidence interval ranges from 0.005 to 0.031, showing that the effect is significant.
Importantly, we are also presented with the corresponding $t$-statistic for each regression coefficient (`tval`). This tells us that the Knapp-Hartung method was used to calculate the confidence interval and $p$-value. Since we also used this adjustment in our initial meta-analysis model, `metareg` automatically used it again here. Otherwise, $z$ values and Wald-type confidence intervals would have been provided.
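Since `metareg` returns a **{metafor}** model object, we could, for instance, use the `predict` function to obtain the effect size that our model predicts for a specific publication year. The line below is only meant as an illustration, using the year 2010 as an example:
```{r, eval=F}
# Predicted effect size (plus confidence and prediction interval) for a study from 2010
predict(m.gen.reg, newmods = 2010)
```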
\index{Bubble Plot}
The **{meta}** package allows us to visualize a meta-regression using the `bubble` function. This creates a **bubble plot**, which shows the estimated regression slope, as well as the effect size of each study. To indicate the weight of a study, the bubbles have different sizes, with a greater size representing a higher weight.
To produce a bubble plot, we only have to plug our meta-regression object into the `bubble` function. Because we also want study labels to be displayed, we set `studlab` to `TRUE`.
```{r, fig.width=8, fig.height=7, out.width="60%", fig.align="center", eval=F}
bubble(m.gen.reg, studlab = TRUE)
```
```{r bubble, fig.width=8, fig.height=7, out.width="60%", fig.align="center", echo=F}
par(bg="#FFFEFA")
bubble(m.gen.reg, studlab = TRUE)
```
\index{Risk of Bias}
For the sake of completeness, we can also try to repeat our subgroup analysis from the previous chapter (Chapter \@ref(subgroup-R)), but this time within a meta-regression framework. This means that we use the risk of bias assessment as a categorical predictor. Since the variable `RiskOfBias` is already included in the `ThirdWave` data set, we do not have to save this information in an additional object. It suffices to simply run the `metareg` function again, but this time, we use `RiskOfBias` as the second function argument.
```{r, eval=F}
metareg(m.gen, RiskOfBias)
```
```
## [...]
## R^2 (amount of heterogeneity accounted for): 15.66%
##
## Test for Residual Heterogeneity:
## QE(df = 16) = 39.3084, p-val = 0.0010
##
## Test of Moderators (coefficient 2):
## F(df1 = 1, df2 = 16) = 2.5066, p-val = 0.1329
##
## Model Results:
##
## estimate se tval pval ci.lb ci.ub
## intrcpt 0.76 0.15 5.00 0.0001 0.44 1.09 ***
## RiskOfBiaslow -0.29 0.18 -1.58 0.1329 -0.69 0.10
## [...]
```
In the output, we see that the value of $R^2_*$, with 15.66%, is considerably smaller than the one of `year`. Consistent with our previous results, we see that the risk of bias variable is not a significant effect size predictor ($p$ = 0.13).
Under `Model Results`, we see that `metareg` has automatically transformed `RiskOfBias` into a dummy variable. The estimate of the intercept, which represents the pooled effect of the "high risk" subgroup, is $g$=0.76. The estimate of the regression coefficient representing studies with a **low** risk of bias is -0.29.
To get the effect size for this subgroup, we have to add the regression weight to the intercept, which results in $g=$ 0.76 - 0.29 $\approx$ 0.47. These results are identical to the ones of a subgroup analysis which assumes a common estimate of $\tau^2$.
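If we save the meta-regression object, we could also let the `predict` function do this calculation for us. The sketch below refits the same model and stores it under the (arbitrarily chosen) name `m.gen.rob`; `newmods = c(0, 1)` then requests predictions for the "high" (0) and "low" (1) risk of bias categories:
```{r, eval=F}
m.gen.rob <- metareg(m.gen, RiskOfBias)

# Row 1: predicted effect for high risk of bias; row 2: low risk of bias
predict(m.gen.rob, newmods = c(0, 1))
```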
<br></br>
## Multiple Meta-Regression {#multiple-metareg}
---
\index{Risk of Bias}
Previously, we only considered the scenario in which we use **one** predictor $\beta x_k$ in our meta-regression model. In the example, we checked if the effect size of a study depends on the year it was published. But now, suppose that reported effect sizes also depend on the **prestige** of the scientific journal in which the study was published. We think that it might be possible that studies in journals with a high reputation report higher effects. This could be because prestigious journals are more selective and mostly publish studies with "ground-breaking" findings.
On the other hand, it is also plausible that journals with a good reputation generally publish studies of a **higher quality**. Maybe it is just the better study quality that is associated with higher effect sizes. So, to check if journal reputation is indeed associated with higher effects, we have to make sure that this relationship is not **confounded** by the fact that prestigious journals are more likely to publish high-quality evidence. This means we have to **control** for study quality when examining the relationship between journal prestige and effect size.
This, and many other research questions, can be dealt with using **multiple meta-regression**. In multiple meta-regression, we use several predictors instead of just one to explain variation in effects. To allow for multiple predictors, we need to modify our previous meta-regression formula (see Equation 8.2), so that it looks like this:
\begin{equation}
\hat \theta_k = \theta + \beta_1x_{1k} + ... + \beta_nx_{nk} + \epsilon_k + \zeta_k
(\#eq:mr10)
\end{equation}
This formula tells us that we can add $n-1$ more predictors $x$ to our meta-regression model, thus turning it into a multiple meta-regression. The three dots in the formula symbolize that, in theory, we can add as many predictors as desired. In reality, however, things are usually more tricky. In the following, we will discuss a few important pitfalls in multiple meta-regression, and how we can build models that are robust and trustworthy. But first, let us cover another important feature of multiple meta-regression, **interactions**.
<br></br>
### Interactions {#interact}
---
\index{Interaction (Regression)}
So far, we only considered the case where we have multiple predictor variables $x_1, x_2, \dots x_n$ in our model, which are added together along with their regression weights $\beta$. Multiple meta-regression models, however, are not only restricted to such **additive** relationships. They can also model predictor **interactions**. An interaction means that the **relationship** between one predictor (e.g. $x_1$) and the estimated effect size **changes** for different values of another covariate (e.g. $x_2$).
Imagine that we want to model two predictors and how they are associated with effect sizes: the publication year ($x_1$) and the quality ($x_2$) of a study. The study quality is coded like this:
\begin{equation}
x_2=\begin{cases}
0: & \text{low}\\
1: & \text{moderate}\\
2: & \text{high.}
\end{cases}
(\#eq:mr11)
\end{equation}
When we assume that there is no interaction between publication year and study quality, we can build a meta-regression model by giving both $x_1$ and $x_2$ a regression weight $\beta$, and **adding** the terms together in our formula:
\begin{equation}
\hat \theta_k = \theta + \beta_1x_{1k} + \beta_2x_{2k} + \epsilon_k + \zeta_k
(\#eq:mr12)
\end{equation}
But what if the relationship between $x_1$ and $x_2$ is more complex? It is possible, like in our previous example, that a more recent publication year is positively associated with higher effects. But not all studies must follow this trend. Maybe the increase is most pronounced among high-quality studies, while the results of low-quality studies stayed largely the same over time. We can visualize this assumed relationship between effect size ($\hat\theta_k$), publication year ($x_1$) and study quality ($x_2$) in the following way:
```{r metareg2, message = F, out.width = '60%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/metareg2_col_sep.png')
```
The graph shows a classic example of an interaction. We see that the steepness of the regression slope depends on the value of another predictor. While the slope for high-quality studies is very steep, indicating a strong relationship between year and effect, the situation is different for low-quality studies. The regression line in this subgroup is virtually horizontal, indicating that the publication year has no, or even a slightly negative effect on the results.
This example shows one of the strengths of interactions: they allow us to examine if the influence of a predictor is the same across all studies, or if it is moderated by another characteristic.
To assess interactions via meta-regression, we need to add an **interaction term** to the model. In our example, this can be achieved by adding a third regression weight $\beta_3$, which captures the interaction $x_{1k}x_{2k}$ we want to test in our model. This gives the following formula:
\begin{equation}
\hat \theta_k = \theta + \beta_1x_{1k} + \beta_2x_{2k} + \beta_3x_{1k}x_{2k}+ \epsilon_k + \zeta_k
(\#eq:mr13)
\end{equation}
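A small numeric sketch with made-up coefficients may help to see how the interaction term changes the slope. Here, the slope of $x_1$ (e.g. the publication year) becomes steeper the higher the value of $x_2$ (e.g. the study quality) is:
```{r, eval=F}
theta <- 0.2; b1 <- 0.05; b2 <- 0.10; b3 <- 0.15   # assumed regression coefficients
x1 <- 0:4                                          # hypothetical (centered) publication years

theta + b1*x1 + b2*0 + b3*x1*0   # low-quality studies (x2 = 0): shallow slope of 0.05
theta + b1*x1 + b2*2 + b3*x1*2   # high-quality studies (x2 = 2): steep slope of 0.35
```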
Although linear multiple meta-regression models only consist of these simple building blocks, they lend themselves to various applications. Before we start fitting multiple meta-regressions using _R_, however, we should first consider their limitations and pitfalls.
<br></br>
### Common Pitfalls in Multiple Meta-Regression {#limits-metareg}
---
Multiple meta-regression, while very useful when applied properly, comes with certain caveats. Some argue that (multiple) meta-regression is often improperly used and interpreted in practice, leading to a low validity of the results [@higgins2004controlling]. There are some points we have to keep in mind when fitting multiple meta-regression models, which we describe in the following.
<br></br>
#### Overfitting: Seeing A Signal Where There Is None
---
\index{Overfitting}
To better understand the risks of (multiple) meta-regression models, we have to understand the concept of **overfitting**. Overfitting occurs when we build a statistical model that fits the data **too** closely. In essence, this means that we build a statistical model which can predict the data **at hand** very well, but performs badly at predicting **future** data.
This happens when our model assumes that some variation in our data stems from a true "signal", when in fact we only capture random noise [@iniesta2016machine]. As a result, the model produces **false positive** results: it sees relationships where there are none.
```{r overfitting, message = F, out.width = '80%', echo = F, fig.align='center', fig.cap="Predictions of an overfitted model versus model with a robust fit."}
library(OpenImageR)
knitr::include_graphics('images/overfitting_col_sep.png')
```
\index{Ordinary Least Squares (OLS)}
\index{Maximum Likelihood}
For model fitting, regression utilizes **optimization** techniques such as ordinary least squares or maximum likelihood estimation. As we learned, meta-regression uses a weighted version of ordinary least squares (see Chapter \@ref(metareg-model-fit)), and is, therefore, no exception.
This "greedy" optimization, however, means that regression approaches can be prone to overfitting [@gigerenzer2004mindless]. Unfortunately, the risk of building a non-robust model is even higher once we go from conventional to meta-regression. There are several reasons for this [@higgins2004controlling]:
1. In meta-regression, the number of data points is usually small, since we can only use the aggregated information of the included studies.
2. Because meta-analysis aims to be a comprehensive overview of all available evidence, we have no additional data on which we can "test" how well our regression model can predict unseen data.
3. In meta-regression, we have to deal with the potential presence of effect size heterogeneity. Imagine a case in which we have two studies with different effect sizes and non-overlapping confidence intervals. Every variable which has different values for the two studies might then be a potential explanation for the effect size difference. Yet, it seems clear that most of these explanations will be spurious.
4. Meta-regression in general, and multiple meta-regression in particular, makes it very easy to "play around" with predictors. We can test numerous meta-regression models, include more predictors or remove them, in an attempt to explain the heterogeneity in our data. Such an approach is tempting and often found in practice, because meta-analysts want to find an explanation why effect sizes differ [@higgins2002statistical]. However, such behavior has been shown to massively increase the risk of spurious findings, because we can change parts of our model indefinitely until we find a significant model, which is then very likely to be overfitted (i.e. it mostly models statistical noise).
Some guidelines have been proposed to avoid an excessive false positive rate when building meta-regression models:
\index{Parsimony}
\index{Analysis Plan}
* Minimize the number of investigated predictors. In multiple meta-regression, this translates to the concept of **parsimony**: when evaluating the fit of a meta-regression model, we prefer models which achieve a **good** fit with **less** predictors. Estimators such as the Akaike and Bayesian information criterion can help with such decisions. We will show how to interpret these metrics in our hands-on example.
* Predictor selection should be based on predefined, scientifically relevant questions we want to answer in our meta-analysis. It is crucial to define already in the analysis plan (Chapter \@ref(analysis-plan)) which predictor (combination) will be included in the meta-regression model. It is not the end of the world if we decide to run a meta-regression that is not mentioned in our analysis plan. In this case, however, we should be honest and mention in our meta-analysis report that we decided to fit the model **after** seeing the data.
* When the number of studies is low (which is very likely to be the case), and we want to compute the significance of a predictor, we should use the Knapp-Hartung adjustment to obtain more robust estimates.
\index{Permutation}
* We can use **permutation** to assess the robustness of our model in resampled data. We will describe the details of this method later.
<br></br>
#### Multi-Collinearity
---
\index{Multi-Collinearity}
\index{Overfitting}
**Multi-collinearity** means that one or more predictors in our regression model can be predicted by another model predictor with high accuracy [@mansfiled1982detecting]. This typically means that we have two or more independent variables in our model which are highly correlated.
Most of the dangers of multi-collinearity are associated with the problem of overfitting. High collinearity can cause our predictor coefficient estimates $\hat\beta$ to behave erratically, and change considerably with minor changes in our data. It also limits the size of the explained variance by the model, in our case $R^2_*$.
Multi-collinearity in meta-regression is common [@berlin1994advantages]. Although multiple regression can handle low degrees of collinearity, we should check and, if necessary, control for very highly correlated predictors. There is no universally accepted yes/no rule for determining the presence of multi-collinearity.
A crude, but often effective way is to check for very high predictor correlations (i.e. $r \geq$ 0.8) before fitting the model. Multi-collinearity can then be reduced by either (1) removing one of the close-to-redundant predictors, or (2) trying to combine the predictors into one single variable.
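As a minimal sketch, such a check could look like this, using randomly generated predictors and the $r \geq$ 0.8 threshold mentioned above:
```{r, eval=F}
set.seed(123)
dat <- data.frame(x1 = rnorm(30), x2 = rnorm(30), x3 = rnorm(30))  # made-up predictors

cor_mat <- cor(dat)                                               # intercorrelation matrix
which(abs(cor_mat) >= 0.8 & upper.tri(cor_mat), arr.ind = TRUE)   # flag highly correlated pairs
```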
<br></br>
#### Model Fitting Approaches
---
When building a multiple meta-regression model, there are different approaches to select and include predictors. Here, we discuss the most important ones, along with their strengths and weaknesses:
* **Forced entry**. In forced entry methods, all relevant predictors are forced into the regression model simultaneously. For most functions in _R_, this is the default setting. Although this is a generally recommended procedure, keep in mind that all predictors to use via forced entry should still be based on a predefined, theory-led decision.
* **Hierarchical**. Hierarchical multiple regression means including predictors into our regression model step-wise, based on a clearly defined scientific rationale. First, only predictors which have been associated with effect size differences in previous research are included in the order of their importance. After this step, novel predictors can be added to explore if these variables explain heterogeneity which has not yet been captured by the known predictors.
\index{Step-Wise Regression}
* **Step-wise**. Step-wise entry means that variables/predictors are added to the model one after another. At first glance, this sounds a lot like hierarchical regression, but there is a crucial difference: step-wise regression methods select predictors based on a **statistical criterion**. In a procedure called **forward selection**, the variable explaining the largest amount of variability in the data is used as the first predictor. This process is then repeated for the remaining variables, each time selecting the variable which explains most of the residual unexplained variability in the data. There is also a procedure called **backward selection**, in which all variables are used as predictors in the model first, and then removed successively based on a predefined statistical criterion. There is an extensive literature discouraging the usage of step-wise methods [@chatfield1995model; @whittingham2006we]. If we recall the common pitfalls of multiple regression models we presented above, it becomes clear that these methods have high risk of producing overfitted models with spurious findings. Nevertheless, step-wise methods are still frequently used in practice, which makes it important to know that these procedures exist. If we use stepwise methods ourselves, however, it is advised to primarily do so in an exploratory fashion and to keep the limitations of this procedure in mind.
\index{Multi-Model Inference}
* **Multi-model inference**. Multi-model methods differ from step-wise approaches because they do not try to successively build the one "best" model explaining most of the variance. Instead, in this technique, **all** possible predictor combinations are modeled. This means that several different meta-regressions are created, and subsequently evaluated. This enables a full examination of all possible predictor combinations, and how they perform. A common finding is that there are many different specifications which lead to a good model fit. The estimated coefficients of predictors can then be pooled across all fitted models to infer how important certain variables are overall.
<br></br>
### Multiple Meta-Regression in _R_ {#multiple-metareg-R}
---
\index{meta Package}
\index{metafor Package}
After all this input, it is high time that we start fitting our first multiple meta-regression using _R_. The following examples will be the first ones in which we will not use the **{meta}** package. Instead, we will have a look at **{metafor}** [@urviecht]. This package provides a vast array of advanced functionality for meta-analysis, along with a great documentation^[In fact, to conduct a meta-regression, **{meta}** functions also use the **{metafor}** package internally.]. So, to begin, make sure you have **{metafor}** installed, and loaded from the library.
```{r, message=F}
library(metafor)
```
In our hands-on illustration, we will use the `MVRegressionData` data set. This is a "toy" data set, which we simulated for illustrative purposes.
\index{dmetar Package}
```{block, type='boxdmetar'}
**The "MVRegressionData" Data Set**
\vspace{2mm}
The `MVRegressionData` data set is included directly in the **{dmetar}** package. If you have installed **{dmetar}**, and loaded it from your library, running `data(MVRegressionData)` automatically saves the data set in your _R_ environment. The data set is then ready to be used. If you do not have **{dmetar}** installed, you can download the data set as an _.rda_ file from the [Internet](https://www.protectlab.org/meta-analysis-in-r/data/MVRegressionData.rda), save it in your working directory, and then click on it in your R Studio window to import it.
```
First, let us have a look at the structure of the data frame:
```{r, message=F}
library(tidyverse)
library(dmetar)
data(MVRegressionData)
glimpse(MVRegressionData)
```
We see that there are six variables in our data set. The `yi` and `sei` columns store the effect size and standard error of a particular study. These columns correspond with the `TE` and `seTE` columns we used before. We have named the variables this way because this is the standard notation that **{metafor}** uses: `yi` represents the observed effect size $y_i$ we want to predict in our (meta-)regression, while `sei` represents $SE_i$, the standard error of study $i$.
The other four variables are predictors to be used in the meta-regression. First, we have `reputation`, which is the (mean-centered) **impact factor** of the journal the study was published in. Impact factors quantify how often articles in a journal are cited, which we use as a proxy for the journal's prestige.
The other variables are `quality`, the quality of the study rated from 0 to 10, `pubyear`, the (centered and scaled) publication year, and `continent`, the continent on which the study was performed. All of these variables are continuous, except for `continent`. The latter is a categorical variable with two levels: Europe and North America.
<br></br>
#### Checking for Multi-Collinearity
---
\index{Multi-Collinearity}
As we mentioned before, we have to check for multi-collinearity of our predictors to make sure that the meta-regression coefficient estimates are robust. A quick way to check for high correlations is to calculate an **intercorrelation matrix** for all continuous variables. This can be done using the `cor` function:
```{r}
MVRegressionData[,c("reputation", "quality", "pubyear")] %>% cor()
```
\index{PerformanceAnalytics Package}
The **{PerformanceAnalytics}** package [@perfanalytics] contains a function called `chart.Correlation`, which we can use to visualize the correlation matrix. We have to install the **{PerformanceAnalytics}** package first, and then use this code:
```{r, eval = F}
library(PerformanceAnalytics)
MVRegressionData[,c("reputation", "quality", "pubyear")] %>%
chart.Correlation()
```
```{r, echo = F, message=F, fig.align="center", fig.width=5, fig.height=5, out.width="60%"}
library(PerformanceAnalytics)
MVRegressionData[,c("reputation", "quality", "pubyear")] %>%
chart.Correlation() %>%
suppressWarnings()
```
We see that our variables are indeed correlated, but probably not to a degree that would warrant excluding one of them.
<br></br>
#### Fitting a Multiple Meta-Regression Model
---
\index{metafor Package}
Now, we can fit our first meta-regression model using **{metafor}**. Previously, we wanted to explore if a high journal reputation predicts higher effect sizes, or if this is just an artifact caused by the fact that studies in prestigious journals have a higher quality.
Let us assume we already know very well, for example from previous research, that the quality of a study is predictive of its effect size. If this is the case, it makes sense to perform a hierarchical regression: we first include our known predictor `quality`, and then check if `reputation` explains heterogeneity beyond that. When this is true, we can say that journal reputation is indeed associated with higher effects, even if we **control** for the fact that studies in prestigious journals also tend to have a higher quality.
To do this, we use the `rma` function in **{metafor}**. This function runs a random-effects meta-analysis, which is extended to mixed-effects meta-regression models when moderators are added. The `rma` function can take countless arguments, which we can examine by running `?rma` in the _R_ console. Usually, however, we only need to specify a few of them:
* **`yi`**. The column in our data frame in which the effect size of each study is stored.
* **`sei`**. The column in our data frame in which the standard error of the effect size of each study is stored.
* **`data`**. The name of the data frame containing all our meta-analysis data.
* **`method`**. The $\tau^2$ estimator we want to use. The codes we can use for this argument are identical to the ones in **{meta}** (e.g. `"REML"` for restricted maximum likelihood). It is advisable to use `"ML"`, because this allows one to compare different meta-regression models later on.
* **`mods`**. This parameter defines our meta-regression model. First, we specify our model with `~` (a tilde). Then, we add the predictors we want to include, separating them with `+` (e.g. `variable1 + variable2`). Interactions between two variables are denoted by an asterisk (e.g. `variable1 * variable2`).
* **`test`**. The test we want to apply for our regression coefficients. We can choose from `"z"` (default) and `"knha"` (Knapp-Hartung method).
First, let us perform a meta-regression using only `quality` as a predictor. We save the results into an object called `m.qual`, and then inspect the output.
```{r, eval=F}
m.qual <- rma(yi = yi,
sei = sei,
data = MVRegressionData,
method = "ML",
mods = ~ quality,
test = "knha")
m.qual
```
```
## Mixed-Effects Model (k = 36; tau^2 estimator: ML)
##
## tau^2 (estimated amount of residual heterogeneity): 0.066 (SE = 0.023)
## tau (square root of estimated tau^2 value): 0.2583
## I^2 (residual heterogeneity / unaccounted variability): 60.04%
## H^2 (unaccounted variability / sampling variability): 2.50
## R^2 (amount of heterogeneity accounted for): 7.37%
##
## Test for Residual Heterogeneity:
## QE(df = 34) = 88.6130, p-val < .0001
##
## Test of Moderators (coefficient 2):
## F(df1 = 1, df2 = 34) = 3.5330, p-val = 0.0688
##
## Model Results:
##
## estimate se tval pval ci.lb ci.ub
## intrcpt 0.3429 0.1354 2.5318 0.0161 0.0677 0.6181 *
## quality 0.0356 0.0189 1.8796 0.0688 -0.0029 0.0740 .
##
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
```
In the output, we can inspect the results for our predictor `quality` under `Model Results`. We see that the regression weight is not significant ($p=$ 0.069), although it is significant on a trend level ($p<$ 0.1). In total, our model explains $R^2_*=$ 7.37% of the heterogeneity.
Now, let us see what happens when we include `reputation` as a predictor. We add `+ reputation` to our input to `mods` and this time, we save the output as `m.qual.rep`.
```{r, eval=F}
m.qual.rep <- rma(yi = yi,
sei = sei,
data = MVRegressionData,
method = "ML",
mods = ~ quality + reputation,
test = "knha")
m.qual.rep
```
```
## Mixed-Effects Model (k = 36; tau^2 estimator: ML)
##
## tau^2 (estimated amount of residual heterogeneity): 0.0238 (SE = 0.01)
## tau (square root of estimated tau^2 value): 0.1543
## I^2 (residual heterogeneity / unaccounted variability): 34.62%
## H^2 (unaccounted variability / sampling variability): 1.53
## R^2 (amount of heterogeneity accounted for): 66.95%
##
## Test for Residual Heterogeneity:
## QE(df = 33) = 58.3042, p-val = 0.0042
##
## Test of Moderators (coefficients 2:3):
## F(df1 = 2, df2 = 33) = 12.2476, p-val = 0.0001
##
## Model Results:
##
## estimate se tval pval ci.lb ci.ub
## intrcpt 0.5005 0.1090 4.5927 <.0001 0.2788 0.7222 ***
## quality 0.0110 0.0151 0.7312 0.4698 -0.0197 0.0417
## reputation 0.0343 0.0075 4.5435 <.0001 0.0189 0.0496 ***
##
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
```
We see that now, a new line appears in the `Model Results` section, displaying the results for our `reputation` predictor. The model estimated the regression weight to be 0.034, which is highly significant ($p$ < 0.001).
We also see that the meta-regression model as a whole explains a significant amount of heterogeneity, $R^2_*$ = 66.95% to be precise. This means that journal reputation is associated with higher effect sizes, even when controlling for study quality.
\index{Analysis of Variance}
\index{Maximum Likelihood}
\index{Restricted Maximum Likelihood Estimator}
But does our second model indeed provide a better fit than our first one? To assess this, we can use the `anova` function, providing it with the two models we want to compare. Note that this is only feasible because we fitted both mixed-effects models using maximum likelihood (`"ML"`) instead of restricted maximum likelihood (`"REML"`).
```{r, eval=F}
anova(m.qual, m.qual.rep)
```
```
## df AIC BIC AICc logLik LRT pval QE tau^2 R^2
## Full 4 19.86 26.19 21.15 -5.93 58.30 0.03
## Reduced 3 36.98 41.73 37.73 -15.49 19.11 <.0001 88.61 0.06 48.32%
```
This function performs a model test and provides us with several statistics to assess if `m.qual.rep` has a better fit than `m.qual`. We compare our full model, `m.qual.rep`, which includes both `quality` and `reputation`, with the reduced model, which only includes `quality`.
\index{Likelihood Ratio Test}
The `anova` function performs a **likelihood ratio test**, the results of which we can see in the `LRT` column. The test is highly significant ($\chi^2_1=$ 19.11, $p<$ 0.001), which means that our full model indeed provides a better fit.
\index{Akaike's Information Criterion}
Another important statistic is reported in the `AICc` column. This provides us with Akaike's information criterion (AIC), corrected for small samples. As we mentioned before, AICc penalizes complex models with more predictors to avoid overfitting.
It is important to note that lower values of AIC mean that a model performs better. In our output, we see that the full model (AICc = 21.15) has a better AIC value than our reduced model (AICc = 37.73), despite having more parameters. All of this suggests that our multiple regression model does indeed provide a good fit to our data.
<br></br>
#### Modeling Interactions
---
\index{Interaction (Regression)}
Let us say we want to model an interaction with our additional predictors `pubyear` (publication year) and `continent`. We assume that the relationship between publication year and effect size differs for European and North American studies. To model this assumption using the `rma` function, we have to connect our predictors with `*` in the `mods` parameter. Because we do not want to compare the models directly using the `anova` function, we use the `"REML"` (restricted maximum likelihood) $\tau^2$ estimator this time.
To facilitate the interpretation, we add factor labels to the `continent` variable in `MVRegressionData` before running the model.
```{r, eval=F}
# Add factor labels to 'continent'
# 0 = Europe
# 1 = North America
levels(MVRegressionData$continent) <- c("Europe", "North America")
# Fit the meta-regression model
m.qual.rep.int <- rma(yi = yi,
sei = sei,
data = MVRegressionData,
method = "REML",
mods = ~ pubyear * continent,
test = "knha")
m.qual.rep.int
```
```
## Mixed-Effects Model (k = 36; tau^2 estimator: REML)
##
## tau^2 (estimated amount of residual heterogeneity): 0 (SE = 0.01)
## tau (square root of estimated tau^2 value): 0
## I^2 (residual heterogeneity / unaccounted variability): 0.00%
## H^2 (unaccounted variability / sampling variability): 1.00
## R^2 (amount of heterogeneity accounted for): 100.00%
##
## Test for Residual Heterogeneity:
## QE(df = 32) = 24.8408, p-val = 0.8124
##
## Test of Moderators (coefficients 2:4):
## F(df1 = 3, df2 = 32) = 28.7778, p-val < .0001
##
## Model Results:
##
## estimate se tval pval ci.lb ci.ub
## intrcpt 0.38 0.04 9.24 <.0001 0.30 0.47 ***
## pubyear 0.16 0.08 2.01 0.0520 -0.00 0.33 .
## continentNorth America 0.39 0.06 6.05 <.0001 0.26 0.53 ***
## pubyear:continent 0.63 0.12 4.97 <.0001 0.37 0.89 ***
## North America
## [...]
```
The last line, `pubyear:continentNorth America`, contains the coefficient for our interaction term. Note that **{metafor}** automatically includes not only the interaction term, but also both `pubyear` and `continent` as "normal" lower-order predictors (as one should do).
Also note that, since `continent` is a factor, `rma` detected that this is a dummy-coded predictor, and used our category "Europe" as the $D_g$ = 0 baseline against which the North America category is compared. We see that our interaction term has a positive coefficient (0.63), and is highly significant ($p<$ 0.001).
This indicates that there is an increase in effect sizes in recent years, but that it is stronger in studies conducted in North America. We also see that the model we fitted explains $R^2_*$ = 100% of our heterogeneity.
This is because our data was simulated for illustrative purposes. In practice, you will hardly ever explain all of the heterogeneity in your data--in fact, one should rather be concerned if one finds such results in real-life data, as this might mean that we have overfitted our model.
<br></br>
#### Permutation Test
---
\index{Permutation}
**Permutation** is a mathematical operation in which we take a set containing numbers or objects, and iteratively draw elements from this set to put them in a sequential order. When we already have an ordered set of numbers, this equals a process in which we rearrange, or **shuffle**, the order of our data.
As an example, imagine we have a set $S$ containing three numbers: $S=\{1,2,3 \}$. One possible permutation of this set is $(2,1,3)$; another is $(3,2,1)$. We see that the permuted results both contain all three numbers from before, but in a different order.
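In _R_, random permutations like these can be generated with the `sample` function. Here is a minimal illustration (since the reshuffling is random, the output will differ from run to run):
```{r, eval=F}
S <- c(1, 2, 3)
# Draw two random permutations of S
sample(S) # e.g. 2 1 3
sample(S) # e.g. 3 2 1
```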
Permutation can also be used to perform **permutation tests**, which is a specific type of resampling method. Broadly speaking, resampling methods are used to validate the robustness of a statistical model by providing it with (slightly) different data sampled from the same source or generative process [@good2013permutation, chapter 3.1]. This is a way to better assess if the coefficients in our model indeed capture a true pattern underlying our data; or if we overfitted our model, thereby falsely assuming patterns in our data when they are in fact statistical noise.
Permutation tests do not require that we have a spare “test” data set on which we can evaluate how our meta-regression performs in predicting unseen effect sizes. For this reason, among others, permutation tests have been recommended to assess the robustness of our meta-regression models [@higgins2004controlling].
We will not go too much into the details of how a permutation test is performed for meta-regression models. The most important part is that we re-calculate the $p$-values of our model based on the test statistics obtained across all possible, or many randomly selected, permutations of our original data set.
The crucial indicator here is **how often** the test statistic we obtain from our permuted data is **equal to or greater** than our original test statistic. For example, if the test statistic is greater than or equal to the original one in 50 of 1000 permuted data sets, we get a $p$-value of $p$ = 0.05.
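Put into code, this logic looks roughly like the following conceptual sketch (with made-up values; this is not the actual implementation used by **{metafor}**):
```{r, eval=F}
# Test statistic in the original data (made-up value)
obs.stat <- 2.31
# Test statistics obtained in the permuted data sets
# (in practice, there would be hundreds or thousands of these)
perm.stat <- c(0.42, 2.54, 1.10, 0.73, 2.39)
# Permutation p-value: proportion of permuted statistics that are
# equal to or more extreme than the observed one
mean(abs(perm.stat) >= abs(obs.stat))
```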
To perform a permutation test on our meta-regression model, we can use **{metafor}**'s in-built `permutest` function. As an example, we recalculate the results of the `m.qual.rep` model we fitted before. We only have to provide the `permutest` function with the `rma` object. Be aware that the permutation test is computationally expensive, especially for large data sets. This means that the function might need some time to run.
```{r, eval=F}
permutest(m.qual.rep)
```
```
## Test of Moderators (coefficients 2:3):
## F(df1 = 2, df2 = 33) = 12.7844, p-val* = 0.0010
##
## Model Results:
##
## estimate se tval pval* ci.lb ci.ub
## intrcpt 0.4964 0.1096 4.5316 0.2240 0.2736 0.7193
## quality 0.0130 0.0152 0.8531 0.3640 -0.0179 0.0438
## reputation 0.0350 0.0076 4.5964 0.0010 0.0195 0.0505 ***
##
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
```
We again see our familiar output including the results for all predictors. Looking at the `pval*` column, we see that the $p$-value for the reputation predictor has increased from $p$ < 0.001 to $p_*$ = 0.001. This, however, is still highly significant, indicating that the effect of the predictor is robust.
It has been recommended to always use this permutation test before reporting the results of a meta-regression model [@higgins2004controlling].
```{block, type='boximportant'}
**Permutation Tests in Small Data Sets**
\vspace{4mm}
Please note that when the number of studies $K$ included in our model is small, conventionally used thresholds for statistical significance (i.e. $p$ < 0.05) may not be attainable at all.
For meta-regression models, a permutation test using `permutest` will only be able to reach statistical significance if $K$ > 4 [@viechtbauer2015comparison].
```
<br></br>
#### Multi-Model Inference {#multimodel-inference}
---
\index{Multi-Model Inference}
We already mentioned that one can also try to model all possible predictor combinations in a procedure called **multi-model inference**. This allows us to examine which possible predictor combination provides the best fit, and which predictors are the most important ones overall. To perform multi-model inference, we can use the `multimodel.inference` function^[For more information on this topic, you can also consult an informative [vignette](https://www.metafor-project.org/doku.php/tips:model_selection_with_glmulti_and_mumin), written by Wolfgang Viechtbauer as part of the **{metafor}** documentation.].
```{block, type='boxdmetar'}
**The "multimodel.inference" Function**
\vspace{4mm}
The `multimodel.inference` function is included in the **{dmetar}** package. Once **{dmetar}** is installed and loaded on your computer, the function is ready to be used. If you did **not** install **{dmetar}**, follow these instructions:
\vspace{2mm}
1. Access the source code of the function [online](https://raw.githubusercontent.com/MathiasHarrer/dmetar/master/R/mreg.multimodel.inference.R).
2. Let _R_ "learn" the function by copying and pasting the source code in its entirety into the console (bottom left pane of R Studio), and then hit "Enter".
3. Make sure that the **{metafor}**, **{ggplot2}** and **{MuMIn}** packages are installed and loaded.
```
In the function, the following parameters need to be specified:
* **`TE`**. The effect size of each study. Must be supplied as the name of the effect size column in the data set, in quotation marks (e.g. `TE = "effectsize"`).
* **`seTE`**. The standard error of the effect sizes. Must be supplied as the name of the standard error column in the data set (also in quotation marks, e.g. `seTE = "se"`).
* **`data`**. A data frame containing the effect size, standard error and meta-regression predictor(s).
* **`predictors`**. A concatenated array of characters specifying the predictors to be used for multi-model inference. Names of the predictors must be identical to the column names of the data frame supplied to `data`.
* **`method`**. Meta-analysis model to use for pooling effect sizes. `"FE"` is used for the fixed-effect model. Different estimators for the random-effects model are available, for example `"DL"`, `"SJ"`, `"ML"`, or `"REML"`. If `"FE"` is used, the `test` argument is automatically set to `"z"`, as the Knapp-Hartung method is not meant to be used with fixed-effect models. Default is `"REML"`.
* **`test`**. Method to use for computing test statistics and confidence intervals. Default is `"knha"`, which uses the Knapp-Hartung adjustment. Conventional Wald-type tests are calculated by setting this argument to `"z"`.
* **`eval.criterion`**. Evaluation criterion to apply to the fitted models. Can be either `"AICc"` (default; small sample-corrected Akaike's information criterion), `"AIC"` (Akaike’s information criterion) or `"BIC"` (Bayesian information criterion).
* **`interaction`**. When set to `FALSE` (default), no interactions between predictors are considered. Setting this parameter to `TRUE` means that all interactions are modeled.
Now, let us perform multi-model inference using all predictors in the `MVRegressionData` data set, but **without** interactions. Be aware that running the `multimodel.inference` function can take some time, especially if the number of predictors is large.
```{r, eval=F}
multimodel.inference(TE = "yi",
seTE = "sei",
data = MVRegressionData,
predictors = c("pubyear", "quality",
"reputation", "continent"),
interaction = FALSE)
```
```
## Multimodel Inference: Final Results
## --------------------------
## - Number of fitted models: 16
## - Full formula: ~ pubyear + quality + reputation + continent
## - Coefficient significance test: knha
## - Interactions modeled: no
## - Evaluation criterion: AICc
##
##
## Best 5 Models
## --------------------------
## [...]
## (Intrc) cntnn pubyr qulty rpttn df logLik AICc delta weight
## 12 + + 0.3533 0.02160 5 2.981 6.0 0.00 0.536
## 16 + + 0.4028 0.02210 0.01754 6 4.071 6.8 0.72 0.375
## 8 + + 0.4948 0.03574 5 0.646 10.7 4.67 0.052
## 11 + 0.2957 0.02725 4 -1.750 12.8 6.75 0.018
## 15 + 0.3547 0.02666 0.02296 5 -0.395 12.8 6.75 0.018
## Models ranked by AICc(x)
##
##
## Multimodel Inference Coefficients
## --------------------------
## Estimate Std. Error z value Pr(>|z|)
## intrcpt 0.38614661 0.106983583 3.6094006 0.0003069
## continentNorth America 0.24743836 0.083113174 2.9771256 0.0029096
## pubyear 0.37816796 0.083045572 4.5537402 0.0000053
## reputation 0.01899347 0.007420427 2.5596198 0.0104787
## quality 0.01060060 0.014321158 0.7402055 0.4591753
##
##
## Predictor Importance
## --------------------------
## model importance
## 1 pubyear 0.9988339
## 2 continent 0.9621839
## 3 reputation 0.9428750
## 4 quality 0.4432826
```
$$~$$
```{r, echo=F, fig.height=2, fig.width=6, out.width="70%", fig.align="center"}
load("data/mmi.rda")
mmi$predictor.importance.plot + geom_hline(aes(yintercept = 0.8), size = 2, color = "black")
```
\index{Akaike's Information Criterion}
There is a lot to see here, so let us go through the output step by step.
* **`Multimodel Inference: Final Results`**. This part of the output provides us with details about the fitted models. We see that all $2^4 = 16$ possible models have been fitted. We also see that the function used the corrected AIC (`AICc`) to compare the models.
* **`Best 5 Models`**. Displayed here are the five models with the lowest AICc, sorted from low to high. Predictors are shown in the columns of the table, and models in the rows. A number (the predictor's estimated regression weight) or a `+` sign (for categorical predictors) indicates that a predictor/interaction term was included in the model, while empty cells indicate that the predictor was omitted. We see that `TE ~ 1 + continent + pubyear + reputation` shows the best fit (AICc = 6.0). However, other predictor combinations come very close to this value, so it is hard to say which model is really the "best" one. Notably, all top five models contain the predictor `pubyear`, suggesting that this variable might be particularly important.
* **`Multimodel Inference Coefficients`**. Here, we can see the coefficients of all predictors, aggregated over all models in which they appear. We see that the coefficient estimate is largest for `pubyear` ($\hat\beta$ = 0.378), which corroborates our finding from before. Approximate confidence intervals can be obtained by subtracting and adding the value stored in `Std.Error`, multiplied by 1.96, from/to `Estimate` (see the short example after this list).
* **Model-averaged predictor importance plot**. In the plot, the averaged importance of each predictor across all models is displayed. We again see that `pubyear` is the most important predictor, followed by `continent`, `reputation`, and `quality`.
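To illustrate the confidence interval calculation mentioned in the coefficients bullet point above, here is a short sketch using the aggregated `pubyear` coefficient (values copied from the `Multimodel Inference Coefficients` output):
```{r, eval=F}
# Values copied from the multi-model inference output
estimate <- 0.37816796
std.error <- 0.083045572
# Approximate 95% confidence interval
c(lower = estimate - 1.96 * std.error,
  upper = estimate + 1.96 * std.error)
```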
```{block, type='boxinfo'}
**Limitations of Multi-Model Inference**
\vspace{2mm}
This example should make clear that multi-model inference can be a useful way to obtain a comprehensive look at which predictors are important for predicting differences in effect sizes.
\vspace{4mm}
Despite avoiding some of the problems of step-wise regression methods, please note that this method should still be seen as **exploratory**, and may be used when we have no prior knowledge on how our predictors are related to effect sizes in the research field we analyze.
If you decide to build a meta-regression model based on the results of multi-model inference, it is crucial to report this. This is because such a model is not based on an **a priori** hypothesis, but was built based on statistical properties in our sample.
```
$$\tag*{$\blacksquare$}$$
<br></br>
## Questions & Answers
```{block, type='boxinfo'}
**Test your knowledge!**
\vspace{4mm}
1. What is the difference between a conventional regression analysis as used in primary studies, and meta-regression?
\vspace{-2mm}
2. Subgroup analyses and meta-regression are closely related. How can the meta-regression formula be adapted for subgroup data?
\vspace{-2mm}
3. Which method is used in meta-regression to give individual studies a differing weight?
\vspace{-2mm}
4. What characteristics mark a meta-regression model that fits our data well? Which index can be used to examine this?
\vspace{-2mm}
5. When we calculate a subgroup analysis using meta-regression techniques, do we assume a separate or common value of $\tau^2$ in the subgroups?
\vspace{-2mm}
6. What are the limitations and pitfalls of (multiple) meta-regression?
\vspace{-2mm}
7. Name two methods that can be used to improve the robustness of (multiple) meta-regression models, and why they are helpful.
\vspace{4mm}
**Answers to these questions are listed in [Appendix A](https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/qanda.html#qanda8) at the end of this book.**
```
<br></br>
## Summary
* In meta-regression, we adapt conventional regression techniques to study-level data. Subgroup analyses can be seen as a special case of meta-regression with categorical predictors and a common estimate of $\tau^2$.
* The aim of a meta-regression model is to **explain** (parts of) the true effect size differences in our data (i.e. the between-study heterogeneity variance $\tau^2$). When a model fits the data well, the deviation of true effects from the regression line should be smaller than their initial deviation from the pooled effect. When this is the case, the unexplained, or residual, heterogeneity will be small. This is captured by the $R^2_*$ index, which tells us the percentage of heterogeneity variation explained by our model.
* In **multiple meta-regression**, two or more predictors are used in the same meta-regression model. It is also possible to test if the predictions of one variable change for different values of another, by introducing interaction terms.
* Although (multiple) meta-regression is very versatile, it is **not** without limitations. Multiple meta-regression makes it very easy to **overfit** models, meaning that random noise, rather than true relationships, is modeled. Multi-collinearity of predictors may also pose a threat to the validity of our model.
* There are several approaches to ensure that our meta-regression model is robust. We can, for example, only fit models based on a **predefined** theoretical rationale, or use permutation tests. Multi-model inference can be used as an exploratory approach. This method can point us to potentially important predictors and can be used to derive hypotheses to be tested in future research.
# Effect Size Calculation & Conversion {#es-calc}
---
<img src="_figs/effect_size_calculation.jpg" />
<br></br>
\index{meta Package}
A problem meta-analysts frequently face is that suitable "raw" effect size data cannot be extracted from all included studies. Most functions in the **{meta}** package, such as `metacont` (Chapter \@ref(pooling-smd)) or `metabin` (Chapter \@ref(pooling-or-rr)), can only be used when complete raw effect size data is available.
In practice, this often leads to difficulties. Some published articles, particularly older ones, do not report results in a way that allows us to extract the needed (raw) effect size data. It is not uncommon to find that a study reports the results of a $t$-test, one-way ANOVA, or $\chi^2$-test, but not the group-wise mean and standard deviation, or the number of events in the study conditions, that we need for our meta-analysis.
\index{esc Package}
The good news is that we can sometimes **convert** reported information into the desired effect size format. This makes it possible to include affected studies in a meta-analysis with **pre-calculated** data (Chapter \@ref(pre-calculated-es)) using `metagen`. For example, we can convert the results of a two-sample $t$-test to a standardized mean difference and its standard error, and then use `metagen` to perform a meta-analysis of pre-calculated SMDs. The **{esc}** package [@esc] provides several helpful functions which allow us to perform such conversions directly in _R_.
<br></br>
## Mean & Standard Error
---
\index{Mean, Arithmetic}
\index{Standardized Mean Difference}
\index{Hedges' \textit{g}}
When calculating SMDs or Hedges' $g$ from the mean and **standard error**, we can make use of the fact that the standard error of a mean is the standard deviation divided by the square root of the sample size; the standard deviation can therefore be recovered by multiplying the standard error by $\sqrt{n}$ [@thalheimer2002calculate]:
\begin{equation}
\text{SD} =\text{SE}\sqrt{n}
(\#eq:esc1)
\end{equation}
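For instance, plugging in the standard error and sample size of the first group used in the example below, the implied standard deviation would be:
```{r, eval=F}
# SD = SE * sqrt(n), with SE = 1.5 and n = 50 (group 1 in the example below)
1.5 * sqrt(50)
```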
We can calculate the SMD or Hedges' $g$ using the `esc_mean_se` function. Here is an example:
```{r}
library(esc)
esc_mean_se(grp1m = 8.5, # mean of group 1
grp1se = 1.5, # standard error of group 1
grp1n = 50, # sample in group 1
grp2m = 11, # mean of group 2
grp2se = 1.8, # standard error of group 2
grp2n = 60, # sample in group 2
es.type = "d") # convert to SMD; use "g" for Hedges' g
```
<br></br>
## Regression Coefficients
---
It is possible to calculate SMDs, Hedges' $g$ or a correlation $r$ from standardized or unstandardized regression coefficients [@lipsey2001practical, Appendix B]. For unstandardized coefficients, we can use the `esc_B` function in **{esc}**. Here is an example:
\index{Correlation}
```{r}
library(esc)
esc_B(b = 3.3, # unstandardized regression coefficient
sdy = 5, # standard deviation of predicted variable y
grp1n = 100, # sample size of the first group
grp2n = 150, # sample size of the second group
es.type = "d") # convert to SMD; use "g" for Hedges' g
```
\vspace{2mm}
```{r, eval=F}
esc_B(b = 2.9, # unstandardized regression coefficient
sdy = 4, # standard deviation of the predicted variable y
grp1n = 50, # sample size of the first group
grp2n = 50, # sample size of the second group
es.type = "r") # convert to correlation
```
```
## Effect Size Calculation for Meta Analysis
##
## Conversion: unstandardized regression coefficient
## to effect size correlation
## Effect Size: 0.3611
## Standard Error: 0.1031
## Variance: 0.0106
## Lower CI: 0.1743
## Upper CI: 0.5229
## Weight: 94.0238
## Fisher's z: 0.3782
## Lower CIz: 0.1761
## Upper CIz: 0.5803
```
\vspace{2mm}
Standardized regression coefficients can be transformed using `esc_beta`.
```{r}
esc_beta(beta = 0.32, # standardized regression coefficient
sdy = 5, # standard deviation of the predicted variable y
grp1n = 100, # sample size of the first group
grp2n = 150, # sample size of the second group
es.type = "d") # convert to SMD; use "g" for Hedges' g
```
```{r, eval= F}
esc_beta(beta = 0.37, # standardized regression coefficient
sdy = 4, # standard deviation of predicted variable y
grp1n = 50, # sample size of the first group
grp2n = 50, # sample size of the second group
es.type = "r") # convert to correlation
```
```
## Effect Size Calculation for Meta Analysis
##
## Conversion: standardized regression coefficient
## to effect size correlation
## Effect Size: 0.3668
## Standard Error: 0.1033
## Variance: 0.0107
## Lower CI: 0.1803
## Upper CI: 0.5278
## Weight: 93.7884
## Fisher's z: 0.3847
## Lower CIz: 0.1823
## Upper CIz: 0.5871
```
Please note that using regression coefficients in meta-analysis can be tricky, because we assume that the same model has been used in all studies. This is particularly problematic if coefficients are extracted from multiple regression models, because studies may have controlled for different co-variates in their models, which means that the $b$ values are not directly comparable.
<br></br>
## Correlations {#convert-corr}
---
\index{Correlation}
\index{Correlation, Point-Biserial}
For **equally** sized groups ($n_1=n_2$), we can use the following formula to derive the SMD from the **point-biserial** correlation [@lipsey2001practical, chapter 3].
\begin{equation}
r_{pb} = \frac{\text{SMD}}{\sqrt{\text{SMD}^2+4}} ~~~~~~~~
\text{SMD}=\frac{2r_{pb}}{\sqrt{1-r^2_{pb}}} (\#eq:esc2)
\end{equation}
A different formula has to be used for **unequally** sized groups [@aaron1998equating]:
\begin{align}
r_{pb} &= \frac{\text{SMD}}{\sqrt{\text{SMD}^2+\dfrac{(N^2-2N)}{n_1n_2}}} \notag \\
\text{SMD} &= \dfrac{r_{pb}}{\sqrt{(1-r^2)\left(\frac{n_1}{N}\times\left(1-\frac{n_1}{N}\right)\right)}} (\#eq:esc3)
\end{align}
To convert $r_{pb}$ to an SMD or Hedges’ $g$, we can use the `esc_rpb` function.
```{r}
library(esc)
esc_rpb(r = 0.25, # point-biserial correlation
grp1n = 99, # sample size of group 1
grp2n = 120, # sample size of group 2
es.type = "d") # convert to SMD; use "g" for Hedges' g
```
<br></br>
## One-Way ANOVAs
---
\index{Analysis of Variance}
We can also derive the SMD from the $F$-value of a one-way ANOVA with **two** groups. Such ANOVAs can be identified by looking at the **degrees of freedom**. In a one-way ANOVA with two groups, the degrees of freedom should always start with 1 (e.g. $F_{\text{1,147}}$=5.31).
The formula used for the transformation looks like this [based on @rosnow1996computing; @rosnow2000contrasts; see @thalheimer2002calculate]:
\begin{equation}
\text{SMD} = \sqrt{ F\left(\frac{n_1+n_2}{n_1 n_2}\right)\left(\frac{n_1+n_2}{n_1+n_2-2}\right)}
(\#eq:esc4)
\end{equation}
To calculate the SMD or Hedges' $g$ from $F$-values, we can use the `esc_f` function. Here is an example:
```{r}
esc_f(f = 5.04, # F value of the one-way anova
grp1n = 519, # sample size of group 1
grp2n = 528, # sample size of group 2
es.type = "g") # convert to Hedges' g; use "d" for SMD
```
<br></br>
## Two-Sample $t$-Tests
---
\index{Standardized Mean Difference}
An effect size expressed as a standardized mean difference can also be derived from an **independent** two-sample $t$-test value, using the following formula [@rosnow2000contrasts; @thalheimer2002calculate]:
\begin{equation}
\text{SMD} = \frac {t(n_1+n_2)}{\sqrt{(n_1+n_2-2)(n_1n_2)}}
(\#eq:esc5)
\end{equation}
In _R_, we can calculate the SMD or Hedges' $g$ from a $t$-value using the `esc_t` function. Here is an example:
```{r}
esc_t(t = 3.3, # t-value
grp1n = 100, # sample size of group1
grp2n = 150, # sample size of group 2
es.type="d") # convert to SMD; use "g" for Hedges' g
```
<br></br>
## $p$-Values
---
\index{P-Value}
At times, studies only report the effect size (e.g. a value of Cohen's $d$), the $p$-value of that effect, and nothing more. Yet, to pool results in a meta-analysis, we need a measure of the **precision** of the effect size, preferably the standard error.
In such cases, we must estimate the standard error from the $p$-value of the effect size. This is possible for effect sizes based on **differences** (i.e. SMDs), or **ratios** (i.e. risk or odds ratios), using the formulas by Altman and Bland [-@altman2011obtain]. These formulas are implemented in the `se.from.p` function in _R_.
\index{dmetar Package}
```{block, type='boxdmetar'}
**The "se.from.p" Function**
\vspace{4mm}
The `se.from.p` function is included in the **{dmetar}** package. Once **{dmetar}** is installed and loaded on your computer, the function is ready to be used. If you did **not** install **{dmetar}**, follow these instructions:
1. Access the source code of the function [online](https://raw.githubusercontent.com/MathiasHarrer/dmetar/master/R/SE_from_p.R).
2. Let _R_ "learn" the function by copying and pasting the source code in its entirety into the console (bottom left pane of R Studio), and then hit "Enter".
```
Assuming a study with $N=$ 71 participants, reporting an effect size of $d=$ 0.71 for which $p=$ 0.013, we can calculate the standard error like this:
```{r, eval=F}
library(dmetar)
se.from.p(0.71,
p = 0.013,
N = 71,
effect.size.type = "difference")
```
```
## EffectSize StandardError StandardDeviation LLCI ULCI
## 1 0.71 0.286 2.410 0.149 1.270
```
\vspace{2mm}
For a study with $N=$ 200 participants reporting an effect size of OR = 0.91 with $p=$ 0.38, the standard error is calculated this way:
```{r, message=F, warning=F, eval=F}
library(magrittr) # for pipe
se.from.p(0.91, p = 0.38, N = 200,
effect.size.type = "ratio") %>% t()
```
```
## [,1]
## logEffectSize -0.094
## logStandardError 0.105
## logStandardDeviation 1.498
## logLLCI -0.302
## logULCI 0.113
## EffectSize 0.910
## LLCI 0.739
## ULCI 1.120
```
When `effect.size.type = "ratio"`, the function automatically also calculates the **log-transformed** effect size and standard error, which are needed to use the `metagen` function (Chapter \@ref(pre-calculated-es)).
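Here is a minimal sketch of how such converted values might be used further: assuming we stored the log-transformed effect sizes and standard errors of several converted studies in a (hypothetical) data frame called `dat.conv`, they could be supplied to `metagen` like this:
```{r, eval=F}
library(meta)
# 'dat.conv' is a hypothetical data frame with one row per converted study,
# containing the columns 'logEffectSize' and 'logStandardError'
m.conv <- metagen(TE = logEffectSize,
                  seTE = logStandardError,
                  data = dat.conv,
                  sm = "OR")
```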
<br></br>
## $\chi^2$ Tests
---
\index{Odds Ratio}
To convert a $\chi^2$ statistic to an odds ratio, the `esc_chisq` function can be used (assuming that d.f. = 1; e.g. $\chi^2_1$ = 8.7). Here is an example:
```{r}
esc_chisq(chisq = 7.9, # chi-squared value
totaln = 100, # total sample size
es.type = "cox.or") # convert to odds ratio
```
<br></br>
## Number Needed To Treat {#nnt}
---
\index{Number Needed To Treat}
Effect sizes such as Cohen's $d$ or Hedges’ $g$ are often difficult to interpret from a practical standpoint. Imagine that we found an intervention effect of $g=$ 0.35 in our meta-analysis. How can we communicate what such an effect **means** to patients, public officials, medical professionals, or other stakeholders?
To make it easier for others to understand the results, meta-analyses also often report the **number needed to treat** (NNT). This measure is most commonly used in medical research. It signifies how many additional patients must receive the treatment under study to **prevent** one additional **negative event** (e.g. relapse) or **achieve** one additional **positive** event (e.g. symptom remission, response). If NNT = 3, for example, we can say that three individuals must receive the treatment to avoid one additional relapse case; or that three patients must be treated to achieve one additional case of reliable symptom remission, depending on the research question.
When we are dealing with binary effect size data, calculation of NNTs is relatively easy. The formula looks like this:
\begin{equation}
\text{NNT} = (p_{e_{\text{treat}}}-p_{e_{\text{control}}})^{-1}
(\#eq:esc6)
\end{equation}
In this formula, $p_{e_{\text{treat}}}$ and $p_{e_{\text{control}}}$ are the proportions of participants who experienced the event in the treatment and control group, respectively. These proportions are identical to the "risks" used to calculate the risk ratio (Chapter \@ref(rr)), and are also known as the **experimental group event rate** (EER) and **control group event rate** (CER). Given its formula, the NNT can also be described as the inverse of the (absolute) risk difference.
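Here is a brief illustration of this formula, using made-up event rates:
```{r, eval=F}
# Made-up event rates for illustration
p.treat <- 0.4    # EER: event rate in the treatment group
p.control <- 0.2  # CER: event rate in the control group
# Number needed to treat
1/(p.treat - p.control)
```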
Converting standardized mean differences or Hedges' $g$ to a NNT is more complicated. There are two commonly used methods:
\index{Area Under The Curve (AUC)}
* The method by **Kraemer and Kupfer** [-@kraemer2006size], which calculates the NNT from an **area under the curve** (AUC), defined as the probability that a patient in the treatment group has an outcome preferable to the one in the control group. This method allows us to calculate the NNT directly from an SMD or $g$ without any extra information.
* The method by **Furukawa and Leucht** calculates NNT values from SMDs using the CER, or a reasonable estimate thereof. Furukawa's method has been shown to be superior in estimating the true NNT value compared to the Kraemer & Kupfer method [@furukawa2011obtain]. If we can make reasonable estimates of the CER, Furukawa's method should therefore always be preferred.
When we use risk or odds ratios as effect size measures, NNTs can be calculated directly from **{meta}** objects using the `nnt` function. After running our meta-analysis using `metabin` (Chapter \@ref(pooling-or-rr)), we only have to plug the results into the `nnt` function. Here is an example:
```{r}
library(meta)
data(Olkin1995)
# Run meta-analysis with binary effect size data
m.b <- metabin(ev.exp, n.exp, ev.cont, n.cont,
data = Olkin1995,
sm = "RR")
nnt(m.b)
```
\vspace{2mm}
The `nnt` function provides the number needed to treat for different assumed CERs. The three lines show the result for the minimum, mean, and maximum CER in our data set. The mean CER estimate is the "typical" NNT that is usually reported.
It is also possible to use `nnt` with `metagen` models, as long as the summary measure `sm` is either `"RR"` or `"OR"`. For such models, we also need to specify the assumed CER in the `p.c` argument in `nnt`. Here is an example using the `m.gen_bin` meta-analysis object we created in Chapter \@ref(m-gen-bin):
```{r, echo=F}
load("data/m.gen_bin.rda")
m.gen_bin$print.subgroup.name = FALSE
```
```{r}
# Also show fixed-effect model results
m.gen_bin <- update.meta(m.gen_bin,
fixed = TRUE)
nnt(m.gen_bin,
p.c = 0.1) # Use a CER of 0.1
```
\vspace{4mm}
\index{dmetar Package}
Standardized mean differences or Hedges' $g$ can be converted to the NNT using the `NNT` function in **{dmetar}**.
```{block, type='boxdmetar'}
**The "NNT" Function**
\vspace{4mm}
If you did **not** install **{dmetar}**, follow these instructions:
\vspace{2mm}
1. Access the source code of the `NNT` function [online](https://raw.githubusercontent.com/MathiasHarrer/dmetar/master/R/NNT.R).
2. Let _R_ "learn" the function by copying and pasting the source code in its entirety into the console (bottom left pane of R Studio), and then hit "Enter".
```
To use the Kraemer & Kupfer method, we only have to provide the `NNT` function with an effect size (SMD or $g$). Furukawa's method is automatically used as soon as a `CER` value is supplied.
```{r}
NNT(d = 0.245)
NNT(d = 0.245, CER = 0.35)
```
```{block, type='boximportant'}
**A Number to be Treated with Care: Criticism of the NNT**
\vspace{2mm}
While common, usage of NNTs to communicate the results of clinical trials is not uncontroversial. Criticisms include that lay people often misunderstand it [despite purportedly being an "intuitive" alternative to other effect size measures, @christensen2006number]; and that researchers often calculate NNTs incorrectly [@mendes2017number].
\vspace{2mm}
Furthermore, it is not possible to calculate reliable standard errors (and confidence intervals) of NNTs, which means that they cannot be used in meta-analyses [@hutton2010misleading]. It is only possible to convert results to the NNT after pooling has been conducted using another effect size measure.
```
<br></br>
## Multi-Arm Studies {#pool-groups}
---
\index{Unit-of-Analysis Problem}
To avoid unit-of-analysis errors (Chapter \@ref(unit-of-analysis)), it is sometimes necessary to pool the mean and standard deviation of two or more trial arms before calculating a (standardized) mean difference. To pool continuous effect size data of two groups, we can use these equations:
\begin{align}
n_{\text{pooled}} &= n_1 + n_2 \\
m_{\text{pooled}} &= \frac{n_1m_1+n_2m_2}{n_1+n_2} \\
SD_{\text{pooled}} &= \sqrt{\frac{(n_1-1)SD^{2}_{1}+ (n_2-1)SD^{2}_{2}+\frac{n_1n_2}{n_1+n_2}(m^{2}_1+m^{2}_2-2m_1m_2)} {n_1+n_2-1}}
\end{align}
We can apply this formula in _R_ using the `pool.groups` function.
```{block, type='boxdmetar'}
**The "pool.groups" Function**
\vspace{4mm}
The `pool.groups` function is included in the **{dmetar}** package. Once **{dmetar}** is installed and loaded on your computer, the function is ready to be used. If you did **not** install **{dmetar}**, follow these instructions:
1. Access the source code of the function [online](https://raw.githubusercontent.com/MathiasHarrer/dmetar/master/R/pool.groups.R).
2. Let _R_ "learn" the function by copying and pasting the source code in its entirety into the console (bottom left pane of R Studio), and then hit "Enter".
```
Here is an example:
```{r}
library(dmetar)
pool.groups(n1 = 50, # sample size group 1
n2 = 50, # sample size group 2
m1 = 3.5, # mean group 1
m2 = 4, # mean group 2
sd1 = 3, # sd group 1
sd2 = 3.8) # sd group2
```
<br></br>
## Aggregation of Effect Sizes {#aggregate-es}
---
The `aggregate` function in **{metafor}** can be used to aggregate several dependent, **pre-calculated effect sizes** into one estimate, for example because they are part of the same study or cluster. This is a way to avoid the **unit-of-analysis error** (see Chapter \@ref(unit-of-analysis)), but it requires us to assume a value for the within-study correlation, which is typically unknown. Another (and often preferable) way to deal with effect size dependencies is to use (correlated) hierarchical models, which are illustrated in Chapter \@ref(multilevel-ma).
In this example, we aggregate effect sizes of the `Chernobyl` data set (see Chapter \@ref(multilevel-R)), so that each study only provides one effect size:
```{r, eval=F}
library(metafor)
library(dmetar)
data("Chernobyl")
# Convert 'Chernobyl' data to 'escalc' object
Chernobyl <- escalc(yi = z, # Effect size
sei = se.z, # Standard error
data = Chernobyl)
# Aggregate effect sizes on study level
# We assume a correlation of rho=0.6
Chernobyl.agg <- aggregate(Chernobyl,
cluster = author,
rho = 0.6)
# Show aggregated results
Chernobyl.agg[,c("author", "yi", "vi")]
```
```
## author yi vi
## 1 Aghajanyan & Suskov (2009) 0.2415 0.0079
## 2 Alexanin et al. (2010) 1.3659 0.0012
## 3 Bochkov (1993) 0.2081 0.0014
## 4 Dubrova et al. (1996) 0.3068 0.0132
## 5 Dubrova et al. (1997) 0.4453 0.0110
## [...]
```
Please note that `aggregate` returns the aggregated effect sizes `yi` as well as their *variance* `vi`, the square root of which is the standard error.
$$\tag*{$\blacksquare$}$$
# Doing Meta-Analysis with R: A Hands-On Guide
------------------------------------------------------------------------
[](https://twitter.com/MathiasHarrer) [](https://twitter.com/pimcuijpers) [](https://twitter.com/Toshi_FRKW) [](https://twitter.com/DDEbert)
<a href="https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/" target="_blank"><img src="images/cover.png" class="cover" width="250" align="right"/></a> Welcome to the GitHub repository of <a href="https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/" target="_blank"><strong>"Doing Meta-Analysis with R: A Hands-On Guide"</strong></a>.
This book serves as an accessible introduction to how meta-analyses can be conducted in *R*. Essential steps for meta-analysis are covered, including pooling of outcome measures, forest plots, heterogeneity diagnostics, subgroup analyses, meta-regression, methods to control for publication bias, risk of bias assessments and plotting tools.
Advanced, but highly relevant topics such as network meta-analysis, multi-/three-level meta-analyses, Bayesian meta-analysis approaches, and SEM meta-analysis are also covered.
The programming and statistical background covered in the book is kept at a **non-expert level**. A **print version** of this book has been published with [Chapman & Hall/CRC Press](https://www.routledge.com/Doing-Meta-Analysis-with-R-A-Hands-On-Guide/Harrer-Cuijpers-Furukawa-Ebert/p/book/9780367610074) (Taylor & Francis).
<br></br>
## Open Source Repository
------------------------------------------------------------------------
The book has been built using [**{rmarkdown}**](https://rmarkdown.rstudio.com/docs/) and [**{bookdown}**](https://bookdown.org/). Formulas are rendered using [MathJax](http://docs.mathjax.org/en/latest/index.html). All materials and source code we used to compile the guide can be found in this repository. You are free to fork, share and reuse contents. However, the repository is intended to be mainly "read-only"; PRs will generally not be considered (see section below & [preface](https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/preface.html#contact-us) of the book for ways to contact us).
<br></br>
## Contributing
------------------------------------------------------------------------
This guide is an open source project, and we owe special thanks to our expert contributors who provided additional content in some of the sections of this guide.
- [**Luke A. McGuinness**](https://twitter.com/mcguinlu), University of Bristol: Chapter 15, Risk of Bias Plots.
Want to contribute to this guide yourself? Feel free to send **Mathias** ([mathias.harrer\@fau.de](mailto:[email protected]){.email}) an E-mail and tell us about your proposed additions.
<br></br>
## Citing the Guide
------------------------------------------------------------------------
The suggested citation is:
```{block, type='boxempty'}
Harrer, M., Cuijpers, P., Furukawa, T.A., & Ebert, D.D. (2021). _Doing Meta-Analysis with R: A Hands-On Guide_. Boca Raton, FL and London: Chapman & Hall/CRC Press. ISBN 978-0-367-61007-4.
```
Download the reference as [BibTeX](https://www.protectlab.org/meta-analysis-in-r/data/citation.bib) or [.ris](https://www.protectlab.org/meta-analysis-in-r/data/citation.ris).
<br></br>
## Cite the Packages
------------------------------------------------------------------------
In the guide, we present and use various *R* packages. The reason why all of us can use these packages for free is because experts all around the world have devoted enormous time and effort to their development, typically without pay. If you use some of the packages mentioned in this book for your own meta-analysis, we strongly encourage you to also cite them in your report.
In the guide, every time a new package is introduced, we also provide the reference through which it can be cited. It is also possible to run `citation("package")` to retrieve the preferred reference. Thanks!
<br></br>
book_filename: "Doing_Meta_Analysis_in_R"
language:
ui:
chapter_name: "Chapter "
delete_merged_file: true
# (PART) Getting Started {-}
# Introduction {#intro}
---
<img src="_figs/balloon.jpg" alt="balloons" />
<br></br>
<span class="firstcharacter">S</span>
cience is generally assumed to be a cumulative process. In their scientific endeavors, researchers build on the evidence compiled by generations of scientists who came before them. A famous quote by Isaac Newton stresses that if we want to see further, we can do so by standing on the "shoulders of giants". Many of us are fascinated by science **because** it is progressive, furthering our understanding of the world, and helping us to make better decisions.
At least by the numbers alone, this sentiment may be justified. Never in history did we have access to more evidence in the form of published research articles than we do today. Petabytes of research findings are produced every day all around the world. In biomedicine alone, more than one million peer-reviewed articles are published each year [@bjork2008global].
The amount of published research findings is also increasing almost exponentially. The number of articles indexed for each year in one of the largest bibliographical databases, [**PubMed**](https://pubmed.ncbi.nlm.nih.gov/), symbolizes this in an exemplary fashion. Until the middle of the 20<sup>th</sup> century, only a few hundred research articles are listed for each year. These numbers rise substantially for the following decades, and since the beginning of the 21<sup>st</sup> century, they skyrocket (see Figure \@ref(fig:pubmed)).
```{r, echo=FALSE, message=FALSE, warning=FALSE}
F <- FALSE
```
```{r pubmed, fig.cap='Articles indexed in PubMed by year, 1781-2019', echo=F, message=F, warning = FALSE, fig.align='center', fig.width=4.5, fig.height=3}
library(ggplot2)
options(scipen = 999)
F <- FALSE
read.csv("data/pubmed.csv", skip = 1)[-1,] -> pubmed
ggplot(pubmed, aes(x = Year, y = Count)) +
geom_line() +
geom_area(fill = "lightgrey") +
theme_minimal() +
scale_y_continuous(breaks = seq(0, 125e+4, by = 25e+4)) +
xlab("") + ylab("") +
theme(panel.background = element_rect(fill = "#FFFEFA",
size = 0),
plot.background = element_rect(fill = "#FFFEFA",
size = 0))
```
In principle, this development should make us enthusiastic about the prospects of science. If science is cumulative, more published research equals more evidence. This should allow us to build more powerful theories, and to dismantle fallacies of the past.
Yet, of course, it is not that easy. In a highly influential paper, John Ioannidis of Stanford criticized the notion that science is automatically cumulative and constantly improving. His article has the fitting title "Why Science Is Not Necessarily Self-Correcting" [@ioannidis2012science]. He argues that research fields can often exist in a state where an immense research output is produced on a particular topic or theory, but where fundamental fallacies remain unchallenged and are only perpetuated.
Back in the 1970s, the brilliant psychologist Paul Meehl already observed that in some research disciplines, there is a close resemblance between theories and fashion trends. Many theories, Meehl argued, are not continuously improved or refuted, they simply "fade away" when people start to lose interest in them [@meehl1978theoretical].
It is an inconvenient truth that the scientific process, when left to its own devices, will not automatically move us to the best of all possible worlds. With unprecedented amounts of research findings produced each day, it is even more important to view and critically appraise bodies of evidence **in their entirety**. Meta-analysis can be enormously helpful in achieving this, as long as we acknowledge its own limitations and biases.
<br></br>
## What Are Meta-Analyses? {#what-are-mas}
---
\index{Review, Systematic}
\index{Review, Narrative}
One of its founding fathers, Gene V. Glass, described meta-analysis as an "analysis of analyses" [@glass1976primary]. This simple definition already tells us a lot. In conventional studies, the units of analysis are a number of people, specimens, countries, or objects. In meta-analysis, **primary studies** themselves become the elements of our analysis.
The aim of meta-analysis is to combine, summarize and interpret all available evidence pertaining to a clearly defined research field or research question [@lipsey2001practical, chapter 1]. However, it is only one method to do this. There are at least three distinct ways through which evidence from multiple studies can be synthesized [@cuijpers2016meta].
* **Traditional/Narrative Reviews**. Until way into the 1980s, **narrative reviews** were the most common way to summarize a research field. Narrative reviews are often written by experts and authorities of a research field. There are no strict rules on how studies in a narrative review have to be selected, and how to define the scope of the review. There are also no fixed rules on how to draw conclusions from the reviewed evidence. Overall, this can lead to biases favoring the opinion of the author. Nevertheless, narrative reviews, when written in a balanced way, can be a helpful way for readers to get an overall impression of the relevant research questions and evidence base of a field.
* **Systematic Reviews**. Systematic reviews try to summarize evidence using clearly defined and transparent rules. In systematic reviews, research questions are determined beforehand, and there is an explicit, reproducible methodology through which studies are selected and reviewed. Systematic reviews aim to cover **all** available evidence. They also assess the validity of evidence using predefined standards and present a synthesis of outcomes in a systematic way.
* **Meta-Analyses**. Most meta-analyses can be seen as an advanced type of a systematic review. The scope of meta-analyses is clearly defined beforehand, primary studies are also selected in a systematic and reproducible way, and there are also clear standards through which the validity of the evidence is assessed. This is why it is common to find studies being named a "systematic review **and** meta-analysis". However, there is one aspect which makes meta-analyses special. Meta-analyses aim to combine results from previous studies in a **quantitative** way. The goal of meta-analyses is to integrate quantitative outcomes reported in the selected studies into one numerical estimate. This estimate then summarizes all the individual results. Meta-analyses quantify, for example, the effect of a medication, the prevalence of a disease, or the correlation between two properties, **across all studies**^[This statement is of course only true if meta-analytic techniques were applied soundly, and if the results of the meta-analysis allow for such generalizations.]. They can therefore only be applied to studies which report quantitative results. Compared to systematic reviews, meta-analyses often have to be more exclusive concerning the kind of evidence that is summarized. To perform a meta-analysis, it is usually necessary that studies used the same design and type of measurement, and/or delivered the same intervention (see Chapter \@ref(pitfalls)).
```{block, type='boxinfo'}
**Individual Participant Data Meta-Analysis**
Depending on the definition, there is also a fourth type of evidence synthesis method, so called **Individual Participant Data (IPD) Meta-Analysis** [@riley2010meta; @riley2021individual]. Traditionally, meta-analyses are based on **aggregated** results of studies that are found in the published literature (e.g., means and standard deviations, or proportions). In IPD meta-analysis, the **original** data of all studies is collected instead and combined into one big data set.
IPD meta-analysis has several advantages. For example, it is possible to impute missing data and apply statistical methods in exactly the same way across all studies. Furthermore, it can make it easier to explore variables which influence the outcome of interest. In traditional meta-analyses, only so-called **study-level** variables (e.g., the year of publication, or the population used in the study) can be used to do this. However, it is often **participant-level** information (e.g. an individual person's age or gender) that may play a role as an important moderator of the results. Such variables can only be explored using IPD meta-analysis.
IPD meta-analysis is a relatively new method, and the overwhelming majority of meta-analyses conducted today remain "traditional" meta-analyses. This is also one reason why we will not cover IPD meta-analysis methods in this guide.
This has nothing to do with traditional meta-analysis being superior--the opposite is correct. It is simply due to the fact that making all research data openly available has unfortunately been very uncommon in most disciplines until recently. While it is relatively easy to extract summarized results from published research reports, obtaining original data from all relevant studies is much more challenging. In biomedical research, for example, it has been found that studies which considered both individual participant and aggregate data could only obtain IPD from approximately 64% of the eligible studies [@riley2007evidence]. A more recent review found that, while the median number of studies included in IPD meta-analyses was eleven, individual participant data could only be obtained from a median of seven studies [@wang2021methodological].
```
<br></br>
## "Exercises in Mega-Silliness": A Historical Anecdote {#history}
---
Meta-analysis was not invented by one person alone, but by many founding mothers and fathers [@o2007historical]. The first attempts to statistically summarize the effects of separate, but similar studies date back around 100 years, and can be linked to two of the most important statisticians of all time, Karl Pearson and Ronald A. Fisher.
Pearson, in the beginning of the 20<sup>th</sup> century, combined findings on the effects of typhoid inoculation across the British Empire to calculate a pooled estimate [@shannon2016statistical]. Fisher, in his seminal 1935 book on the design of experiments, covered approaches to analyze data from multiple studies in agricultural research, and already acknowledged the problem that study results may vary due to location and time [@fisher19351he; @o2007historical].
\index{Standardized Mean Difference}
\index{Random-Effects Model}
\index{History of Meta-Analysis}
The name "meta-analysis" and the beginning of its rise to prominence, however, can be traced back to a scholarly dispute raging in the mid-20<sup>th</sup> century. In 1952, the famous British psychologist Hans Jürgen Eysenck (Figure \@ref(fig:eysenck)) published an article in which he claimed that psychotherapy (in that time, this largely meant Freudian psychoanalysis) was ineffective. If patients get better during therapy, it is because their situation would have improved anyway due to factors that have nothing to do with the therapy. Even worse, Eysenck claimed, psychotherapy would often hinder patients from getting better.
The reputation of psychotherapy took a big hit, and it did not recover until the late 1970s. During that time, Gene V. Glass developed a technique he termed "meta-analysis", which made it possible to pool **Standardized Mean Differences**^[i.e., the difference in means between two groups, for example an intervention and control group, expressed in the units of the pooled standard deviation of both groups (see Chapter \@ref(s-md)).] across studies. The first extensive application of his technique was in an article published in the **American Psychologist**, written by Mary L. Smith and Glass himself [@smith1977meta]. In this large study, results from 375 studies with more than 4000 participants were combined in a meta-analysis.
The study found that psychotherapies had a pooled effect of 0.68, which can be considered quite large. Glass' work had an immense impact because it provided quantitative evidence that Eysenck's verdict was wrong. Eysenck himself, however, was not convinced, calling the meta-analysis "an abandonment of scholarship" and "an exercise in mega-silliness" [@eysenck1978exercise].
(ref:eysenck) Hans Jürgen Eysenck ([_Sirswindon/CC BY-SA 3.0_](#attr)).
```{r eysenck, fig.cap='(ref:eysenck)', fig.scap="Hans Jürgen Eysenck.", out.width='35%', message = F, echo = F, fig.align='right'}
library(OpenImageR)
knitr::include_graphics('images/eysenck_col.jpg')
```
Today we know that Smith and Glass' study may have overestimated the effects of psychotherapy because it did not control for biases in the included studies [@cuijpers2019eysenck]. However, the primary finding that some psychotherapies are effective has been corroborated by countless other meta-analyses in the following decades. Eysenck's grim response could not change that meta-analysis soon became a commonly used method in various fields of study. This time has been very aptly described as the "meta-analytic Big Bang" [@shadish2015meta].
About the same time Glass developed his meta-analysis method, Hunter and Schmidt started crafting their own type of meta-analysis techniques putting emphasis on the correction of measurement artifacts [@schmidt1977development; @hunter2004methods]. Meta-analysis also found its way into medicine through the groundbreaking work of Peter Elwood and Archie Cochrane, among others, who used meta-analysis to show that aspirin has a small, but statistically and clinically relevant preventive effect on the recurrence of heart attacks [@peto1980aspirin; @elwood2006first; @o2007historical].
In the mid-80s, Rebecca DerSimonian and Nan Laird introduced an approach to calculate random-effects meta-analyses (see Chapter \@ref(rem)) that has been in use to this day [@dersimonian1986meta]. Countless other innovations have helped to increase the applicability, robustness and versatility of meta-analytic methods in the last four decades.
\index{Cochrane}
\index{Cochrane, Risk of Bias Tool}
\index{Cochrane, Handbook}
\index{Campbell Collaboration}
```{block2, type='boxinfo'}
**The Cochrane and Campbell Collaboration**
The [**Cochrane Collaboration**](https://www.cochrane.org/) (or simply **Cochrane**), founded in 1993 and named after Archie Cochrane, has played a crucial role in the development of applied meta-analysis. Cochrane is an international network of researchers, professionals, patients and other relevant stakeholders who "work together to produce credible, accessible health information that is free from commercial sponsorship and other conflicts of interest".
Cochrane uses rigorous standards to synthesize evidence in the biomedical field. The institution has its headquarters in London, but also has local branches in several countries around the world.
The Cochrane Collaboration issues the regularly updated [**Handbook for Systematic Reviews of Interventions**](https://training.cochrane.org/handbook) [@higgins2019cochrane] and the [**Cochrane Risk of Bias Tool**](https://methods.cochrane.org/bias/resources/rob-2-revised-cochrane-risk-bias-tool-randomized-trials) [@sterni2019rob]. Both are widely considered to be standard reference works for all technical details on systematic reviews and meta-analyses (see Chapter \@ref(spec-search-coding)).
An organization similar to Cochrane is the Oslo-based [**Campbell Collaboration**](https://campbellcollaboration.org/), which primarily focuses on research in the social sciences.
```
<br></br>
## Apples and Oranges: A Quick Tour of Meta-Analysis Pitfalls {#pitfalls}
---
In the last decades, meta-analysis has become a universally accepted research tool. This does not come without its own costs. Conducting a high-quality primary study is often very costly, and it can take many years until the results can finally be analyzed. In comparison, meta-analyses can be produced without too many resources, and within a relatively small time. Nevertheless, meta-analyses often have a high impact and they are frequently cited [@patsopoulos2005relative].
This means that scientific journals are often very inclined to publish meta-analyses, maybe even if their quality or scientific merit is limited. Unfortunately, this creates a natural incentive for researchers to produce many meta-analyses, and scientific considerations sometimes become secondary.
Ioannidis [-@ioannidis2016mass] criticized that an immense amount of redundant and misleading meta-analyses is produced each year. On some "hot" topics, there are more than 20 recent meta-analyses. Some meta-analyses may also be heavily biased by corporate interests, for example in pharmacotherapy research [@ebrahim2016meta; @kirsch2002emperor]. As we have mentioned before, reproducibility is a hallmark of good science. In reality, however, the reproducibility of many meta-analyses is all too often limited because important information is not reported [@lakens2017examining].
A common problem is also that different meta-analyses on the same or overlapping topics come to different conclusions. In psychotherapy research, for example, there has been an ongoing debate pertaining to the question if all types of psychotherapy produce equivalent outcomes. Countless reviews have been published supporting either one conclusion or the other [@wampold2013great; @cuijpers2019role].
While some of these issues may be associated with systemic problems of the scientific process, others can be traced back to flaws of meta-analyses themselves. Therefore, we want to lead you through a quick tour of common meta-analysis pitfalls [@borenstein2011introduction, chapter 40; @greco2013meta; @sharpe1997apples].
\index{"Apples and Oranges" Problem}
\index{"Garbage In, Garbage Out" Problem}
\index{"File Drawer" Problem}
\index{"Researcher Agenda" Problem}
<br></br>
### The "Apples and Oranges" Problem
---
One may argue that meta-analysis means combining apples with oranges. Even with the strictest inclusion criteria, studies in a meta-analysis will never be absolutely identical. There will always be smaller or larger differences between the included sample, the way an intervention was delivered, the study design, or the type of measurement used in the studies.
This can be problematic. Meta-analysis means to calculate a numerical estimate which represents the results of all studies. Such an estimate can always be calculated from a statistical point of view, but it becomes meaningless when studies do not share the properties that matter to answer a specific research question.
Imagine the, admittedly absurd, scenario where a meta-analyst decides to pool both studies on the effect of job satisfaction on job performance, as well as all available evidence on the effect of medication on the HbA<sub>1c</sub> value of diabetic patients in one meta-analysis. The results would be pointless to organizational psychologists and diabetologists alike.
Now, imagine that the same poor meta-analyst, trying to learn from previous mistakes, overcompensates and conducts a meta-analysis containing only studies published between 1990 and 1999 in which Canadian males in their sixties with moderate depressive symptoms were treated using 40mg of Fluoxetine per day, for exactly six weeks. The meta-analyst may proudly report the positive results of the study to a psychiatrist. However, the psychiatrist may only ask: "and what do I do if my patient is 45 years old and French"?
This brings us to an important point. The goal of meta-analysis is not to heedlessly throw together everything that can be combined. Instead, meta-analysis can be used to answer relevant research questions that go beyond the particularities of individual studies [@borenstein2011introduction, chapter 40]. The scope and specificity of a meta-analysis should therefore be based on the research question it wants to answer, and this question should be of practical relevance (see Chapter \@ref(spec-search-coding)).
If we are interested, for example, in whether a type of training program is effective across various age groups, cultural regions, and settings, it makes perfect sense to put no restriction on the population and country of origin of a study. However, it may then be advisable to be more restrictive with respect to the training program evaluated in the studies, and to only include the ones in which the training had a certain length, or covered similar topics.
Results of such a meta-analysis would not only allow us to estimate the pooled effect of the training, but also to quantify if and how much this effect may **vary**. Meta-analysis is capable of accommodating and making sense of such forms of **heterogeneity**. In Chapter \@ref(heterogeneity), we will have a closer look at this important concept.
To sum up, whether the "Apples and Oranges" problem is in fact an issue depends heavily on the question a meta-analysis wants to answer. Variation between studies is often unproblematic, and can even be insightful if it is correctly incorporated into the aims and problem specification of a meta-analysis.
<br></br>
### The "Garbage In, Garbage Out" Problem
---
The quality of evidence produced by a meta-analysis heavily depends on the quality of the studies it summarizes. If the results reported in the included studies are biased, or downright incorrect, the results of the meta-analysis will be equally flawed. This is what the "Garbage In, Garbage Out" problem refers to. It can be mitigated to some extent by assessing the quality or **risk of bias** (see Chapter \@ref(spec-search-coding) and \@ref(risk-of-bias-plots)) of the included studies.
However, if many or most of the results are of suboptimal quality and likely biased, even the most rigorous meta-analysis will not be able to balance this out. The only conclusion that can usually be drawn in such cases is that no trustworthy evidence exists for the reviewed topic, and that more high-quality studies have to be conducted in the future. However, even such a rather disappointing outcome can be informative, and help guide future research.
<br></br>
### The "File Drawer" Problem
---
The file drawer problem refers to the issue that not all relevant research findings are published, and are therefore missing from our meta-analysis. Not being able to integrate all evidence in a meta-analysis would be undesirable, but at least tolerable if we could safely assume that research findings are missing at random from the published literature.
Unfortunately, they are not. Positive, "innovative" findings often generate more buzz than failed replications or studies with negative and inconclusive results. In line with this, research shows that over the last decades, fewer and fewer negative findings have been published in many disciplines, particularly in social science and the biomedical field [@fanelli2012negative].
There is good reason to believe that studies with negative or "disappointing" results are systematically underrepresented in the published literature, and that there is a so-called **publication bias**. The exact nature and extent of this bias is, at best, a "known unknown" in meta-analyses.
However, there are certain ways through which publication bias can be minimized. One pertains to the way that studies are searched and selected (see Chapter \@ref(spec-search-coding)). Others are statistical methods which try to estimate if publication bias exists in a meta-analysis, and how big its impact may be. We will cover a few of these methods in Chapter \@ref(pub-bias).
<br></br>
### The "Researcher Agenda" Problem
---
When defining the scope of a meta-analysis, searching and selecting studies, and ultimately pooling outcome measures, researchers have to make a myriad of choices. Meta-analysis comes with many "researcher degrees of freedom" [@wicherts2016degrees], leaving much space for decisions which may sometimes be arbitrary, and sometimes the result of undisclosed personal preferences.
The freedom of meta-analysts in their **modus operandi** becomes particularly problematic when researchers are consciously or subconsciously driven by their own agenda. Meta-analyses are usually performed by applied researchers, and having extensive subject-specific expertise on the reviewed topic is a double-edged sword. On the one hand, it can help to derive and answer meaningful research questions in a particular field.
On the other hand, such experts are also deeply invested in the research area they are examining. This means that many meta-analysts may hold strong opinions about certain topics, and may intentionally or unintentionally influence the results in the direction that fits their beliefs.
There is evidence that, given one and the same data set, even experienced analysts with the best intentions can come to drastically varying conclusions [@silberzahn2018many]. The problem may be even more grave in intervention research, where some meta-analysts have a substantial **researcher allegiance** because they have helped to develop the type of intervention under study. Such researchers may of course be much more inclined to interpret outcomes of a meta-analysis more positively than indicated by the evidence.
One way to reduce the researcher agenda problem is preregistration: publishing a detailed analysis plan **before** beginning with the data collection for a meta-analysis (see Chapter \@ref(spec-search-coding) and \@ref(pre-registration)).
<br></br>
## Problem Specification, Study Search & Coding {#spec-search-coding}
---
\index{Study Search}
In the last chapter, we took some time to discuss common problems and limitations of meta-analyses. Many of these issues, such as the "Apples and Oranges" problem, the "File Drawer" problem, or the "Researcher Agenda" problem, can and should be addressed by every meta-analyst.
This begins long before you start calculating your first results. No meta-analysis can be conducted without data, and this data has to come from somewhere. We first have to specify the **research question** and **eligibility criteria** of our planned meta-analysis, search for studies and select the relevant ones, extract the data we need for our calculations, and then code important information we want to report later on.
There are several rules, standards and recommendations we can or should follow during each of these steps; they can help us to create a high-quality meta-analysis. Such high-quality meta-analyses contain a comprehensive selection of all suitable evidence, are unbiased and impartial with respect to their subject, and they draw valid, justified, and practically relevant conclusions from their results.
However, even when "following all the rules", it may not always be clear which specific decision is the best to achieve this in practice. It is possible that people will disagree with the way you went about some things. This is normal and usually just fine, as long as your methodological decisions are both **transparent** and **reproducible** [@pigott2020methodological].
In this chapter, we will go chronologically through a few important building blocks needed before we can begin with our first calculations. The length of this chapter is not representative of the time this process of data acquisition takes in reality. In our experience, statistical analyses make up at most 15% of the time spent on a meta-analysis--much less than everything that comes before. But specifying the research question, systematically searching for studies, and reliably coding the extracted data is essential. It forms the basis of every good meta-analysis.
<br></br>
### Defining the Research Question {#research-question}
---
\index{Research Question}
When designing a study, the first thing we do is define the research question. Meta-analysis is no exception. To define a good research question, it helps to first see it as a form of **problem specification**. To be pertinent and impactful, a meta-analysis should solve a problem. To identify such problems, some subject-specific knowledge is necessary.
If you want to find a good research question for a meta-analysis, it may therefore be helpful to pick a research area in which you have some background knowledge and ask yourself a few basic questions first. What are the questions which are currently relevant in this particular field? Is there a gap in current knowledge on certain topics? Are there any open discussions that remain unsettled? It might also help to think about the intended target audience. What are problems that are relevant to other researchers? What issues might other people, for example health care professionals, state agencies, schools, or human resource departments face?
Meta-analysis depends on previous research. Once you know the general direction of your research problem, it therefore helps to have a look at the current literature. Do previous primary studies exist on this topic, and how did they address the problem? What methods and outcome measures did they use? What limitations did they mention in the background and discussion section of the article? Have previous reviews and meta-analyses addressed the topic, and what issues have they left open?
Cummings and colleagues [-@cummings2013conceiving] have proposed a few criteria we can use to specify the problem to be covered by our meta-analysis, the FINER framework. It states that a research question should be **F**easible, **I**nteresting, **N**ovel, **E**thical, and **R**elevant.
Step by step, asking yourself these questions should make it easier to define what you want to achieve with your meta-analysis. It may also become clear that meta-analysis is **not** suitable for your problem. For example, there may simply be no relevant studies that have addressed the topic; or there may already be recent high-quality meta-analyses in the literature which address the issue sufficiently.
However, if you get the feeling that your problem is relevant to one or several groups of people, that previous studies have provided data pertaining to this problem, and that previous reviews and meta-analyses have not sufficiently or adequately addressed it, you can proceed to turn it into a **research question**.
Let us give you an example of how this can be done. There is evidence suggesting that gender biases exist in medical research [@hamberg2008gender; @nielsen2017one]. Especially in earlier decades, many clinical trials only or largely used male participants, and results were simply assumed to generalize to women as well. This has probably led to worse health outcomes in women for some diseases, such as heart conditions [@kim2009status; @mosca2013fifteen]^[It is of note that gender bias can not only negatively affect women, but also men; an example are diseases such as osteoporosis [@adler2014osteoporosis].].
Let us imagine that you are a medical researcher. You have heard rumors that a commonly used drug, **Chauvicepine**, may have serious side effects in women that have remained largely unrecognized. You determined that this, if true, would be a highly relevant problem, because it would mean that many women are prescribed a drug that may not be safe for them.
A look into the literature reveals that most studies investigating Chauvicepine were randomized placebo-controlled trials. The first of these trials were conducted in populations which only or predominantly consisted of men. But you also found a few more recent trials in which the gender makeup was more balanced. Many of these trials even reported the number of negative side effects that occurred in the trial separately for men and women. You also find a recent commentary in a medical journal in which a doctor reports that in her clinic, many women have experienced negative side effects when being treated with the medication.
Based on this, you decide that it may be interesting to address this problem in a meta-analysis. Therefore, you translate the problem you just discovered into a research question: "Does evidence from randomized placebo-controlled trials show that Chauvicepine leads to a significant increase of negative side effects in women, compared to placebo?"
\index{PICO}
Deriving a first formulation of the research question is only the first step. We now have to translate it into concrete **eligibility criteria**. These eligibility criteria will guide the decision which studies will and will not be included in our meta-analysis. They are therefore extremely important and should be absolutely transparent and reproducible.
A good way to start specifying the eligibility criteria is to use the PICO framework [@mattos2015systematic]. This framework is primarily aimed at intervention studies, but it is also helpful for other types of research questions. The letters in PICO stand for **P**opulation, **I**ntervention, **C**ontrol group or comparison, and **O**utcome:
* **Population**: What kind of people or study subjects do studies have to include to be eligible? Again, remember that it is important to address these questions as precisely as possible, and to think of the implications of each definition. If you only want to include studies in young adults, what does "young adults" mean? That only people between 18 and 30 were included? Can that even easily be determined from the published articles? Or is it just important that people were recruited from places which are usually frequented by young adults, such as universities and **Cardi B** concerts? If you only want to include studies on patients with a specific medical condition, how has that condition been diagnosed? By a trained health care professional, or is a self-report questionnaire sufficient? Many of these questions can be answered by resorting to the F and R parts of the FINER framework. Is it feasible to impose such a limitation on published research? And is it a relevant differentiation?
* **Intervention**: What kind of intervention (or alternatively, **exposure**) do studies have to examine? If you want to study the effects of an intervention, it is important to be very clear on the type of treatment that is eligible. How long or short do interventions have to be? Who is allowed to deliver them? What contents must the intervention include? If you do not focus on interventions, how must the **independent variable** be operationalized? Must it be measured by a specific instrument? If you study job satisfaction, for example, how must this construct be operationalized in the studies?
* **Control group** or **comparison**: To what were the results of the study compared? A control group receiving an attention placebo, or a pill placebo? Waitlists? Another treatment? Or nothing at all? It is also possible that there is no comparison or control group at all; for example if you want to study the prevalence estimates of a disease across different studies, or how many specimens of a species there are in different habitats.
* **Outcome**: What kind of outcome or dependent variable do studies have to measure? And **how** must the variable be measured? Is it the mean and standard deviation of questionnaire scores? Or the number of patients who died or got sick? When must the outcome be measured? Simply after the treatment, no matter how long the treatment was? Or after one to two years?
\index{PRISMA Statement}
\index{MARS Statement}
\index{Cochrane, Handbook}
```{block2, type='boxinfo'}
**Guidelines for Systematic Reviews and Meta-Analyses**
In light of the often suboptimal quality of meta-analyses, some guidelines and standards have been established on how meta-analyses should be conducted.
If you meta-analyze evidence in biomedical research or on the effect of an intervention, we strongly advise you to follow the **Preferred Reporting Items for Systematic Reviews and Meta-Analyses**, or PRISMA [@moher2009preferred]. The PRISMA statement contains several recommendations on how nearly all aspects of the meta-analysis process should be reported. The statement can also be found [online](http://www.prisma-statement.org/).^[Recently, the updated **PRISMA 2020** statement [@page2021prisma] has been released, replacing the older version from 2009. Novelties include an abstract reporting checklist, a revised flow diagram that also incorporates information on search updates, as well as greater emphasis on the declaration of competing interests and data sharing.]
For meta-analyses of psychological and behavior research, the **American Psychological Association**'s **Meta-Analysis Reporting Standards** [@appelbaum2018journal], or MARS, may be followed.
Although these standards largely comment on how meta-analyses should be **reported**, they also have implications on best practices when performing a meta-analysis. PRISMA and MARS share many core elements, and many things that we cover in this chapter are also mentioned in both of these guidelines.
An even more detailed resource is the **Cochrane Handbook for Systematic Reviews of Interventions** (see Chapter \@ref(history)), which contains precise recommendations on virtually every aspect of systematic reviews and meta-analyses. An overview of methodological standards for meta-analyses in social science can be found in Pigott and Polanin [-@pigott2020methodological].
```
While the PICO framework is an excellent way to specify the eligibility criteria of a meta-analysis, it does not cover all information that may be relevant. There are a few other aspects to consider [@lipsey2001practical].
One relevant detail is the eligible **research design**. In evidence-based medicine, it is common to only include evidence from randomized controlled trials (meaning studies in which participants were allocated to the treatment or control group by chance); but this is not always required [@borenstein2011introduction, chapter 40].
\index{WEIRD Populations}
It may also be helpful to specify the **cultural** and **linguistic range** of eligible studies. Most research is based on WEIRD populations, meaning western, educated, industrialized, rich, and democratic societies [@henrich2010most]. Especially in the social sciences, it is very likely that certain effects or phenomena do not generalize well to countries with other societal norms. Many researchers, however, only consider publications in English for their meta-analyses, to avoid having to translate articles in other languages.
This means that some evidence from different language areas will not be taken into account. Although English is the most common language for scientific publishing in most disciplines, it should be at least made transparent in the eligibility criteria that this limitation exists. If one of the goals of a meta-analysis is to examine cross-cultural differences, however, it is generally advisable to extend the eligibility criteria to other languages, provided all the other criteria are fulfilled.
\index{"File Drawer" Problem}
Another important aspect is the **publication type** that is allowed for a meta-analysis. Sometimes, meta-analysts only include research articles which were published in peer-reviewed scientific journals. The argument is that studies taken from this source fulfill higher standards since they have passed the critical eyes of experts in the field. This justification is not without flaws. In Chapter \@ref(pitfalls), we already covered that the "File Drawer" problem can seriously limit the validity of meta-analysis results because positive findings are more likely to get published.
A way to mitigate the risk of publication bias is therefore to also include **grey literature**. Grey literature can be defined as all types of research materials that have not been made available through conventional publication formats. This includes research reports, preprints, working papers, or conference contributions. Dissertations also often count as grey literature, although many of them are indexed in electronic bibliographic databases today [@schopfel2018electronic].
It may be advisable to at least also include dissertations in a meta-analysis. Compared to other types of unpublished material, it may be rather unlikely that the information provided in dissertations is heavily biased or downright fraudulent. Furthermore, you can still define other eligibility criteria to ensure that only studies fulfilling certain methodological requirements are included, no matter if they were published in scientific journals or not.
The last step of defining your eligibility criteria is to write them down as a list of **inclusion** and **exclusion criteria** that you will apply. Here is an example from a meta-analysis of insomnia interventions in college students showing how this can be done [@saruhanjan2020psychological]:
> _"We included: (a) RCTs [randomized controlled trials; authors' note] in which (b) individuals enrolled at a tertiary education facility (university, college or comparable postsecondary higher education facility) at the time of randomization, (c) received a sleep-focused psychological intervention, (d) that was compared with a passive control condition, defined as a control condition in which no active manipulation was induced as part of the study (wait-list, treatment as usual)._
> _For the purposes of this analysis, “sleep-focused” means that (e) effects on symptoms of sleep disturbances (global measures of sleep disturbances, sleep-onset latency [...], fatigue and daytime functionality, pre-sleep behaviour and experiences) were assessed as a (f) target outcome (by declaring a sleep outcome as the primary outcome or by stating the intervention was primarily aimed at this outcome) using (g) standardized symptom measures (objective sleep measures, standardized sleep or fatigue questionnaires, sleep diaries, items recording sleep quantity, quality or hygiene)._
> _Only studies (h) published in English or German were considered for inclusion."_
<br></br>
### Analysis Plan & Preregistration {#analysis-plan}
---
\index{Analysis Plan}
\index{Preregistration}
After your research question and eligibility criteria are set, it is sensible to also write an **analysis plan** [@pigott2020methodological; @tipton2019history]. In statistics, there is an important distinction between **a priori** and **post hoc** analyses. A priori analyses are specified **before seeing the data**. Post hoc, or **exploratory**, analyses are conducted **after seeing the data**, or based on the results implicated by the data.
Results of a priori analyses can be regarded as much more valid and trustworthy than post hoc analyses. Post hoc analyses make it easier to tweak certain details about the analysis or the data itself until results support the goals of the researcher. They are therefore much more prone to the "Researcher Agenda" problem we discussed in Chapter \@ref(pitfalls).
In the analysis plan, we specify all important calculations we want to perform in our meta-analysis a priori. This serves two purposes. First, it allows others to verify that the analyses we made were indeed planned, and are not the mere result of us playing around with the data until something desirable came out. Second, a detailed analysis plan also makes our meta-analysis reproducible, meaning that others can understand what we did at each step of our meta-analysis, and try to replicate them.
When using _R_, we can take the reproducibility of our analyses to a whole other level by writing documents which allow others to re-run every step of our analysis (see Chapter \@ref(reporting-reproducibility) in the "Helpful Tools" section). But this is relevant **after** we complete our analyses. In the analysis plan, we specify what we plan to do **before** any data has been collected.
\index{Random-Effects Model}
\index{Power Analysis}
\index{Subgroup Analysis}
\index{Meta-Regression}
There are a few things we should always specify in our analysis plan. We should make clear which information we will extract, and which effect size metric will be calculated for each included study (see Chapter \@ref(effects)). It is also recommended to decide beforehand if we will use a **fixed-** or **random-effects model** to pool results from each study, based on the amount of variation between studies we expect (see Chapter \@ref(pooling-es)). An a priori **power analysis** may also be helpful to determine how many studies are required for our meta-analysis to find a statistically significant effect (see Chapter \@ref(power) in the "Helpful Tools" section).
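To illustrate, here is a minimal sketch of such an a priori power calculation for a fixed-effect model, based on the common closed-form approach that uses a "typical" study as a reference. All input values (an expected effect of SMD = 0.3, ten studies, and 25 participants per group) are hypothetical assumptions chosen for illustration; Chapter \@ref(power) covers this topic in more detail.
```{r, eval=F}
# Hypothetical planning values (assumptions for illustration only)
d  = 0.30   # expected pooled effect (SMD)
k  = 10     # expected number of included studies
n1 = 25     # expected sample size per group in a "typical" study
n2 = 25

# Variance of a single SMD, and of the pooled effect (fixed-effect model)
v.study  = (n1 + n2)/(n1*n2) + d^2/(2*(n1 + n2))
v.pooled = v.study/k

# Power of a two-sided test with alpha = 0.05
lambda = d/sqrt(v.pooled)
1 - pnorm(qnorm(0.975) - lambda) + pnorm(-qnorm(0.975) - lambda)
```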
Furthermore, it is crucial to determine whether we want to assess if certain variables explain differences in the outcomes of the included studies, using subgroup analysis (Chapter \@ref(subgroup)) or meta-regression (Chapter \@ref(metareg)). For example, if our hypothesis states that the publication year might be associated with a study's outcome, and if we want to have a look at this association later in our meta-analysis, we mention this in our analysis plan. If we plan to sort studies into subgroups and then have a look at these subgroups separately, we should also report the exact criteria through which we will determine that a study belongs to a specific subgroup (see Chapter \@ref(study-selection)).
In part II of this book ("Meta-Analysis in _R_"), we will cover various statistical techniques to apply as part of a meta-analysis. Every technique we learn there and plan to apply in our meta-analysis should be mentioned in the analysis plan.
\index{Open Science Framework (OSF)}
\index{Preprint}
\index{Protocol}
\index{PRISMA Statement}
Once you are finished writing your analysis plan, do not simply bury it somewhere--make it public. There are a few excellent options for researchers to make their research documents openly available. For example, we can create a new project on the website of the **Open Science Framework** (OSF; see Chapter \@ref(osf) in the "Helpful Tools" section) and upload our analysis plan there. We can also upload our analysis plan to a preprint server such as **medrxiv.org**, **biorxiv.org**, or **psyarxiv.com**, depending on the nature of our research question.
Once our research question, eligibility criteria, analysis plan, and search strategy (see next chapter) are set, we should also **register** our meta-analysis. If the meta-analysis has a broadly health-related outcome, this may preferably be done using [PROSPERO](https://www.crd.york.ac.uk/prospero/), one of the largest registries for prospective systematic reviews and meta-analyses. The [preregistration service of the OSF](https://osf.io/prereg/) is also a good option.
In case we want to go even one step further, we can also write an entire **protocol** for our meta-analysis [@quintana2015pre]. A meta-analysis protocol contains the analysis plan, plus a description of the scientific background of our study, more methodological detail, and a discussion of the potential impact of the study.
There are also guidelines on how to write such protocols, such as the PRISMA-P Statement [@moher2015preferred]. Meta-analysis protocols are accepted by many peer-review journals. A good example can be found in Büscher, Torok and Sander [-@buscher2019effectiveness], or Valstad and colleagues [-@valstad2016relationship].
A priori analysis plans and preregistration are essential features of a well-made, trustworthy meta-analysis. And they should not make you anxious. Making the perfect choice for each and every methodological decision straight away is difficult, if not impossible. It is perfectly normal to make changes to one's initial plans somewhere down the road. We can assure you that, if you are honest and articulate about changes to your planned approach, most researchers will not perceive this as a sign of failure, but of professionalism and credibility.
<br></br>
### Study Search {#study-search}
---
\index{Study Search}
The next step after determining your eligibility criteria and analysis plan is to search for studies. In Chapter \@ref(what-are-mas), we discussed that most meta-analyses are an advanced type of systematic review. We aim to find **all** available evidence on a research question in order to get an unbiased, comprehensive view of the facts. This means that the search for studies should also be as comprehensive as possible. Not only one, but several sources should be used to search for studies. Here is an overview of important and commonly used sources.
* **Review articles**. It can be very helpful to screen previous reviews on the same or similar topics for relevant references. Narrative and systematic reviews usually provide a citation for all the studies that they included in their review. Many of these studies may also be relevant for your purposes.
* **References in studies**. If you find a study that is relevant for your meta-analysis, it is sensible to also screen the articles this study references. It is very likely that the study cites previous literature on the same topic in the introduction or discussion section, and some of these studies may also be relevant for your meta-analysis.
* **Forward search**. A forward search can be seen as the opposite of screening the references of previous primary studies and reviews. It means taking a study that is relevant for the meta-analysis as a basis, and then searching for other articles that have cited this study since it was published. This can be done quite easily on the Internet. You simply have to find the online entry of the study; usually, it is on the website of the journal in which it has been published. Most journal websites today have a functionality to display articles that have cited a study. Alternatively, you can also search for the study on **Google Scholar** (see Table \@ref(tab:bibdatabases)). Google Scholar can display citing research for every entry.
* **Relevant journals**. Often, there are a number of scientific journals which are specialized in the type of research question you are focused on. It can therefore be helpful to search for studies specifically in those journals. Virtually all journals have a website with a search functionality today, which you can use to screen for potentially eligible studies. Alternatively, you can also use electronic bibliographical databases, and use a filter so that only results from one or several journals are displayed.
The methods we described above can be seen as rather fine-grained strategies. They are ways to search in places where it is very likely that a relevant article will be listed. The disadvantage is that these approaches are unlikely to uncover all evidence that is really out there. It is therefore advisable to also use **electronic bibliographic databases** for one's search. An overview of important databases can be found in Table \@ref(tab:bibdatabases).
One should always conduct a search in several databases, not just one. Many bibliographical databases contain an immense number of entries. Nevertheless, it is common to find that the overlap in the results of databases is smaller than anticipated. You can select the databases you want to search based on their subject-specific focus. If your meta-analysis focuses on health-related outcomes, for example, you should at least search PubMed and CENTRAL.
When searching bibliographic databases, it is important to develop a **search string**. A search string contains different words or terms, which are connected using operators such as AND or OR. Developing search strings takes some time and experimenting. A good way to start is to use the PICO or eligibility criteria (Chapter \@ref(research-question)) as basis and to connect them using AND (a simplified example would be **"college student" AND "psychotherapy" AND "randomized controlled trial" AND "depression"**).
Most bibliographical databases also allow for **truncation** and **wildcards**. Truncation means replacing a word ending with a symbol, allowing it to vary as part of your search. This is usually done using asterisks. Using "_sociolog\*_" as a search term, for example, means that the database will search for "sociology", "sociological", and "sociologist" at the same time.
A wildcard signifies that a letter in a word can vary. This can come in handy when there are differences in the spelling of words (for example, differences between American English and British English). Take the search term "_randomized_". This will only find studies using American English spelling. If you use a wildcard (often symbolized by a question mark), you can write "_randomi?ed_" instead, and this will also give results in which the British English spelling was used ("randomised").
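Search strings, truncation and wildcards are features of the database interfaces themselves, not of _R_. Still, as a rough sketch, we can mimic the underlying ideas locally: the first lines below combine a few simplified, hypothetical search terms into a Boolean string, and the last line uses a regular expression to match both the American and British spelling of "randomized" in a set of made-up titles.
```{r, eval=F}
# Combine simplified (hypothetical) search terms into a Boolean string
terms = c("college student", "psychotherapy",
          "randomized controlled trial", "depression")
paste0('"', terms, '"', collapse = " AND ")

# A wildcard like "randomi?ed" roughly corresponds to the
# regular expression "randomi[sz]ed"
titles = c("A randomized trial of CBT in college students",
           "A randomised controlled trial of sleep hygiene")
grepl("randomi[sz]ed", titles, ignore.case = TRUE)
```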
When developing your search string, you should also have a look at the number of hits. A search string should not be too specific, otherwise some relevant articles may be missed. Getting around 3000 hits for your search string, for example, is still manageable in later steps, and it makes it more likely that all important references will be listed in your results. To see if your search string is generally valid, it sometimes helps to screen the first few hundred hits you get, and to check if at least some of the references have something to do with your research question.
Once you have developed the final versions of the search strings you want to use in your selected databases, save them somewhere. It is best practice to already include your search string(s) in your preregistration. Reporting of the search string (for example in the supplement) is required if you want to publish a meta-analysis protocol (see Chapter \@ref(research-question)), or the final results of your meta-analysis.
In conclusion, we want to stress that searching bibliographic databases is an art in and of itself, and that this paragraph only barely scratches the surface. A much more detailed discussion of this topic can be found in Cuijpers [-@cuijpers2016meta] and Bramer and colleagues [-@bramer2018systematic].
<br></br>
```{r bibdatabases, echo=F, message=F, fig.align='center', warning=F}
library(kableExtra)
library(stringr)
bibdb = read.csv("data/bib_databases.csv")
bibdb$Type = NULL
bibdb$Database = with(bibdb, paste0("[**", Database, "**](https://www.", str_replace_all(bibdb$Website, " ", ""), ")"))
bibdb$Website = NULL
kableExtra::kable(bibdb,
longtable = T,
booktabs = T,
caption = "A selection of relevant bibliographical databases.",
font_size = 14) %>%
#kable_styling(latex_options = c("repeat_header"), font_size = 8) %>%
column_spec(1, width = "6cm") %>%
#column_spec(2, width = "5cm") %>%
#column_spec(3, width = "3cm") %>%
pack_rows("Core Database", 1, 6, latex_align = "c", latex_gap_space = "2em",
hline_after = T, indent = FALSE, background = "#f2f1ed") %>%
pack_rows("Citation Database", 7, 9, latex_align = "c", latex_gap_space = "2em",
hline_after = T, indent = FALSE, background = "#f2f1ed") %>%
pack_rows("Dissertations", 10, 10, latex_align = "c", latex_gap_space = "2em",
hline_after = T, indent = FALSE, background = "#f2f1ed") %>%
pack_rows("Study Registries", 11, 12, latex_align = "c", latex_gap_space = "2em",
hline_after = T, indent = FALSE, background = "#f2f1ed") %>%
row_spec(0, font_size = 17) %>%
kable_styling(bootstrap_options = c("hover"), font_size = 14)
```
<br></br>
### Study Selection {#study-selection}
---
After completing your study search, you will likely have collected thousands of references from different sources. The next step is to select the ones that fulfill your eligibility criteria. It is advisable to follow a three-step procedure to do this.
In the first step, you should remove duplicate references. Especially when you search in multiple electronic bibliographical databases, it is likely that a reference will appear more than once. An easy way to do this is to first collect all your references in one place by importing them into a **reference management software**. There are several good reference management tools. Some of them, like [**Zotero**](https://www.zotero.org/) or [**Mendeley**](https://www.mendeley.com/) can be downloaded for free. Other programs like [**EndNote**](https://endnote.com/) provide more functionality but usually require a license.
Nearly all of those reference managers have a functionality which allows you to automatically remove duplicate articles. It is important that you write down the number of references you initially found in your study search, and how many references remained after duplicate removal. Such details should be reported later on once you make your meta-analysis public.
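Reference managers handle de-duplication conveniently, but the basic idea can also be sketched in _R_. The example below uses a small, made-up set of references; in practice, records exported from databases usually require more careful cleaning (for example of punctuation, capitalization, or DOIs) before duplicates can be detected reliably.
```{r, eval=F}
# A few made-up references, two of which are duplicates
refs = data.frame(
  title = c("A Randomized Trial of CBT in College Students",
            "A randomized trial of CBT in college students.",
            "Sleep hygiene education: a controlled study"),
  year  = c(2015, 2015, 2018))

# Normalize the titles before comparing them
norm.title = tolower(gsub("[[:punct:]]", "", refs$title))
refs.unique = refs[!duplicated(norm.title), ]

# Number of removed duplicates (to be documented and reported later on)
nrow(refs) - nrow(refs.unique)
```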
After duplicate removal, it is time to eliminate references that do not fit your purpose, based on their **title and abstract**. It is very likely that your study search will yield hundreds of results that are not even remotely linked to your research question^[Lipsey and Wilson [-@lipsey2001practical] tell the amusing anecdote that, when searching articles for a meta-analysis on the relationship between alcohol consumption and aggression, they had to exclude a surprisingly large number of studies in which alcohol was given to fish to examine territorial fighting behavior.]. Such references can be safely removed by looking at their title and abstract only. A reference manager will be helpful for this step too. You can go through each reference one after another and simply remove it when you are sure that the article is not relevant for you^[When exporting references from an electronic database, the abstract is usually added to the reference file, and can be displayed in the reference management tool. If no abstract is found for the reference, it usually only takes a quick Google search of the study title to find it.].
If you think that a study **might** contain interesting information based on the title and abstract, do **not** remove it--even if it seems unlikely that the study is important. It would be unfortunate if you put considerable time and effort into a comprehensive study search just to erroneously delete relevant references in the next step. The title and abstract-based screening of references does not require you to give a specific reason why you excluded the study. In the end, you must only document how many studies remained for the next step.
Based on title and abstract screening, it is likely that more than 90% of your initial references could be removed. In the next step, you should now retrieve the **full article** for each reference. Based on everything reported in the article, you then make a final decision if the study fulfills your eligibility criteria or not. You should be particularly thorough here because it is the final step determining if a study will be included in your meta-analysis or not. Furthermore, it is not simply sufficient to say that you removed a study because it did not fit your purpose. You have to give a **reason** here. For each study you decide to remove, you should document why exactly it was not eligible as per your defined criteria. Besides your eligibility criteria, there is one other reason why you might not be able to include a study.
When going through the full article, you may discover that not enough information is provided to decide whether the study is eligible or not. A study may, for example, simply not provide enough information on its research design. Another frequent scenario is that the results of a study are not reported in a form that would allow you to calculate the effect size metric required for your meta-analysis. If this happens, you should try to contact the corresponding author of the study at least two times, and ask for the needed information. Only if the author does not respond, and if the information missing from the published article is essential, should you exclude the study.
\index{PRISMA Statement}
\index{Campbell Collaboration}
Once we have arrived at the final selection of studies to include, we write down all the details of the inclusion process in a **flow diagram**. A commonly used template for such a flow chart is the one provided by the [PRISMA guidelines](http://prisma-statement.org/PRISMAStatement/FlowDiagram)^[Neal Haddaway and Luke McGuinness [-@prisma2020package] recently developed a package called **{PRISMA2020}**, which can be used to produce PRISMA 2020-compliant flow diagrams directly in _R_. The functionality of the package can also be accessed via an interactive [web app](https://estech.shinyapps.io/prisma_flowdiagram/).]. This flow chart documents all the necessary information we covered above:
1. How many references we could identify by searching **electronic databases**;
2. How many additional references we found through **other sources**;
3. The number of references that remained after **duplicate removal**;
4. The number of references we removed based on **title and abstract**;
5. The number of **articles** we removed based on the **full manuscript**, including how many articles were excluded for each **specific reason**;
6. The number of **studies** we included in our **qualitative synthesis** (i.e. systematic review) and **quantitative synthesis** (i.e. meta-analysis).
Please note that the number of articles that were not excluded at (5) and the number of studies included in (6) are usually identical, but they do not have to be. For example, it is possible that one article reports results of two or more independent studies, all of which are suitable for meta-analysis. The number of **studies** would then be higher than the number of included **articles**.
```{block2, type='boximportant'}
**Double-Screening**
Nearly all relevant guidelines and consensus statements emphasize that **double screening** should be used during the study selection process [@dissemination2009systematic; @higgins2019cochrane; @methods2016methodological].
This means that at least two people should perform each of the study selection steps independently to avoid errors. Reference removal based on the title and abstract should be conducted independently by two or more researchers, and the combination of all records that have not been removed by the assessors should be forwarded to the next step.
Using two or more assessors is even more important in the final step, in which full articles are screened. In this step, each person should independently assess if a study is eligible, and if it is not, give reasons why.
The assessors should then meet and compare their results. It is common that assessors disagree on the eligibility of some studies, and such disagreements can usually be resolved through discussion. If assessors fail to find an agreement, it can be helpful to determine a senior researcher beforehand who can make a final decision in such cases.
Using two or more assessors is not only advisable in the study selection process. This approach is also beneficial when extracting and coding data (see Chapter \@ref(data-extraction)).
```
<br></br>
### Data Extraction & Coding {#data-extraction}
---
When the selection of studies to be included in the meta-analysis is finalized, data can be extracted. There are three major types of information we should extract from the selected articles [@cuijpers2016meta]:
1. Characteristics of the studies.
2. Data needed to calculate effect sizes.
3. Study quality or risk of bias characteristics.
It is conventional for high-quality meta-analyses to provide a table in which characteristics of the included studies are reported. The exact details reported in this table can vary depending on the research field and research question. However, you should always extract and report the first author of a study, and when it was published. The sample size of each study should also be reported.
Apart from that, you may include some information on characteristics specified in the PICO of your meta-analysis; such as the country of origin, the mean or median age, the proportion of female and male participants, the type of intervention or exposure, the type of control group or comparison (if applicable), as well as the assessed outcomes of each study. If one or several studies did not assess or report one of these characteristics, you should indicate in the table that this detail was not specified.
It is also necessary to extract and collect the data needed to calculate the effect sizes or outcome measures we plan to pool. In Chapter \@ref(discovering-R), we will discuss in greater detail how you can structure your effect size data in a spreadsheet so that it can easily be used for calculations in _R_. If your analysis plan (see Chapter \@ref(analysis-plan)) also includes planned subgroup analyses and meta-regressions, you should also extract the data you need for these analyses from the articles.
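As a small preview of the spreadsheet format covered in Chapter \@ref(discovering-R), the sketch below shows one way in which extracted effect size data could be structured. All column names and values are made up for illustration; the exact layout you need depends on the effect size metric you plan to calculate.
```{r, eval=F}
# Hypothetical extraction sheet for a meta-analysis of controlled trials
data.frame(
  author  = c("Smith", "Jones", "Lee"),   # first author
  year    = c(2012, 2016, 2019),          # publication year
  n.e     = c(50, 38, 60),                # sample size (intervention)
  mean.e  = c(21.3, 18.9, 20.1),          # post-test mean (intervention)
  sd.e    = c(5.2, 6.1, 4.8),             # standard deviation (intervention)
  n.c     = c(48, 40, 61),                # sample size (control)
  mean.c  = c(24.0, 21.5, 23.2),          # post-test mean (control)
  sd.c    = c(5.5, 5.9, 5.1),             # standard deviation (control)
  country = c("USA", "UK", "Canada"))     # possible subgroup variable
```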
\index{Risk of Bias}
\index{Cochrane, Risk of Bias Tool}
It is common in meta-analysis to also rate and report the quality of the primary studies. The information you need to extract from each study to do this depends on the type of rating system you are using. Countless tools to assess the quality of primary studies have been developed in the last decades [@sanderson2007tools].
When only randomized controlled trials are eligible for your study, one of the best ways to code the study quality is to use the **Risk of Bias Tool** developed by Cochrane [[@higgins2011cochrane; @sterni2019rob]](https://methods.cochrane.org/bias/resources/rob-2-revised-cochrane-risk-bias-tool-randomized-trials). As it says in the title, this tool does not assess the quality of studies **per se**, but their risk of bias\index{Risk of Bias}.
Study quality and risk of bias are related, but not identical concepts. "Bias" refers to systematic errors in the results of a study or their interpretation. Risks of bias are aspects of the way a study was conducted, or of its results, that may increase the likelihood of such systematic errors. Even when a study only applies methods that are considered the "state of the art", it is still possible that biases exist. A study can fulfill all quality standards that are perceived as important in a particular research field, but sometimes even these best practices may not be enough to shield the study from distortions. The "risk of bias" concept thus has a slightly different focus compared to study quality assessments. It primarily pertains to the question of whether the output of an intervention study is **believable**, and focuses on criteria which are conducive to this goal [@higgins2019cochrane].
\index{Cochrane, ROBINS-I Tool}
For several domains, the Risk of Bias Tool lets you classify the risk of bias of a study as "high" or "low", or it can be determined that there are "some concerns". There are also conventions on how the risk of bias can be summarized visually (see Chapter \@ref(risk-of-bias-plots), where we describe how this can be done in _R_). A similar resource to assess the risk of bias in non-randomized studies is the **Risk of Bias in Non-randomized Studies of Interventions**, or ROBINS-I, tool [[@sterne2016robins]](https://www.riskofbias.info/welcome/home).
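When coding such ratings, it can help to store one judgment per domain and study right away, so that summary tables and plots (see Chapter \@ref(risk-of-bias-plots)) can be created later without re-coding. The layout below is only a hypothetical sketch: study names and judgments are made up, and the column names are abbreviated versions of the Risk of Bias Tool domains.
```{r, eval=F}
# Hypothetical domain-level risk of bias judgments for three studies
data.frame(
  study          = c("Smith 2012", "Jones 2016", "Lee 2019"),
  randomization  = c("low", "some concerns", "low"),
  deviations     = c("low", "low", "high"),
  missing.data   = c("some concerns", "low", "low"),
  measurement    = c("low", "low", "some concerns"),
  selection      = c("low", "high", "low"),
  overall        = c("some concerns", "high", "high"))
```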
The Cochrane Risk of Bias tools have become the standard approach to assess the risk of bias in (non-)randomized clinical trials [@jorgensen2016evaluation]. In other areas, current practices unfortunately still rather resemble the Wild West. In psychological research, for example, study quality assessments are often inconsistent, nontransparent, or not conducted at all [@hohn2019primary].
If you plan to meta-analyze studies other than clinical trials, there are two things you can do. First, you can check if the Risk of Bias or ROBINS-I tool may still be applicable, for example if your studies focus on another type of intervention that simply has no health-related focus. Another--admittedly suboptimal--way may be to search for previous high-quality meta-analyses on similar topics, and check how these studies have determined the quality of primary studies.
This ends our dive into the history of meta-analysis, its problems, and how we can avoid some of them when collecting and coding our data. The next chapter is the beginning of the "hands-on" part of this guide. In it, we will take our own first steps in _R_.
$$\tag*{$\blacksquare$}$$
<br></br>
## Questions & Answers
```{block2, type='boxquestion'}
**Test your knowledge!**
\vspace{4mm}
1. How can a meta-analysis be defined? What differentiates a meta-analysis from other types of literature reviews?
\vspace{-2mm}
2. Can you name one of the founding mothers and fathers of meta-analysis? What achievement can be attributed to her or him?
\vspace{-2mm}
3. Name three common problems of meta-analyses and describe them in one or two sentences.
\vspace{-2mm}
4. Name qualities that define a good research question for a meta-analysis.
\vspace{-2mm}
5. Have a look at the eligibility criteria of the meta-analysis on sleep interventions in college students (end of Chapter \@ref(research-question)). Can you extract the PICO from the inclusion and exclusion criteria of this study?
\vspace{-2mm}
6. Name a few important sources that can be used to search studies.
\vspace{-2mm}
7. Describe the difference between "study quality" and "risk of bias" in one or two sentences.
\vspace{4mm}
**Answers to these questions are listed in [Appendix A](https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/qanda.html#qanda1) at the end of this book.**
```
<br></br>
## Summary
* More and more scientific research is published each year, making it harder to keep track of available evidence. However, more research output does not automatically result in scientific progress.
* Meta-analysis aims to combine the results of previous studies in a quantitative way. It synthesizes all available evidence pertaining to a research question and can be used for decision-making.
* Meta-analytic methods trace back to the beginning of the 20<sup>th</sup> century. Modern meta-analytic approaches, however, have been developed in the second half of the 20<sup>th</sup> century, and meta-analysis has become a common research tool since then.
* There are several problems that are relevant for each meta-analysis: the "Apples and Oranges" problem, the "Garbage In, Garbage Out" problem, the "File Drawer" problem, and the "Researcher Agenda" problem.
* Many of these problems can be mitigated by defining a clear research question and eligibility criteria, writing an analysis plan, pre-registering the meta-analysis, and conducting the study search and data extraction in a systematic and reproducible way.
#' dmetar: Companion R package for the guide 'Doing Meta-Analysis in R'
#'
#' \code{dmetar} serves as the companion R package for the
#' guide \href{https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/}{Doing Meta-Analysis in R}
#' by Mathias Harrer, Pim Cuijpers, Toshi Furukawa and David Daniel Ebert.
#'
#' The package contains complementary functions to facilitate conducting meta-analyses
#' using the \pkg{meta}, \pkg{metafor}, \pkg{netmeta} and \pkg{gemtc} packages.
#'
#' @author Mathias Harrer \email{[email protected]}, David Daniel Ebert \email{[email protected]}
#' @docType package
#' @name dmetar
"_PACKAGE"
#' Toy Dataset for Network Meta-Analysis using the netmeta package
#'
#' This is a toy dataset containing simulated effect size data of a fictitious
#' network meta-analysis examining the effect of psychotherapies. Effect size
#' data is provided as the standardized mean difference (SMD) between the intervention
#' and control group and its corresponding standard error for each study at post-test.
#' The dataframe layout is optimized for out-of-the-box usage using
#' the \code{\link[netmeta]{netmeta}} function.
#'
#'
#' @format A data.frame with 5 columns.
#' \describe{
#' \item{studlab}{Character. The name of the included study.}
#' \item{treat1}{Character. The name of the first treatment. Includes psychotherapies for
#' the treatment of depression, "CBT" (Cognitive Behavioral Therapy), "PDT" (Psychodynamic Therapy),
#' "IPT" (Interpersonal Therapy), "PST" (Problem-solving Therapy) and "SUP" (Supportive Counseling),
#' and standard comparison conditions, "TAU" (Treatment as usual), "Placebo" (Placebo), and "WLC" (Waitlist control).}
#' \item{treat2}{Character. The name of the treatment the first treatment was compared to. Includes psychotherapies for
#' the treatment of depression, "CBT" (Cognitive Behavioral Therapy), "PDT" (Psychodynamic Therapy),
#' "IPT" (Interpersonal Therapy), "PST" (Problem-solving Therapy) and "SUP" (Supportive Counseling),
#' and standard comparison conditions, "TAU" (Treatment as usual), "Placebo" (Placebo), and "WLC" (Waitlist control).}
#' \item{TE}{Numeric. The standardized mean difference of the comparison.}
#' \item{seTE}{Numeric. The standard error of the comparison.}
#' }
#'
#' @source Simulated data.
#'
#' @usage data("NetDataNetmeta")
#'
#' @author Mathias Harrer, David Daniel Ebert
#'
"NetDataNetmeta"
#' Plot for direct evidence proportions in a network meta-analysis using \code{netmeta}
#'
#' This function plots relevant measures quantifying the direct evidence proportion, mean path length
#' and aggregated minimal parallelism of a frequentist network meta-analysis model generated by
#' \code{\link[netmeta]{netmeta}}.
#'
#' @usage direct.evidence.plot(x, random=FALSE, comparison.label.size=2,
#' numeric.label.size=3, subplot.ratio=c(5, 1.3, 1.3))
#'
#' @param x An object of class 'netmeta' containing the results of a network meta-analysis
#' using the \code{\link[netmeta]{netmeta}} function.
#' @param random Logical. If set to \code{TRUE}, results for the random-effects model are displayed.
#' If set to \code{FALSE}, results for the fixed-effect model are displayed. \code{FALSE} by default.
#' @param comparison.label.size A numeric value for the size of comparison labels
#' to be used in the plot. Default is \code{2}.
#' @param numeric.label.size A numeric value for the label size of numeric values
#' to be used in the plot. Default is \code{3}.
#' @param subplot.ratio A numeric vector containing three numbers. Defines the width for each of
#' the three subplots included in the plot (from left to right). Default is \code{c(5,1.3,1.3)}.
#'
#' @details
#' The function generates a plot containing three subplots displaying relevant characteristics
#' to evaluate the reliability of effect size estimates within a network meta-analysis model.
#' \itemize{
#' \item \strong{Direct evidence proportion}. This bar chart displays the proportion of direct
#' evidence (orange) contained in each network estimate. It is of note that both direct and indirect
#' evidence may contribute to the violation of the assumption of consistency underlying network
#' meta-analysis models. Nevertheless, this plot allows one to distinguish comparison estimates
#' for which direct evidence was used, and to what extent, from comparisons which had to be inferred
#' from indirect evidence alone.
#' \item \strong{Minimal Parallelism}. This bar chart displays the minimum number of independent paths
#' contributing to the effect estimate on an aggregated level. Large values of parallelism can be
#' seen as supporting the robustness of the estimate.
#' \item \strong{Mean Path Length}. This bar chart displays the mean path length, which characterizes
#' the degree of indirectness of an estimate. Higher mean path lengths indicate less reliable
#' estimates, given that more similarity assumptions have to be made when serially combining
#' direct comparisons. Following König, Krahn and Binder
#' (\href{https://www.ncbi.nlm.nih.gov/pubmed/24123165}{2013}), comparisons with mean path lengths
#' greater than two should be interpreted with caution. This threshold is displayed as a blue vertical
#' line in the plot.
#' }
#'
#'
#' @references Harrer, M., Cuijpers, P., Furukawa, T.A, & Ebert, D. D. (2019).
#' \emph{Doing Meta-Analysis in R: A Hands-on Guide}. DOI: 10.5281/zenodo.2551803. \href{https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/frequentist-network-meta-analysis.html}{Chapter 11.1}
#'
#' König J., Krahn U., Binder H. (2013): Visualizing the flow of evidence in network meta-analysis and
#' characterizing mixed treatment comparisons. \emph{Statistics in Medicine, 32}, 5414–29
#'
#' @author Mathias Harrer & David Daniel Ebert
#'
#' @import ggplot2 netmeta reshape2 forcats magrittr
#' @importFrom gridExtra grid.arrange
#' @importFrom scales percent
#' @importFrom graphics abline axis lines mtext par plot points rect segments text
#' @importFrom stats as.formula hat influence ks.test optimize pbinom pchisq pf pnorm pt punif qchisq qf qnorm qt reformulate reorder setNames uniroot
#'
#' @return
#' \itemize{
#' \item \code{data}: A data.frame containing columns for the proportion of direct and indirect
#' evidence of each comparison (\code{proportion.direct} and \code{proportion.indirect}), the
#' mean path length (\code{meanpath}) and the minimal parallelism (\code{minpar})
#' for each comparison.
#' \item \code{plot}: The generated plot (if the function output was saved to an object).
#' }
#'
#' @export direct.evidence.plot
#'
#' @seealso
#' \code{\link[netmeta]{netmeta}}, \code{\link[netmeta]{netmeasures}}
#'
#' @examples
#' # Load Senn2013 data from netmeta
#' suppressPackageStartupMessages(library(netmeta))
#' data(Senn2013)
#'
#' # Conduct network meta-analysis (fixed-effects model)
#' nma = netmeta(TE, seTE, treat1, treat2, studlab,
#' data=Senn2013, sm='MD', comb.random=FALSE)
#'
#' # Generate the plot
#' dep = direct.evidence.plot(nma, random=FALSE, comparison.label.size = 1,
#' numeric.label.size=1, subplot.ratio=c(3,1,1))
direct.evidence.plot = function(x, random = FALSE, comparison.label.size = 2, numeric.label.size = 3, subplot.ratio = c(5,
1.3, 1.3)) {
# Validate
x = x
random = random
cts = comparison.label.size
nts = numeric.label.size
spr = subplot.ratio
    if (!inherits(x, "netmeta")) {
stop("Input to this function has to be an object of class 'netmeta' created by the 'netmeta::netmeta' function.")
}
# PLOT 1: Direct and Indirect Evidence ####
# Get Measures
measures = netmeasures(x, random = random)$proportion
indirect = 1 - measures
measures = data.frame(comparison = names(measures), direct = measures, indirect = indirect)
rownames(measures) = c()
measures$direct = round(measures$direct, 4)
measures$indirect = round(measures$indirect, 4)
measures.reshape = melt(measures, id.vars = "comparison", measure.vars = c("direct", "indirect"))
# Reorder Label
measures = measures %>% mutate(comparison = forcats::fct_reorder(comparison, -direct))
levels = measures$comparison %>% levels
measures.reshape$comparison = factor(measures.reshape$comparison, levels = levels)
# Plot
PlotDirectEvidence = ggplot2::ggplot(measures.reshape, aes(x = forcats::fct_rev(comparison), fill = factor(variable,
levels = c("indirect", "direct")), y = value)) + geom_bar(stat = "identity", position = "fill") +
coord_flip() + theme_minimal() + theme(legend.position = "left") + scale_y_continuous(labels = scales::percent) +
ylab("Percentage") + xlab("Network Estimate") + guides(fill = guide_legend(title = "Evidence")) +
scale_fill_manual(values = c("lightblue", "orange")) + geom_hline(aes(yintercept = 0.25), color = "white") +
geom_hline(aes(yintercept = 0.5), color = "white") + geom_hline(aes(yintercept = 0.75), color = "white")
# PLOT 2: Mean Path Length ####
# Get Measures
mpath = netmeasures(x, random = random)$meanpath
path.df = data.frame(comparison = names(mpath), mpath = mpath)
rownames(path.df) = c()
path.df$comparison = factor(path.df$comparison, levels = levels)
# Plot for summary plot
PlotMeanPathLength_s = ggplot2::ggplot(path.df, aes(x = forcats::fct_rev(comparison), y = mpath)) + geom_bar(stat = "identity",
fill = "lightgray") + coord_flip() + geom_hline(aes(yintercept = 2), color = "blue") + geom_text(aes(x = comparison,
y = 0.4, label = comparison), color = "gray23", size = cts) + geom_text(aes(x = comparison, y = mpath +
0.1, label = round(mpath, 1)), size = nts) + ylab("Mean Path Length") + theme(axis.title.y = element_blank(),
axis.text.y = element_blank(), axis.ticks.y = element_blank(), axis.ticks.x = element_blank(), panel.background = element_blank()) +
scale_x_discrete(position = "top")
# PLOT 3: Parallelism ####
# Get Measures
mpar = netmeasures(x, random = random)$minpar
mpar.df = data.frame(comparison = names(mpar), mpar = mpar)
rownames(mpar.df) = c()
mpar.df$comparison = factor(mpar.df$comparison, levels = levels)
# Plot for summary plot
PlotMinimalParallelism_s = ggplot2::ggplot(mpar.df, aes(x = forcats::fct_rev(comparison), y = mpar)) +
geom_bar(stat = "identity", fill = "lightgray") + coord_flip() + geom_text(aes(x = comparison, y = mpar +
0.1, label = round(mpar, 1)), size = nts) + geom_text(aes(x = comparison, y = 0.4, label = comparison),
color = "gray23", size = cts) + ylab("Minimal Parallelism") + theme(axis.ticks.y = element_blank(),
axis.ticks.x = element_blank(), axis.title.y = element_blank(), axis.text.y = element_blank(), panel.background = element_blank())
# Process for return ####
# Save data used for plotting in df
data = data.frame(proportion.direct = measures$direct, proportion.indirect = measures$indirect, meanpath = mpath,
minpar = mpar)
# Set title
if (random == FALSE) {
plot_title = "Direct evidence proportion for each network estimate (fixed-effect model)"
} else {
plot_title = "Direct evidence proportion for each network estimate (random-effects model)"
}
grid = grid.arrange(PlotDirectEvidence, PlotMinimalParallelism_s, PlotMeanPathLength_s, ncol = 3, widths = spr,
heights = c(4), top = plot_title)
return(list(data = data, plot = grid))
}
#' Perform a \emph{P}-curve analysis
#'
#' This function performs a \eqn{p}-curve analysis using a \code{meta} object or calculated effect size data.
#'
#' @usage pcurve(x, effect.estimation = FALSE, N, dmin = 0, dmax = 1)
#'
#' @param x Either an object of class \code{meta}, generated by the \code{metagen}, \code{metacont},
#' \code{metacor}, \code{metainc}, or \code{metabin} function, or a data frame containing the calculated effect size
#' (named \code{TE}, log-transformed if based on a ratio), standard error (named \code{seTE}) and study label (named \code{studlab})
#' for each study.
#' @param effect.estimation Logical. Should the true effect size underlying the \emph{p}-curve be estimated?
#' If set to \code{TRUE}, a vector containing the total sample size for each study must be provided for
#' \code{N}. \code{FALSE} by default.
#' @param N A numeric vector of the same length as the number of effect sizes included in \code{x}, specifying the
#' total sample size \eqn{N} corresponding to each effect. Only needed if \code{effect.estimation = TRUE}.
#' @param dmin If \code{effect.estimation = TRUE}: lower limit for the effect size (\eqn{d}) space in which
#' the true effect size should be searched. Must be greater than or equal to 0. Default is 0.
#' @param dmax If \code{effect.estimation = TRUE}: upper limit for the effect size (\eqn{d}) space in which
#' the true effect size should be searched. Must be greater than 0. Default is 1.
#'
#' @details
#' \strong{P-curve Analysis}
#'
#' \eqn{P}-curve analysis (Simonsohn, Simmons & Nelson, 2014, 2015) has been proposed as a method
#' to detect \eqn{p}-hacking and publication bias in meta-analyses.
#'
#' \eqn{P}-Curve assumes that publication bias
#' is not only generated because researchers do not publish non-significant results,
#' but also because analysts “play” around with their data ("\eqn{p}-hacking"; e.g., selectively removing outliers,
#' choosing different outcomes, controlling for different variables) until a non-significant
#' finding becomes significant (i.e., \eqn{p<0.05}).
#'
#' The method assumes that for a specific research
#' question, \eqn{p}-values smaller than 0.05 of the included studies should follow a right-skewed distribution
#' if a true effect exists, even when the power in single studies was (relatively) low. Conversely,
#' a left-skewed \eqn{p}-value distribution indicates the presence of \eqn{p}-hacking and absence of
#' a true underlying effect. To control for "ambitious" \eqn{p}-hacking, \eqn{P}-curve also incorporates a
#' "half-curve" test (Simonsohn, Simmons & Nelson, 2014, 2015).
#'
#' Simonsohn et al. (2014)
#' stress that \eqn{p}-curve analysis should only be used for test statistics which were actually of interest
#' in the context of the included study, and that a detailed table documenting the reported results
#' used for the \eqn{p}-curve analysis should be created before communicating
#' results (\href{http://www.p-curve.com/Supplement/}{link}).
#'
#' \strong{Implementation in the function}
#'
#' To generate the \eqn{p}-curve and conduct the analysis, this function reuses parts of the \emph{R} code underlying
#' the \href{http://p-curve.com/app4/pcurve_app4.052.r}{P-curve App 4.052} (Simonsohn, 2017). The effect sizes
#' included in the \code{meta} object or \code{data.frame} provided for \code{x} are transformed
#' into \eqn{z}-values internally, which are then used to calculate \eqn{p}-values and conduct the
#' Stouffer and binomial tests used for the \eqn{p}-curve analysis. Interpretations of the function
#' concerning the presence or absence/inadequacy of evidential value are made according to the
#' guidelines described by Simonsohn, Simmons and Nelson (2015):
#'
#' \itemize{
#' \item \strong{Evidential value present}: The right-skewness test is significant for the half curve with
#' \eqn{p<0.05} \strong{or} the \eqn{p}-value of the right-skewness test is \eqn{<0.1} for both the half and full curve.
#' \item \strong{Evidential value absent or inadequate}: The flatness test yields \eqn{p<0.05} for the full curve
#' \strong{or} both the flatness test for the half curve and the binomial test yield \eqn{p<0.1}.
#'}
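#'
#' Expressed as code, these decision rules correspond to the following logic (a schematic sketch;
#' \code{skew.full.p}, \code{skew.half.p}, \code{flat.full.p} and \code{flat.half.p} denote the
#' \eqn{p}-values of the right-skewness and flatness tests for the full and half curve, and
#' \code{flat.binomial.p} the \eqn{p}-value of the binomial test against 33\% power):
#'
#' \preformatted{
#' evidence.present = skew.half.p < .05 | (skew.half.p < .1 & skew.full.p < .1)
#' evidence.absent  = flat.full.p < .05 | (flat.half.p < .1 & flat.binomial.p < .1)
#' }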
#'
#' For effect size estimation, the \code{pcurve} function implements parts of the loss function
#' presented in Simonsohn, Simmons and Nelson (2014b).
#' The function generates a loss function for candidate effect sizes \eqn{\hat{d}}, using \eqn{D}-values in
#' a Kolmogorov-Smirnov test as the metric of fit, and the value of \eqn{\hat{d}} which minimizes \eqn{D}
#' as the estimated true effect.
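#'
#' Conceptually, this estimation step amounts to the following search (a condensed sketch;
#' \code{pp.values()} is hypothetical shorthand for the internal computation of the pp-values
#' implied by a candidate effect size \eqn{d}):
#'
#' \preformatted{
#' loss = function(d) ks.test(pp.values(d), punif)$statistic
#' dhat = optimize(loss, interval = c(dmin, dmax))$minimum
#' }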
#'
#' It is of note that a lack of robustness of \eqn{p}-curve analysis results
#' has been noted for meta-analyses with substantial heterogeneity (van Aert, Wicherts, & van Assen, 2016).
#' Following van Aert et al., adjusted effect size estimates should only be
#' reported and interpreted for analyses with \eqn{I^2} values below 50 percent.
#' A warning message is therefore printed by
#' the \code{pcurve} function when \code{x} is of class \code{meta} and the between-study heterogeneity
#' of the meta-analysis is substantial (i.e., \eqn{I^2} greater than 50 percent).
#'
#'
#' @references Harrer, M., Cuijpers, P., Furukawa, T.A, & Ebert, D. D. (2019).
#' \emph{Doing Meta-Analysis in R: A Hands-on Guide}. DOI: 10.5281/zenodo.2551803.
#' \href{https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/pcurve.html}{Chapter 9.2}.
#'
#' Simonsohn, U., Nelson, L. D., & Simmons, J. P. (2014a). P-curve: a Key to the File-drawer.
#' \emph{Journal of Experimental Psychology, 143}(2), 534.
#'
#' Simonsohn, U., Nelson, L. D. & Simmons, J. P. (2014b). P-Curve and Effect Size:
#' Correcting for Publication Bias Using Only Significant Results.
#' \emph{Perspectives on Psychological Science 9}(6), 666–81.
#'
#' Simonsohn, U., Nelson, L. D. & Simmons, J. P. (2015). Better P-Curves: Making P-Curve
#' Analysis More Robust to Errors, Fraud, and Ambitious P-Hacking, a Reply to Ulrich and Miller (2015).
#' \emph{Journal of Experimental Psychology, 144}(6), 1146-1152.
#'
#' Simonsohn, U. (2017). R code for the P-Curve App 4.052. http://p-curve.com/app4/pcurve_app4.052.r (Accessed 2019-08-16).
#'
#' Van Aert, R. C., Wicherts, J. M., & van Assen, M. A. (2016).
#' Conducting meta-analyses based on p values: Reservations and recommendations for applying
#' \emph{p}-uniform and \emph{p}-curve. \emph{Perspectives on Psychological Science, 11}(5), 713-729.
#'
#' @author Mathias Harrer & David Daniel Ebert
#'
#' @return Returns a plot and main results of the pcurve analysis:
#' \itemize{
#' \item \strong{P-curve plot}: A plot displaying the observed \eqn{p}-curve and significance results
#' for the right-skewness and flatness test.
#' \item \strong{Number of studies}: The number of studies provided for the analysis, the number
#' of significant \eqn{p}-values included in the analysis, and the number of studies with \eqn{p<0.025}
#' used for the half-curve tests.
#' \item \strong{Test results}: The results for the right-skewness and flatness test, including the
#' \eqn{p_{binomial}} value, as well as the \eqn{z} and \eqn{p} value for the full and half-curve test.
#' \item \strong{Power Estimate}: The power estimate and 95\% confidence interval.
#' \item \strong{Evidential value}: Two lines displaying if evidential value is present and/or absent/inadequate based
#' on the results (using the guidelines by Simonsohn et al., 2015, see details).
#' \item \strong{True effect estimate}: If \code{effect.estimation} is set to \code{TRUE}, the estimated true effect
#' \eqn{\hat{d}} is returned additionally.
#'}
#'
#'
#' If results are saved to a variable, a list containing the following objects is returned:
#' \itemize{
#' \item \code{pcurveResults}: A data frame containing the results for the right-skewness and flatness test, including the
#' \eqn{p_{binomial}} value, as well as the \eqn{z} and \eqn{p} value for the full and half-curve test.
#' \item \code{Power}: The power estimate and 95\% confidence interval.
#' \item \code{PlotData}: A data frame with the data used in the \eqn{p}-curve plot.
#' \item \code{Input}: A data frame containing the provided effect sizes, calculated \eqn{p}-values and individual results for each included (significant) effect.
#' \item \code{EvidencePresent}, \code{EvidenceAbsent}, \code{kInput}, \code{kAnalyzed}, \code{kp0.25}: Further results of the \eqn{p}-curve analysis, including the presence/absence of evidence interpretation,
#' and number of provided/significant/\eqn{p<0.025} studies.
#'}
#'
#'
#' @import stringr poibin
#' @importFrom graphics abline axis lines mtext par plot points rect segments text
#' @importFrom stats as.formula hat influence ks.test optimize pbinom pchisq pf pnorm pt punif qchisq qf qnorm qt reformulate reorder setNames uniroot
#'
#' @export pcurve
#'
#' @seealso
#' \code{\link{eggers.test}}
#'
#' @examples
#' # Example 1: Use metagen object, do not estimate d
#' suppressPackageStartupMessages(library(meta))
#'
#' data("ThirdWave")
#' meta1 = metagen(TE,seTE, studlab=ThirdWave$Author, data=ThirdWave)
#'
#' pcurve(meta1)
#'
#'
#' # Example 2: Provide Ns, calculate d estimate
#' N = c(105, 161, 60, 37, 141, 82, 97, 61, 200, 79, 124, 25, 166, 59, 201, 95, 166, 144)
#' pcurve(meta1, effect.estimation = TRUE, N = N)
#'
#' # Example 3: Use metacont object, calculate d estimate
#' data("amlodipine")
#' meta2 <- metacont(n.amlo, mean.amlo, sqrt(var.amlo),
#' n.plac, mean.plac, sqrt(var.plac),
#' data=amlodipine, studlab=study, sm="SMD")
#' N = amlodipine$n.amlo + amlodipine$n.plac
#' pcurve(meta2, effect.estimation = TRUE, N = N, dmin = 0, dmax = 1)
#'
#' # Example 4: Construct x object from scratch
#' sim = data.frame("studlab" = c(paste("Study_", 1:18, sep = "")),
#' "TE" = c(0.561, 0.296, 0.648, 0.362, 0.770, 0.214, 0.476,
#' 0.459, 0.343, 0.804, 0.357, 0.476, 0.638, 0.396, 0.497,
#' 0.384, 0.568, 0.415),
#' "seTE" = c(0.338, 0.297, 0.264, 0.258, 0.279, 0.347, 0.271, 0.319,
#' 0.232, 0.237, 0.385, 0.398, 0.342, 0.351, 0.296, 0.325,
#' 0.322, 0.225))
#' pcurve(sim)
pcurve = function(x, effect.estimation = FALSE, N, dmin = 0, dmax = 1){
# Rename x to metaobject, remove x
metaobject = x
rm(x)
# Stop if metaobject is not meta or does not contain TE or seTE column
if (!(class(metaobject)[1] %in% c("metagen", "metabin", "metacont", "metacor", "metainc", "meta"))){
    te.exists = "TE" %in% colnames(metaobject)
    sete.exists = "seTE" %in% colnames(metaobject)
    studlab.exists = "studlab" %in% colnames(metaobject)
if(te.exists == FALSE | sete.exists ==FALSE | studlab.exists ==FALSE){
stop("x must be a meta-analysis object generated by meta functions or a data.frame with columns labeled studlab, TE, and seTE.")
}
}
#Disable scientific notation
options(scipen=999)
# Calculate Z
zvalues.input = abs(metaobject$TE/metaobject$seTE)
##############################################
# 1. Functions ###############################
##############################################
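  # Helper functions (adapted from the p-curve app code):
  #  getncp.f / getncp.c: find the non-centrality parameter of an F / chi-square distribution
  #                       that yields a given power at the alpha = .05 critical value (via uniroot)
  #  percent:             format a proportion as a percentage string
  #  pbound:              bound p-values away from exactly 0 and 1 (machine precision)
  #  prop33:              probability of observing p < pc for a test with 33% power (ncp33)
  #  stouffer:            combine pp-values into a single Z statistic (Stouffer's method)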
getncp.f =function(df1,df2, power) {
error = function(ncp_est, power, x, df1,df2) pf(x, df1 = df1, df2=df2, ncp = ncp_est) - (1-power)
xc=qf(p=.95, df1=df1,df2=df2)
return(uniroot(error, c(0, 1000), x = xc, df1 = df1,df2=df2, power=power)$root) }
getncp.c =function(df, power) {
xc=qchisq(p=.95, df=df)
error = function(ncp_est, power, x, df) pchisq(x, df = df, ncp = ncp_est) - (1-power)
return(uniroot(error, c(0, 1000), x = xc, df = df, power=power)$root) }
getncp=function(family,df1,df2,power) {
if (family=="f") ncp=getncp.f(df1=df1,df2=df2,power=power)
if (family=="c") ncp=getncp.c(df=df1,power=power)
return(ncp) }
percent <- function(x, digits = 0, format = "f", ...) {
paste(formatC(100 * x, format = format, digits = digits, ...), "%", sep = "")
}
pbound=function(p) pmin(pmax(p,2.2e-16),1-2.2e-16)
prop33=function(pc)
{
prop=ifelse(family=="f" & p<.05,1-pf(qf(1-pc,df1=df1, df2=df2),df1=df1, df2=df2, ncp=ncp33),NA)
prop=ifelse(family=="c" & p<.05,1-pchisq(qchisq(1-pc,df=df1), df=df1, ncp=ncp33),prop)
prop
}
stouffer=function(pp) sum(qnorm(pp),na.rm=TRUE)/sqrt(sum(!is.na(pp)))
###############################################################################
# 2. Process data ############################################################
###############################################################################
# Note: due to reliance on the pcurve-app function, z-scores are pasted into characters first
# and then screened to generate variables necessary for further computation
zvalues.input = paste("z=", zvalues.input, sep="")
filek = "input"
raw = zvalues.input
raw=tolower(raw)
ktot=length(raw)
k=seq(from=1,to=length(raw))
stat=substring(raw,1,1)
test=ifelse(stat=="r","t",stat)
# Create family
family=test
family=ifelse(test=="t","f",family)
family=ifelse(test=="z","c",family)
#family: f,c converting t-->f and z-->c
# Find comma,parentheses,equal sign
par1 =str_locate(raw,"\\(")[,1]
par2 =str_locate(raw,"\\)")[,1]
comma=str_locate(raw,",")[,1]
eq =str_locate(raw,"=")[,1]
# DF for t-tests
df=as.numeric(ifelse(test=="t",substring(raw,par1+1,par2 -1),NA))
# DF1
df1=as.numeric(ifelse(test=="f",substring(raw,par1+1,comma-1),NA))
df1=as.numeric(ifelse(test=="z",1,df1))
df1=as.numeric(ifelse(test=="t",1,df1))
df1=as.numeric(ifelse(test=="c",substring(raw,par1+1,par2 -1),df1))
# DF2
df2=as.numeric(ifelse(test=="f",substring(raw,comma+1,par2-1),NA))
df2=as.numeric(ifelse(test=="t",df,df2))
equal=abs(as.numeric(substring(raw,eq+1)))
value=ifelse((stat=="f" | stat=="c"),equal,NA)
value=ifelse(stat=="r", (equal/(sqrt((1-equal**2)/df2)))**2,value)
value=ifelse(stat=="t", equal**2 ,value)
value=ifelse(stat=="z", equal**2 ,value)
p=ifelse(family=="f",1-pf(value,df1=df1,df2=df2),NA)
p=ifelse(family=="c",1-pchisq(value,df=df1),p)
p=pbound(p) #Bound it to level of precision, see function 3 above
ksig= sum(p<.05,na.rm=TRUE) #significant studies
khalf=sum(p<.025,na.rm=TRUE) #half p-curve studies
if (ksig <= 2){
stop("Two or less effect sizes were detected, so p-curve analysis cannot be conducted.")
}
##############################################################################
# 3. PP-values ###############################################################
##############################################################################
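  # pp-values rescale each significant p-value to the 0-1 interval under a given null:
  # under the null of no effect, p-values below .05 (.025) are uniform, so pp = 20*p (40*p);
  # under the null of 33% power, the conditional CDF implied by ncp33 is used instead.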
# Right Skew, Full p-curve
ppr=as.numeric(ifelse(p<.05,20*p,NA))
ppr=pbound(ppr)
# Right Skew, half p-curve
ppr.half=as.numeric(ifelse(p<.025,40*p,NA))
ppr.half=pbound(ppr.half)
# Power of 33%
ncp33=mapply(getncp,df1=df1,df2=df2,power=1/3,family=family)
# Full-p-curve
pp33=ifelse(family=="f" & p<.05,3*(pf(value, df1=df1, df2=df2, ncp=ncp33)-2/3),NA)
pp33=ifelse(family=="c" & p<.05,3*(pchisq(value, df=df1, ncp=ncp33)-2/3),pp33)
pp33=pbound(pp33)
# half p-curve
prop25=3*prop33(.025)
prop25.sig=prop25[p<.05]
#Compute pp-values for the half
pp33.half=ifelse(family=="f" & p<.025, (1/prop25)*(pf(value,df1=df1,df2=df2,ncp=ncp33)-(1-prop25)),NA)
pp33.half=ifelse(family=="c" & p<.025, (1/prop25)*(pchisq(value,df=df1, ncp=ncp33)-(1-prop25)),pp33.half)
pp33.half=pbound(pp33.half)
##############################################################################
# 4. Stouffer & Binomial test ################################################
##############################################################################
# Convert pp-values to Z scores, using Stouffer function above
Zppr = stouffer(ppr)
Zpp33 = stouffer(pp33)
Zppr.half = stouffer(ppr.half)
Zpp33.half = stouffer(pp33.half)
# Overall p-values from Stouffer test
p.Zppr = pnorm(Zppr)
p.Zpp33 = pnorm(Zpp33)
p.Zppr.half = pnorm(Zppr.half)
p.Zpp33.half = pnorm(Zpp33.half)
# Save results to file
main.results=as.numeric(c(ktot, ksig, khalf, Zppr,
p.Zppr, Zpp33, p.Zpp33, Zppr.half,
p.Zppr.half, Zpp33.half, p.Zpp33.half))
# BINOMIAL
# Observed share of p<.025
prop25.obs=sum(p<.025)/sum(p<.05)
# Flat null
binom.r=1-pbinom(q=prop25.obs*ksig- 1, prob=.5, size=ksig)
# Power of 33% null
binom.33=ppoibin(kk=prop25.obs*ksig,pp=prop25[p<.05])
# Save binomial results
binomial=c(mean(prop25.sig), prop25.obs, binom.r, binom.33)
# Beautifyier Function
cleanp=function(p)
{
p.clean=round(p,4) #Round it
p.clean=substr(p.clean,2,6) #Drop the 0
p.clean=paste0("= ",p.clean)
if (p < .0001) p.clean= " < .0001"
if (p > .9999) p.clean= " > .9999"
return(p.clean)
}
#If there are zero p<.025, change Stouffer values for half-p-curve tests for "N/A" messages
if (khalf==0) {
Zppr.half ="N/A"
p.Zppr.half ="=N/A"
Zpp33.half ="N/A"
p.Zpp33.half ="=N/A"
}
  #If there is at least one p<.025, round the Z values and beautify the p-values
if (khalf>0) {
Zppr.half =round(Zppr.half,2)
Zpp33.half =round(Zpp33.half,2)
p.Zppr.half=cleanp(p.Zppr.half)
p.Zpp33.half=cleanp(p.Zpp33.half)
}
#Clean results for full test
Zppr=round(Zppr,2)
Zpp33=round(Zpp33,2)
p.Zppr=cleanp(p.Zppr)
p.Zpp33=cleanp(p.Zpp33)
binom.r=cleanp(binom.r)
binom.33=cleanp(binom.33)
################################################
# 5. Power ####################################
################################################
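  # powerfit(power_est) returns the Stouffer Z of the pp-values implied by a candidate power level.
  # The power estimate ('hat') is the level with the smallest |Z|; the confidence bounds are the
  # power levels at which the corresponding Stouffer p-value equals .95 (lower) and .05 (upper),
  # with boundary handling at power = .05 and .99.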
powerfit=function(power_est)
{
ncp_est=mapply(getncp,df1=df1,df2=df2,power=power_est,family=family)
pp_est=ifelse(family=="f" & p<.05,(pf(value,df1=df1,df2=df2,ncp=ncp_est)-(1-power_est))/power_est,NA)
pp_est=ifelse(family=="c" & p<.05,(pchisq(value,df=df1,ncp=ncp_est)-(1-power_est))/power_est,pp_est)
pp_est=pbound(pp_est)
return(stouffer(pp_est))
}
fit=c()
fit=abs(powerfit(.051))
for (i in 6:99) fit=c(fit,abs(powerfit(i/100)))
mini=match(min(fit,na.rm=TRUE),fit)
hat=(mini+4)/100
x.power=seq(from=5,to=99)/100
get.power_pct =function(pct) {
#Function that finds power that gives p-value=pct for the Stouffer test
#for example, get.power_pct(.5) returns the level of power that leads to p=.5 for the stouffer test.
#half the time we would see p-curves more right skewed than the one we see, and half the time
#less right-skewed, if the true power were that get.power_pct(.5). So it is the median estimate of power
#similarliy, get.power_pct(.1) gives the 10th percentile estimate of power...
#Obtain the normalized equivalent of pct, e.g., for 5% it is -1.64, for 95% it is 1.64
z=qnorm(pct) #convert to z because powerfit() outputs a z-score.
#Quantify gap between computed p-value and desired pct
error = function(power_est, z) powerfit(power_est) - z
#Find the value of power that makes that gap zero, (root)
return(uniroot(error, c(.0501, .99),z)$root) }
# Boundary conditions
  p.power.05=pnorm(powerfit(.051)) #Probability p-curve would be at least as right-skewed if power=.051
  p.power.99=pnorm(powerfit(.99)) #Probability p-curve would be at least as right-skewed if power=.99
# Lower end of ci
if (p.power.05<=.95) power.ci.lb=.05
if (p.power.99>=.95) power.ci.lb=.99
if (p.power.05>.95 && p.power.99<.95) power.ci.lb=get.power_pct(.95)
# Higher end of CI
if (p.power.05<=.05) power.ci.ub=.05
if (p.power.99>=.05) power.ci.ub=.99
if (p.power.05>.05 && p.power.99<.05) power.ci.ub=get.power_pct(.05)
# Save power fit
power_results=c(power.ci.lb,hat,power.ci.ub)
##############################################################################
# 6. Plot ###################################################################
##############################################################################
# Green line (Expected p-curve for 33% power)
gcdf1=prop33(.01)
gcdf2=prop33(.02)
gcdf3=prop33(.03)
gcdf4=prop33(.04)
green1=mean(gcdf1,na.rm=TRUE)*3
green2=mean(gcdf2-gcdf1,na.rm=TRUE)*3
green3=mean(gcdf3-gcdf2,na.rm=TRUE)*3
green4=mean(gcdf4-gcdf3,na.rm=TRUE)*3
green5=mean(1/3-gcdf4,na.rm=TRUE)*3
green=100*c(green1,green2,green3,green4,green5)
# Blue line (observed p-curve)
ps=ceiling(p[p<.05]*100)/100
blue=c()
for (i in c(.01,.02,.03,.04,.05)) blue=c(blue,sum(ps==i,na.rm=TRUE)/ksig*100)
# Red line
red=c(20,20,20,20,20)
# Make the graph
x = c(.01,.02,.03,.04,.05)
par(mar=c(6,5.5,1.5,3))
moveup=max(max(blue[2:5])-66,0)
ylim=c(0,105+moveup)
legend.top=100+moveup
plot(x,blue, type='l', col='dodgerblue2', main="",
lwd=2, xlab="", ylab="", xaxt="n",yaxt="n", xlim=c(0.01,0.051),
ylim=ylim, bty='L', las=1,axes=F)
x_=c(".01",".02",".03",".04",".05")
axis(1,at=x,labels=x_)
y_=c("0%","25%","50%","75%","100%")
y=c(0,25,50,75,100)
axis(2,at=y,labels=y_,las=1,cex.axis=1.2)
mtext("Percentage of test results",font=2,side=2,line=3.85,cex=1.25)
mtext("p ",font=4,side=1,line=2.3,cex=1.25)
mtext(" -value", font=2,side=1,line=2.3,cex=1.25)
points(x,blue,type="p",pch=20,bg="dodgerblue2",col="dodgerblue2")
text(x+.00075,blue+3.5,percent(round(blue)/100),col='black', cex=.75)
lines(x,red, type='l', col='firebrick2', lwd=1.5, lty=3)
lines(x,green, type='l', col='springgreen4', lwd=1.5, lty=5)
  tab1=.017 #x-axis position where the legend labels start
  tab2=tab1+.0015 #Test results and power estimates at tab1+.0015
  gap1=9 #between labels
  gap2=4 #between label and respective test (e.g., "Observed p-curve" and "power estimate")
font.col='gray44'
text.blue=paste0("Power estimate: ",percent(hat),", CI(",
percent(power.ci.lb),",",
percent(power.ci.ub),")")
text(tab1,legend.top, adj=0,cex=.85,bquote("Observed "*italic(p)*"-curve"))
text(tab2,legend.top-gap2,adj=0,cex=.68,text.blue,col=font.col)
text.red=bquote("Tests for right-skewness: "*italic(p)*""[Full]~.(p.Zppr)*", "*italic(p)*""[Half]~.(p.Zppr.half))
#note: .() within bquote prints the value rather than the variable name
text(tab1,legend.top-gap1, adj=0,cex=.85, "Null of no effect" )
text(tab2,legend.top-gap1-gap2, adj=0,cex=.68, text.red, col=font.col )
  text.green=bquote("Tests for flatness: "*italic(p)*""[Full]~.(p.Zpp33)*", "*italic(p)*""[Half]~.(p.Zpp33.half)*", "*italic(p)*""[Binomial]~.(binom.33))
text(tab1,legend.top-2*gap1, adj=0,cex=.85,"Null of 33% power")
text(tab2,legend.top-2*gap1-gap2, adj=0,cex=.68,text.green,col=font.col)
segments(x0=tab1-.005,x1=tab1-.001,y0=legend.top,y1=legend.top, col='dodgerblue2',lty=1,lwd=1.5)
segments(x0=tab1-.005,x1=tab1-.001,y0=legend.top-gap1, y1=legend.top-gap1,col='firebrick2',lty=3,lwd=1.5)
segments(x0=tab1-.005,x1=tab1-.001,y0=legend.top-2*gap1,y1=legend.top-2*gap1,col='springgreen4',lty=2,lwd=1.5)
rect(tab1-.0065,legend.top-2*gap1-gap2-3,tab1+.032,legend.top+3,border='gray85')
msgx=bquote("Note: The observed "*italic(p)*"-curve includes "*.(ksig)*
" statistically significant ("*italic(p)*" < .05) results, of which "*.(khalf)*
" are "*italic(p)*" < .025.")
mtext(msgx,side=1,line=4,cex=.65,adj=0)
kns=ktot-ksig
if (kns==0) ns_msg="There were no non-significant results entered."
if (kns==1) ns_msg=bquote("There was one additional result entered but excluded from "*italic(p)*"-curve because it was "*italic(p)*" > .05.")
if (kns>1) ns_msg=bquote("There were "*.(kns)*" additional results entered but excluded from "*italic(p)*"-curve because they were "*italic(p)*" > .05.")
mtext(ns_msg,side=1,line=4.75,cex=.65,adj=0)
##############################################################################
# 7 Save Calculations #######################################################
##############################################################################
# table_calc
table_calc=data.frame(raw, p, ppr, ppr.half, pp33, pp33.half,
qnorm(ppr), qnorm(ppr.half), qnorm(pp33), qnorm(pp33.half))
headers1=c("Entered statistic","p-value", "ppr", "ppr half", "pp33%","pp33 half",
"Z-R","Z-R half","Z-33","z-33 half")
table_calc=setNames(table_calc,headers1)
# table_figure
headers2=c("p-value","Observed (blue)","Power 33% (Green)", "Flat (Red)")
table_figure=setNames(data.frame(x,blue,green,red),headers2)
################################################
# 8. Cumulative p-curves (Deprecated) ##########
################################################
#7.1 FUNCTION THAT RECOMPUTES OVERALL STOUFFER TEST WITHOUT (K) MOST EXTREME VALUES, ADJUSTING THE UNIFORM TO ONLY INCLUDE RANGE THAT REMAINS
dropk=function(pp,k,droplow)
{
#Syntax:
#pp: set of pp-values to analyze sensitivity to most extremes
#k: # of most extreme values to exclude
      #droplow: 1 to drop smallest, 0 to drop largest
pp=pp[!is.na(pp)] #Drop missing values
n=length(pp) #See how many studies are left
pp=sort(pp) #Sort the pp-value from small to large
if (k==0) ppk=pp #If k=0 do nothing for nothing is being dropped
#If we are dropping low values
if (droplow==1 & k>0)
{
#Eliminate lowest k from the vector of pp-values
ppk=(pp[(1+k):n])
ppmin=min(pp[k],k/(n+1)) #Boundary used to define possible range of values after exclusion
ppk=(ppk-ppmin)/(1-ppmin) #Take the k+1 smallest pp-value up to the highest, subtract from each the boundary value, divide by the range, ~U(0,1) under the null
#This is explained in Supplement 1 of Simonsohn, Simmons Nelson, JEPG 2016 "Better p-curves" paper. See https://osf.io/mbw5g/
}
#If we are dropping high values
if (droplow==0 & k>0)
{
        #Eliminate highest k from the vector of pp-values
ppk=pp[1:(n-k)]
ppmax=max(pp[n-k+1],(n-k)/(n+1)) #Find new boundary of range
ppk=ppk/ppmax #Redefine range to make U(0,1)
}
#In case of a tie with two identical values we would have the ppk be 0 or 1, let's replace that with almost 0 and almost 1
ppk=pmax(ppk,.00001) #Adds small constant to the smallest redefined p-value, avoids problem if dropped p-value is "equal" to next highest, then that pp-value becomes 0
ppk=pmin(ppk,.99999) #Subtract small constant to the largest redefined pp-value, same reason
Z=sum(qnorm(ppk))/sqrt(n-k)
return(pnorm(Z))
} #End function dropk
#7.2 Apply function, in loop with increasing number of exclusions, to full p-curve
#Empty vectors for results
droplow.r=droplow.33=drophigh.r=drophigh.33=c()
#Loop over full p-curves
for (i in 0:(round(ksig/2)-1))
{
#Drop the lowest k studies in terms of respective overall test
#Right skew
droplow.r= c(droplow.r, dropk(pp=ppr,k=i,droplow=1))
drophigh.r=c(drophigh.r, dropk(pp=ppr,k=i,droplow=0))
#Power of 33%
droplow.33=c(droplow.33, dropk(pp=pp33,k=i,droplow=1))
drophigh.33=c(drophigh.33, dropk(pp=pp33,k=i,droplow=0))
}
#Half p-curves
if (khalf>0)
{
droplow.halfr=drophigh.halfr=c()
for (i in 0:(round(khalf/2)-1))
{
#Drop the lowest k studies in terms of respective overall test
droplow.halfr= c(droplow.halfr, dropk(pp=ppr.half,k=i,droplow=1))
drophigh.halfr=c(drophigh.halfr, dropk(pp=ppr.half,k=i,droplow=0))
} #End loop
}#End if that runs calculations only if khalf>0
#7.3 FUNCTION THAT DOES THE PLOT OF RESULTS
plotdrop=function(var,col)
{
k=length(var)
#Plot the dots
plot(0:(k-1),var,xlab="",ylab="",type="b",yaxt="n",xaxt="n",main="",
cex.main=1.15,ylim=c(0,1),col=col)
#Add marker in results with 0 drops
points(0,var[1],pch=19,cex=1.6)
#Red line at p=.05
abline(h=.05,col="red")
#Y-axis value labels
    axis(2,c(.05,2:9/10),labels=c('.05','.2','.3','.4','.5','.6','.7','.8','.9'),las=1,cex.axis=1.5)
axis(1,c(0:(k-1)),las=1,cex.axis=1.4)
}
######################################################################################
# 9. Effect Estimation ###############################################################
######################################################################################
if (effect.estimation == TRUE){
# Define ci.to.t function
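    # ci.to.t converts each effect size and its 95% CI into an approximate t-value:
    # the CI is turned into an approximate p-value, the p-value into a z-score, the z-score
    # into Cohen's d (using the total sample size N), and d into t with df = N - 2.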
ci.to.t = function(TE, lower, upper, n){
z.to.d = function(z, n){
d = (2*z)/sqrt(n)
return(abs(d))
}
ci.to.p = function(est, lower, upper){
SE = (upper-lower)/(2*1.96)
z = abs(est/SE)
p = exp(-0.717*z - 0.416*z^2)
return(p)
}
d.to.t = function(d, n){
df = n-2
t = (d*sqrt(df))/2
return(t)
}
p = ci.to.p(TE, lower, upper)
z = abs(qnorm(p/2))
d = z.to.d(z, n)
t = d.to.t(d, n)
return(t)
}
#Function 13 - loss function
loss=function(t_obs,df_obs,d_est) {
#1.Convert all ts to the same sign (for justification see Supplement 5)
t_obs=abs(t_obs)
#2 Compute p-values
p_obs=2*(1-pt(t_obs,df=df_obs))
#3 Keep significant t-values and corresponding df.
t.sig=subset(t_obs,p_obs<.05)
df.sig=subset(df_obs,p_obs<.05)
#4.Compute non-centrality parameter implied by d_est and df_obs
#df+2 is total N.
      #Because the noncentrality parameter for the student distribution is ncp=sqrt(n/2)*d,
#we add 2 to d.f. to get N, divide by 2 to get n, and by 2 again for ncp, so -->df+2/4
ncp_est=sqrt((df.sig+2)/4)*d_est
#5.Find critical t-value for p=.05 (two-sided)
#this is used below to compute power, it is a vector as different tests have different dfs
#and hence different critical values
tc=qt(.975,df.sig)
#4.Find power for ncp given tc, again, this is a vector of implied power, for ncp_est, for each test
power_est=1-pt(tc,df.sig,ncp_est)
#5.Compute pp-values
#5.1 First get the overall probability of a t>tobs, given ncp
p_larger=pt(t.sig,df=df.sig,ncp=ncp_est)
#5.2 Now, condition on p<.05
ppr=(p_larger-(1-power_est))/power_est #this is the pp-value for right-skew
#6. Compute the gap between the distribution of observed pp-values and a uniform distribution 0,1
KSD=ks.test(ppr,punif)$statistic #this is the D statistic outputted by the KS test against uniform
return(KSD)
}
if(missing(N)){
stop("If 'effect.estimation=TRUE', argument 'N' must be provided.")
}
if (length(N) != length(metaobject$TE)){
stop("N must be of same length as the number of studies contained in x.")
}
lower = metaobject$TE - (metaobject$seTE*1.96)
upper = metaobject$TE + (metaobject$seTE*1.96)
t_obs = ci.to.t(metaobject$TE, lower, upper, N)
df_obs = N-2
#Results will be stored in these vectors, create them first
loss.all=c()
di=c()
#Compute loss for effect sizes between d=c(dmin,dmax) in steps of .01
for (i in 0:((dmax-dmin)*100))
{
d=dmin+i/100 #effect size being considered
di=c(di,d) #add it to the vector (kind of silly, but kept for symmetry)
  options(warn=-1) #turn off warning because R does not like its own pt() function!
loss.all=c(loss.all,loss(df_obs=df_obs,t_obs=t_obs,d_est=d))
  #apply loss function to that effect size, store result
options(warn=0) #turn warnings back on
}
#find the effect leading to smallest loss in that set, that becomes the starting point in the optimize command
imin=match(min(loss.all),loss.all) #which i tested effect size lead to the overall minimum?
dstart=dmin+imin/100 #convert that i into a d.
#optimize around the global minimum
dhat=optimize(loss,c(dstart-.1,dstart+.1), df_obs=df_obs,t_obs=t_obs)
  options(warn=0)
#Plot results
plot(di,loss.all,xlab="Effect size\nCohen-d", ylab="Loss (D stat in KS test)",ylim=c(0,1), main="How well does each effect size fit? (lower is better)")
points(dhat$minimum,dhat$objective,pch=19,col="red",cex=2)
text(dhat$minimum,dhat$objective-.08,paste0("p-curve's estimate of effect size:\nd=",round(dhat$minimum,3)),col="red")
}
######################################################################################
# 10. Prepare Results for Return #####################################################
######################################################################################
# Get results
main.results = round(main.results, 3)
ktotal = round(main.results[1]) # Get the total number of inserted TEs
k.sign = round(main.results[2]) # Get the total number of significant TEs
  k.025 = round(main.results[3]) # Get the number of p<0.025 TEs
skew.full.z = main.results[4] # Get the Z-score for the full curve skewness test
skew.full.p = main.results[5] # Get the p-value for the full curve skewness test
flat.full.z = main.results[6] # Get the Z-score for the full curve flatness test
flat.full.p = main.results[7] # Get the p-value for the full curve flatness test
skew.half.z = main.results[8] # Get the Z-score for the half curve skewness test
skew.half.p = main.results[9] # Get the p-value for the half curve skewness test
flat.half.z = main.results[10] # Get the Z-score for the half curve flatness test
flat.half.p = main.results[11] # Get the p-value for the half curve flatness test
skew.binomial.p = round(binomial[3], 3) # Get the skewness binomial p-value
flat.binomial.p = round(binomial[4], 3) # Get the flatness binomial p-value
# Make data.frame
skewness = c(skew.binomial.p, skew.full.z, skew.full.p, skew.half.z, skew.half.p)
flatness = c(flat.binomial.p, flat.full.z, flat.full.p, flat.half.z, flat.half.p)
colnames.df = c("pBinomial", "zFull", "pFull", "zHalf", "pHalf")
rownames.df = c("Right-skewness test", "Flatness test")
pcurveResults = rbind(skewness, flatness)
colnames(pcurveResults) = colnames.df
rownames(pcurveResults) = rownames.df
# Power results
power_results = round(power_results, 3)
powerEstimate = power_results[2]
powerLower = power_results[1]
powerUpper = power_results[3]
Power = as.data.frame(cbind(powerEstimate, powerLower, powerUpper))
rownames(Power) = ""
# Presence and absence of evidential value
# - If the half p-curve test is right-skewed with p<.05 or both the half and full test
# are right-skewed with p<.1, then p-curve analysis indicates the presence of evidential value
# - Evidential value is inadequate or absent if the 33% power test is p<.05 for the full p-curve
# or both the half p-curve and binomial 33% power test are p<.1
if (skew.half.p < 0.05 | (skew.half.p < 0.1 & skew.full.p < 0.1)){
presence.ev = "yes"
} else {
presence.ev = "no"
}
if (flat.full.p < 0.05 | (flat.half.p < 0.1 & flat.binomial.p < 0.1)){
absence.ev = "yes"
} else {
absence.ev = "no"
}
# Plot Data
PlotData = round(table_figure, 3)
# Input Data
table_calc[,1] = NULL
colnames(table_calc) = c("p", "ppSkewFull", "ppSkewHalf", "ppFlatFull", "ppFlatHalf", "zSkewFull", "zSkewHalf",
"zFlatFull", "zFlatHalf")
Input = cbind(metaobject$TE, round(table_calc,3))
rownames(Input) = paste(1:length(metaobject$TE), metaobject$studlab)
colnames(Input)[1] = "TE"
# Cat the results
cat("P-curve analysis", "\n", "-----------------------", "\n")
cat("- Total number of provided studies: k =", ktot, "\n")
cat("- Total number of p<0.05 studies included into the analysis: k =",
k.sign, paste("(", round(k.sign/ktot*100, 2), "%)", sep=""), "\n")
cat("- Total number of studies with p<0.025: k =", k.025,
paste("(", round(k.025/ktot*100, 2), "%)", sep=""), "\n")
cat(" ", "\n")
cat("Results", "\n", "-----------------------", "\n")
print(pcurveResults)
cat("Note: p-values of 0 or 1 correspond to p<0.001 and p>0.999, respectively.")
cat(" ", "\n")
cat("Power Estimate: ", Power[,1]*100, "%", " (", Power[,2]*100, "%-", Power[,3]*100, "%)", "\n",sep="")
cat(" ", "\n")
cat("Evidential value", "\n", "-----------------------", "\n")
cat("- Evidential value present:", presence.ev, "\n")
cat("- Evidential value absent/inadequate:", absence.ev, "\n")
if (effect.estimation==TRUE){
cat(" ", "\n")
cat("P-curve's estimate of the true effect size: d=", round(dhat$minimum, 3), sep="")
if (metaobject$I2 > 0.49 & (class(metaobject)[1] %in% c("metagen", "metabin", "metacont", "meta", "metainc"))){
cat(" ", "\n")
cat("Warning: I-squared of the meta-analysis is >= 50%, so effect size estimates are not trustworthy.")
}
dEstimate = round(dhat$minimum, 3)
return.list = list("pcurveResults" = pcurveResults,
"Power" = Power,
"PlotData" = PlotData,
"Input" = Input,
"EvidencePresent" = presence.ev,
"EvidenceAbsent" = absence.ev,
"kInput" = ktot,
"kAnalyzed" = k.sign,
"kp0.25" = k.025,
"dEstimate" = dEstimate)
} else {
return.list = list("pcurveResults" = pcurveResults,
"Power" = Power,
"PlotData" = PlotData,
"Input" = Input,
"EvidencePresent" = presence.ev,
"EvidenceAbsent" = absence.ev,
"kInput" = ktot,
"kAnalyzed" = k.sign,
"kp0.25" = k.025)
}
cat(" ", "\n")
invisible(return.list)
}
#' Influence Diagnostics
#'
#' Conducts an influence analysis of a meta-analysis generated by \code{\link[meta]{meta}} functions,
#' and can be used to produce influence diagnostic plots.
#'
#' @usage InfluenceAnalysis(x, random = FALSE, subplot.heights = c(30,18),
#' subplot.widths = c(30,30), forest.lims = 'default',
#' return.separate.plots = FALSE, text.scale = 1)
#'
#' @param x An object of class \code{meta}, generated by the \code{metabin}, \code{metagen},
#' \code{metacont}, \code{metacor}, \code{metainc}, \code{metarate} or \code{metaprop} function.
#' @param random Logical. Should the random-effects model be used to generate the influence diagnostics?
#' Uses the \code{method.tau} specified in the \code{meta} object if one
#' of "\code{DL}", "\code{HE}", "\code{SJ}", "\code{ML}", "\code{REML}", "\code{EB}", "\code{PM}", "\code{HS}" or "\code{GENQ}" (to ensure compatibility with
#' the \code{\link[metafor]{metafor}} package). Otherwise, the DerSimonian-Laird
#' (\code{"DL"}; DerSimonian & Laird, 1986) estimator is used. \code{FALSE} by default.
#' @param subplot.heights A numeric vector of length two. Specifies the heights of the
#' first (first number) and second (second number) row of the overall plot generated when plotting the results.
#' Default is \code{c(30,18)}.
#' @param subplot.widths A numeric vector of length two. Specifies the widths of the
#' first (first number) and second (second number) column of the overall results plot generated when plotting the results.
#' Default is \code{c(30,30)}.
#' @param forest.lims A numeric vector of length two. Specifies the x-axis limits of the forest plots
#' generated when plotting the results. Use \code{"default"} if standard settings should be used (this is the default).
#' @param return.separate.plots Logical. When plotted, should the influence plots be shown as separate plots in lieu
#' of returning them in one overall plot?
#' @param text.scale Positive numeric. Scaling factor for the text geoms used when plotting the results. Values <1 shrink the
#' text, while values >1 increase the text size. Default is \code{1}.
#'
#' @details
#' The function conducts an influence analysis using the "Leave-One-Out" paradigm internally
#' and produces data for four influence diagnostics. Diagnostic plots can be produced by saving the output of the
#' function to an object and plugging it into the \code{plot} function.
#' These diagnostics may be used to determine which study or effect size
#' may have an excessive influence on the overall results of a meta-analysis and/or contribute substantially to
#' the between-study heterogeneity in an analysis. This may be used for outlier detection and to test
#' the robustness of the overall results found in an analysis. Results for four diagnostics are calculated:
#' \itemize{
#' \item \strong{Baujat Plot}: Baujat et al. (2002) proposed a plot to evaluate heterogeneity patterns in
#' a meta-analysis. The x-axis of the Baujat plot shows the overall heterogeneity contribution of each effect size
#' while the y-axis shows the influence of each effect size on the pooled result. The \code{\link[meta]{baujat}}
#' function is called internally to produce the results. Effect sizes or studies with high values
#' on both the x and y-axis may be considered to be influential cases; effect sizes or studies
#' with high heterogeneity contribution (x-axis) and low influence on the overall results can be outliers
#' which might be deleted to reduce the amount of between-study heterogeneity.
#' \item \strong{Influence Characteristics}: Several influence analysis diagnostics
#' proposed by Viechtbauer & Cheung (2010). Results are calculated by an internal call
#' to \code{\link[metafor]{influence.rma.uni}}. In the console output, potentially influential studies are marked
#' with an asterisk (\code{*}). When plotted, effect sizes/studies determined to be influential cases
#' using the "rules of thumb" described in Viechtbauer & Cheung (2010) are shown in red. For further
#' details, see the documentation of the \code{\link[metafor]{influence.rma.uni}} function.
#' \item \strong{Forest Plot for the Leave-One-Out Analysis, sorted by Effect Size}: This
#' displays the effect size and \eqn{I^2}-heterogeneity when omitting one of the \eqn{k} studies each time.
#' The plot is sorted by effect size to determine which studies or effect sizes particularly
#' affect the overall effect size. Results are generated by an internal call to \code{\link[meta]{metainf}}.
#' \item \strong{Forest Plot for the Leave-One-Out Analysis, sorted by \eqn{I^2}}: see above; results are sorted
#' by \eqn{I^2} to determine the study for which exclusion results in the greatest reduction of heterogeneity.
#'}
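#'
#' The diagnostics underlying the influence characteristics and the Leave-One-Out forest plots can
#' also be computed directly (a minimal sketch, assuming a \code{meta} object \code{m}; the
#' fixed-effect case is shown):
#'
#' \preformatted{
#' m.rma = metafor::rma.uni(yi = m$TE, sei = m$seTE, method = "FE")
#' influence(m.rma)                    # Viechtbauer-Cheung influence diagnostics
#' meta::metainf(m, pooled = "fixed")  # leave-one-out results behind the forest plots
#' }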
#'
#' @references Harrer, M., Cuijpers, P., Furukawa, T.A, & Ebert, D. D. (2019).
#' \emph{Doing Meta-Analysis in R: A Hands-on Guide}. DOI: 10.5281/zenodo.2551803. \href{https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/influenceanalyses.html}{Chapter 6.3}
#'
#' DerSimonian R. & Laird N. (1986), Meta-analysis in clinical trials. \emph{Controlled Clinical Trials, 7}, 177–188.
#'
#' Viechtbauer, W., & Cheung, M. W.-L. (2010). Outlier and influence diagnostics for meta-analysis. \emph{Research Synthesis Methods, 1}, 112–125.
#'
#' @author Mathias Harrer & David Daniel Ebert
#'
#' @return A \code{list} object of class \code{influence.analysis} containing the
#' following objects is returned (if results are saved to a variable):
#' \itemize{
#' \item \code{BaujatPlot}: The Baujat plot
#' \item \code{InfluenceCharacteristics}: The Viechtbauer-Cheung influence characteristics plot
#' \item \code{ForestEffectSize}: The forest plot sorted by effect size
#' \item \code{ForestI2}: The forest plot sorted by between-study heterogeneity
#' \item \code{Data}: A \code{data.frame} containing the data used for plotting.
#'}
#' Otherwise, the function prints out (1) the results of the Leave-One-Out Analysis (sorted by \eqn{I^2}),
#' (2) the Viechtbauer-Cheung Influence Diagnostics and (3) Baujat Plot data (sorted by heterogeneity contribution),
#' in this order. Plots can be produced manually by plugging a saved object of class \code{InfluenceAnalysis} generated by
#' the function into the \code{plot} function. It is also possible to only produce one specific plot by
#' specifying the name of the plot as a \code{character} in the second argument of the \code{plot} call (see Examples).
#'
#' @import ggplot2 ggrepel forcats dplyr grid
#' @importFrom gridExtra grid.arrange arrangeGrob
#' @importFrom metafor rma.uni influence.rma.uni
#' @importFrom meta metainf
#' @importFrom graphics abline axis lines mtext par plot points rect segments text
#' @importFrom stats as.formula hat influence ks.test optimize pbinom pchisq pf pnorm pt punif qchisq qf qnorm qt reformulate reorder setNames uniroot
#'
#' @export InfluenceAnalysis
#'
#' @seealso \code{\link[metafor]{influence.rma.uni}}, \code{\link[meta]{metainf}}, \code{\link[meta]{baujat}}
#'
#' @examples
#' # Load 'ThirdWave' data
#' data(ThirdWave)
#'
#' # Create 'meta' meta-analysis object
#' suppressPackageStartupMessages(library(meta))
#' meta = metagen(TE, seTE, studlab = paste(ThirdWave$Author), data=ThirdWave)
#'
#' # Run influence analysis; specify to return separate plots when plotted
#' inf.an = InfluenceAnalysis(meta, return.separate.plots = TRUE)
#'
#' # Show results in console
#' inf.an
#'
#' # Generate all plots
#' plot(inf.an)
#'
#' # For baujat plot
#' plot(inf.an, "baujat")
#'
#' # For influence diagnostics plot
#' plot(inf.an, "influence")
#'
#' # For forest plot sorted by effect size
#' plot(inf.an, "ES")
#'
#' # For forest plot sorted by I-squared
#' plot(inf.an, "I2")
### Influence analysis function for meta-analysis objects generated by 'meta' functions
InfluenceAnalysis = function(x, random = FALSE, subplot.heights = c(30, 18), subplot.widths = c(30, 30),
forest.lims = "default", return.separate.plots = FALSE, text.scale = 1) {
# Validate
x = x
if (class(x)[1] %in% c("meta", "metabin", "metagen", "metacont", "metacor", "metainc", "metaprop", "metarate")) {
} else {
stop("Object 'x' must be of class 'meta', 'metabin', 'metagen', 'metacont', 'metacor', 'metainc', or 'metaprop'")
}
n.studies = x$k
TE = x$TE
seTE = x$seTE
random = random
if (random %in% c(TRUE, FALSE)) {
} else {
stop("'random' must be set to either TRUE or FALSE.")
}
forest.lims = forest.lims
if (forest.lims[1] == "default" | (class(forest.lims[1]) == "numeric" & class(forest.lims[2]) == "numeric")) {
} else {
stop("'forest.lims' must either be 'default' or two concatenated numerics for ymin and ymax.")
}
return.seperate.plots = return.separate.plots
if (return.seperate.plots %in% c(TRUE, FALSE)) {
} else {
stop("'return.separate.plots' must be set to either TRUE or FALSE.")
}
heights = subplot.heights
if (class(heights[1]) == "numeric" & class(heights[2]) == "numeric") {
} else {
stop("'subplot.heights' must be two concatenated numerics.")
}
widths = subplot.widths
if (class(widths[1]) == "numeric" & class(widths[2]) == "numeric") {
} else {
stop("'widths' must be two concatenated numerics.")
}
text.scale = text.scale
if (text.scale > 0) {
} else {
stop("'text.scale' must be a single number greater 0.")
}
if (length(unique(x$studlab)) != length(x$studlab)) {
stop("'Study labels in the 'meta' object must be unique.")
}
########################################
cat("[===============")
########################################
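    # Map the 'random' argument onto the estimator settings used internally:
    # 'method.rma' for metafor::rma.uni and 'method.meta' for meta::metainf.
    # If the tau-squared estimator of the meta object is not supported by metafor,
    # the DerSimonian-Laird ('DL') estimator is used as a fallback.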
if (random == FALSE) {
method.rma = "FE"
method.meta = "fixed"
} else {
if (x$method.tau %in% c("DL", "HE", "SJ", "ML", "REML", "EB", "HS", "GENQ", "PM")) {
method.rma = x$method.tau
} else {
method.rma = "DL"
cat("Tau estimator is unkown to metafor::rma; DerSimonian-Laird ('DL') estimator used.")
}
method.meta = "random"
}
# Perform Meta-Analysis using metafor, get influence results
res = metafor::rma.uni(yi = TE, sei = seTE, measure = "GEN", data = x, method = method.rma, slab = studlab)
metafor.inf = influence(res)
# Recode inf
metafor.inf$is.infl = ifelse(metafor.inf$is.infl == TRUE, "yes", "no")
cheungviechtdata = cbind(study = substr(rownames(as.data.frame(metafor.inf$inf)), 1, 3), as.data.frame(metafor.inf$inf), is.infl = metafor.inf$is.infl)
rownames(cheungviechtdata) = NULL
if (length(unique(cheungviechtdata$study)) < length(cheungviechtdata$study)) {
i = 3
while (length(unique(cheungviechtdata$study)) < length(cheungviechtdata$study)) {
i = i + 1
cheungviechtdata$study = substr(rownames(as.data.frame(metafor.inf$inf)), 1, i)
}
}
# If study labels are only numeric: reset level indexing
if (sum(grepl("[A-Za-z]", levels(as.factor(cheungviechtdata$study)), perl = T)) == 0){
cheungviechtdata$study = factor(cheungviechtdata$study, levels = sort(as.numeric(levels(cheungviechtdata$study))))
}
########################################
cat("===============")
########################################
scalefun = function(x) sprintf("%.1f", x)
cheungviechtdata = as.data.frame(cheungviechtdata)
# Generate plots
rstudent.plot = ggplot2::ggplot(cheungviechtdata, aes(y = rstudent, x = study, color = is.infl, group = 1)) +
geom_line(color = "black") + geom_point(size = 2) + scale_color_manual(values = c("blue", "red")) +
theme_minimal() + theme(axis.title.x = element_blank(), legend.position = "none", axis.text.x = element_text(angle = 45,
size = 5), axis.title.y = element_text(size = 7), axis.text.y = element_text(size = 5)) + ylab("Stand. Residual") +
scale_y_continuous(labels = scalefun)
dffits.thresh = 3 * sqrt(metafor.inf$p/(metafor.inf$k - metafor.inf$p))
dffits.plot = ggplot2::ggplot(cheungviechtdata, aes(y = dffits, x = study, color = is.infl, group = 1)) +
geom_line(color = "black") + geom_point(size = 2) + scale_color_manual(values = c("blue", "red")) +
theme_minimal() + theme(axis.title.x = element_blank(), legend.position = "none", axis.text.x = element_text(angle = 45,
size = 5), axis.title.y = element_text(size = 7), axis.text.y = element_text(size = 5)) + ylab("DFFITS") +
scale_y_continuous(labels = scalefun)
# geom_hline(yintercept = dffits.thresh, linetype='dashed', color='black')
cook.d.plot = ggplot2::ggplot(cheungviechtdata, aes(y = cook.d, x = study, color = is.infl, group = 1)) +
geom_line(color = "black") + geom_point(size = 2) + scale_color_manual(values = c("blue", "red")) +
theme_minimal() + theme(axis.title.x = element_blank(), legend.position = "none", axis.text.x = element_text(angle = 45,
size = 5), axis.title.y = element_text(size = 7), axis.text.y = element_text(size = 5)) + ylab("Cook's Distance") +
scale_y_continuous(labels = scalefun)
cov.r.plot = ggplot2::ggplot(cheungviechtdata, aes(y = cov.r, x = study, color = is.infl, group = 1)) +
geom_line(color = "black") + geom_point(size = 2) + scale_color_manual(values = c("blue", "red")) +
theme_minimal() + theme(axis.title.x = element_blank(), legend.position = "none", axis.text.x = element_text(angle = 45,
size = 5), axis.title.y = element_text(size = 7), axis.text.y = element_text(size = 5)) + ylab("Covariance Ratio") +
scale_y_continuous(labels = scalefun)
tau2.del.plot = ggplot2::ggplot(cheungviechtdata, aes(y = tau2.del, x = study, color = is.infl, group = 1)) +
geom_line(color = "black") + geom_point(size = 2) + scale_color_manual(values = c("blue", "red")) +
theme_minimal() + theme(axis.title.x = element_blank(), legend.position = "none", axis.text.x = element_text(angle = 45,
size = 5), axis.title.y = element_text(size = 7), axis.text.y = element_text(size = 5)) + ylab("tau-squared (L-0-0)") +
scale_y_continuous(labels = scalefun)
QE.del.plot = ggplot2::ggplot(cheungviechtdata, aes(y = QE.del, x = study, color = is.infl, group = 1)) +
geom_line(color = "black") + geom_point(size = 2) + scale_color_manual(values = c("blue", "red")) +
theme_minimal() + theme(axis.title.x = element_blank(), legend.position = "none", axis.text.x = element_text(angle = 45,
size = 5), axis.title.y = element_text(size = 7), axis.text.y = element_text(size = 5)) + ylab("Q (L-0-0)") +
scale_y_continuous(labels = scalefun)
hat.thresh = 3 * (metafor.inf$p/metafor.inf$k)
hat.plot = ggplot2::ggplot(cheungviechtdata, aes(y = hat, x = study, color = is.infl, group = 1)) + geom_line(color = "black") +
geom_point(size = 2) + scale_color_manual(values = c("blue", "red")) + theme_minimal() + theme(axis.title.x = element_blank(),
legend.position = "none", axis.text.x = element_text(angle = 45, size = 5), axis.title.y = element_text(size = 7),
axis.text.y = element_text(size = 5)) + ylab("hat") + scale_y_continuous(labels = scalefun)
# geom_hline(yintercept = hat.thresh, linetype='dashed', color='black')
weight.plot = ggplot2::ggplot(cheungviechtdata, aes(y = weight, x = study, color = is.infl, group = 1)) +
geom_line(color = "black") + geom_point(size = 2) + scale_color_manual(values = c("blue", "red")) +
theme_minimal() + theme(axis.title.x = element_blank(), legend.position = "none", axis.text.x = element_text(angle = 45,
size = 5), axis.title.y = element_text(size = 7), axis.text.y = element_text(size = 5)) + ylab("weight") +
scale_y_continuous(labels = scalefun)
rma.influence.plot = arrangeGrob(rstudent.plot, dffits.plot, cook.d.plot, cov.r.plot, tau2.del.plot, QE.del.plot,
hat.plot, weight.plot, ncol = 2)
# Perform Influence Analysis on meta object, generate forests
meta.inf = meta::metainf(x, pooled = method.meta)
if (x$sm %in% c("RR", "OR", "IRR")) {
effect = x$sm
n.studies = n.studies
# Create Sortdat data set for sorting
sortdat = data.frame(studlab = meta.inf$studlab, mean = exp(meta.inf$TE), lower = exp(meta.inf$lower),
upper = exp(meta.inf$upper), i2 = meta.inf$I2)
sortdat2 = sortdat[1:(nrow(sortdat) - 2), ]
lastline = sortdat[nrow(sortdat), ]
# Change summary label
if (random == TRUE) {
lastline[1] = "Random-Effects Model"
} else {
lastline[1] = "Fixed-Effect Model"
}
for (i in 2:4) {
lastline[i] = format(round(lastline[i], 2), nsmall = 2)
}
# Sort
sortdat.es = sortdat2[order(sortdat2$mean), ]
sortdat.es = sortdat.es %>% mutate(studlab = forcats::fct_reorder(studlab, -mean))
sortdat.i2 = sortdat2[order(sortdat2$i2), ]
sortdat.i2 = sortdat.i2 %>% mutate(studlab = forcats::fct_reorder(studlab, -i2))
# Generate Forest Plots
if (forest.lims[1] == "default") {
if (min(sortdat.es$lower) > 0.5){
min = 0.5
} else {
min = NA
}
if (max(sortdat.es$upper) <= 1){
max = 1.2
} else {
max = round(max(sortdat.es$upper) + 0.5, 0)
}
} else {
if (forest.lims[1] <= 0){
min = NA
} else {
min = forest.lims[1]
}
max = forest.lims[2]
}
if (method.meta == "fixed"){
plot.sum.effect = exp(x$TE.fixed)
plot.sum.lower = exp(x$lower.fixed)
plot.sum.upper = exp(x$upper.fixed)
} else {
plot.sum.effect = exp(x$TE.random)
plot.sum.lower = exp(x$lower.random)
plot.sum.upper = exp(x$upper.random)
}
########################################
cat("===============")
########################################
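    # Leave-one-out forest plots: each row shows the pooled effect when the respective study is omitted;
    # the shaded band and dotted line mark the overall pooled effect and its confidence interval.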
forest.es = ggplot(sortdat.es, aes(x = studlab, y = mean, ymin = lower, ymax = upper)) + geom_pointrange() +
geom_text(aes(label = paste(format(round(mean, 2), nsmall = 2), " [", format(round(lower, 2),
nsmall = 2), ";", format(round(upper, 2), nsmall = 2), "] ", "; I2=", format(round(i2, 2),
nsmall = 2), sep = ""), y = Inf), hjust = "inward", size = 2 * text.scale) + geom_hline(yintercept = 1,
color = "blue") + ylab(paste(effect, " (", as.character(lastline$studlab), ")", sep = "")) + ggtitle("Sorted by Effect Size") +
coord_flip() + theme_minimal() + theme(axis.title.y = element_blank(), axis.title.x = element_text(color = "black",
size = 12, face = "bold"), axis.text.y = element_text(color = "black", size = 9 * text.scale),
plot.title = element_text(face = "bold", hjust = 0.5), axis.line.x = element_line(color = "black"),
axis.ticks.x = element_line(color = "black"), axis.text.x = element_text(color = "black", size = 9 *
text.scale)) + scale_y_continuous(trans = "log2", limits = c(min, max)) +
geom_rect(aes(ymin=plot.sum.lower, ymax=plot.sum.upper, xmin=0, xmax=Inf), alpha=0.08, fill="lightgreen", color=NA) + geom_hline(yintercept = plot.sum.effect, color = "darkgreen", linetype="dotted", size=0.5) + geom_pointrange()
forest.i2 = ggplot(sortdat.i2, aes(x = studlab, y = mean, ymin = lower, ymax = upper)) + geom_pointrange() +
geom_text(aes(label = paste("I2=", format(round(i2, 2), nsmall = 2), "; ", format(round(mean,
2), nsmall = 2), " [", format(round(lower, 2), nsmall = 2), ";", format(round(upper, 2), nsmall = 2),
"] ", sep = ""), y = Inf), hjust = "inward", size = 2 * text.scale) + geom_hline(yintercept = 1,
color = "blue") + ylab(paste(effect, " (", as.character(lastline$studlab), ")", sep = "")) + ggtitle("Sorted by I-squared") +
coord_flip() + theme_minimal() + theme(axis.title.y = element_blank(), axis.title.x = element_text(color = "black",
size = 12, face = "bold"), axis.text.y = element_text(color = "black", size = 9 * text.scale),
plot.title = element_text(face = "bold", hjust = 0.5), axis.line.x = element_line(color = "black"),
axis.ticks.x = element_line(color = "black"), axis.text.x = element_text(color = "black", size = 9 *
text.scale)) + scale_y_continuous(trans = "log2", limits = c(min, max)) +
geom_rect(aes(ymin=plot.sum.lower, ymax=plot.sum.upper, xmin=0, xmax=Inf), alpha=0.08, fill="lightgreen", color=NA) + geom_hline(yintercept = plot.sum.effect, color = "darkgreen", linetype="dotted", size=0.5) + geom_pointrange()
} else if (class(x)[1] %in% c("metacor", "metaprop", "metarate")) {
effect = x$sm
n.studies = n.studies
# Create Sortdat data set for sorting
sortdat = data.frame(studlab = meta.inf$studlab, mean = meta.inf$TE, lower = meta.inf$lower,
upper = meta.inf$upper, i2 = meta.inf$I2)
sortdat2 = sortdat[1:(nrow(sortdat) - 2), ]
lastline = sortdat[nrow(sortdat), ]
# Change summary label
if (random == TRUE) {
lastline[1] = "Random-Effects Model"
} else {
lastline[1] = "Fixed-Effect Model"
}
for (i in 2:4) {
lastline[i] = format(round(lastline[i], 2), nsmall = 2)
}
# Sort
sortdat.es = sortdat2[order(sortdat2$mean), ]
sortdat.es = sortdat.es %>% mutate(studlab = forcats::fct_reorder(studlab, -mean))
sortdat.i2 = sortdat2[order(sortdat2$i2), ]
sortdat.i2 = sortdat.i2 %>% mutate(studlab = forcats::fct_reorder(studlab, -i2))
# Backtransform
backtransformer = function(x, sm, n){
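            # Back-transform pooled estimates to their natural scale (e.g., Fisher's z to r,
            # logit/arcsine-transformed proportions to proportions, log rates to rates),
            # mirroring the back-transformation helpers used internally by the meta package.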
# Define functions
z2cor = function(x)
{
res <- (exp(2 * x) - 1)/(exp(2 * x) + 1)
res
}
logit2p = function(x)
{
res <- 1/(1 + exp(-x))
res
}
asin2p = function (x, n = NULL, value = "mean", warn = TRUE)
{
if (all(is.na(x)))
return(x)
if (is.null(n)) {
minimum <- asin(sqrt(0))
maximum <- asin(sqrt(1))
}
else {
minimum <- 0.5 * (asin(sqrt(0/(n + 1))) + asin(sqrt((0 +
1)/(n + 1))))
maximum <- 0.5 * (asin(sqrt(n/(n + 1))) + asin(sqrt((n +
1)/(n + 1))))
}
sel0 <- x < minimum
sel1 <- x > maximum
if (any(sel0, na.rm = TRUE)) {
if (is.null(n)) {
if (warn)
warning("Negative value for ", if (length(x) >
1)
"at least one ", if (value == "mean")
"transformed proportion using arcsine transformation.\n Proportion set to 0.",
if (value == "lower")
"lower confidence limit using arcsine transformation.\n Lower confidence limit set to 0.",
if (value == "upper")
"upper confidence limit using arcsine transformation.\n Upper confidence limit set to 0.",
sep = "")
}
else {
if (warn)
warning("Too small value for ", if (length(x) >
1)
"at least one ", if (value == "mean")
"transformed proportion using Freeman-Tukey double arcsine transformation.\n Proportion set to 0.",
if (value == "lower")
"lower confidence limit using Freeman-Tukey double arcsine transformation.\n Lower confidence limit set to 0.",
if (value == "upper")
"upper confidence limit using Freeman-Tukey double arcsine transformation.\n Upper confidence limit set to 0.",
sep = "")
}
}
if (any(sel1, na.rm = TRUE)) {
if (is.null(n)) {
if (warn)
warning("Too large value for ", if (length(x) >
1)
"at least one ", if (value == "mean")
"transformed proportion using arcsine transformation.\n Proportion set to 1.",
if (value == "lower")
"lower confidence limit using arcsine transformation.\n Lower confidence limit set to 1.",
if (value == "upper")
"upper confidence limit using arcsine transformation.\n Upper confidence limit set to 1.",
sep = "")
}
else {
if (warn)
warning("Too large value for ", if (length(x) >
1)
"at least one ", if (value == "mean")
"transformed proportion using Freeman-Tukey double arcsine transformation.\n Proportion set to 1.",
if (value == "lower")
"lower confidence limit using Freeman-Tukey double arcsine transformation.\n Lower confidence limit set to 1.",
if (value == "upper")
"upper confidence limit using Freeman-Tukey double arcsine transformation.\n Upper confidence limit set to 1.",
sep = "")
}
}
res <- rep(NA, length(x))
sel <- !(sel0 | sel1)
sel <- !is.na(sel) & sel
res[sel0] <- 0
res[sel1] <- 1
if (is.null(n)) {
res[sel] <- sin(x[sel])^2
}
else {
res[sel] <- 0.5 * (1 - sign(cos(2 * x[sel])) * sqrt(1 -
(sin(2 * x[sel]) + (sin(2 * x[sel]) - 1/sin(2 * x[sel]))/n[sel])^2))
}
res
}
asin2ir = function (x, time = NULL, value = "mean", warn = TRUE)
{
if (all(is.na(x)))
return(x)
minimum <- 0.5 * (sqrt(0/time) + sqrt((0 + 1)/time))
sel0 <- x < minimum
if (any(sel0, na.rm = TRUE)) {
if (warn)
warning("Too small value for ", if (length(x) > 1)
"at least one ", if (value == "mean")
"transformed proportion using Freeman-Tukey double arcsine transformation.\n Rate set to 0.",
if (value == "lower")
"lower confidence limit using Freeman-Tukey double arcsine transformation.\n Lower confidence limit set to 0.",
if (value == "upper")
"upper confidence limit using Freeman-Tukey double arcsine transformation.\n Upper confidence limit set to 0.",
sep = "")
}
res <- rep(NA, length(x))
sel <- !sel0
sel <- !is.na(sel) & sel
res[sel0] <- 0
res[sel] <- (1/time[sel] - 8 * x[sel]^2 + 16 * time[sel] *
x[sel]^4)/(16 * x[sel]^2 * time[sel])
res[res < 0] <- 0
res
}
if(sm == "COR"){
res = x
}
if(sm == "IR"){
res = x
}
if(sm == "PRAW"){
res = x
}
if(sm == "ZCOR"){
res = z2cor(x)
}
if(sm == "PLOGIT"){
res = logit2p(x)
}
if (sm == "PAS"){
res <- asin2p(x, value = value, warn = FALSE)
}
if (sm == "PFT"){
res = asin2p(x, n, value = value, warn = FALSE)
}
if (sm == "IRS"){
res = x^2
}
if (sm == "IRFT"){
res = asin2ir(x, time=n, value = value, warn = FALSE)
}
if (sm == "IRLN"){
res = exp(x)
}
if (sm == "PLN"){
res = exp(x)
}
res
}
if (class(x)[1] %in% c("metaprop", "metacor")){
n.h.m = meta.inf$n.harmonic.mean[1:(length(meta.inf$n.harmonic.mean)-2)]
n.h.m.tot = meta.inf$n.harmonic.mean[length(meta.inf$n.harmonic.mean)]
n.h.m.es = n.h.m[order(sortdat.es$mean)]
n.h.m.i2 = n.h.m[order(sortdat.i2$mean)]
sortdat.es$mean = backtransformer(sortdat.es$mean, sm=effect, n=n.h.m.es)
sortdat.es$lower = backtransformer(sortdat.es$lower, sm=effect, n=n.h.m.es)
sortdat.es$upper = backtransformer(sortdat.es$upper, sm=effect, n=n.h.m.es)
sortdat.i2$mean = backtransformer(sortdat.i2$mean, sm=effect, n=n.h.m.i2)
sortdat.i2$lower = backtransformer(sortdat.i2$lower, sm=effect, n=n.h.m.i2)
sortdat.i2$upper = backtransformer(sortdat.i2$upper, sm=effect, n=n.h.m.i2)
if (method.meta == "fixed"){
plot.sum.effect = backtransformer(x$TE.fixed, sm=effect, n=n.h.m.tot)
plot.sum.lower = backtransformer(x$lower.fixed, sm=effect, n=n.h.m.tot)
plot.sum.upper = backtransformer(x$upper.fixed, sm=effect, n=n.h.m.tot)
} else {
plot.sum.effect = backtransformer(x$TE.random, sm=effect, n=n.h.m.tot)
plot.sum.lower = backtransformer(x$lower.random, sm=effect, n=n.h.m.tot)
plot.sum.upper = backtransformer(x$upper.random, sm=effect, n=n.h.m.tot)
}
} else {
if(meta.inf$sm == "IRFT"){
n.h.m = meta.inf$t.harmonic.mean[1:(length(meta.inf$t.harmonic.mean)-2)]
n.h.m.es = n.h.m[order(sortdat.es$mean)]
n.h.m.i2 = n.h.m[order(sortdat.i2$mean)]
n.h.m.tot = meta.inf$t.harmonic.mean[length(meta.inf$t.harmonic.mean)]
sortdat.es$mean = backtransformer(sortdat.es$mean, sm=effect, n=n.h.m.es)
sortdat.es$lower = backtransformer(sortdat.es$lower, sm=effect, n=n.h.m.es)
sortdat.es$upper = backtransformer(sortdat.es$upper, sm=effect, n=n.h.m.es)
sortdat.i2$mean = backtransformer(sortdat.i2$mean, sm=effect, n=n.h.m.i2)
sortdat.i2$lower = backtransformer(sortdat.i2$lower, sm=effect, n=n.h.m.i2)
sortdat.i2$upper = backtransformer(sortdat.i2$upper, sm=effect, n=n.h.m.i2)
if (method.meta == "fixed"){
plot.sum.effect = backtransformer(x$TE.fixed, sm=effect, n=n.h.m.tot)
plot.sum.lower = backtransformer(x$lower.fixed, sm=effect, n=n.h.m.tot)
plot.sum.upper = backtransformer(x$upper.fixed, sm=effect, n=n.h.m.tot)
} else {
plot.sum.effect = backtransformer(x$TE.random, sm=effect, n=n.h.m.tot)
plot.sum.lower = backtransformer(x$lower.random, sm=effect, n=n.h.m.tot)
plot.sum.upper = backtransformer(x$upper.random, sm=effect, n=n.h.m.tot)
}
} else {
n.h.m.tot = meta.inf$n.harmonic.mean[length(meta.inf$n.harmonic.mean)]
sortdat.es$mean = backtransformer(sortdat.es$mean, sm=effect, n=NULL)
sortdat.es$lower = backtransformer(sortdat.es$lower, sm=effect, n=NULL)
sortdat.es$upper = backtransformer(sortdat.es$upper, sm=effect, n=NULL)
sortdat.i2$mean = backtransformer(sortdat.i2$mean, sm=effect, n=NULL)
sortdat.i2$lower = backtransformer(sortdat.i2$lower, sm=effect, n=NULL)
sortdat.i2$upper = backtransformer(sortdat.i2$upper, sm=effect, n=NULL)
if (method.meta == "fixed"){
plot.sum.effect = backtransformer(x$TE.fixed, sm=effect, n=n.h.m.tot)
plot.sum.lower = backtransformer(x$lower.fixed, sm=effect, n=n.h.m.tot)
plot.sum.upper = backtransformer(x$upper.fixed, sm=effect, n=n.h.m.tot)
} else {
plot.sum.effect = backtransformer(x$TE.random, sm=effect, n=n.h.m.tot)
plot.sum.lower = backtransformer(x$lower.random, sm=effect, n=n.h.m.tot)
plot.sum.upper = backtransformer(x$upper.random, sm=effect, n=n.h.m.tot)
}
}
}
# Generate Forest Plots
if (forest.lims[1] == "default") {
if (class(x)[1] == "metacor"){
min = min(sortdat.es$mean)-0.2
} else {
min = -0.2
}
max = max(sortdat.es$mean) + 0.5
} else {
min = forest.lims[1]
max = forest.lims[2]
}
# Set ggtitles
if (class(x)[1] == "metaprop"){
ggtitl = as.character("Proportion")
} else if (class(x)[1] == "metacor"){
ggtitl = as.character("Correlation")
} else {
ggtitl = as.character("Rate")
}
########################################
cat("===============")
########################################
forest.es = ggplot(sortdat.es, aes(x = studlab, y = mean, ymin = lower, ymax = upper)) + geom_pointrange() +
geom_text(aes(label = paste(format(round(mean, 2), nsmall = 2), " [", format(round(lower, 2),
nsmall = 2), ";", format(round(upper, 2), nsmall = 2), "] ", "; I2=", format(round(i2, 2),
nsmall = 2), sep = ""), y = Inf), hjust = "inward", size = 2 * text.scale) + geom_hline(yintercept = 0,
color = "blue") + ylab(paste(ggtitl, " (", as.character(lastline$studlab), ")", sep = "")) + ggtitle(paste("Sorted by",ggtitl)) +
coord_flip() + theme_minimal() + theme(axis.title.y = element_blank(), axis.title.x = element_text(color = "black",
size = 12, face = "bold"), axis.text.y = element_text(color = "black", size = 9 * text.scale),
plot.title = element_text(face = "bold", hjust = 0.5), axis.line.x = element_line(color = "black"),
axis.ticks.x = element_line(color = "black"), axis.text.x = element_text(color = "black", size = 9 *
text.scale)) + scale_y_continuous(limits = c(min, max)) +
geom_rect(aes(ymin=plot.sum.lower, ymax=plot.sum.upper, xmin=0, xmax=Inf), alpha=0.08, fill="lightgreen", color=NA) + geom_hline(yintercept = plot.sum.effect, color = "darkgreen", linetype="dotted", size=0.5) + geom_pointrange()
forest.i2 = ggplot(sortdat.i2, aes(x = studlab, y = mean, ymin = lower, ymax = upper)) + geom_pointrange() +
geom_text(aes(label = paste("I2=", format(round(i2, 2), nsmall = 2), "; ", format(round(mean,
2), nsmall = 2), " [", format(round(lower, 2), nsmall = 2), ";", format(round(upper, 2), nsmall = 2),
"] ", sep = ""), y = Inf), hjust = "inward", size = 2 * text.scale) + geom_hline(yintercept = 0,
color = "blue") + ylab(paste(ggtitl, " (", as.character(lastline$studlab), ")", sep = "")) + ggtitle("Sorted by I-squared") +
coord_flip() + theme_minimal() + theme(axis.title.y = element_blank(), axis.title.x = element_text(color = "black",
size = 12, face = "bold"), axis.text.y = element_text(color = "black", size = 9 * text.scale),
plot.title = element_text(face = "bold", hjust = 0.5), axis.line.x = element_line(color = "black"),
axis.ticks.x = element_line(color = "black"), axis.text.x = element_text(color = "black", size = 9 *
text.scale)) + scale_y_continuous(limits = c(min, max)) +
geom_rect(aes(ymin=plot.sum.lower, ymax=plot.sum.upper, xmin=0, xmax=Inf), alpha=0.08, fill="lightgreen", color=NA) + geom_hline(yintercept = plot.sum.effect, color = "darkgreen", linetype="dotted", size=0.5) + geom_pointrange()
} else {
# Create Sortdat data set for sorting
sortdat = data.frame(studlab = meta.inf$studlab, mean = meta.inf$TE, lower = meta.inf$lower, upper = meta.inf$upper,
i2 = meta.inf$I2)
sortdat2 = sortdat[1:(nrow(sortdat) - 2), ]
lastline = sortdat[nrow(sortdat), ]
# Change summary label
if (random == TRUE) {
lastline[1] = "Random-Effects Model"
} else {
lastline[1] = "Fixed-Effect Model"
}
for (i in 2:4) {
lastline[i] = format(round(lastline[i], 2), nsmall = 2)
}
# Sort
sortdat.es = sortdat2[order(sortdat2$mean), ]
sortdat.es = sortdat.es %>% mutate(studlab = forcats::fct_reorder(studlab, -mean))
sortdat.i2 = sortdat2[order(sortdat2$i2), ]
sortdat.i2 = sortdat.i2 %>% mutate(studlab = forcats::fct_reorder(studlab, -i2))
# Generate Forest Plots
if (forest.lims[1] == "default") {
min = round(min(sortdat.es$lower) - 0.1, 2)
max = round(max(sortdat.es$upper) + 0.3, 2)
} else {
min = forest.lims[1]
max = forest.lims[2]
}
if (method.meta == "fixed"){
plot.sum.effect = x$TE.fixed
plot.sum.lower = x$lower.fixed
plot.sum.upper = x$upper.fixed
} else {
plot.sum.effect = x$TE.random
plot.sum.lower = x$lower.random
plot.sum.upper = x$upper.random
}
########################################
cat("===============")
########################################
forest.es = ggplot(sortdat.es, aes(x = studlab, y = mean, ymin = lower, ymax = upper)) + geom_pointrange() +
geom_text(aes(label = paste(format(round(mean, 2), nsmall = 2), " [", format(round(lower, 2),
nsmall = 2), ";", format(round(upper, 2), nsmall = 2), "] ", "; I2=", format(round(i2, 2),
nsmall = 2), sep = ""), y = Inf), hjust = "inward", size = 2 * text.scale) + geom_hline(yintercept = 0,
color = "blue") + ylim(min, max) + ylab(paste("Effect Size (", as.character(lastline$studlab),
")", sep = "")) + ggtitle("Sorted by Effect Size") + coord_flip() + theme_minimal() + theme(axis.title.y = element_blank(),
axis.title.x = element_text(color = "black", size = 12, face = "bold"), axis.text.y = element_text(color = "black",
size = 9 * text.scale), plot.title = element_text(face = "bold", hjust = 0.5), axis.line.x = element_line(color = "black"),
axis.ticks.x = element_line(color = "black"), axis.text.x = element_text(color = "black", size = 9 *
text.scale)) +
geom_rect(aes(ymin=plot.sum.lower, ymax=plot.sum.upper, xmin=0, xmax=Inf), alpha=0.08, fill="lightgreen", color=NA) + geom_hline(yintercept = plot.sum.effect, color = "darkgreen", linetype="dotted", size=0.5) + geom_pointrange()
forest.i2 = ggplot(sortdat.i2, aes(x = studlab, y = mean, ymin = lower, ymax = upper)) + geom_pointrange() +
geom_text(aes(label = paste("I2=", format(round(i2, 2), nsmall = 2), "; ", format(round(mean,
2), nsmall = 2), " [", format(round(lower, 2), nsmall = 2), ";", format(round(upper, 2), nsmall = 2),
"] ", sep = ""), y = Inf), hjust = "inward", size = 2) + geom_hline(yintercept = 0, color = "blue") +
ylim(min, max) + ylab(paste("Effect Size (", as.character(lastline$studlab), ")", sep = "")) +
ggtitle("Sorted by I-squared") + coord_flip() + theme_minimal() + theme(axis.title.y = element_blank(),
axis.title.x = element_text(color = "black", size = 12, face = "bold"), axis.text.y = element_text(color = "black",
size = 9 * text.scale), plot.title = element_text(face = "bold", hjust = 0.5), axis.line.x = element_line(color = "black"),
axis.ticks.x = element_line(color = "black"), axis.text.x = element_text(color = "black", size = 9 *
text.scale)) +
geom_rect(aes(ymin=plot.sum.lower, ymax=plot.sum.upper, xmin=0, xmax=Inf), alpha=0.08, fill="lightgreen", color=NA) + geom_hline(yintercept = plot.sum.effect, color = "darkgreen", linetype="dotted", size=0.5) + geom_pointrange()
}
    # Generate Baujat plot: define the baujat.silent helper first
baujat.silent = function(x, yscale = 1, xlim, ylim, ...) {
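        # Recompute Baujat plot coordinates without drawing: x is each study's contribution to overall
        # heterogeneity (squared standardized deviation from the fixed-effect estimate), y is its
        # influence on the pooled result (squared change in the leave-one-out estimate, scaled by its variance).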
TE = x$TE
seTE = x$seTE
TE.fixed = metagen(TE, seTE, exclude = x$exclude)$TE.fixed
k = x$k
studlab = x$studlab
SE = x$seTE
m.inf = metainf(x, pooled = "fixed")
TE.inf = m.inf$TE[1:length(TE)]
seTE.inf = m.inf$seTE[1:length(TE)]
ys = (TE.inf - TE.fixed)^2/seTE.inf^2
ys = ys * yscale
xs = (TE - TE.fixed)^2/seTE^2
if (!is.null(x$exclude))
xs[x$exclude] = 0
res = data.frame(studlab = studlab, x = xs, y = ys, se = SE)
return(res)
}
########################################
cat("===============")
########################################
bjt = baujat.silent(x)
BaujatPlot = ggplot(bjt, aes(x = x, y = y)) + geom_point(aes(size = (1/se)), color = "blue", alpha = 0.75) +
geom_rug(color = "lightgray", alpha = 0.5) + theme(legend.position = "none") + xlab("Overall heterogeneity contribution") +
ylab("Influence on pooled result") + geom_label_repel(label = bjt$studlab, color = "black", size = 1.5 *
text.scale, alpha = 0.7)
# Return
########################################
cat("===============] DONE \n")
########################################
# Prepare data for return
return.data = cbind(sortdat2, cheungviechtdata[, 2:ncol(cheungviechtdata)], HetContrib = bjt$x, InfluenceEffectSize = bjt$y)
if (x$sm %in% c("RR", "OR", "IRR")) {colnames(return.data)[1:2] = c("Author", effect)}
else {colnames(return.data)[1:2] = c("Author", "effect")}
returnlist = list(BaujatPlot = BaujatPlot,
InfluenceCharacteristics = rma.influence.plot,
ForestEffectSize = forest.es,
ForestI2 = forest.i2,
Data = return.data) %>% suppressMessages() %>% suppressWarnings()
if (return.separate.plots == T){class(returnlist) = c("InfluenceAnalysis", "rsp")}
if (return.separate.plots == F){class(returnlist) = c("InfluenceAnalysis", "rsp.null")}
returnlist
}
#' Pool the results of two treatment arms
#'
#' This function allows users to pool the mean, standard deviation and sample size of two experimental groups
#' of a study. Results of two treatment arms may be pooled to mitigate the unit-of-analysis problem
#' and avoid "double-counting" in meta-analyses in which studies with more than two experimental groups
#' are included.
#'
#' @usage pool.groups(n1, n2, m1, m2, sd1, sd2)
#'
#' @param n1 Numeric vector or single number. The number of participants in arm 1.
#' @param n2 Numeric vector or single number. The number of participants in arm 2.
#' @param m1 Numeric vector or single number. The mean in arm 1.
#' @param m2 Numeric vector or single number. The mean in arm 2.
#' @param sd1 Numeric vector or single number. The standard deviation (\eqn{SD}) in arm 1.
#' @param sd2 Numeric vector or single number. The standard deviation (\eqn{SD}) in arm 2.
#'
#' @details Many randomized controlled trials include not only a single intervention
#' and control group, but compare the effect of two or more interventions to a control
#' group. It might be tempting in such a scenario to simply include all comparisons
#' between the intervention groups and the control group of a study in one meta-analysis.
#' Yet, researchers should abstain from this practice, as this would mean that the
#' control group is used twice in the meta-analysis, thus “double-counting” the
#' participants in the control group. This results in a \strong{unit-of-analysis error}, as
#' the effect sizes are correlated, and thus not independent, but are treated as if they
#' stemmed from independent samples.
#'
#' One way to deal with this is to synthesize the results of the intervention arms to
#' obtain one single comparison to the control group. Despite its practical limitations
#' (sometimes, this would mean synthesizing the results from extremely different types
#' of interventions), this procedure does get rid of the unit-of-analysis error problem.
#'
#' To synthesize the pooled effect size data (pooled Mean, Standard Deviation and \eqn{N}), the
#' following formulae are used:
#'
#'\deqn{N_{pooled}=N_1+N_2}
#'\deqn{M_{pooled}=\frac{N_1M_1+N_2M_2}{N_1+N_2}}
#'\deqn{SD_{pooled} = \sqrt{\frac{(N_1-1)SD^{2}_{1}+ (N_2-1)SD^{2}_{2}+\frac{N_1N_2}{N_1+N_2}(M^{2}_1+M^{2}_2-2M_1M_2)} {N_1+N_2-1}}}
#'
#' \strong{What should I do when a study has more than two intervention groups?}
#'
#' If a study has more than two intervention groups you want to synthesize
#' (e.g., four arms, with three distinct intervention arms), you can pool the effect
#' size data for the first two interventions, and then synthesize the pooled data you
#' calculated with the data from the third group (see the second example below).
#'
#' @references Harrer, M., Cuijpers, P., Furukawa, T.A, & Ebert, D. D. (2019).
#' \emph{Doing Meta-Analysis in R: A Hands-on Guide}. DOI: 10.5281/zenodo.2551803. \href{https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/i.html}{Chapter 13.9}
#'
#' @author Mathias Harrer & David Daniel Ebert
#'
#' @return Returns a data.frame containing the following columns:
#' \itemize{
#' \item \code{Mpooled}: The pooled mean of both groups
#' \item \code{SDpooled}: The pooled standard deviation of both groups
#' \item \code{Npooled}: The pooled number of participants of both groups
#' }
#'
#' @export
#'
#' @seealso \code{\link{se.from.p}}
#'
#' @examples
#' pool.groups(n1 = 50, n2 = 56,
#' m1 = 7.83, m2 = 8.32,
#' sd1 = 3.52, sd2 = 2.25)
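#'
#' # Hypothetical three-arm example (all values are illustrative only):
#' # pool arms 1 and 2 first, then pool the result with the third arm.
#' arm12 = pool.groups(n1 = 50, n2 = 56,
#'                     m1 = 7.83, m2 = 8.32,
#'                     sd1 = 3.52, sd2 = 2.25)
#' pool.groups(n1 = arm12$Npooled, n2 = 60,
#'             m1 = arm12$Mpooled, m2 = 7.50,
#'             sd1 = arm12$SDpooled, sd2 = 3.10)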
pool.groups = function(n1, n2, m1, m2, sd1, sd2) {
n1 = n1
n2 = n2
m1 = m1
m2 = m2
sd1 = sd1
sd2 = sd2
    if (is.numeric(n1) == FALSE) {
        stop("'n1' must be of type numeric().")
    }
    if (is.numeric(n2) == FALSE) {
        stop("'n2' must be of type numeric().")
    }
    if (is.numeric(m1) == FALSE) {
        stop("'m1' must be of type numeric().")
    }
    if (is.numeric(m2) == FALSE) {
        stop("'m2' must be of type numeric().")
    }
    if (is.numeric(sd1) == FALSE) {
        stop("'sd1' must be of type numeric().")
    }
    if (is.numeric(sd2) == FALSE) {
        stop("'sd2' must be of type numeric().")
    }
Npooled = n1 + n2
Mpooled = (n1 * m1 + n2 * m2)/(n1 + n2)
SDpooled = sqrt(((n1 - 1) * sd1^2 + (n2 - 1) * sd2^2 + (((n1 * n2)/(n1 + n2)) * (m1^2 + m2^2 - 2 * m1 *
m2)))/(n1 + n2 - 1))
return(data.frame(Mpooled, SDpooled, Npooled))
}
#' Toy Dataset for Network Meta-Analysis using the gemtc package
#'
#' This is a toy dataset containing simulated effect size data of a fictitious
#' network meta-analysis examining the effect of psychotherapies. Effect size
#' data is provided as the standardized mean difference (SMD) between the intervention
#' and control group and its corresponding standard error for each study at post-test.
#' The dataframe layout is optimized for out-of-the-box usage using the \code{data.re}
#' argument of the \code{\link[gemtc]{mtc.network}} function.
#'
#'
#' @format A data.frame with 4 columns.
#' \describe{
#' \item{study}{Character. The name of the included study.}
#' \item{treatment}{Character. The name of the treatment under study. Includes psychotherapies for
#' the treatment of depression, "CBT" (Cognitive Behavioral Therapy), "PDT" (Psychodynamic Therapy),
#' "IPT" (Interpersonal Therapy), "PST" (Problem-solving Therapy) and "SUP" (Supportive Counseling),
#' and comparison conditions, "TAU" (Treatment as usual), "Placebo" (Placebo), and "WLC" (Waitlist control).
#' Each treatment condition in a study is displayed in its own row of the dataframe.}
#' \item{diff}{Numeric. The standardized mean difference of the comparison. The row in each study in which
#' this variable is \code{NA} represents the comparison condition for the effect size displayed above.}
#' \item{std.err}{Numeric. The standard error of the comparison. The row in each study in which
#' this variable is \code{NA} represents the comparison condition for the standard error of
#' the effect size displayed above.}
#' }
#'
#' @source Simulated data.
#'
#' @usage data("NetDataGemtc")
#'
#' @author Mathias Harrer, David Daniel Ebert
#'
"NetDataGemtc"
#' Create a RevMan-style risk of bias summary chart
#'
#' This function generates summary plots for study quality assessments using the
#' \href{https://bit.ly/2KGQtfG}{Cochrane Risk of Bias Tool}.
#' Summary plots follow the style of \href{https://bit.ly/30eJK29}{RevMan} RoB summary charts.
#'
#' @usage rob.summary(data, name.high="High", name.unclear="Unclear",
#' name.low="Low", studies, name.missing, table = FALSE)
#'
#' @param data A \code{data.frame} containing a column for each risk of bias criterion, where
#' each row represents an individual study. The risk of bias assessment for each criterion for each
#' study must be coded as a character string. Up to four codes can be used, referring to low risk of bias,
#' unclear risk of bias, high risk of bias, or missing information. The strings used to code these categories
#' must be specified in \code{name.high}, \code{name.unclear}, \code{name.low} and/or \code{name.missing},
#' unless defaults for those parameters are used.
#' @param name.high Character specifying how the "high risk of bias" category was coded in \code{data}
#' (e.g., \code{name.high = "high"}). Default is \code{"High"}.
#' @param name.unclear Character specifying how the "unclear risk of bias" category was coded in \code{data}
#' (e.g., \code{name.unclear = "unclear"}). Default is \code{"Unclear"}.
#' @param name.low Character specifying how the "low risk of bias" category was coded in \code{data}
#' (e.g., \code{name.low = "low"}). Default is \code{"Low"}.
#' @param name.missing Character specifying how missing information was coded in \code{data}
#' (e.g., \code{name.missing = "missing"}). Default is \code{"Missing"}. All ratings, including missing
#' information, must be coded as strings, so using \code{NA} in \code{data} to signify missing information
#' is not valid.
#' @param studies A vector of the same length as the number of rows in \code{data} specifying the study
#' labels for the risk of bias ratings. Only has to be specified when \code{table = TRUE}.
#' @param table Should an additional RevMan style risk of bias table be produced? If set to \code{TRUE},
#' \code{studies} must be specified. \code{FALSE} by default.
#'
#' @details The function automatically removes separators like "-" or "." from column names/risk of bias criteria. To produce
#' a "clean" plot, you may therefore separate words in the column names of the \code{data} data frame using these
#' symbols (e.g. \code{"Allocation_Concealment"} to return "Allocation Concealment").
#'
#' @references Harrer, M., Cuijpers, P., Furukawa, T.A, & Ebert, D. D. (2019).
#' \emph{Doing Meta-Analysis in R: A Hands-on Guide}. DOI: 10.5281/zenodo.2551803.
#' \href{https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/creating-a-revman-style-risk-of-bias-summary.html}{Chapter 10}
#'
#' @author Mathias Harrer & David Daniel Ebert
#'
#' @import ggplot2
#' @importFrom tidyr gather
#'
#' @export rob.summary
#'
#' @seealso
#' \code{\link{direct.evidence.plot}}
#'
#' @examples
#' # Example 1: No missing information, only produce summary plot
#' data = data.frame(
#' "study" = c("Higgins et al., 2011", "Borenstein et al., 2008", "Holm, 1971",
#' "Zajonc et al., 2005", "Cuijpers, 2014"),
#' "Allocation_concealment" = c("Low", "High", "High", "Unclear", "High"),
#' "Randomization" = c("Low", "High", "Unclear", "Low", "High"),
#' "Sequence_generation" = c("Low", "High", "Unclear", "Unclear", "High"),
#' "ITT.Analyses" = c("Low", "High", "Unclear", "Unclear", "Unclear"),
#' "Selective_outcome_reporting" = c("Low", "High", "High", "High", "Unclear")
#' )
#' rob.summary(data)
#'
#' # Example 2: Missing information, produce additional summary table
#' data2 = data.frame(
#' "study" = c("Higgins et al., 2011", "Borenstein et al., 2008", "Holm, 1971",
#' "Zajonc et al., 2005", "Cuijpers, 2014"),
#' "Allocation_concealment" = c("low", "high", "high", "uc", "high"),
#' "Randomization" = c("low", "high", "miss", "low", "high"),
#' "Sequence_generation" = c("low", "high", "uc", "uc", "high"),
#' "ITT.Analyses" = c("low", "high", "uc", "uc", "uc"),
#' "Selective_outcome_reporting" = c("low", "high", "high", "high", "uc")
#' )
#' rob.summary(data2, name.high = "high", name.unclear = "uc", name.low = "low",
#' name.missing = "miss", studies = data2$study, table = TRUE)
rob.summary = function(data,
name.high="High",
name.unclear="Unclear",
name.low="Low",
studies,
name.missing,
table = FALSE){
# Class Checks
if (class(data) != "data.frame"){
stop("'data' must be of class 'data.frame'.")
}
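    # Two branches follow: one for data without a "missing information" category (name.missing not supplied)
    # and one with it; both build the summary bar chart and, optionally, a RevMan-style table.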
if (missing(name.missing)){
# Only select columns with RoB data
colnames.rob = character()
for (i in 1:ncol(data)){
vect = as.character(data[,i])
for (j in 1:length(data[,i])){
if (vect[j] %in% c(name.high, name.unclear, name.low)){
colnames.rob[i] = TRUE
} else {
colnames.rob[i] = FALSE
message(cat("Column '", colnames(data)[i],
"' removed from plot because it did not contain the specified RoB ratings (only). \n",
sep=""))
break
}
}
}
# Use mask: rob data
rob = data[ , as.logical(colnames.rob)]
# Relevel for plot
for (i in 1:ncol(rob)){
rob[,i] = as.character(rob[,i])
rob[rob[,i]==name.high,i] = "High"
rob[rob[,i]==name.unclear,i] = "Unclear"
rob[rob[,i]==name.low,i] = "Low"
}
# Make table
if (table == TRUE){
if (missing(studies)){
stop("'studies' has to be specified when 'table = TRUE'.")
}
if (length(as.vector(studies)) != nrow(data)){
stop("'studies' vector is not of equal length as the data.")
}
robby = rob
robby$study = studies
robby = gather(robby, condition, measurement, -study)
robby$condition = gsub("_"," ", robby$condition)
robby$condition = gsub("-"," ", robby$condition)
robby$condition = gsub("\\."," ", robby$condition)
robby[robby$measurement=="Low", "measurement"] = "+"
robby[robby$measurement=="Unclear", "measurement"] = "?"
robby[robby$measurement=="High", "measurement"] = "-"
rob.table = ggplot(data = robby, aes(y = condition, x = study)) +
geom_tile(color="black", fill="white", size = 0.8) +
geom_point(aes(color=as.factor(measurement)), size=20) +
geom_text(aes(label = measurement), size = 8) +
scale_x_discrete(position = "top") +
scale_color_manual(values = c("?" = "#E2DF07",
"-" = "#BF0000",
"+" = "#02C100")) +
theme_minimal() +
coord_equal() +
theme(axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.ticks.y = element_blank(),
axis.text.y = element_text(size = 15, color = "black"),
axis.text.x = element_text(size = 13, color = "black", angle = 90, hjust=0),
legend.position = "none",
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank())
}
# Make long format, clean the factors
rob.long = gather(rob, condition, measurement, factor_key = TRUE)
rob.long$condition = gsub("_"," ",rob.long$condition)
rob.long$condition = gsub("-"," ",rob.long$condition)
rob.long$condition = gsub("\\."," ",rob.long$condition)
rob.long$measurement = as.factor(rob.long$measurement)
rob.long$measurement = factor(rob.long$measurement, levels(rob.long$measurement)[c(1, 3, 2)])
# Make plot
rob.plot = ggplot(data = rob.long) +
geom_bar(mapping = aes(x = condition, fill = measurement), width = 0.7,
position = "fill", color = "black") +
coord_flip(ylim = c(0, 1)) +
guides(fill = guide_legend(reverse = TRUE)) +
scale_fill_manual("Risk of Bias",
labels = c(" High risk of bias ",
" Unclear risk of bias ",
" Low risk of bias "),
values = c(Unclear = "#E2DF07", High = "#BF0000", Low = "#02C100")) +
scale_y_continuous(labels = scales::percent) +
theme(axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.ticks.y = element_blank(),
axis.text.y = element_text(size = 18, color = "black"),
axis.line.x = element_line(colour = "black", size = 0.5, linetype = "solid"),
legend.position = "bottom",
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
legend.background = element_rect(linetype = "solid", colour = "black"),
legend.title = element_blank(),
legend.key.size = unit(0.75, "cm"),
legend.text = element_text(size = 14))
plot(rob.plot)
if (table == TRUE){
plot(rob.table)
}
} else {
# Only select columns with RoB data
data = as.data.frame(data)
colnames.rob = character()
for (i in 1:ncol(data)){
vect = as.character(data[,i])
for (j in 1:length(data[,i])){
if (vect[j] %in% c(name.high, name.unclear, name.low, name.missing)){
colnames.rob[i] = TRUE
} else {
colnames.rob[i] = FALSE
message(cat("Column '", colnames(data)[i],
"' removed from plot because it did not contain the specified RoB ratings (only). \n",
sep=""))
break
}
}
}
# Use mask: rob data
rob = data[ , as.logical(colnames.rob)]
# Relevel for plot
for (i in 1:ncol(rob)){
rob[,i] = as.character(rob[,i])
rob[rob[,i]==name.high,i] = "High"
rob[rob[,i]==name.unclear,i] = "Unclear"
rob[rob[,i]==name.low,i] = "Low"
rob[rob[,i]==name.missing,i] = "Missing"
}
# Make Table
if (table == TRUE){
if (missing(studies)){
stop("'studies' has to be specified when 'table = TRUE'.")
}
if (length(as.vector(studies)) != nrow(data)){
stop("'studies' vector is not of equal length as the data.")
}
robby = rob
robby$study = studies
robby = gather(robby, condition, measurement, -study)
robby$condition = gsub("_"," ", robby$condition)
robby$condition = gsub("-"," ", robby$condition)
robby$condition = gsub("\\."," ", robby$condition)
robby[robby$measurement=="Low", "measurement"] = "+"
robby[robby$measurement=="Unclear", "measurement"] = "?"
robby[robby$measurement=="High", "measurement"] = "-"
robby[robby$measurement=="Missing", "measurement"] = " "
rob.table = ggplot(data = robby, aes(y = condition, x = study)) +
geom_tile(color="black", fill="white", size = 0.8) +
geom_point(aes(color=as.factor(measurement)), size=20) +
geom_text(aes(label = measurement), size = 8) +
scale_x_discrete(position = "top") +
scale_color_manual(values = c("?" = "#E2DF07",
"-" = "#BF0000",
"+" = "#02C100",
" " = "white")) +
theme_minimal() +
coord_equal() +
theme(axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.ticks.y = element_blank(),
axis.text.y = element_text(size = 15, color = "black"),
axis.text.x = element_text(size = 13, color = "black", angle = 90, hjust=0),
legend.position = "none",
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank())
}
# Make long format, clean the factors
rob.long = gather(rob, condition, measurement, factor_key = TRUE)
rob.long$condition = gsub("_"," ",rob.long$condition)
rob.long$condition = gsub("-"," ",rob.long$condition)
rob.long$condition = gsub("\\."," ",rob.long$condition)
rob.long$measurement = as.factor(rob.long$measurement)
rob.long$measurement = factor(rob.long$measurement, levels(rob.long$measurement)[c(3,1,4,2)])
rob.plot = ggplot(data = rob.long) +
geom_bar(mapping = aes(x = condition, fill = measurement), width = 0.7,
position = "fill", color = "black") +
coord_flip(ylim = c(0, 1)) +
guides(fill = guide_legend(reverse = TRUE)) +
scale_fill_manual("Risk of Bias",
labels = c(" Missing information ",
" High risk of bias ",
" Unclear risk of bias ",
" Low risk of bias "),
values = c(Unclear = "#E2DF07",
High = "#BF0000",
Low = "#02C100",
Missing = "white")) +
scale_y_continuous(labels = scales::percent) +
theme(axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.ticks.y = element_blank(),
axis.text.y = element_text(size = 18, color = "black"),
axis.line.x = element_line(colour = "black", size = 0.5, linetype = "solid"),
legend.position = "bottom",
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
legend.background = element_rect(linetype = "solid", colour = "black"),
legend.title = element_blank(),
legend.key.size = unit(0.75, "cm"),
legend.text = element_text(size = 14))
plot(rob.plot)
if (table == TRUE){
plot(rob.table)
}
}
}
#' Perform Egger's test of the intercept
#'
#' This function performs Egger's test of the intercept for funnel plot asymmetry using an object
#' of class \code{meta}.
#'
#' @usage eggers.test(x)
#'
#' @param x An object of class \code{meta}, generated by the \code{metabin}, \code{metagen},
#' \code{metacont}, \code{metacor}, \code{metainc}, or \code{metaprop} function.
#'
#' @details Performs Egger's test (Egger et al., 1997) for funnel plot asymmetry.
#' The \code{\link[meta]{metabias}} function is called internally. Egger's test may lack
#' the statistical power to detect bias when the number of studies is small. Sterne et al.
#' (2011) recommend performing funnel plot asymmetry tests only when \eqn{k \geq 10}. A warning
#' is therefore printed when the number of studies in the \code{meta} object is \eqn{k < 10}.
#'
#' @references
#'
#' Harrer, M., Cuijpers, P., Furukawa, T.A, & Ebert, D. D. (2019).
#' \emph{Doing Meta-Analysis in R: A Hands-on Guide}. DOI: 10.5281/zenodo.2551803. \href{https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/smallstudyeffects.html}{Chapter 9.1}
#'
#' Egger M, Smith GD, Schneider M & Minder C (1997), Bias in meta-analysis detected by a simple,
#' graphical test. \emph{BMJ}, 315, 629–634.
#'
#' Sterne, JAC et al. (2011), Recommendations for Examining and Interpreting Funnel Plot
#' Asymmetry in Meta-Analyses of Randomised Controlled Trials. \emph{BMJ}
#' 343, 1, doi: 10.1136/bmj.d4002 .
#'
#' @author Mathias Harrer & David Daniel Ebert
#'
#' @importFrom meta metabias
#' @importFrom graphics abline axis lines mtext par plot points rect segments text
#' @importFrom stats as.formula hat influence ks.test optimize pbinom pchisq pf pnorm pt punif qchisq qf qnorm qt reformulate reorder setNames uniroot
#'
#' @return Returns a data.frame containing the following columns:
#' \itemize{
#' \item \code{Intercept}: The intercept (bias).
#' \item \code{ConfidenceInterval}: The 95\% confidence interval of the intercept.
#' \item \code{t}: The t-statistic for the intercept test.
#' \item \code{p}: The p-value for Egger's test.
#' }
#'
#' @export eggers.test
#'
#' @seealso \code{\link[meta]{metabias}}
#'
#' @examples
#' # Create meta-analysis results using the 'metagen' function
#' suppressPackageStartupMessages(library(meta))
#' data(ThirdWave)
#' m = metagen(TE, seTE, studlab = paste(Author),
#' data = ThirdWave, comb.random = FALSE, hakn=TRUE)
#'
#' # Plug result into the 'eggers.test' function
#' eggers.test(m)
eggers.test = function(x) {
# Validate
x = x
if (x$k < 10) {
warning(paste("Your meta-analysis contains k =", x$k, "studies. Egger's test may lack the statistical power to detect bias when the number of studies is small (i.e., k<10)."))
}
if (class(x)[1] %in% c("meta", "metabin", "metagen", "metacont", "metacor", "metainc", "metaprop")) {
# Conduct metabias
eggers = meta::metabias(x, k.min = 3, method = "linreg")
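        # method = "linreg" corresponds to the classical Egger regression (the standardized effect
        # is regressed on precision); its intercept is extracted below as the asymmetry estimate.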
# Get Intercept
intercept = as.numeric(eggers$estimate[1]) %>% round(digits = 3)
# Get SE
se = as.numeric(eggers$estimate[2])
# Calculate 95CI
        LLCI = (intercept - 1.96 * se) %>% round(digits = 1)
        ULCI = (intercept + 1.96 * se) %>% round(digits = 1)
CI = paste(LLCI, "-", ULCI, sep = "")
# Get t
t = as.numeric(eggers$statistic) %>% round(digits = 3)
# Get df
df = as.numeric(eggers$parameters)
# Get p
p = as.numeric(eggers$p.value) %>% round(digits = 5)
# Make df
df = data.frame(Intercept = intercept, ConfidenceInterval = CI, t = t, p = p)
row.names(df) = "Egger's test"
} else {
stop("x must be of type 'metabin', 'metagen', 'metacont', 'metainc' or 'metaprop'")
}
return(df)
}
#' A priori power calculator
#'
#' This function performs an \emph{a priori} power estimation of a meta-analysis
#' for different levels of assumed between-study heterogeneity.
#'
#' @usage power.analysis(d, OR, k, n1, n2, p = 0.05, heterogeneity = 'fixed')
#'
#' @param d The hypothesized, or plausible overall effect size of a treatment/intervention under study compared
#' to control, expressed as the standardized mean difference (SMD). Effect sizes must be positive
#' numerics (i.e., expressed as positive effect sizes).
#' @param OR The hypothesized, or plausible overall effect size of a treatment/intervention under study compared
#' to control, expressed as the Odds Ratio (OR). If both \code{d} and \code{OR} are specified, results
#' will only be computed for the value of \code{d}.
#' @param k The expected number of studies to be included in the meta-analysis.
#' @param n1 The expected, or plausible mean sample size of the treatment group in the studies to be included in the meta-analysis.
#' @param n2 The expected, or plausible mean sample size of the control group in the studies to be included in the meta-analysis.
#' @param p The alpha level to be used for the power computation. Default is \eqn{\alpha = 0.05}.
#' @param heterogeneity Which level of between-study heterogeneity to assume for the meta-analysis. Can be either
#' \code{"fixed"} for no heterogeneity/a fixed-effect model, \code{"low"} for low heterogeneity, \code{"moderate"}
#' for moderate-sized heterogeneity or \code{"high"} for high levels of heterogeneity. Default is \code{"fixed"}.
#'
#' @details While researchers conducting primary studies can plan the size of their sample
#' based on the effect size they want to find, the situation is different in
#' meta-analysis, where one can only work with the published material.
#' However, researchers have some control over the number of studies they want to include in their
#' meta-analysis (e.g., through more leniently or strictly defined inclusion criteria).
#' Therefore, one can change the power to some extent by including more or fewer studies in
#' the meta-analysis. Conventionally, a power of \eqn{1-\beta = 0.8} is deemed sufficient to detect an existing effect.
#' There are four things one has to make assumptions about when assessing the power of a meta-analysis a priori.
#'
#' \itemize{
#' \item The number of included or includable studies
#' \item The overall size of the studies we want to include (are the studies in the field rather small or large?)
#' \item The effect size to determine. This is particularly important, as assumptions have to be made
#' about how big an effect size has to be to still be clinically meaningful. One study calculated
#' that for interventions against depression, even effects as small as \emph{SMD}=0.24 may still
#' be meaningful for patients (Cuijpers et al. 2014). If the aim is to study negative effects of an
#' intervention (e.g., death or symptom deterioration), even very small effect sizes are extremely
#' important and should be detected.
#' \item The heterogeneity of our studies’ effect sizes, as this also affects the precision of the pooled estimate,
#' and thus its potential to find significant effects.
#' }
#'
#' The \code{power.analysis} function implements the formula by Borenstein et al. (2011) to calculate
#' the power estimate. Odds Ratios are converted to \code{d} internally before the power is estimated, and
#' are then reconverted.
#'
#' @references
#'
#' Harrer, M., Cuijpers, P., Furukawa, T.A, & Ebert, D. D. (2019).
#' \emph{Doing Meta-Analysis in R: A Hands-on Guide}. DOI: 10.5281/zenodo.2551803. \href{https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/power-analysis.html}{Chapter 13}
#'
#'Cuijpers, P., Turner, E.H., Koole, S. L., Van Dijke, A., & Smit, F. (2014).
#'What Is the Threshold for a Clinically Relevant Effect? The Case of Major Depressive Disorders.
#'\emph{Depression and Anxiety, 31}(5): 374–78.
#'
#'Borenstein, M., Hedges, L.V., Higgins, J.P.T. and Rothstein, H.R. (2011). Introduction to Meta-Analysis. John Wiley & Sons.
#'
#' @author Mathias Harrer & David Daniel Ebert
#'
#' @return The \strong{estimated power} of the meta-analysis, expressed as a value between 0 and 1 (i.e., 0\%-100\%).
#'
#' An additional plot is generated, showing the effect size (x), power (y), estimated power (red point) and
#' estimated power for changing effect sizes (blue line). A dashed line at 80\% power is also provided as a
#' visual threshold for sufficient power.
#'
#' @export power.analysis
#'
#' @import ggplot2
#'
#' @seealso \code{\link{power.analysis.subgroup}}
#'
#' @examples
#'
#' # Example 1: Using SMD and fixed-effect model (no heterogeneity)
#' power.analysis(d=0.124, k=10, n1=50, n2=50, heterogeneity = 'fixed')
#'
#' # Example 2: Using OR and assuming moderate heterogeneity
#' power.analysis(OR=0.77, k=12, n1=50, n2=50, heterogeneity = 'high')
power.analysis = function(d, OR, k, n1, n2, p = 0.05, heterogeneity = "fixed") {
odds = FALSE
if (missing(OR) & missing(d)) {
stop("Either 'd' or 'OR' must be provided.")
}
if (!(heterogeneity %in% c("fixed", "low", "moderate", "high"))) {
stop("'heterogeneity' must be either 'fixed', 'low', 'moderate', 'high'.")
}
# Cohen's d not provided: calculate from OR
if (missing(d)) {
odds = TRUE
d = log(OR) * (sqrt(3)/pi)
cat("Power Analysis based on log-transformed OR. \n")
} else {
}
es = d
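    # The branches below implement the power formula from Borenstein et al. (2011):
    # v.d is the variance of a single study's SMD, v.m = v.d / k the variance of the pooled effect
    # (multiplied by 1.33, 1.67, or 2 under low, moderate, or high assumed heterogeneity),
    # lambda = d / sqrt(v.m), and power = 1 - pnorm(zval - lambda) + pnorm(-zval - lambda).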
if (heterogeneity == "fixed") {
het.factor = 1
v.d = ((n1 + n2)/(n1 * n2)) + ((d * d)/(2 * (n1 + n2)))
v.m = v.d/k
lambda = (d/sqrt(v.m))
plevel = 1 - (p/2)
zval = qnorm(p = plevel, 0, 1)
power = 1 - (pnorm(zval - lambda)) + (pnorm(-zval - lambda))
cat("Fixed-effect model used. \n")
}
if (heterogeneity == "low") {
het.factor = 1.33
v.d = ((n1 + n2)/(n1 * n2)) + ((d * d)/(2 * (n1 + n2)))
v.m = v.d/k
v.m = 1.33 * v.m
lambda = (d/sqrt(v.m))
plevel = 1 - (p/2)
zval = qnorm(p = plevel, 0, 1)
power = 1 - (pnorm(zval - lambda)) + (pnorm(-zval - lambda))
cat("Random-effects model used (low heterogeneity assumed). \n")
}
if (heterogeneity == "moderate") {
het.factor = 1.67
v.d = ((n1 + n2)/(n1 * n2)) + ((d * d)/(2 * (n1 + n2)))
v.m = v.d/k
v.m = 1.67 * v.m
lambda = (d/sqrt(v.m))
plevel = 1 - (p/2)
zval = qnorm(p = plevel, 0, 1)
power = 1 - (pnorm(zval - lambda)) + (pnorm(-zval - lambda))
cat("Random-effects model used (moderate heterogeneity assumed). \n")
}
if (heterogeneity == "high") {
het.factor = 2
v.d = ((n1 + n2)/(n1 * n2)) + ((d * d)/(2 * (n1 + n2)))
v.m = v.d/k
v.m = 2 * v.m
lambda = (d/sqrt(v.m))
plevel = 1 - (p/2)
zval = qnorm(p = plevel, 0, 1)
power = 1 - (pnorm(zval - lambda)) + (pnorm(-zval - lambda))
cat("Random-effects model used (high heterogeneity assumed). \n")
}
# Loop for data for plot
dvec = (1:1000)/1000
if (d > 1) {
dvec = (1:(d * 1000))/1000
}
powvect = vector()
for (i in 1:length(dvec)) {
d = dvec[i]
v.d = ((n1 + n2)/(n1 * n2)) + ((d * d)/(2 * (n1 + n2)))
v.m = v.d/k
v.m = het.factor * v.m
lambda = (d/sqrt(v.m))
plevel = 1 - (p/2)
zval = qnorm(p = plevel, 0, 1)
powvect[i] = 1 - (pnorm(zval - lambda)) + (pnorm(-zval - lambda))
}
# Generate plot
if (odds == FALSE) {
plotdat = as.data.frame(cbind(dvec, powvect))
plot = ggplot(data = plotdat, aes(x = dvec, y = powvect)) + geom_line(color = "blue", size = 2) +
geom_point(aes(x = es, y = power), color = "red", size = 5) + theme_minimal() + geom_hline(yintercept = 0.8,
color = "black", linetype = "dashed") + ylab("Power") + xlab("Effect size (SMD)")
} else {
dvecs = exp(dvec * (pi/sqrt(3)))
dvec.inv = exp(-dvec * (pi/sqrt(3)))
dvec = as.vector(rbind(dvec.inv, dvecs))
powvect = as.vector(rbind(powvect, powvect))
plotdat = as.data.frame(cbind(dvec, powvect))
plot = ggplot(data = plotdat, aes(x = dvec, y = powvect)) + geom_line(color = "blue", size = 2) +
geom_point(aes(x = exp(es * (pi/sqrt(3))), y = power), color = "red", size = 5) + theme_minimal() +
geom_hline(yintercept = 0.8, color = "black", linetype = "dashed") + ylab("Power") + xlab("Effect size (OR)") +
scale_x_log10()
}
plot(plot)
cat("Power: \n")
return(power)
}
#' Calculate the number needed to treat (NNT)
#'
#' This function calculates the number needed to treat (\eqn{NNT}) using event data or
#' effect sizes (such as Cohen's \eqn{d} or Hedges' \eqn{g}).
#'
#' @usage NNT(d, CER, event.e, n.e, event.c, n.c, names, method)
#'
#' @param d A single numeric or concatenated vector of numerics representing the effect size expressed as
#' Cohen's \eqn{d} or Hedges' \eqn{g}. If this is the only parameter specified in the function, the method by
#' Kraemer and Kupfer is used automatically to calculate \eqn{NNT}s.
#' @param CER The control group event ratio. Furukawa's method (Furukawa & Leucht, 2011) to calculate NNTs
#' from \code{d} requires that the assumed response ("event") ratio in the control group (\eqn{\frac{n_{responders}}{N_{total}}})
#' is specified. The CER can assume values from 0 to 1. If a value is specified for \code{CER}, Furukawa's method is
#' used automatically. Parameter \code{method} has to be set to \code{"KraemerKupfer"} to override this.
#' @param event.e Single number or numeric vector. The number of (favourable) events in the experimental group.
#' @param n.e Single number or numeric vector. The number of participants in the experimental group.
#' @param event.c Single number or numeric vector. The number of (favourable) events in the control group.
#' @param n.c Single number or numeric vector. The number of participants in the control group.
#' @param names Optional. Character vector of equal length as the vector supplied to \code{d} or \code{event.e} containing
#' study/effect size labels.
#' @param method The method to be used to calculate the NNT from \code{d}. Either \code{"KraemerKupfer"} for the
#' method proposed by Kraemer and Kupfer (2006) or \code{"Furukawa"} for the Furukawa method (Furukawa & Leucht, 2011).
#' Please note that the Furukawa method can only be used when \code{CER} is specified.
#'
#' @details This function calculates the number needed to treat (\eqn{NNT}) from effect sizes (Cohen's \eqn{d} and Hedges' \eqn{g})
#' or, alternatively, from raw event data.
#'
#' Two methods to calculate the \eqn{NNT} from \code{d} are implemented in this function.
#' \itemize{
#' \item The method by \strong{Kraemer and Kupfer} (2006)
#' calculates the \eqn{NNT} from the Area Under the Curve (\eqn{AUC}), defined as the probability that a patient in the treatment
#' group has an outcome preferable to one in the control group. This method allows the \eqn{NNT} to be calculated directly from \code{d} without
#' any extra variables.
#' \item The method by \strong{Furukawa} calculates the \eqn{NNT} from \code{d} using a reasonable estimate
#' of the \eqn{CER}, in this context the response rate in the control group.
#' }
#'
#' Furukawa's method has been shown to be superior in predicting
#' the \eqn{NNT} compared to the Kraemer & Kupfer method (Furukawa & Leucht, 2011). If reasonable assumptions can be made concerning
#' the \eqn{CER}, Furukawa's method should therefore be preferred.
#'
#' When event data are used, the \eqn{CER} and \eqn{EER} (experimental group event rate) are calculated internally, and
#' the standard definition of the \eqn{NNT}, \eqn{\frac{1}{EER-CER}}, is used.
#'
#' Please note that negative NNT values returned by the function refer to the number needed to harm (\eqn{NNH}), as the intervention
#' is assumed to be inferior to the control group treatment based on the effect size data supplied to the function.
#'
#' @references
#'
#' Harrer, M., Cuijpers, P., Furukawa, T.A, & Ebert, D. D. (2019).
#' \emph{Doing Meta-Analysis in R: A Hands-on Guide}. DOI: 10.5281/zenodo.2551803. \href{https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/smallstudyeffects.html}{Chapter 9.1}
#'
#'Furukawa, T. A., & Leucht, S. (2011). How to obtain NNT from Cohen's d: comparison of two methods. \emph{PloS one, 6}(4), e19070.
#'
#' Kraemer H.C., Kupfer D.J. (2006) Size of treatment effects and their importance
#' to clinical research and practice. \emph{Biol. Psychiatry, 59}: 990–996.
#'
#' @author Mathias Harrer & David Daniel Ebert
#'
#' @export NNT
#'
#' @seealso \code{\link{se.from.p}}
#'
#' @examples
#'
#' # Example 1: Convert Cohen's d using the Kraemer & Kupfer method
#' d = c(-0.123, 0.234, 0.123, 1.234, 0.12)
#' NNT(d)
#'
#' # Example 2: Convert Cohen's d using the Furukawa method
#' d = c(-0.123, 0.234, 0.123, 1.234, 0.12)
#' CER = c(0.42, 0.35, 0.26, 0.21, 0.23)
#' NNT(d, CER)
#'
#' # Example 3: Convert event data
#' NNT(event.e = 10, event.c = 20, n.e = 200, n.c = 200)
NNT = function(d, CER, event.e, n.e, event.c, n.c, names, method) {
# Calculate Cohens-d
if (missing(event.e)) {
if (missing(CER)) {
# Use Kraemer
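            # Kraemer & Kupfer (2006): NNT = 1 / (2 * pnorm(d / sqrt(2)) - 1), i.e. the reciprocal of
            # twice the AUC (probability of a superior outcome under treatment) minus one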
NNT = 1/((2 * pnorm(d/sqrt(2)) - 1))
cat("Kraemer & Kupfer's method used. \n")
} else {
if (missing(method)) {
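                # Furukawa & Leucht (2011): NNT = 1 / (pnorm(d + qnorm(CER)) - CER), using the
                # assumed control group event rate (CER)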
NNT = 1/(pnorm(d + qnorm(CER)) - CER)
cat("Furukawa's method used. \n")
} else {
if (method == "KraemerKupfer") {
NNT = 1/((2 * pnorm(d/sqrt(2)) - 1))
cat("Kraemer & Kupfer's method used. \n")
} else {
}
if (method == "Furukawa") {
if (missing(CER) | class(CER) != "numeric") {
stop("To use Furukawa's method, provide a numeric value for CER. \n")
} else {
NNT = 1/(pnorm(d + qnorm(CER)) - CER)
cat("Furukawa's method used. \n")
}
} else {
}
}
}
} else {
# Calculate from raw event data
if (class(event.e) == "numeric" & missing(d)) {
EER = event.e/n.e
CER = event.c/n.c
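            # Standard definition based on raw event rates: NNT = 1 / |EER - CER|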
NNT = abs(1/(EER - CER))
if (missing(method)) {
} else {
if (method %in% c("KraemerKupfer", "Furukawa")) {
cat("NNTs were calculated from raw data, so neither Kraemer & Kupfer nor Furukawa method was used.")
} else {
}
}
}
}
if (sum(NNT < 0) > 0) {
cat("Negative NNT values refer to the number needed to harm (NNT) \n")
}
if (missing(names)) {
return(NNT)
} else {
data = data.frame(Name = names, NNT = NNT)
return(data)
}
}
#' GOSH plot dataset
#'
#' @format Dataset of class \code{gosh.rma}.
#'
#' @source Data generated by fitting a random-effects meta-analysis with \code{rma.uni}
#' using \code{dmetar}'s in-built \code{ThirdWave} dataset, which was then provided as input to the
#' \code{gosh} function in \code{metafor}.
#'
#' @usage data("m.gosh")
#'
#' @author Mathias Harrer, David Daniel Ebert
#'
"m.gosh"
#' Calculate the Surface Under the Cumulative Ranking score from a network meta-analysis
#'
#' This function calculates the SUCRA (Surface Under the Cumulative Ranking) score from a rank
#' probability matrix or an object of class \code{mtc.result} generated by the \code{\link[gemtc]{mtc.run}}
#' function.
#'
#' @usage sucra(x, lower.is.better = FALSE, plot = TRUE)
#'
#' @param x An object of class \code{mtc.rank.probability} generated by the \code{\link[gemtc]{rank.probability}} function
#' or a matrix/data.frame in which the rows correspond to the treatment, and columns to the
#' probability of a specific treatment having this rank (see details). Rownames of the matrix should
#' contain the name of the specific treatment.
#' @param lower.is.better Logical. Do lower (i.e., more negative) effect sizes indicate better outcomes?
#' \code{FALSE} by default. Use the default when the provided matrix already contains the
#' correct rank probability for each treatment, and values ought not to be inverted.
#' @param plot Logical. Should a bar plot be drawn for the sucra scores? \code{TRUE} by default.
#'
#' @details The SUCRA score is a metric to evaluate which treatment in a network is likely
#' to be the most efficacious in the context of network meta-analyses. The SUCRA score is calculated
#' in the function using the formula described in Salanti, Ades and Ioannidis (2011):
#' \deqn{SUCRA_j = \frac{\sum_{b=1}^{a-1}cum_{jb}}{a-1}}
#' Where \eqn{j} is some treatment, \eqn{a} are all competing treatments, \eqn{b} are the
#' \eqn{b = 1, 2, ..., a-1} best treatments, and \eqn{cum} represents the cumulative probability
#' of a treatment being among the \eqn{b} best treatments.
#'
#' Other than an object of class \code{mtc.rank.probability} for argument \code{x}, the function can also be provided
#' with a \eqn{m \times n} matrix where \eqn{m} are rows corresponding to each treatment in the
#' network meta-analysis, and the \eqn{n} columns correspond to each rank (1st, 2nd, etc.). Rank probabilities
#' should be provided as a value from 0 to 1. Rownames of the matrix should correspond to the treatment names.
#' Here is an example rank probability matrix for eight treatments:
#'
#' \tabular{lrrrrrrrr}{
#' . \tab [,1] \tab [,2] \tab [,3] \tab [,4] \tab [,5] \tab [,6] \tab [,7] \tab [,8]\cr
#' CBT \tab 0.000000 \tab 0.000000 \tab 0.000000 \tab 0.000000 \tab 0.000000 \tab 0.001275 \tab 0.087400 \tab 0.911325\cr
#' IPT \tab 0.000000 \tab 0.000000 \tab 0.000000 \tab 0.000000 \tab 0.000000 \tab 0.179400 \tab 0.745875 \tab 0.074725\cr
#' PDT \tab 0.000000 \tab 0.000000 \tab 0.000225 \tab 0.020300 \tab 0.978025 \tab 0.001450 \tab 0.000000 \tab 0.000000\cr
#' PLA \tab 0.002825 \tab 0.551175 \tab 0.262525 \tab 0.181550 \tab 0.001925 \tab 0.000000 \tab 0.000000 \tab 0.000000\cr
#' PST \tab 0.000000 \tab 0.000000 \tab 0.000000 \tab 0.000025 \tab 0.001450 \tab 0.817850 \tab 0.166725 \tab 0.013950\cr
#' SUP \tab 0.000000 \tab 0.216450 \tab 0.398700 \tab 0.383950 \tab 0.000900 \tab 0.000000 \tab 0.000000 \tab 0.000000\cr
#' TAU \tab 0.000375 \tab 0.229200 \tab 0.338525 \tab 0.414175 \tab 0.017700 \tab 0.000025 \tab 0.000000 \tab 0.000000\cr
#' WLC \tab 0.996800 \tab 0.003175 \tab 0.000025 \tab 0.000000 \tab 0.000000 \tab 0.000000 \tab 0.000000 \tab 0.000000
#' }
#'
#' @references Harrer, M., Cuijpers, P., Furukawa, T.A, & Ebert, D. D. (2019).
#' \emph{Doing Meta-Analysis in R: A Hands-on Guide}. DOI: 10.5281/zenodo.2551803.
#' \href{https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/bayesian-network-meta-analysis.html}{Chapter 11.2}.
#'
#'Salanti, G., Ades, A. E. & Ioannidis, J.P.A. (2011). Graphical Methods and Numerical Summaries for
#'Presenting Results from Multiple-Treatment Meta-Analysis: An Overview and Tutorial.
#'\emph{Journal of Clinical Epidemiology, 64} (2): 163–71.
#'
#' @author Mathias Harrer & David Daniel Ebert
#'
#' @import ggplot2
#' @importFrom forcats fct_reorder
#'
#' @export sucra
#'
#' @seealso
#' \code{\link{direct.evidence.plot}}
#'
#' @examples
#' \dontrun{
#' # Example 1: conduct NMA using gemtc, calculate SUCRAs
#' suppressPackageStartupMessages(library(gemtc))
#' suppressPackageStartupMessages(library(igraph))
#' data("NetDataGemtc")
#'
#' network = suppressWarnings(mtc.network(data.re = NetDataGemtc))
#'
#' plot(network, layout = layout.fruchterman.reingold)
#'
#' model = mtc.model(network, linearModel = "fixed",
#' n.chain = 4,
#' likelihood = "normal",
#' link = "identity")
#'
#' mcmc = mtc.run(model, n.adapt = 5000, n.iter = 100000, thin = 10)
#'
#' rp = rank.probability(mcmc)
#'
#' sucra(rp, lower.is.better = TRUE)}
#'
#'
#'
#' # Example 2: construct rank probability matrix, then use sucra function
#' rp = rbind(CBT = c(0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.001500, 0.088025, 0.910475),
#' IPT = c(0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.176975, 0.748300, 0.074725),
#' PDT = c(0.000000, 0.000000, 0.000250, 0.021725, 0.976525, 0.001500, 0.000000, 0.000000),
#' PLA = c(0.003350, 0.546075, 0.266125, 0.182125, 0.002325, 0.000000, 0.000000, 0.000000),
#' PST = c(0.000000, 0.000000, 0.000000, 0.000000, 0.001500, 0.820025, 0.163675, 0.014800),
#' SUP = c(0.000000, 0.217450, 0.403950, 0.378000, 0.000600, 0.000000, 0.000000, 0.000000),
#' TAU = c(0.000225, 0.232900, 0.329675, 0.418150, 0.019050, 0.000000, 0.000000, 0.000000),
#' WLC = c(0.996425, 0.003575, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000))
#'
#' sucra(rp, lower.is.better = TRUE)
sucra = function(x, lower.is.better = FALSE, plot = TRUE) {
rank.probability = x
# Convert rank.probability to matrix
mat = as.matrix(rank.probability)
# Loop over treatments, for each treatment: calculate SUCRA
a = ncol(mat)
j = nrow(mat)
names = rownames(mat)
sucra = numeric()
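# SUCRA_j: sum of cumulative rank probabilities over the (a-1) best ranks, divided by (a-1)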
for (x in 1:j) {
sucra[x] = sum(cumsum(mat[x, 1:(a - 1)]))/(a - 1)
}
# If condition for lower.is.better
if (lower.is.better == TRUE) {
sucra = numeric()
for (x in 1:j) {
sucra[x] = 1 - sum(cumsum(mat[x, 1:(a - 1)]))/(a - 1)
}
}
# Make data.frame
res = data.frame(Treatment = names, SUCRA = sucra)
# Order
res = res[order(-res$SUCRA), ]
rownames(res) = 1:j
if (plot==TRUE){
plot = ggplot(res, aes(x=fct_reorder(Treatment, -SUCRA), y=SUCRA)) +
geom_bar(stat="identity") +
theme_minimal() +
theme(axis.text.x = element_text(angle=45, color="black"),
axis.text.y = element_text(color="black")) +
ylab("SUCRA") +
xlab("Treatment")
plot(plot)
}
rownames(res) = res$Treatment
res$Treatment = NULL
return(res)
}
#' Toy Dataset for Multivariate Meta-Regression
#'
#' This is a toy dataset containing simulated effect size data of a fictitious
#' meta-analysis examining the effect of various putative effect moderators. Effect size
#' data is provided as the standardized mean difference (SMD) between the intervention
#' and control group and its corresponding standard error for each study at post.
#' Columns are named after arguments of the \code{\link[metafor]{rma.uni}}
#' function to facilitate out-of-the-box usage.
#'
#'
#' @format A data.frame with 6 columns.
#' \describe{
#' \item{yi}{Numeric. The calculated standardized mean difference at post-test between the intervention and control group}
#' \item{sei}{Numeric. The standard error of the standardized mean difference}
#' \item{reputation}{Numeric. The mean-centered score signifying the
#' "reputation" (for example, impact factor) of the journal the respective study was published in.}
#' \item{quality}{Numeric. The methodological quality of the study, rated from 0-10 (low to high).}
#' \item{pubyear}{Numeric. The z-standardized year of publication.}
#' \item{continent}{Numeric. The continent the study was conducted in.}
#' }
#'
#' @source Simulated data.
#'
#' @usage data("MVRegressionData")
#'
#' @author Mathias Harrer, David Daniel Ebert
#'
"MVRegressionData"
.onAttach <- function(libname, pkgname) {
packageStartupMessage("Extensive documentation for the dmetar package can be found at: \n www.bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/")
}
utils::globalVariables(c("studlab", "rstudent", "study", "is.infl", "dffits",
"cook.d", "cov.r", "tau2.del", "QE.del", "hat",
"weight", "i2", "lower", "upper", "y", "se", "comparison",
"variable", "value", "model", "punif", "condition", "measurement",
"TE", "seTE", "Subgroup", "Treatment", "SUCRA", "estimate",
"I2", "cluster", "Delta_Percentage", "Cluster", "cooks.distance",
"Cooks.Distance"))
#' 'Third-Wave' cognitive behavioral interventions for perceived stress in college students dataset
#'
#' This is a toy dataset containing pre-calculated effect size data of a meta-analysis on
#' randomized controlled trials comparing the effectiveness of 'third-wave' CBT interventions
#' for perceived stress in college students to inactive controls. Effect size data is provided
#' as the standardized mean difference (SMD) between the intervention and control group
#' and its corresponding standard error for each study at post.
#' The dataset also contains columns for study characteristics which may serve as potential
#' effect size moderators.
#'
#' @format A data.frame with 8 columns.
#' \describe{
#' \item{Author}{Character. The study label containing the author(s) of the study}
#' \item{TE}{Numeric. The calculated standardized mean difference at post-test between the intervention and control group}
#' \item{seTE}{Numeric. The standard error of the standardized mean difference}
#' \item{RiskOfBias}{Character. The risk of bias rating according to the Cochrane Risk of Bias Tool.}
#' \item{TypeControlGroup}{Character. The type of control group used in the study}
#' \item{InterventionDuration}{Character. The dichotomized duration of the intervention}
#' \item{InterventionType}{Character. The type of third-wave intervention rationale used}
#' \item{ModeOfDelivery}{Character. The mode of delivery used for the intervention}
#' }
#'
#' @source Slightly changed dataset of a meta-analysis on third-wave CBT interventions for
#' perceived stress in college students.
#'
#' @usage data(ThirdWave)
#'
#' @author Mathias Harrer, Eva-Maria Rathner, David Daniel Ebert
#'
"ThirdWave"
#' Identify studies contributing to heterogeneity patterns found in GOSH plots
#'
#' This function uses three unsupervised learning algorithms
#' (\emph{k}-means, DBSCAN and Gaussian Mixture Models) to identify studies
#' contributing to the heterogeneity-effect size patterns found in GOSH (graphic
#' display of study heterogeneity) plots.
#'
#' @usage gosh.diagnostics(data, km = TRUE, db = TRUE, gmm = TRUE, km.centers =
#' 3, db.eps = 0.15, db.min.pts = 20, gmm.diag = TRUE, gmm.clusters = 2,
#' gmm.tolerance = 1e-16, gmm.itermax = 10000, seed = 123)
#'
#' @param data An object of class \code{gosh.rma} created through the
#' \code{\link[metafor]{gosh}} function.
#' @param km Logical. Should the \emph{k}-Means algorithm be used to identify
#' patterns in the GOSH plot matrix? TRUE by default.
#' @param db Logical. Should the DBSCAN algorithm be used to identify patterns
#' in the GOSH plot matrix? TRUE by default.
#' @param gmm Logical. Should a bivariate Gaussian Mixture Model be used to
#' identify patterns in the GOSH plot matrix? TRUE by default.
#' @param km.centers Number of clusters to assume for the \emph{k}-Means
#' algorithm. Default is 3.
#' @param db.eps The distance radius \eqn{\epsilon} used to determine clusters
#' (DBSCAN). Default is 0.15.
#' @param db.min.pts The minimum number of points \eqn{minPts} required within
#' \eqn{\epsilon} used to determine clusters. Default is 20.
#' @param gmm.diag Logical. Should the covariance matrix of the components be
#' restricted to diagonal matrices? Default is TRUE.
#' @param gmm.clusters Number of clusters to assume for the Gaussian Mixture
#' Model. Default is 2.
#' @param gmm.tolerance Relative change threshold of log-likelihood used to stop
#' the Expectation-Maximization algorithm for the Gaussian Mixture Model.
#' Default is 1e-16.
#' @param gmm.itermax Maximum number of iterations for the
#' Expectation-Maximization algorithm used in the Gaussian Mixture Model to
#' reach convergence. Default is 10000.
#' @param seed Seed used for reproducibility. Default seed is 123.
#'
#' @details
#'
#' \strong{GOSH Plots}
#'
#' GOSH (\emph{graphic display of study
#' heterogeneity}) plots were proposed by Olkin, Dahabreh and Trikalinos
#' (2012) as a diagnostic plot to assess effect size heterogeneity. GOSH plots
#' facilitate the detection of both (i) outliers and (ii) distinct homogeneous
#' subgroups within the modeled data.
#'
#' Data for the plots is generated by fitting a random-effects model with the
#' same specifications as in the meta-analysis to all possible non-empty subsets
#' \eqn{\mathcal{P}(k) \setminus \emptyset} of the \eqn{k} studies included in the
#' analysis, provided the number of subsets does not exceed \eqn{10^6}. For
#' \eqn{|\mathcal{P}(k)| > 10^6}, 1 million subsets are randomly sampled and used
#' for model fitting when using the \code{\link[metafor]{gosh}} function.
#'
#'
#' \strong{GOSH Plot Diagnostics}
#'
#' Although GOSH plots make it possible to detect heterogeneity patterns and distinct
#' subgroups within the data, determining which studies contribute to a
#' certain subgroup or pattern is often difficult or computationally
#' intensive. To facilitate the detection of studies responsible for specific
#' patterns within the GOSH plots, this function randomly samples \eqn{10^4}
#' data points from the GOSH plot data (to speed up computation). Of the data
#' points, only the \eqn{z}-transformed \eqn{I^2} and effect size values are
#' used (as other heterogeneity metrics produced for the GOSH plot data using
#' the \code{\link[metafor]{gosh}} function are linear combinations of
#' \eqn{I^2}). To these data, three clustering algorithms are applied.
#' \itemize{ \item The first algorithm is \emph{k}-means clustering using the
#' algorithm by Hartigan & Wong (1979) and \eqn{m_k = 3} cluster centers by
#' default. The function uses the \code{\link[stats]{kmeans}} implementation
#' to perform \emph{k}-Means clustering. \item As \eqn{k}-means does not
#' perform well in the presence of distinct arbitrary subclusters and noise,
#' the function also applies \strong{DBSCAN} (\emph{density reachability and
#' connectivity clustering}; Schubert et al., 2017). The hyperparameters
#' \eqn{\epsilon} and \eqn{MinPts} can be tuned for each analysis to maintain
#' a reasonable amount of granularity while not producing too many
#' subclusters. The function uses the \code{\link[fpc]{dbscan}} implementation
#' to perform the DBSCAN clustering. \item Lastly, as a clustering approach
#' using a probabilistic model, Gaussian Mixture Models (GMM; Leisch, 2004)
#' are integrated in the function using an internal call to the
#' \code{\link[flexmix]{flexmix}} implementation. The GMM implemented here use
#' the Expectation-Maximization algorithm for clustering, hyperparameters of
#' which can be tuned using the \code{gmm.tolerance} and \code{gmm.itermax}
#' parameters. }
#'
#' To assess which studies predominantly contribute to a detected cluster,
#' the function calculates the cluster imbalance of a specific study using the
#' difference between (i) the expected share of subsets containing a specific
#' study if the cluster makeup was purely random (viz., representative for the
#' full sample), and (ii) the actual share of subsets containing a specific
#' study within a cluster. Cook's distance for each study is then calculated
#' based on a linear intercept model to determine the leverage of a specific
#' study for each cluster makeup. Studies with a leverage value three times
#' above the mean in any of the generated clusters (for all used clustering
#' algorithms) are returned as potentially influential cases and the GOSH plot
#' is redrawn highlighting these specific studies.
#'
#' @references
#' Leisch, F. (2004). \emph{Flexmix: A general framework for finite
#' mixture models and latent class regression in R}.
#'
#' Hartigan, J. A., & Wong, M. A. (1979). Algorithm as 136: A K-Means Clustering Algorithm.
#' \emph{Journal of the Royal Statistical Society. Series C (Applied Statistics), 28} (1). 100–108.
#'
#' Olkin, I., Dahabreh, I. J., Trikalinos, T. A. (2012). GOSH–a Graphical Display of Study Heterogeneity.
#' \emph{Research Synthesis Methods 3}, (3). 214–23.
#'
#' Schubert, E., Sander, J., Ester, M., Kriegel, H. P. & Xu, X. (2017). DBSCAN Revisited, Revisited:
#' Why and How You Should (Still) Use DBSCAN. \emph{ACM Transactions on Database Systems (TODS) 42}, (3). ACM: 19.
#'
#'
#' @author Mathias Harrer & David Daniel Ebert
#'
#' @import dplyr cluster mvtnorm
#' @importFrom factoextra fviz_cluster
#' @importFrom fpc dbscan
#' @importFrom cowplot plot_grid
#' @importFrom reshape2 melt
#' @importFrom stats kmeans cooks.distance lm
#' @importFrom flexmix flexmix FLXMCmvnorm
#'
#'
#' @export gosh.diagnostics
#'
#' @seealso \code{\link{InfluenceAnalysis}}
#'
#' @examples
#' # Example: load gosh data (created with metafor's 'gosh' function),
#' # then use function
#' data("m.gosh")
#' gosh.diagnostics(m.gosh)
gosh.diagnostics = function(data,
km = TRUE,
db = TRUE,
gmm = TRUE,
km.centers = 3,
db.eps = 0.15, db.min.pts = 20,
gmm.diag = TRUE, gmm.clusters = 2, gmm.tolerance = 1e-16, gmm.itermax = 10000,
seed = 123) {
# Redefine Variables
data = data
km.centers = km.centers
eps = db.eps; rm(db.eps)
sav = data
min.pts = db.min.pts; rm(db.min.pts)
do.km = km; rm(km)
do.db = db; rm(db)
do.gmm = gmm; rm(gmm)
# set seed
set.seed(seed)
# Check input
if (class(data) != "gosh.rma"){
stop("Argument 'data' provided does not have class 'gosh.rma'.")
}
if (do.km == FALSE & do.db == FALSE & do.gmm == FALSE){
stop("At least one of 'km', 'db', or 'gmm' must be set to TRUE.")
}
# Start loading bar
cat(" ", "\n", "Perform Clustering...", "\n")
cat("[===========")
# Create full dataset from gosh output
dat.full = sav$res
dat.full = cbind(dat.full, sav$incl)
cat("=========")
# Create dataset for k-Means
dat.km = data.frame(scale(dat.full$I2, center = TRUE, scale = TRUE), scale(dat.full$estimate, center = TRUE,
scale = TRUE))
colnames(dat.km) = c("I2", "estimate")
# Create dataset for DBSCAN. DBSCAN can become too computationally intensive for very large GOSH data.
# For N_gosh > 10,000, N = 10,000 data points are therefore randomly sampled.
if (nrow(dat.full) < 10000) {
dat.db.full = dat.full
} else {
dat.db.full = dat.full[sample(1:nrow(dat.full), 10000), ] #Sample 10.000 rows
}
dat.db = data.frame(scale(dat.db.full$I2, center = TRUE, scale = TRUE), scale(dat.db.full$estimate, center = TRUE,
scale = TRUE))
colnames(dat.db) = c("I2", "estimate")
cat("=========")
# K-Means
km = kmeans(dat.km, centers = km.centers) # Kmeans
# Only use 5000 rows for plotting to increase speed
if (length(as.numeric(km$cluster)) > 5000){
km.plot.mask = sample(1:length(as.numeric(km$cluster)), 5000)
km.plot = km
km.plot$cluster = km$cluster[km.plot.mask]
dat.km.plot = dat.km[km.plot.mask,]
} else {
km.plot = km
dat.km.plot = dat.km
}
km.clusterplot = fviz_cluster(km.plot, data = dat.km.plot, stand = FALSE, ellipse = FALSE, show.clust.cent = FALSE,
geom = "point", ggtheme = theme_minimal(), shape = 16, ylab = "Effect Size (z-score)", xlab = "I-squared (z-score)",
main = "K-means Algorithm", pointsize = 0.5) + coord_flip()
cat("=========")
# DBSCAN
db = fpc::dbscan(dat.db, eps = eps, MinPts = min.pts)
# Only use 5000 rows for plotting to increase speed
if (length(as.numeric(db$cluster)) > 5000){
db.plot.mask = sample(1:length(as.numeric(db$cluster)), 5000)
db.plot = db
db.plot$cluster = db$cluster[db.plot.mask]
dat.db.plot = dat.db[db.plot.mask,]
} else {
db.plot = db
dat.db.plot = dat.db
}
db.clusterplot = fviz_cluster(db.plot, data = dat.db.plot, stand = FALSE, ellipse = FALSE, show.clust.cent = FALSE,
shape = 16, geom = "point", ggtheme = theme_minimal(), ylab = "Effect Size (z-score)", xlab = "I-squared (z-score)",
main = "DBSCAN Algorithm (black dots are outliers)") + coord_flip()
cat("=========")
# GMM
# Use same data as used for DBSCAN
dat.gmm.full = dat.db.full
dat.gmm = dat.db
gmm = flexmix::flexmix(cbind(I2, estimate) ~ 1,
k = gmm.clusters,
data = dat.gmm,
model = FLXMCmvnorm(diagonal = gmm.diag),
control = list(tolerance = gmm.tolerance,
iter.max = gmm.itermax))
# Only use 5000 rows for plotting to increase speed
if (length(as.numeric(gmm@cluster)) > 5000){
gmm.plot.mask = sample(1:length(as.numeric(gmm@cluster)), 5000)
dat.gmm.plot = dat.gmm[gmm.plot.mask,]
dat.gmm.plot$cluster = gmm@cluster[gmm.plot.mask]
} else {
dat.gmm.plot = dat.gmm
dat.gmm.plot$cluster = gmm@cluster
}
gmm.clusterplot = ggplot(data = dat.gmm.plot, aes(x = estimate,
y = I2,
color = as.factor(cluster))) +
geom_point(alpha=0.6) +
theme_minimal() +
xlab("Effect Size (z-score)") +
ylab("I-squared (z-score)") +
ggtitle("Gaussian Mixture Model") +
guides(color=guide_legend(title="Cluster"))
cat("=========")
# Add to dfs
dat.km.full = dat.full
dat.km.full$cluster = km$cluster
dat.db.full$cluster = db$cluster
dat.gmm.full$cluster = gmm@cluster
####################################################
# Extract the Percentages###########################
# K-Means############################################
dat.km.full$cluster = as.factor(dat.km.full$cluster)
n.levels.km = nlevels(dat.km.full$cluster)
# Loop for the total n of studies
dat.km.full.total = dat.km.full[, -c(1:6, ncol(dat.km.full))]
n.cluster.tots = apply(dat.km.full.total, 2, sum)
n.cluster.tots = data.frame(unlist(as.matrix(n.cluster.tots)))
colnames(n.cluster.tots) = c("n.tots")
cat("=========")
# Loop for the cluster-wise n of studies
n = sapply(split(dat.km.full.total, dat.km.full$cluster), function(x) apply(x, 2, sum))
# Calculate Percentages
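# For each cluster: a study's share of subsets falling into that cluster minus the mean share
# across all studies, i.e. the deviation from a 'representative' cluster makeup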
deltas = as.data.frame(apply(n, 2, function(x) (x/n.cluster.tots$n.tots) - mean(x/n.cluster.tots$n.tots)))
# Generate the plot
Study = rep(1:nrow(deltas), n.levels.km)
delta.df = suppressMessages(reshape2::melt(deltas))
delta.df[, 3] = Study
delta.df$variable = as.factor(delta.df$variable)
colnames(delta.df) = c("Cluster", "Delta_Percentage", "Study")
cat("=========")
km.plot = ggplot(data = delta.df, aes(x = Study, y = Delta_Percentage, group = Cluster)) + geom_line(aes(color = Cluster)) +
geom_point(aes(color = Cluster)) + scale_x_continuous(name = "Study", breaks = seq(0, nrow(deltas),
1)) + scale_y_continuous(name = "Delta Percentage") + theme(axis.text = element_text(size = 5)) +
ggtitle("Cluster imbalance (K-Means algorithm)") + geom_hline(yintercept = 0, linetype = "dashed")
####################################################
# Cook's Distance Plot###########################
# K-Means############################################
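# Intercept-only model of the per-cluster imbalances; Cook's distance then quantifies
# each study's leverage on the cluster makeup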
m.cd.km = by(delta.df, as.factor(delta.df$Cluster), function(x) lm(Delta_Percentage ~ 1, data = x))
m.cd.km$`0` = NULL
m.cd.km = lapply(m.cd.km, cooks.distance)
m.cd.km.df = data.frame(Cooks.Distance = matrix(unlist(m.cd.km)))
m.cd.km.df$Cluster = as.factor(rep(1:(n.levels.km), each = nrow(deltas)))
m.cd.km.df$Study = rep(1:nrow(deltas), times = (n.levels.km))
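# Studies whose Cook's distance exceeds three times the mean are flagged as potentially influential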
outlier.cd.km = 3 * mean(m.cd.km.df$Cooks.Distance)
if (n.levels.km <= 2){
m.cd.km.df[m.cd.km.df$Cluster=="2", "Cooks.Distance"] = m.cd.km.df[m.cd.km.df$Cluster=="2", "Cooks.Distance"] + 0.01
km.cd.plot = ggplot(data = m.cd.km.df, aes(x = Study, y = Cooks.Distance, group = Cluster)) +
geom_line(aes(color=Cluster), alpha = 0.5) +
geom_point(aes(color = Cluster)) +
scale_x_continuous(name = "Study", breaks = seq(0, nrow(deltas), 1)) +
scale_y_continuous(name = "Cook's Distance") +
theme(axis.text = element_text(size = 5)) +
ggtitle("Cluster imbalance (Cook's Distance)") +
geom_hline(yintercept = outlier.cd.km, linetype = "dashed") +
geom_hline(yintercept = 0, linetype = "dashed")
} else {
km.cd.plot = ggplot(data = m.cd.km.df, aes(x = Study, y = Cooks.Distance, group = Cluster)) +
geom_line(aes(color=Cluster), alpha = 0.5) +
geom_point(aes(color = Cluster)) +
scale_x_continuous(name = "Study", breaks = seq(0, nrow(deltas), 1)) +
scale_y_continuous(name = "Cook's Distance") +
theme(axis.text = element_text(size = 5)) +
ggtitle("Cluster imbalance (Cook's Distance)") +
geom_hline(yintercept = outlier.cd.km, linetype = "dashed") +
geom_hline(yintercept = 0, linetype = "dashed")
}
####################################################
# Extract the Percentages###########################
# DBSCAN############################################
dat.db.full$cluster = as.factor(dat.db.full$cluster)
n.levels.db = nlevels(dat.db.full$cluster)
# Loop for the total n of studies
dat.db.full.total = dat.db.full[, -c(1:6, ncol(dat.db.full))]
n.cluster.tots = apply(dat.db.full.total, 2, sum)
n.cluster.tots = data.frame(unlist(as.matrix(n.cluster.tots)))
colnames(n.cluster.tots) = c("n.tots")
# Loop for the cluster-wise n of studies
n = sapply(split(dat.db.full.total, dat.db.full$cluster), function(x) apply(x, 2, sum))
# Calculate Percentages
deltas = as.data.frame(apply(n, 2, function(x) (x/n.cluster.tots$n.tots) - mean(x/n.cluster.tots$n.tots)))
# Generate the plot
Study = rep(1:nrow(deltas), n.levels.db)
delta.df = suppressMessages(reshape2::melt(deltas))
delta.df[, 3] = Study
delta.df$variable = as.factor(delta.df$variable)
colnames(delta.df) = c("Cluster", "Delta_Percentage", "Study")
delta.df = filter(delta.df, !Cluster == 0) #Zero Class (Outliers are removed)
db.plot = ggplot(data = delta.df, aes(x = Study, y = Delta_Percentage, group = Cluster)) + geom_line(aes(color = Cluster)) +
geom_point(aes(color = Cluster)) + scale_x_continuous(name = "Study", breaks = seq(0, nrow(deltas),
1)) + scale_y_continuous(name = "Delta Percentage") + theme(axis.text = element_text(size = 5)) +
ggtitle("Cluster imbalance (Density-Based Clustering)") + geom_hline(yintercept = 0, linetype = "dashed")
####################################################
# Cook's Distance Plot###########################
# DBSCAN############################################
m.cd.db = by(delta.df, as.factor(delta.df$Cluster), function(x) lm(Delta_Percentage ~ 1, data = x))
m.cd.db$`0` = NULL
m.cd.db = lapply(m.cd.db, cooks.distance)
m.cd.db.df = data.frame(Cooks.Distance = matrix(unlist(m.cd.db)))
m.cd.db.df$Cluster = as.factor(rep(1:(n.levels.db - 1), each = nrow(deltas)))
m.cd.db.df$Study = rep(1:nrow(deltas), times = (n.levels.db - 1))
outlier.cd.db = 3 * mean(m.cd.db.df$Cooks.Distance)
if (n.levels.db <= 2){
m.cd.db.df[m.cd.db.df$Cluster=="2", "Cooks.Distance"] = m.cd.db.df[m.cd.db.df$Cluster=="2", "Cooks.Distance"] + 0.01
db.cd.plot = ggplot(data = m.cd.db.df, aes(x = Study, y = Cooks.Distance, group = Cluster)) +
geom_line(aes(color=Cluster), alpha = 0.5) +
geom_point(aes(color = Cluster)) +
scale_x_continuous(name = "Study", breaks = seq(0, nrow(deltas), 1)) +
scale_y_continuous(name = "Cook's Distance") +
theme(axis.text = element_text(size = 5)) +
ggtitle("Cluster imbalance (Cook's Distance)") +
geom_hline(yintercept = outlier.cd.db, linetype = "dashed") +
geom_hline(yintercept = 0, linetype = "dashed")
} else {
db.cd.plot = ggplot(data = m.cd.db.df, aes(x = Study, y = Cooks.Distance, group = Cluster)) +
geom_line(aes(color=Cluster), alpha = 0.5) +
geom_point(aes(color = Cluster)) +
scale_x_continuous(name = "Study", breaks = seq(0, nrow(deltas), 1)) +
scale_y_continuous(name = "Cook's Distance") +
theme(axis.text = element_text(size = 5)) +
ggtitle("Cluster imbalance (Cook's Distance)") +
geom_hline(yintercept = outlier.cd.db, linetype = "dashed") +
geom_hline(yintercept = 0, linetype = "dashed")
}
####################################################
# Extract the Percentages###########################
# GMM ############################################
dat.gmm.full$cluster = as.factor(dat.gmm.full$cluster)
n.levels.gmm = nlevels(dat.gmm.full$cluster)
# Loop for the total n of studies
dat.gmm.full.total = dat.gmm.full[, -c(1:6, ncol(dat.gmm.full))]
n.cluster.tots = apply(dat.gmm.full.total, 2, sum)
n.cluster.tots = data.frame(unlist(as.matrix(n.cluster.tots)))
colnames(n.cluster.tots) = c("n.tots")
cat("=========")
# Loop for the cluster-wise n of studies
n = sapply(split(dat.gmm.full.total, dat.gmm.full$cluster), function(x) apply(x, 2, sum))
# Calculate Percentages
deltas = as.data.frame(apply(n, 2, function(x) (x/n.cluster.tots$n.tots) - mean(x/n.cluster.tots$n.tots)))
# Generate the plot
Study = rep(1:nrow(deltas), n.levels.gmm)
delta.df = suppressMessages(reshape2::melt(deltas))
delta.df[, 3] = Study
delta.df$variable = as.factor(delta.df$variable)
colnames(delta.df) = c("Cluster", "Delta_Percentage", "Study")
cat("===========] DONE ", "\n")
gmm.plot = ggplot(data = delta.df, aes(x = Study, y = Delta_Percentage, group = Cluster)) + geom_line(aes(color = Cluster)) +
geom_point(aes(color = Cluster)) + scale_x_continuous(name = "Study", breaks = seq(0, nrow(deltas),
1)) + scale_y_continuous(name = "Delta Percentage") + theme(axis.text = element_text(size = 5)) +
ggtitle("Cluster imbalance (GMM)") + geom_hline(yintercept = 0, linetype = "dashed")
####################################################
# Cook's Distance Plot###########################
# GMM ############################################
m.cd.gmm = by(delta.df, as.factor(delta.df$Cluster), function(x) lm(Delta_Percentage ~ 1, data = x))
m.cd.gmm$`0` = NULL
m.cd.gmm = lapply(m.cd.gmm, cooks.distance)
m.cd.gmm.df = data.frame(Cooks.Distance = matrix(unlist(m.cd.gmm)))
m.cd.gmm.df$Cluster = as.factor(rep(1:(n.levels.gmm), each = nrow(deltas)))
m.cd.gmm.df$Study = rep(1:nrow(deltas), times = (n.levels.gmm))
outlier.cd.gmm = 3 * mean(m.cd.gmm.df$Cooks.Distance)
if (n.levels.gmm <= 2){
m.cd.gmm.df[m.cd.gmm.df$Cluster=="2", "Cooks.Distance"] = m.cd.gmm.df[m.cd.gmm.df$Cluster=="2", "Cooks.Distance"] + 0.01
gmm.cd.plot = ggplot(data = m.cd.gmm.df, aes(x = Study, y = Cooks.Distance, group = Cluster)) +
geom_line(aes(color=Cluster), alpha = 0.5) +
geom_point(aes(color = Cluster)) +
scale_x_continuous(name = "Study", breaks = seq(0, nrow(deltas), 1)) +
scale_y_continuous(name = "Cook's Distance") +
theme(axis.text = element_text(size = 5)) +
ggtitle("Cluster imbalance (Cook's Distance)") +
geom_hline(yintercept = outlier.cd.gmm, linetype = "dashed") +
geom_hline(yintercept = 0, linetype = "dashed")
} else {
gmm.cd.plot = ggplot(data = m.cd.gmm.df, aes(x = Study, y = Cooks.Distance, group = Cluster)) +
geom_line(aes(color=Cluster), alpha = 0.5) +
geom_point(aes(color = Cluster)) +
scale_x_continuous(name = "Study", breaks = seq(0, nrow(deltas), 1)) +
scale_y_continuous(name = "Cook's Distance") +
theme(axis.text = element_text(size = 5)) +
ggtitle("Cluster imbalance (Cook's Distance)") +
geom_hline(yintercept = outlier.cd.gmm, linetype = "dashed") +
geom_hline(yintercept = 0, linetype = "dashed")
}
#######################
# Generate Output Plot#
#######################
if (do.km == TRUE){
print(plot_grid(km.clusterplot, plot_grid(km.plot, km.cd.plot, nrow=2), nrow=1))
cat("\n","Number of k-means clusters used:", n.levels.km, "\n")
}
if (do.db == TRUE){
print(plot_grid(db.clusterplot, plot_grid(db.plot, db.cd.plot, nrow=2), nrow=1))
cat("\n","Number of DBSCAN clusters detected:", n.levels.db-1, "\n")
}
if (do.gmm == TRUE){
print(plot_grid(gmm.clusterplot, plot_grid(gmm.plot, gmm.cd.plot, nrow=2), nrow=1))
cat("\n","Number of GMM clusters detected:", n.levels.gmm, "\n")
}
############################################
# Plot GOSH for potential outlying studies #
############################################
# Get outlying studies
# Kmeans
outlier.studies.km.df = m.cd.km.df[m.cd.km.df$Cooks.Distance>outlier.cd.km,]
outlier.studies.km = unique(outlier.studies.km.df$Study)
# DBSCAN
outlier.studies.db.df = m.cd.db.df[m.cd.db.df$Cooks.Distance>outlier.cd.db,]
outlier.studies.db = unique(outlier.studies.db.df$Study)
# GMM
outlier.studies.gmm.df = m.cd.gmm.df[m.cd.gmm.df$Cooks.Distance>outlier.cd.gmm,]
outlier.studies.gmm = unique(outlier.studies.gmm.df$Study)
# Use all identified outliers
outlier.studies.all = unique(c(outlier.studies.km, outlier.studies.db, outlier.studies.gmm))
outlier.studies.all.mask = outlier.studies.all + 6 # Add 6 to use as mask
cat("\n", "Identification of potential outliers", "\n", "---------------------------------", "\n")
# Get plotting dataset and only choose outlier studies as mask, use db data
if (length(as.numeric(db$cluster)) > 5000){
dat.all.outliers = dat.db.full[db.plot.mask, c(3,6, outlier.studies.all.mask)]
} else {
dat.all.outliers = dat.db.full[,c(3,6, outlier.studies.all.mask)]
}
if (length(outlier.studies.all) > 0){
# Loop through all identified outliers
for (i in 1:length(outlier.studies.all)){
outlier.plot = ggplot(data = dat.all.outliers, aes(x = estimate,
y = I2,
color = dat.all.outliers[,i+2])) +
geom_point(alpha=0.8) +
scale_color_manual(values = c("lightgrey", "#00BFC4")) +
theme_minimal() +
theme(legend.position = "none",
plot.title = element_text(hjust = 0.5, face = "bold")) +
xlab("Effect Size") +
ylab("I-squared")
density.db.upper = ggplot(data = dat.all.outliers, aes(x = estimate,
fill = dat.all.outliers[,i+2])) +
geom_density(alpha = 0.5) +
theme_classic() +
theme(axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks = element_blank(),
legend.position = "none",
plot.background = element_blank(),
axis.line.x = element_blank(),
axis.title.y = element_text(color="white"),
axis.text.y = element_text(color="white"),
axis.line.y = element_line(color="white")
) +
scale_fill_manual(values = c("lightgrey", "#00BFC4"))
blankPlot = ggplot()+geom_blank(aes(1,1))+
theme(plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.line.x = element_blank(),
axis.line.y = element_blank()
)
density.db.right = ggplot(data = dat.all.outliers, aes(x = I2,
fill = dat.all.outliers[,i+2])) +
geom_density(alpha = 0.5) +
theme_classic() +
theme(axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
legend.position = "none",
plot.background = element_blank(),
axis.line.y = element_blank(),
axis.title.x = element_text(color="white"),
axis.text.x = element_text(color="white"),
axis.line.x = element_line(color="white")
) +
scale_fill_manual(values = c("lightgrey", "#00BFC4")) +
coord_flip()
print(plot_grid(density.db.upper,
blankPlot,
outlier.plot,
density.db.right,
nrow = 2,
rel_heights = c(1,5),
rel_widths = c(4,1),
labels = c(paste("Study ", outlier.studies.all[i]),
"", "", "")))
}
cat(" ", "\n", "Studies identified as potential outliers:", "\n")
if (do.km == TRUE){
cat("\n", "- K-means:", outlier.studies.km, "\n")
}
if (do.db == TRUE){
cat("\n", "- DBSCAN:", outlier.studies.db, "\n")
}
if (do.gmm == TRUE){
cat("\n", "- Gaussian Mixture Model:", outlier.studies.gmm, "\n")
}
} else {
cat("\n", "No potential outliers detected.")
}
invisible(list(km.clusters = n.levels.km,
db.clusters = n.levels.db-1,
gmm.clusters = n.levels.gmm,
k = ncol(dat.db.full)-7,
outliers.km = outlier.studies.km,
outliers.dbscan = outlier.studies.db,
outliers.gmm = outlier.studies.gmm,
km.plot = plot_grid(km.clusterplot, plot_grid(km.plot, km.cd.plot, nrow=2), nrow=1),
dbscan.plot = plot_grid(db.clusterplot, plot_grid(db.plot, db.cd.plot, nrow=2), nrow=1),
gmm.plot = plot_grid(gmm.clusterplot, plot_grid(gmm.plot, gmm.cd.plot, nrow=2), nrow=1)))
}
#' A priori power calculator for subgroup contrasts
#'
#' This function performs an \emph{a priori} power estimation for a test for subgroup differences
#' within a meta-analysis.
#'
#' @usage power.analysis.subgroup(TE1, TE2, seTE1, seTE2, sd1, sd2, var1, var2,
#' two.tailed=TRUE)
#'
#' @param TE1 Pooled effect size (e.g., standardized mean difference, Hedges' \eqn{g}, log-Odds Ratio or other
#' linear continuous effect size) of the first subgroup of studies.
#' @param TE2 Pooled effect size (e.g., standardized mean difference, Hedges' \eqn{g}, log-Odds Ratio or other
#' linear continuous effect size) of the second subgroup of studies.
#' @param seTE1 Pooled standard error of the first subgroup of studies. Either \code{seTE1/seTE2},
#' \code{sd1/sd2}, or \code{var1/var2} must be provided.
#' @param seTE2 Pooled standard error of the second subgroup of studies. Either \code{seTE1/seTE2},
#' \code{sd1/sd2}, or \code{var1/var2} must be provided.
#' @param sd1 Pooled standard deviation of the first subgroup of studies. Either \code{seTE1/seTE2},
#' \code{sd1/sd2}, or \code{var1/var2} must be provided.
#' @param sd2 Pooled standard deviation of the second subgroup of studies. Either \code{seTE1/seTE2},
#' \code{sd1/sd2}, or \code{var1/var2} must be provided.
#' @param var1 Pooled variance of the first subgroup of studies. Either \code{seTE1/seTE2},
#' \code{sd1/sd2}, or \code{var1/var2} must be provided.
#' @param var2 Pooled variance of the second subgroup of studies. Either \code{seTE1/seTE2},
#' \code{sd1/sd2}, or \code{var1/var2} must be provided.
#' @param two.tailed Logical. Should a two-tailed (\code{TRUE}) or one-tailed (\code{FALSE}) test (\eqn{\alpha = 0.05}) be assumed?
#' Default is \code{TRUE}.
#'
#' @details This function provides an estimate of the power \eqn{1-\beta} of a subgroup contrast analysis, given the
#' assumed effect sizes in each subgroup and their dispersion measures. The function implements the formulae
#' described by Hedges and Pigott (2001).
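#'
#' As implemented here, with \eqn{\gamma = |TE_1 - TE_2|} and pooled variance \eqn{v_{\gamma} = v_1 + v_2},
#' the power of a two-tailed test (\eqn{\alpha = 0.05}) is estimated as
#' \deqn{1-\beta = 1 - \Phi\left(1.96 - \frac{\gamma}{\sqrt{v_{\gamma}}}\right) + \Phi\left(-1.96 - \frac{\gamma}{\sqrt{v_{\gamma}}}\right)}
#' where \eqn{\Phi} denotes the standard normal cumulative distribution function. For a one-tailed test,
#' 1.96 is replaced by 1.64 and the last term is dropped.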
#'
#' @references
#' Hedges, L. V., & Pigott, T. D. (2001). The power of statistical tests in meta-analysis.
#' \emph{Psychological methods, 6}(3), 203.
#'
#' @author Mathias Harrer & David Daniel Ebert
#'
#' @return Returns the estimated power of the subgroup contrast, expressed as a value between 0 and 1 (i.e., 0\%-100\%).
#'
#' An additional plot is generated, showing the effect size difference (x), power (y), estimated power (red point) and
#' estimated power for changing effect size differences (blue line). A dashed line at 80\% power is also provided as a
#' visual threshold for sufficient power.
#'
#' @export power.analysis.subgroup
#'
#' @import ggplot2
#'
#' @seealso \code{\link{power.analysis}}
#'
#' @examples
#' # Example 1: using standard error and two-tailed test
#' power.analysis.subgroup(TE1=0.30, TE2=0.66, seTE1=0.13, seTE2=0.14)
#'
#' # Example 2: using variance and one-tailed test
#' power.analysis.subgroup(TE1=-0.91, TE2=-1.22, var1 = 0.0023, var2 = 0.0078,
#' two.tailed = FALSE)
power.analysis.subgroup = function(TE1, TE2, seTE1, seTE2, sd1, sd2, var1, var2, two.tailed = TRUE) {
gamma = abs(TE1 - TE2)
if (missing(var1)) {
if (missing(sd1)) {
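# ((TE - 1.96 * seTE) - TE)/-1.96 simplifies to seTE, so the pooled variance is the squared standard error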
var1 = (((TE1 - 1.96 * seTE1) - TE1)/-1.96)^2
var2 = (((TE2 - 1.96 * seTE2) - TE2)/-1.96)^2
varg = var1 + var2
} else {
var1 = sd1^2
var2 = sd2^2
varg = var1 + var2
}
} else {
varg = var1 + var2
}
# Define c_a
ca_1tail = 1.64
ca_2tail = 1.96
# Calculate
onetail = (1 - pnorm(ca_1tail - (gamma/sqrt(varg))))
twotail = (1 - pnorm(ca_2tail - (gamma/sqrt(varg))) + pnorm(-ca_2tail - (gamma/sqrt(varg))))
# Return
if (two.tailed == TRUE) {
if (gamma > 1) {
gammas = (1:((gamma + 3) * 1000))/1000
powervec = vector()
for (i in 1:length(gammas)) {
powervec[i] = (1 - pnorm(ca_2tail - (gammas[i]/sqrt(varg))) + pnorm(-ca_2tail - (gammas[i]/sqrt(varg))))
}
} else {
gammas = (1:1000)/1000
powervec = vector()
for (i in 1:length(gammas)) {
powervec[i] = (1 - pnorm(ca_2tail - (gammas[i]/sqrt(varg))) + pnorm(-ca_2tail - (gammas[i]/sqrt(varg))))
}
}
plotdat = as.data.frame(cbind(gammas, powervec))
plot = ggplot(data = plotdat, aes(x = gammas, y = powervec)) + geom_line(color = "blue", size = 2) +
geom_point(aes(x = gamma, y = twotail), color = "red", size = 5) + theme_minimal() + geom_hline(yintercept = 0.8,
color = "black", linetype = "dashed") + ylab("Power") + xlab("Effect size difference")
plot(plot)
if (!is.na(plotdat[plotdat$powervec >= 0.8, ][1, 1])) {
cat("Minimum effect size difference needed for sufficient power: ", plotdat[plotdat$powervec >=
0.8, ][1, 1], " (input: ", gamma, ")", "\n", sep = "")
}
cat("Power for subgroup difference test (two-tailed): \n")
return(twotail)
} else {
if (gamma > 1) {
gammas = (1:((gamma + 3) * 1000))/1000
powervec = vector()
for (i in 1:length(gammas)) {
powervec[i] = (1 - pnorm(ca_1tail - (gammas[i]/sqrt(varg))))
}
} else {
gammas = (1:1000)/1000
powervec = vector()
for (i in 1:length(gammas)) {
powervec[i] = (1 - pnorm(ca_1tail - (gammas[i]/sqrt(varg))))
}
}
plotdat = as.data.frame(cbind(gammas, powervec))
plot = ggplot(data = plotdat, aes(x = gammas, y = powervec)) + geom_line(color = "blue", size = 2) +
geom_point(aes(x = gamma, y = onetail), color = "red", size = 5) + theme_minimal() + geom_hline(yintercept = 0.8,
color = "black", linetype = "dashed") + ylab("Power") + xlab("Effect size difference")
plot(plot)
if (!is.na(plotdat[plotdat$powervec >= 0.8, ][1, 1])) {
cat("Minimum effect size difference needed for sufficient power: ", plotdat[plotdat$powervec >=
0.8, ][1, 1], " (input: ", gamma, ")", "\n", sep = "")
}
cat("Power for subgroup difference test (one-tailed): \n")
return(onetail)
}
}
#' Calculate the standard error from the effect size and p-value
#'
#' This function calculates the standard error of an effect size provided the exact
#' \eqn{p}-value and (continuous) effect size according to the formula
#' by \href{https://www.ncbi.nlm.nih.gov/pubmed/21824904}{Altman and Bland (2011)}.
#'
#' @usage se.from.p(effect.size, p, N, effect.size.type = 'difference',
#' calculate.g = FALSE)
#'
#' @param effect.size Numeric vector or single number. The effect size, such as the
#' standardized mean difference, Hedges' \eqn{g} or other continuous effect size.
#' @param p Numeric vector or single number. The exact \eqn{p}-value corresponding to the
#' effect size.
#' @param N Numeric vector or single number. The total number of samples used to
#' calculate the effect size/\eqn{p}-value.
#' @param effect.size.type The type of effect sizes provided in \code{effect.size}. For
#' effect sizes based on differences (e.g., mean differences), this parameter has to be
#' set to \code{"difference"}. For effect sizes based on ratios (e.g., risk ratio, odds ratio),
#' this parameter has to be set to \code{"ratio"}.
#' @param calculate.g Logical. Calculates the standardized mean difference
#' corrected for small sample bias (Hedges' \eqn{g}). \code{FALSE} by default.
#'
#' @details This function calculates the standard error, standard deviation and 95\% confidence
#' interval of an effect size given the effect size and exact \eqn{p}-value. The function can be used for
#' \itemize{
#' \item effect sizes based on \strong{differences} (e.g., mean differences) by setting \code{effect.size.type}
#' to \code{"difference"}, or
#' \item effect sizes based on \strong{ratios} (e.g. risk ratios, odds ratios or
#' hazard ratios) by setting \code{effect.size.type} to \code{"ratio"}. When ratios are used, the
#' function returns the log-transformed effect sizes, standard error, standard deviation and confidence interval,
#' which can be used for meta-analytic pooling using the \code{\link[meta]{metagen}} function,
#' along with the original effect size and confidence interval.
#' }
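#'
#' Internally, the \eqn{z} statistic is back-calculated from the exact \eqn{p}-value using the
#' approximation given by Altman and Bland (2011), \eqn{z = -0.862 + \sqrt{0.743 - 2.404\log(p)}};
#' the standard error is then obtained as \eqn{SE = ES/z} (taking the absolute value for ratio measures,
#' where \eqn{ES} is the log-transformed effect size).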
#'
#' @references Altman D.G. & Bland J.M. (2011) How to obtain the confidence interval
#' of a \emph{p} value. \emph{BMJ 343}:d2090.
#'
#' @author Mathias Harrer & David Daniel Ebert
#'
#' @import esc
#'
#' @return A dataframe containing the following columns:
#' \itemize{
#' \item \code{(log)EffectSize}: The input effect size. Log-transformed if \code{effect.size.type="ratio"}.
#' \item \code{Hedges.g}: The calculated Hedges' g values (only if \code{calculate.g=TRUE}).
#' \item \code{(log)StandardError}: The standard error (SE) for the effect size. Log-transformed if \code{effect.size.type="ratio"}.
#' \item \code{(log)StandardDeviation}: The standard deviation (SD) for the effect size. Log-transformed if \code{effect.size.type="ratio"}.
#' \item \code{(log)LLCI} and \code{(log)ULCI}: The lower and upper 95\% confidence interval of the effect size. Log-transformed if \code{effect.size.type="ratio"}.}
#'
#' @export se.from.p
#'
#' @examples
#' # Example 1: one single effect size
#' se.from.p(effect.size = 0.71, p = 0.013, N = 75,
#' effect.size.type= "difference", calculate.g = TRUE)
#'
#' # Example 2: vector of effect sizes (Odds Ratio)
#' effect.size = c(0.91, 1.01, 0.72, 0.43)
#' p = c(0.05, 0.031, 0.001, 0.09)
#' N = c(120, 86, 450, 123)
#' se.from.p(effect.size = effect.size, p = p, N = N,
#' effect.size.type = "ratio")
se.from.p = function(effect.size, p, N, effect.size.type = "difference", calculate.g = FALSE) {
# Set params
ES = effect.size
p = p
N = N
ES.type = effect.size.type
calculate.g = calculate.g
if (is.numeric(ES) == FALSE) {
stop("'effect.size' is not of type numeric().")
}
if (is.numeric(p) == FALSE) {
stop("'p' is not of type numeric().")
}
if (is.numeric(N) == FALSE) {
stop("'N' is not of type numeric().")
}
if (ES.type %in% c("difference", "ratio") == FALSE) {
stop("'effect.size.type' must be either 'difference' or 'ratio'.")
}
# Difference vs. Ratio
# Difference
if (ES.type == "difference") {
if (calculate.g == TRUE) {
ES = hedges_g(d = ES, totaln = N)
z = -0.862 + sqrt(0.743 - 2.404 * log(p))
SE = ES/z
SD = SE * sqrt(N)
LLCI = ES - 1.96 * SE
ULCI = ES + 1.96 * SE
data = data.frame(ES, SE, SD, LLCI, ULCI)
colnames(data) = c("Hedges.g", "StandardError", "StandardDeviation", "LLCI", "ULCI")
} else {
z = -0.862 + sqrt(0.743 - 2.404 * log(p))
SE = ES/z
SD = SE * sqrt(N)
LLCI = ES - 1.96 * SE
ULCI = ES + 1.96 * SE
data = data.frame(ES, SE, SD, LLCI, ULCI)
colnames(data) = c("EffectSize", "StandardError", "StandardDeviation", "LLCI", "ULCI")
}
}
if (ES.type == "ratio") {
if (calculate.g == TRUE) {
stop("Hedges' g cannot be calculated for ratios using this function; set 'calculate.g=FALSE'.")
} else {
z = -0.862 + sqrt(0.743 - 2.404 * log(p))
ES = log(ES)
SE = abs(ES/z)
SD = SE * sqrt(N)
LLCI = ES - 1.96 * SE
ULCI = ES + 1.96 * SE
# Exponentiate to get original scale
expES = exp(ES)
expLLCI = exp(LLCI)
expULCI = exp(ULCI)
data = data.frame(ES, SE, SD, LLCI, ULCI, expES, expLLCI, expULCI)
colnames(data) = c("logEffectSize", "logStandardError", "logStandardDeviation", "logLLCI", "logULCI", "EffectSize",
"LLCI", "ULCI")
}
}
return(data)
}
#' Perform multimodel inference on a meta-regression model
#'
#' This function performs multimodel inference to evaluate the importance of predictors
#' in a meta-analytical meta-regression model.
#'
#' @usage multimodel.inference(TE, seTE, data, predictors, method='REML', test='knha',
#' eval.criterion='AICc', interaction=FALSE, seed = 123)
#'
#' @param TE The precalculated effect size for each study. Must be supplied as the name of the effect size
#' column in the dataset (in quotation marks; e.g. \code{TE = "effectsize"}).
#' @param seTE The precalculated standard error for each study. Must be supplied as the name of the standard error
#' column in the dataset (in quotation marks; e.g. \code{seTE = "se"}).
#' @param data A \code{data.frame} containing columns for the effect size, standard error and
#' meta-regression predictors of each study/effect.
#' @param predictors A character vector specifying the predictors to be used
#' for multimodel inference. Names of the predictors must be identical to the column
#' names of the \code{data.frame} supplied to \code{data}.
#' @param method Meta-analysis model to use for pooling effect sizes. Use \code{'FE'} for the
#' fixed-effect model. Different random-effect models are available: \code{'DL', 'HE', 'SJ', 'ML', 'REML', 'EB', 'HS', 'GENQ'}.
#' If \code{'FE'} is used, the \code{test} argument is automatically set to \code{'z'}, as the Knapp-Hartung
#' method is not meant to be used with fixed-effect models. Default is \code{'REML'}, and it is strongly advised to remain with
#' this option to use a standard (mixed-effects) meta-regression model.
#' @param test Method to use to compute test statistics and confidence intervals. Default is \code{'knha'}
#' which uses the Knapp-Hartung (Knapp & Hartung, 2003) adjustment method. "Conventional" Wald-type tests and
#' CIs are calculated by setting this argument to \code{'z'}. When \code{method='FE'}, this argument is
#' set to \code{'z'} automatically as the Knapp-Hartung method was not meant to be used with fixed-effect models.
#' @param eval.criterion Evaluation criterion to sort the multiple models by. Can be either \code{'AICc'}
#' (default; corrected Akaike's Information Criterion), \code{'AIC'} (Akaike's Information Criterion) or
#' \code{'BIC'} (Bayesian Information Criterion).
#' @param interaction If set to \code{FALSE} (default), no interactions between predictors are considered. Setting this parameter to
#' \code{TRUE} means that all interactions are modeled.
#' @param seed Optional. Set a seed for the function.
#'
#' @details Multimodel methods differ from stepwise methods in that they do not try to successively build
#' the one “best” (meta-regression) model explaining most of the variance. Instead, in this procedure,
#' all possible combinations of a predefined selection of predictors are modeled and evaluated using
#' a criterion such as Akaike’s Information Criterion, which rewards simpler models.
#' This enables a full examination of all possible models and how they perform.
#' A common finding using this procedure is that there are many different kinds of predictor
#' combinations within a model which lead to a good fit. In multimodel inference, the estimated
#' coefficients of predictors can then be synthesized across all possible models to infer how
#' important certain predictors are overall.
#'
#' Multimodel inference can be a useful way to obtain a comprehensive look at which predictors are
#' more or less important for predicting differences in effect sizes. Although it avoids some of the
#' problems of stepwise regression methods, it should be noted that this method should still be seen
#' as rather exploratory, and may be used when there is no prior knowledge on how the predictors are
#' related to effect sizes in the research field under study.
#'
#' The \code{multimodel.inference} function calls the \code{\link[metafor]{rma.uni}} function internally,
#' which is then fed forward to the \code{\link[MuMIn]{dredge}} function for multimodel inference through
#' two utility functions returned by the \code{multimodel.inference} function.
#'
#' Parts of the computations in this function are based on a vignette by Wolfgang Viechtbauer, which can be found
#' \href{http://www.metafor-project.org/doku.php/tips:model_selection_with_glmulti_and_mumin}{here}.
#'
#'
#' @references
#'
#' Harrer, M., Cuijpers, P., Furukawa, T.A, & Ebert, D. D. (2019).
#' \emph{Doing Meta-Analysis in R: A Hands-on Guide}. DOI: 10.5281/zenodo.2551803. \href{https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/smallstudyeffects.html}{Chapter 9.1}
#'
#' Knapp, G., & Hartung, J. (2003). Improved tests for a random effects meta-regression with a single covariate.
#' \emph{Statistics in Medicine, 22}, 2693–2710.
#'
#' Viechtbauer, W. (2019). \emph{Model Selection using the glmulti and MuMIn Packages}. \href{http://www.metafor-project.org/doku.php/tips:model_selection_with_glmulti_and_mumin}{Link}.
#' Last accessed 01-Aug-2019.
#'
#' @author Mathias Harrer & David Daniel Ebert
#'
#' @import MuMIn ggplot2
#' @importFrom metafor rma rma.uni
#'
#' @return Returns four tables and a plot:
#' \itemize{
#' \item \strong{Final Results (Summary Table)}: Displays the number of fitted models, model formula,
#' method to calculate test statistics and confidence intervals, interactions, and evaluation criterion used.
#' \item \strong{Best 5 Models}: Displays the top five models in terms of the evaluation criterion used.
#' Predictors are displayed as columns of the table, and models as rows. A number (weight) or \code{+}
#' sign (for categorical predictors) indicates that a predictor/interaction term was used for the
#' model, while empty cells indicate that the predictor was omitted in this model. Other metrics such as the
#' \code{weight}, evaluation metric \code{delta} compared to the best model, logLikelihood and degrees of freedom
#' are also displayed.
#' \item \strong{Multimodel Inference Coefficients}: Displays the coefficients and statistical significance
#' of each regression term in the model.
#' \item \strong{Predictor Importance}: Displays the importance value for each model term. The table is sorted from
#' highest to lowest. A common rule of thumb is to consider a predictor as important when its importance value is above 0.8.
#' \item \strong{Predictor Importance Plot}: A bar plot for the predictor importance data along with a reference line for the
#' 0.8 value often used as a crude threshold to characterize a predictor as important.
#' }
#'
#' @export multimodel.inference
#'
#' @seealso \code{\link[MuMIn]{dredge}}
#'
#' @examples
#' \dontrun{
#' # Example 1: Perform multimodel inference with default settings
#' data('MVRegressionData')
#' library(metafor)
#' mmi = multimodel.inference(TE = 'yi', seTE = 'sei', data = MVRegressionData,
#' predictors = c('pubyear', 'quality',
#' 'reputation', 'continent'))
#'
#' # Example 2: Model Interaction terms, set method to 'DL',
#' # change evaluation criterion to bic
#' multimodel.inference(TE = 'yi', seTE = 'sei', data = MVRegressionData,
#' predictors = c('pubyear', 'quality',
#' 'reputation', 'continent'),
#' method='DL', eval.criterion = 'BIC', interaction = TRUE)
#'
#' # Example 3: Use only categorical predictors
#' data('ThirdWave')
#' multimodel.inference(TE = 'TE', seTE = 'seTE', data = ThirdWave,
#' predictors = colnames(ThirdWave)[4:7], interaction = FALSE)}
multimodel.inference = function(TE, seTE, data, predictors, method = "REML", test = "knha", eval.criterion = "AICc",
interaction = FALSE, seed = 123) {
# Define MuMIn link
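# These S3 methods (coefTable.rma, makeArgs.rma) let MuMIn's dredge() work with 'rma' meta-regression
# objects; they are assigned to the global environment below (following Viechtbauer's model selection vignette)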
MetaforMuMIn = function() {
coefTable.rma = function(model, ...) {
makeCoefTable = function (x, se, df = NA_real_, coefNames = names(x))
{
# Unexported function from the MuMIn package, see MuMIn:::.makeCoefTable
if (n <- length(x)) {
xdefined <- !is.na(x)
ndef <- sum(xdefined)
if (ndef < n) {
if (length(se) == ndef) {
y <- rep(NA_real_, n)
y[xdefined] <- se
se <- y
}
if (length(df) == ndef) {
y <- rep(NA_real_, n)
y[xdefined] <- df
df <- y
}
}
}
if (n && n != length(se))
stop("length(x) is not equal to length(se)")
ret <- matrix(NA_real_, ncol = 3L, nrow = length(x), dimnames = list(coefNames,
c("Estimate", "Std. Error", "df")))
if (n)
ret[, ] <- cbind(x, se, rep(if (is.null(df)) NA_real_ else df,
length.out = n), deparse.level = 0L)
class(ret) <- c("coefTable", "matrix")
ret
}
makeCoefTable(model$b, model$se, coefNames = rownames(model$b))
}
expr.split = function (x, split = ":", paren.open = c("(", "[", "{"), paren.close = c(")",
"]", "}"), quotes = c("\"", "'", "`"), esc = "\\", prepare = NULL)
{
x0 <- x
if (is.function(prepare))
x <- prepare(x)
m <- length(x)
n <- nchar(x)
res <- vector("list", m)
for (k in 1L:m) {
pos <- integer(0L)
inquote <- ch <- ""
inparen <- integer(3L)
for (i in seq.int(n[k])) {
chprv <- ch
ch <- substr(x[k], i, i)
if (inquote != "") {
if (chprv == esc && ch == esc)
ch <- " "
else if (chprv != esc && ch == inquote)
inquote <- ""
}
else {
inparen[j] <- inparen[j <- (inparen != 0L) &
(ch == paren.close)] - 1L
if (ch %in% quotes)
inquote <- ch
else if (any(j <- (ch == paren.open)))
inparen[j] <- inparen[j] + 1L
else if (all(inparen == 0L) && ch == split)
pos <- c(pos, i)
}
}
res[[k]] <- substring(x0[k], c(1L, pos + 1L), c(pos -
1L, n[k]))
}
res
}
fixCoefNames = function (x, peel = TRUE)
{
if (!length(x))
return(x)
ox <- x
ia <- grep(":", x, fixed = TRUE)
if (!length(ia))
return(structure(x, order = rep.int(1L, length(x))))
x <- ret <- x[ia]
if (peel) {
if (all(substr(x, 1L, pos <- regexpr("_", x, fixed = TRUE)) %in%
c("count_", "zero_"))) {
ret <- substr(ret, pos + 1L, 256L)
k <- TRUE
suffix <- ""
}
else {
k <- grepl("^\\w+\\(.+\\)$", x, perl = TRUE)
fname <- substring(x[k], 1L, attr(regexpr("^\\w+(?=\\()",
x[k], perl = TRUE), "match.length"))
k[k] <- !vapply(fname, exists, FALSE, mode = "function",
envir = .GlobalEnv)
if (any(k)) {
pos <- vapply(x[k], function(z) {
parens <- lapply(lapply(c("(", ")"), function(s) gregexpr(s,
z, fixed = TRUE)[[1L]]), function(y) y[y >
0L])
parseq <- unlist(parens, use.names = FALSE)
p <- cumsum(rep(c(1L, -1L), sapply(parens,
length))[order(parseq)])
if (any(p[-length(p)] == 0L))
-1L
else parseq[1L]
}, 1L, USE.NAMES = FALSE)
k[k] <- pos != -1L
pos <- pos[pos != -1]
if (any(k))
ret[k] <- substring(x[k], pos + 1L, nchar(x[k]) -
1L)
}
suffix <- ")"
}
}
else k <- FALSE
spl <- expr.split(ret, ":", prepare = function(x) gsub("((?<=:):|:(?=:))",
"_", x, perl = TRUE))
ret <- vapply(lapply(spl, base::sort), paste0, "", collapse = ":")
if (peel && any(k))
ret[k] <- paste0(substring(x[k], 1L, pos), ret[k], suffix)
ox[ia] <- ret
ord <- rep.int(1, length(ox))
ord[ia] <- sapply(spl, length)
structure(ox, order = ord)
}
.getCoefNames = function (formula, data, contrasts, envir = parent.frame())
{
colnames(eval(call("model.matrix.default", object = formula,
data = data, contrasts.arg = contrasts), envir = envir))
}
makeArgs.default = function (obj, termNames, opt, ...)
{
#Unexported function from the MuMIn package, see MuMIn:::makeArgs.default
reportProblems <- character(0L)
termNames[termNames %in% opt$interceptLabel] <- "1"
f <- reformulate(c(if (!opt$intercept) "0" else if (!length(termNames)) "1",
termNames), response = opt$response)
environment(f) <- opt$gmFormulaEnv
ret <- list(formula = f)
if (!is.null(opt$gmCall$start)) {
coefNames <- fixCoefNames(.getCoefNames(f, opt$gmDataHead,
opt$gmCall$contrasts, envir = opt$gmEnv))
idx <- match(coefNames, opt$gmCoefNames)
if (anyNA(idx))
reportProblems <- append(reportProblems, "cannot subset 'start' argument. Coefficients in the model do not exist in 'global.model'")
else ret$start <- substitute(start[idx], list(start = opt$gmCall$start,
idx = idx))
}
attr(ret, "problems") <- reportProblems
ret
}
makeArgs.rma = function(obj, termNames, comb, opt, ...) {
ret <- makeArgs.default(obj, termNames, comb, opt)
names(ret)[1L] <- "mods"
ret
}
assign("coefTable.rma", coefTable.rma, envir = .GlobalEnv)
assign("makeArgs.rma", makeArgs.rma, envir = .GlobalEnv)
return(invisible())
}
MetaforMuMIn()
inner_mmi = function() {
# Set supplied seed
seed = seed
set.seed(seed)
# Check 'method'; if 'FE', switch test to 'z'.
if (!method %in% c("FE", "DL", "HE", "SJ", "ML", "REML", "EB", "HS", "GENQ")) {
stop("'method' must be either 'FE', 'DL', 'HE', 'SJ', 'ML', 'REML', 'EB', 'HS', or 'GENQ'.")
}
if (method == "FE" & test != "z") {
test = "z"
cat("Knapp-Hartung adjustments are only meant to be used for random-effects models. \n Parameter 'test' has therefore been changed to 'z'. \n")
}
# Check that the effect size, standard error and predictor columns exist in the dataset
if (!TE %in% colnames(data)) {
stop("Column '", TE, "' not found in dataset.")
}
if (!seTE %in% colnames(data)) {
stop("Column '", seTE, "' not found in dataset.")
}
for (i in 1:length(predictors)) {
if (!predictors[i] %in% colnames(data)) {
stop("Predictor '", predictors[i], "' not found in dataset.")
}
}
if (!eval.criterion[1] %in% c("AICc", "BIC", "AIC")) {
stop("'eval.criterion' must be either 'AICc' (default), 'AIC' or 'BIC'.")
}
# Reformat the supplied data for model fitting
TE = data[TE]
seTE = data[seTE]
preds = data[predictors]
glm.data = data.frame(TE = TE, seTE = seTE)
colnames(glm.data) = c("TE", "seTE")
glm.data = cbind(glm.data, preds)
# Build the formula
interaction = interaction
if (interaction == FALSE) {
predictor.string = paste(predictors, collapse = "+")
} else {
predictor.string = paste(predictors, collapse = "*")
}
form = as.formula(paste("~", predictor.string, collapse = ""))
# Build rma model
full = suppressMessages(suppressWarnings(metafor::rma(yi = TE, sei = seTE, mods = form, data = glm.data, method = method,
test = test)))
# Multimodel Inference
if (eval.criterion == "AICc") {
res = suppressMessages(suppressWarnings(MuMIn::dredge(full, trace = 2, rank = "AICc")))
}
if (eval.criterion == "AIC") {
res = suppressMessages(suppressWarnings(MuMIn::dredge(full, trace = 2, rank = "AIC")))
}
if (eval.criterion == "BIC") {
res = suppressMessages(suppressWarnings(MuMIn::dredge(full, trace = 2, rank = "BIC")))
}
# Save results for all models: all.models, top5.models
all.models = res
top5.models = res[1:5, ]
# Create Multimodel Inference Coefficient Table and save: multimodel.coef
multimodel.coef = summary(MuMIn::model.avg(res, revised.var = TRUE))
multimodel.coef = multimodel.coef$coefmat.full
# Create importance table and save: predictor.importance
predictor.importance = data.frame(model = names(MuMIn::importance(res)), importance = as.numeric(MuMIn::importance(res)))
# Print out results
cat("\n", "Multimodel Inference: Final Results", "--------------------------", sep = "\n")
cat("\n", "- Number of fitted models:", nrow(all.models))
cat("\n", "- Full formula:", as.character(form))
cat("\n", "- Coefficient significance test:", test)
if (interaction == TRUE) {
cat("\n", "- Interactions modeled: yes")
} else {
cat("\n", "- Interactions modeled: no")
}
cat("\n", "- Evaluation criterion:", eval.criterion, "\n")
cat("\n", "Best 5 Models", "--------------------------", "\n", sep = "\n")
print(top5.models)
cat("\n", "Multimodel Inference Coefficients", "--------------------------", "\n", sep = "\n")
print(multimodel.coef)
cat("\n", "Predictor Importance", "--------------------------", "\n", sep = "\n")
print(predictor.importance)
# Print graph
ggpredictor = ggplot(predictor.importance, aes(x = reorder(model, importance), y = importance)) +
geom_bar(stat = "identity") + coord_flip() + geom_hline(yintercept = 0.8, color = "blue") + theme_minimal() +
theme(axis.title.y = element_blank()) + ylab("Predictor Importance")
suppressWarnings(suppressMessages(plot(ggpredictor)))
# Return results
invisible(list(all.models = all.models, top5.models = top5.models, multimodel.coef = multimodel.coef,
predictor.importance = predictor.importance, predictor.importance.plot = suppressWarnings(suppressMessages(ggpredictor)),
formula = form, fitted.models = nrow(all.models), eval.criterion = eval.criterion))
}
invisible(inner_mmi())
}
#' Subgroup analysis using a mixed-effects model
#'
#' This function performs a mixed-effects (random-effects model within subgroups,
#' fixed-effect model between subgroups) subgroup analysis using \code{meta} objects.
#'
#' @usage subgroup.analysis.mixed.effects(x, subgroups, exclude = "none")
#'
#' @param x An object of class \code{meta}, generated by the \code{metabin}, \code{metagen},
#' \code{metacont}, \code{metacor}, \code{metainc}, or \code{metaprop} function.
#' @param subgroups A character vector of the same length as the number of studies within the
#' meta-analysis, with a unique code for the subgroup each study belongs to. Must have the
#' same order as the studies in the \code{meta} object.
#' @param exclude Single string or concatenated array of strings. The name(s) of the subgroup
#' levels to be excluded from the subgroup analysis. If \code{"none"} (default), all subgroup
#' levels are used for the analysis.
#'
#' @details This function conducts a test for differences in effect sizes between subgroups of a meta-analysis.
#' The function implements a mixed-effect model, in which the overall effect size for each subgroup is
#' calculated using a random-effect model, and the test for subgroup differences is conducted using a
#' fixed-effect model. The implementation follows the fixed-effects (plural) model described in Borenstein
#' and Higgins (2013).
#'
#' This model is appropriate for subgroup tests when the subgroup levels under study
#' are assumed to be exhaustive for the characteristic at hand, and are not randomly chosen instances
#' of a "population" of subgroup levels. For example, the fixed-effects (plural) model used in the function
#' is valid when differences between studies published before and after a certain year are considered as a
#' (binary) subgroup level. When subgroup levels can be assumed to be random samples from a distribution of
#' subgroup levels, a random-effects model is more appropriate, and may be calculated using
#' the \code{\link[meta]{update.meta}} function.
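#' As a sketch, assuming \code{m} is the \code{meta} object and \code{g} the subgroup vector,
#' such a random-effects subgroup model could be requested via
#' \code{update.meta(m, byvar = g, comb.random = TRUE)}.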
#'
#' The function uses the study effect sizes \code{TE} and their standard error \code{seTE} of the provided
#' \code{meta} object to perform the subgroup analyses. Specifications of the summary measure \code{sm} are
#' inherited and used to backtransform log-transformed effect sizes to their original metrics if necessary.
#'
#' Results can be inspected by plugging the function output into the \code{summary} function. Forest plots
#' can be generated using \code{forest}. Additional arguments of the \code{\link[meta]{forest.meta}} function
#' can be passed to the \code{forest} function for additional styling.
#'
#' @references
#'
#' Harrer, M., Cuijpers, P., Furukawa, T.A, & Ebert, D. D. (2019).
#' \emph{Doing Meta-Analysis in R: A Hands-on Guide}. DOI: 10.5281/zenodo.2551803. \href{https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/subgroup.html}{Chapter 7}.
#'
#' Borenstein, M. & Higgins, J. P. T. (2013). Meta-Analysis and Subgroups. \emph{Prevention Science, 14} (2): 134–43.
#'
#' @author Mathias Harrer & David Daniel Ebert
#'
#' @return Returns a \code{list} with five objects:
#' \itemize{
#' \item \code{within.subgroup.results}: The pooled effect size for each subgroup and corresponding measures of heterogeneity (
#' \code{Q} and \code{I2}). If the summary measure \code{sm} is defined as one of
#' \code{"RR"}, \code{"RD"}, \code{"OR"}, \code{"ASD"}, \code{"HR"} or \code{"IRR"} in the
#' \code{meta} object provided in \code{x}, the backtransformed (exponentiated)
#' pooled effect for each subgroup effect size along with the 95\% confidence interval is also provided.
#' \item \code{subgroup.analysis.results}: The results for the \code{Q}-test for subgroup differences, its degrees of freedom \code{df} and
#' \emph{p}-value.
#' \item \code{m.random}: An object of class \code{meta} containing the results of the random-effects model applied
#' for pooling results in each subgroup in the first step.
#' \item \code{method.tau}: The \eqn{\tau^2} estimator used for within-subgroup pooling
#' (inherited from the \code{meta} object provided in \code{x}).
#' \item \code{k}: The total number of included studies.
#' }
#'
#' @aliases sgame
#'
#' @export subgroup.analysis.mixed.effects
#' @export sgame
#'
#' @importFrom meta forest metagen update.meta
#'
#' @seealso \code{\link{multimodel.inference}}
#'
#' @examples
#' # Example 1: Hedges' g as effect size, precalculated effect sizes
#' suppressPackageStartupMessages(library(meta))
#' data("ThirdWave")
#' ThirdWave = ThirdWave[c(1,2,3,5,9,18),]
#'
#' m1 <- metagen(TE = TE,
#' seTE = seTE,
#' studlab = paste(ThirdWave$Author),
#' data=ThirdWave,
#' comb.fixed = FALSE,
#' method.tau = "PM",
#' sm = "SMD")
#'
#' sgame1 = subgroup.analysis.mixed.effects(x = m1, subgroups = ThirdWave$TypeControlGroup)
#' summary(sgame1)
#'
#' # Generate Forest Plot
#' # Additional arguments of the meta::forest.meta can be supplied
#' forest(sgame1, col.diamond = "darkgreen")
#'
#'
#' # Example 2: Hedges' g as effect size, raw effect data
#' suppressPackageStartupMessages(library(meta))
#' data(amlodipine)
#'
#' # Create an arbitrary subgroup for illustration purposes
#' amlodipine$subgroup = rep(c("A","B"),4)
#'
#' m2 <- metacont(n.amlo, mean.amlo, sqrt(var.amlo),
#' n.plac, mean.plac, sqrt(var.plac),
#' data=amlodipine, studlab=amlodipine$study,
#' sm = "SMD")
#'
#' sgame2 = subgroup.analysis.mixed.effects(x = m2, subgroups = amlodipine$subgroup)
#' summary(sgame2)
#' forest(sgame2)
#'
#'
#' # Example 3: Risk ratio as effect size, binary outcome data, exclude one level
#' suppressPackageStartupMessages(library(meta))
#' data(Olkin95)
#'
#' # Create an arbitrary subgroup for illustration purposes
#' Olkin95$subgroup = c(rep(c("A","B"), 30), rep("C",10))
#'
#' m3 <- metabin(event.e, n.e, event.c, n.c,
#' data = Olkin95, studlab = Olkin95$author,
#' method = "Inverse")
#'
#' # Use shorthand
#' sgame3 = sgame(x = m3, subgroups = Olkin95$subgroup,
#' exclude = "B")
#' summary(sgame3)
#'
#'
#' # Example 4: IRR as effect size, incidence data
#' suppressPackageStartupMessages(library(meta))
#' data(smoking)
#'
#' # Create an arbitrary subgroup for illustration purposes
#' smoking$subgroup = c(rep(c("A"), 4), rep(c("B"), 3))
#'
#' m4 <- metainc(d.smokers, py.smokers,
#' d.nonsmokers, py.nonsmokers,
#' data=smoking, studlab=study, sm="IRR")
#'
#' sgame4 = subgroup.analysis.mixed.effects(x = m4, subgroups = smoking$subgroup)
#' summary(sgame4)
#' forest(sgame4)
subgroup.analysis.mixed.effects = sgame = function(x, subgroups, exclude = "none") {
# Define variables
m = x
subgroups = subgroups
exclude = exclude
# Levels of subgroup
subgroups = as.factor(subgroups)
k = as.vector(summary(subgroups))
levels = levels(subgroups)
k.level.df = data.frame(level = levels, k = k)
# Check for wrong input
if (length(subgroups) != length(m$studlab)) {
stop("Subgroup variable does not contain the same number of cases as the 'meta' object. You need to define a variable which provides a subgroup value for each effect size included in your 'meta' results object.")
}
# get 'Exclude' Subgroup level names
if (exclude[1] != "none") {
levels = levels[!levels %in% exclude]
k = k.level.df[(k.level.df$level %in% levels), ]$k
}
# Create Loop for subgroups
list = list()
for (x in levels) {
list[[x]] = which(subgroups %in% c(paste(x)))
}
# Loop over list to generate subgroup results
sg.results = list()
for (x in 1:length(list)) {
sg.results[[x]] = update.meta(m, subset = list[[x]])
}
# Loop over sg.results to get effect size estimates
ES = vector()
SE = vector()
Qsg = vector()
I2sg = vector()
I2sg.lower = vector()
I2sg.upper = vector()
for (x in 1:length(sg.results)) {
ES[x] = sg.results[[x]]$TE.random
SE[x] = sg.results[[x]]$seTE.random
Qsg[x] = sg.results[[x]]$Q
I2sg[x] = sg.results[[x]]$I2
I2sg.lower[x] = sg.results[[x]]$lower.I2
I2sg.upper[x] = sg.results[[x]]$upper.I2
}
me.data = data.frame(Subgroup = levels, TE = ES, seTE = SE)
# Fixed Meta-Analysis betweens subgroups
meta = metagen(TE, seTE, data = me.data,
comb.fixed = TRUE, comb.random = FALSE,
byvar = Subgroup, hakn = FALSE)
# Create full output dataset
if (m$sm %in% c("RR", "RD", "OR", "ASD", "HR", "IRR")){
subgroup.results = data.frame(Subgroup = me.data$Subgroup,
k = k,
TE = me.data$TE,
seTE = me.data$seTE,
sm = exp(me.data$TE),
LLCI = round(exp(meta$lower), 3),
ULCI = round(exp(meta$upper), 3),
p = meta$pval,
Q = Qsg,
I2 = round(I2sg, 2),
I2.lower = round(I2sg.lower, 2),
I2.upper = round(I2sg.upper, 2))
colnames(subgroup.results)[5] = m$sm
} else {
subgroup.results = data.frame(Subgroup = me.data$Subgroup,
k = k,
TE = me.data$TE,
seTE = me.data$seTE,
LLCI = round(meta$lower, 3),
ULCI = round(meta$upper, 3),
p = meta$pval,
Q = Qsg,
I2 = round(I2sg, 2),
I2.lower = round(I2sg.lower, 2),
I2.upper = round(I2sg.upper, 2))
if (m$sm != ""){
colnames(subgroup.results)[3] = m$sm
colnames(subgroup.results)[4] = "SE"
}
}
mixedeffects.results = data.frame(Q = meta$Q,
df = meta$df.Q,
p = meta$pval.Q,
row.names = "Between groups")
# Create Forest plot data
forest.m = update.meta(m, byvar=subgroups, comb.fixed = FALSE)
if (exclude[1] != "none"){
exclude.level = levels(subgroups)[levels(subgroups) %in% exclude]
exclude.nums = which(subgroups %in% exclude.level)
not.exclude.nums = 1:m$k
not.exclude.nums = not.exclude.nums[!(not.exclude.nums %in% exclude.nums)]
forest.m = update.meta(m, byvar=subgroups, comb.fixed = FALSE, subset = not.exclude.nums)
}
forest.m$TE.random = meta$TE.fixed
forest.m$lower.random = meta$lower.fixed
forest.m$upper.random = meta$upper.fixed
forest.m$Q = meta$Q
forest.m$df.Q = meta$df.Q
forest.m$pval.Q = meta$pval.Q
rownames(subgroup.results) = subgroup.results$Subgroup
subgroup.results$Subgroup = NULL
reslist = list(within.subgroup.results = subgroup.results,
subgroup.analysis.results = mixedeffects.results,
m.random = forest.m,
method.tau = m$method.tau,
k = sum(k))
class(reslist) = "subgroup.analysis.mixed.effects"
invisible(reslist)
reslist
}
#' Calculate \eqn{I^2} and the variance distribution for multilevel meta-analysis models
#'
#' This function calculates values of \eqn{I^2} and the variance distribution for multilevel meta-analysis
#' models fitted with \code{\link[metafor]{rma.mv}}.
#'
#'
#' @usage mlm.variance.distribution(x)
#'
#' @param x An object of class \code{rma.mv}. Must be a multilevel model with two random effects (three-level meta-analysis model).
#'
#' @details This function estimates the distribution of variance in a three-level meta-analysis
#' model (fitted with the \code{\link[metafor]{rma.mv}} function). The share of variance attributable to
#' sampling error, within and between-cluster heterogeneity is calculated,
#' and an estimate of \eqn{I^2} (total and for Level 2 and Level 3) is provided.
#'
#' The function uses the formula by Cheung (2014) to estimate the variance proportions attributable to each
#' model component and to derive the \eqn{I^2} estimates.
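#'
#' As a sketch of the computation used below: with inverse sampling variances \eqn{w_i = 1/v_i}
#' of the \eqn{k} effect sizes, the typical within-study (sampling error) variance is estimated as
#' \deqn{\tilde{v} = \frac{(k-1)\sum_i w_i}{(\sum_i w_i)^2 - \sum_i w_i^2}}
#' and the variance shares at Level 2 and Level 3 are \eqn{\sigma^2_{(2)}} and \eqn{\sigma^2_{(3)}},
#' each divided by the total \eqn{\sigma^2_{(2)} + \sigma^2_{(3)} + \tilde{v}}.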
#'
#'
#' @references
#'
#' Harrer, M., Cuijpers, P., Furukawa, T.A, & Ebert, D. D. (2019).
#' \emph{Doing Meta-Analysis in R: A Hands-on Guide}. DOI: 10.5281/zenodo.2551803. \href{https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/mlma.html}{Chapter 12}
#'
#'Cheung, M. W. L. (2014). Modeling dependent effect sizes with three-level meta-analyses: a structural equation modeling approach. \emph{Psychological Methods, 19}(2), 211.
#'
#' @author Mathias Harrer & David Daniel Ebert
#'
#' @import ggplot2
#' @importFrom stats model.matrix
#'
#' @return Returns a plot summarizing the variance distribution and \eqn{I^2} values,
#' as well as a data frame for the results.
#'
#' @export mlm.variance.distribution
#'
#' @examples
#' # Use dat.konstantopoulos2011 from the "metafor" package
#' suppressPackageStartupMessages(library(metafor))
#'
#' # Build Multilevel Model (Three Levels)
#' m = rma.mv(yi, vi, random = ~ 1 | district/school, data=dat.konstantopoulos2011)
#'
#' # Calculate Variance Distribution
#' mlm.variance.distribution(m)
mlm.variance.distribution = function(x){
m = x
# Check class
if (!(class(m)[1] %in% c("rma.mv", "rma"))){
stop("x must be of class 'rma.mv'.")
}
# Check for three level model
if (m$sigma2s != 2){
stop("The model you provided does not seem to be a three-level model. This function can only be used for three-level models.")
}
# Get variance diagonal and calculate total variance
n = m$k.all
vector.inv.var = 1/(diag(m$V))
sum.inv.var = sum(vector.inv.var)
sum.sq.inv.var = (sum.inv.var)^2
vector.inv.var.sq = 1/(diag(m$V)^2)
sum.inv.var.sq = sum(vector.inv.var.sq)
num = (n-1)*sum.inv.var
den = sum.sq.inv.var - sum.inv.var.sq
est.samp.var = num/den
# Calculate variance proportions
level1=((est.samp.var)/(m$sigma2[1]+m$sigma2[2]+est.samp.var)*100)
level2=((m$sigma2[1])/(m$sigma2[1]+m$sigma2[2]+est.samp.var)*100)
level3=((m$sigma2[2])/(m$sigma2[1]+m$sigma2[2]+est.samp.var)*100)
# Prepare df for return
Level=c("Level 1", "Level 2", "Level 3")
Variance=c(level1, level2, level3)
df.res=data.frame(Variance)
colnames(df.res) = c("% of total variance")
rownames(df.res) = Level
I2 = c("---", round(Variance[2:3], 2))
df.res = as.data.frame(cbind(df.res, I2))
totalI2 = Variance[2] + Variance[3]
# Generate plot
df1 = data.frame("Level" = c("Sampling Error", "Total Heterogeneity"),
"Variance" = c(df.res[1,1], df.res[2,1]+df.res[3,1]),
"Type" = rep(1,2))
df2 = data.frame("Level" = rownames(df.res),
"Variance" = df.res[,1],
"Type" = rep(2,3))
df = as.data.frame(rbind(df1, df2))
g = ggplot(df, aes(fill=Level, y=Variance, x=as.factor(Type))) +
coord_cartesian(ylim = c(0,1), clip = "off") +
geom_bar(stat="identity", position="fill", width = 1, color="black") +
scale_y_continuous(labels = scales::percent)+
theme(axis.title.x=element_blank(),
axis.text.y = element_text(color="black"),
axis.line.y = element_blank(),
axis.title.y=element_blank(),
axis.line.x = element_blank(),
axis.ticks.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.y = element_line(lineend = "round"),
legend.position = "none",
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
legend.background = element_rect(linetype="solid",
colour ="black"),
legend.title = element_blank(),
legend.key.size = unit(0.75,"cm"),
axis.ticks.length=unit(.25, "cm"),
plot.margin = unit(c(1,3,1,1), "lines")) +
scale_fill_manual(values = c("darkseagreen3", "deepskyblue3", "darkseagreen2",
"deepskyblue1", "deepskyblue2")) +
# Add Annotation
# Total Variance
annotate("text", x = 1.5, y = 1.05,
label = paste("Total Variance:",
round(m$sigma2[1]+m$sigma2[2]+est.samp.var, 3))) +
# Sampling Error
annotate("text", x = 1, y = (df[1,2]/2+df[2,2])/100,
label = paste("Sampling Error Variance: \n", round(est.samp.var, 3)), size = 3) +
# Total I2
annotate("text", x = 1, y = ((df[2,2])/100)/2-0.02,
label = bquote("Total"~italic(I)^2*":"~.(round(df[2,2],2))*"%"), size = 3) +
annotate("text", x = 1, y = ((df[2,2])/100)/2+0.05,
label = paste("Variance not attributable \n to sampling error: \n", round(m$sigma2[1]+m$sigma2[2],3)), size = 3) +
# Level 1
annotate("text", x = 2, y = (df[1,2]/2+df[2,2])/100, label = paste("Level 1: \n",
round(df$Variance[3],2), "%", sep=""), size = 3) +
# Level 2
annotate("text", x = 2, y = (df[5,2]+(df[4,2]/2))/100,
label = bquote(italic(I)[Level2]^2*":"~.(round(df[4,2],2))*"%"), size = 3) +
# Level 3
annotate("text", x = 2, y = (df[5,2]/2)/100,
label = bquote(italic(I)[Level3]^2*":"~.(round(df[5,2],2))*"%"), size = 3)
print(df.res)
cat("Total I2: ", round(totalI2, 2), "% \n", sep="")
suppressWarnings(print(g))
invisible(df.res)
}
### Influence Analysis function for fixed-effect-model meta-analyses
influence.analysis.fixed<-function(data){
library(meta)
library(metafor)
data<-data
TE<-data$TE
seTE<-data$seTE
# Fit the fixed-effect model directly on the extracted effect sizes
res <- rma(yi=TE, sei=seTE, measure="ZCOR",
method = "FE")
inf <- influence(res)
influence.data<-metainf(data)
influence.data$I2<-format(round(influence.data$I2,2),nsmall=2)
plot(inf)
baujat(data)
forest(influence.data,
sortvar=I2,
rightcols = c("TE","ci","I2"),
smlab = "Sorted by I-squared")
forest(influence.data,
sortvar=TE,
rightcols = c("TE","ci","I2"),
smlab = "Sorted by Effect size")
}
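### Minimal usage sketch (assuming 'm' is a 'meta' object, e.g. created with metacor(),
### since the effect sizes are treated as z-transformed correlations via measure = "ZCOR"):
### influence.analysis.fixed(m)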
## Copy and paste the code underneath in its entirety into your console
## Then hit 'Enter ⏎'
direct.evidence.plot = function(x){
# Packages
library(netmeta)
library(ggplot2)
library(reshape2)
# Get Measures
measures = netmeasures(x)$proportion
indirect = 1- measures
measures = data.frame(comparison = names(measures), direct = measures, indirect = indirect)
rownames(measures) = c()
measures$direct = round(measures$direct, 4)
measures$indirect = round(measures$indirect, 4)
measures.reshape = melt(measures, id.vars = "comparison", measure.vars = c("direct", "indirect"))
names = measures.reshape[measures.reshape$variable=="direct",]$comparison
direct = measures.reshape[measures.reshape$variable=="direct",]$value
names = names[order(direct)]
# Plot
gg = ggplot(measures.reshape,
aes(x=reorder(comparison, value),
fill=factor(variable, levels=c("indirect","direct")),
y=value)) +
geom_bar(stat="identity", position="fill") +
coord_flip() +
theme_minimal() +
scale_x_discrete(limits=names)+
ggtitle("Direct evidence proportion \n for each network estimate")+
scale_y_continuous(labels = scales::percent) +
ylab("Percentage")+
xlab("Network Estimate")+
guides(fill = guide_legend(title="Evidence"))+
scale_fill_manual(values=c('lightblue', 'orange'))
return(list(plot = gg, data = measures))
}
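## Minimal usage sketch (assuming 'm.netmeta' is a fitted 'netmeta' model object):
## res = direct.evidence.plot(m.netmeta)
## res$plot   # stacked bar chart of direct vs. indirect evidence proportions
## res$data   # the underlying proportions as a data frame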
## This is the function code for the plotloss function
## Copy and paste the code underneath in its entirety into your console
## Then hit 'Enter ⏎'
##DISCLAIMER: This code is a wrapper including parts of the code to estimate the
##true effect size with P-Curve using a K-S test. The code can be found at
##http://www.p-curve.com/Supplement/Rcode_paper2/APPENDIX%20-%20Loss%20Function%20and%20Estimation.R
##This method has been proposed and described in
##Simonsohn, Nelson, Simmons (Perspectives on Psych Science 2014) - "P-Curve and Effect Size: Correcting for Publication Bias Using Only Significant Results" V9(6) p.666-681
t.test2 <- function(m1,m2,s1,s2,n1,n2,m0=0,equal.variance=FALSE)
{
if( equal.variance==FALSE )
{
se <- sqrt( (s1^2/n1) + (s2^2/n2) )
# welch-satterthwaite df
df <- ( (s1^2/n1 + s2^2/n2)^2 )/( (s1^2/n1)^2/(n1-1) + (s2^2/n2)^2/(n2-1) )
} else
{
# pooled standard deviation, scaled by the sample sizes
se <- sqrt( (1/n1 + 1/n2) * ((n1-1)*s1^2 + (n2-1)*s2^2)/(n1+n2-2) )
df <- n1+n2-2
}
t <- (m1-m2-m0)/se
dat <- c(m1-m2, se, t, 2*pt(-abs(t),df))
names(dat) <- c("Difference of means", "Std Error", "t", "p-value")
return(t)
}
#################################################################################################
#SYNTAX beneath taken from http://www.p-curve.com/Supplement/Rcode_paper2/APPENDIX%20-%20Loss%20Function%20and%20Estimation.R
#LOSS FUNCTION
loss=function(t_obs,df_obs,d_est) {
#################################################################################################
#SYNTAX beneath taken from http://www.p-curve.com/Supplement/Rcode_paper2/APPENDIX%20-%20Loss%20Function%20and%20Estimation.R
#################################################################################################
#SYNTAX:
#1. t_obs is a vector with observed t-values,
#2. df_obs vector with degrees of freedom associated with each t-value
#3. d_est is the effect size on which fitted p-curve is based and the measure of loss computed
#################################################################################################
#1.Convert all ts to the same sign (for justification see Supplement 5)
t_obs=abs(t_obs)
#2 Compute p-values
p_obs=2*(1-pt(t_obs,df=df_obs))
#3 Keep significant t-values and corresponding df.
t.sig=subset(t_obs,p_obs<.05)
df.sig=subset(df_obs,p_obs<.05)
#4.Compute non-centrality parameter implied by d_est and df_obs
#df+2 is total N.
#Because the noncentrality parameter for the Student t distribution is ncp=sqrt(n/2)*d,
#we add 2 to d.f. to get N, divide by 2 to get n, and by 2 again for ncp, so -->df+2/4
ncp_est=sqrt((df.sig+2)/4)*d_est
#5.Find critical t-value for p=.05 (two-sided)
#this is used below to compute power, it is a vector as different tests have different dfs
#and hence different critical values
tc=qt(.975,df.sig)
#6.Find power for ncp given tc, again, this is a vector of implied power, for ncp_est, for each test
power_est=1-pt(tc,df.sig,ncp_est)
#7.Compute pp-values
#7.1 First get the overall probability of a t>tobs, given ncp
p_larger=pt(t.sig,df=df.sig,ncp=ncp_est)
#7.2 Now, condition on p<.05
ppr=(p_larger-(1-power_est))/power_est #this is the pp-value for right-skew
#8. Compute the gap between the distribution of observed pp-values and a uniform distribution 0,1
KSD=ks.test(ppr,punif)$statistic #this is the D statistic outputted by the KS test against uniform
return(KSD)
}
#Function 2: Estimate d and plot loss function
plotloss=function(data,dmin,dmax)
{
#Create t_obs and df_obs vector
data<-data
m1<-as.numeric(data$Me)
m2<-as.numeric(data$Mc)
s1<-as.numeric(data$Se)
s2<-as.numeric(data$Sc)
n1<-as.numeric(data$Ne)
n2<-as.numeric(data$Nc)
t_obs<-t.test2(m1=m1,m2=m2,s1=s1,s2=s2,n1=n1,n2=n2)
df_obs<-(n1+n2)-2
#################################################################################################
#SYNTAX:
#t_obs : vector with observed t-values
#df_obs : vector with degrees of freedom associated with each t-value
#dmin : smallest effect size to consider
#dnax : largest effect size to consider
#e.g., dmin=-1, dmax=1 would look for the best fitting effect size in the d>=-1 and d<=1 range
#################################################################################################
#Results will be stored in these vectors, create them first
loss.all=c()
di=c()
#Compute loss for effect sizes between d=c(dmin,dmax) in steps of .01
for (i in 0:((dmax-dmin)*100))
{
d=dmin+i/100 #effect size being considered
di=c(di,d) #add it to the vector (kind of silly, but kept for symmetry)
options(warn=-1) #turn off warning because R does not like its own pt() function!
loss.all=c(loss.all,loss(df_obs=df_obs,t_obs=t_obs,d_est=d))
#apply loss function so that effect size, store result
options(warn=0) #turn warnings back on
}
#find the effect leading to smallest loss in that set, that becomes the starting point in the optimize command
imin=match(min(loss.all),loss.all) #which i tested effect size lead to the overall minimum?
dstart=dmin+imin/100 #convert that i into a d.
#optimize around the global minimum
dhat=optimize(loss,c(dstart-.1,dstart+.1), df_obs=df_obs,t_obs=t_obs)
options(warn=0)
#Plot results
pcurveplot<-plot(di,loss.all,xlab="Effect size\nCohen-d", ylab="Loss (D stat in KS test)",ylim=c(0,1), main="How well does each effect size fit? (lower is better)")
points(dhat$minimum,dhat$objective,pch=19,col="red",cex=2)
text(dhat$minimum,dhat$objective-.08,paste0("p-curve's estimate of effect size:\nd=",round(dhat$minimum,3)),col="red")
return(c(pcurveplot,dhat$minimum))
}
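## Minimal usage sketch (assuming 'df' is a data frame with columns Me, Mc, Se, Sc, Ne and Nc,
## i.e. the group means, standard deviations and sample sizes of each study):
## plotloss(data = df, dmin = 0, dmax = 1)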
## Copy and paste the code underneath in its enterity into your console
## Then hit 'Enter ⏎'
sucra = function(rank.probability, lower.is.better = FALSE){
rank.probability = rank.probability
lower.is.better = lower.is.better
# packages
library(gemtc)
# Convert rank.probability to matrix
mat = as.matrix(rank.probability)
# Loop over treatments, for each treatment: calculate SUCRA
a = ncol(mat)
j = nrow(mat)
names = rownames(mat)
sucra = numeric()
for (x in 1:j){
sucra[x] = sum(cumsum(mat[x,1:(a-1)]))/(a-1)
}
# If condition for lower.is.better
if (lower.is.better==TRUE){
sucra = numeric()
for (x in 1:j){
sucra[x] = 1-sum(cumsum(mat[x,1:(a-1)]))/(a-1)
}
}
# Make data.frame
res = data.frame("Treatment"=names, "SUCRA" = sucra)
# Order
res = res[order(-res$SUCRA),]
rownames(res) = 1:j
return(res)
}
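## Minimal usage sketch (assuming 'ranks' was created beforehand with gemtc::rank.probability()
## from a fitted 'mtc.run' model):
## sucra(ranks, lower.is.better = TRUE)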
# meta to metafor
library(meta)
data(smoking)
m1 <- metainc(d.smokers, py.smokers,
d.nonsmokers, py.nonsmokers,
data=smoking, studlab=study)
print(m1, digits=2)
m1$TE
m1$seTE
rma(yi = m1$TE, sei = m1$seTE, slab = m1$studlab, method=m1$method.tau)
# metaSEM
library(metaSEM)
options(scipen = 999)
# Fixed Effects Model #############################################
m1 = meta(y = lifesat, v = lifesat_var, data = wvs94a, RE.constraints = 0)
summary(m1)
# Random Effects Model ##############################################
m2 = meta(y = lifesat, v = lifesat_var, data = wvs94a)
summary(m2)
# Compare
anova(m2, m1)
# LRT Delta Chi2 df1 = 26.97127
# Get random effects
coef(m2, select="random")
# Plot
plot(m2)
# Multivariate MA ##############################################
m3 = meta(y = cbind(lifesat, lifecon), v = cbind(lifesat_var, inter_cov, lifecon_var),
data = wvs94a)
summary(m3)
# Likelihood-based CIs instead of Wald-type
m3 = meta(y = cbind(lifesat, lifecon), v = cbind(lifesat_var, inter_cov, lifecon_var),
data = wvs94a, intervals.type = "LB")
summary(m3)
# Select random effects, make matrix, calculate cor
coef(m3, select = "random") %>% vec2symMat() %>% cov2cor()
plot(m3, axis.labels = c("Life satisfaction", "Life control"))
# The small circle dots are the observed effect sizes, whereas
# the dashed ellipses around them are the 95% confidence ellipses.
# A confidence ellipse is the bivariate generalization of the CI
# (see Friendly et al., 2013). If we were able to repeat Study i
# by collecting new data, 95% of such ellipses constructed in the
# replications will contain Study i’s true bivariate effect sizes.
# The confidence ellipses around the studies are not tilted in the figure,
# showing that the effect sizes are conditionally independent.
# The solid square in the location (−4.8338,−4.0960) represents the estimated
# average population effect sizes for the vaccinated and the nonvaccinated groups.
# The small ellipse in a solid line is the 95% confidence ellipse of the average
# effect sizes. It indicates the best estimates of the average population effect
# sizes for the vaccinated and the nonvaccinated groups in the long run. The large
# ellipse in a dashed line indicates the random effects for the 95% of studies
# that may fall inside this ellipse. It is constructed based on the estimated
# variance component of the random effects, which is a bivariate generalization
# of the 95% plausible value interval (Raudenbush, 2009). If we randomly select
# studies, 95% of the selected studies may fall inside the ellipse in long run.
# Therefore, the true population effect sizes of the studies vary greatly.
# Moreover, we also calculate the average effect size for the vaccinated group
# (−4.8338 in the x-axis) and the average effect size for the nonva- ccinated group
# (−4.0960 in the y-axis) and their 95% CIs. They are shown by the diamonds near the
# x-axis and the y-axis.
# Calculate cov
cor2cov.bivariate = function(TE1, seTE1, TE2, seTE2, cor,
name1 = "", name2 = ""){
var1 = seTE1^2
var2 = seTE2^2
cov = seTE1*seTE2*cor
df = data.frame(TE1, var1, TE2, var2, cov)
if (name1 == "" & name2 == ""){
colnames(df) = c("TE1", "var1", "TE2", "var2", "cov")
} else {
colnames(df) = c(paste("TE_", name1, sep=""),
paste("var_", name1, sep=""),
paste("TE_", name2, sep=""),
paste("var_", name2, sep=""),
paste("cov_", name1, "_", name2, sep=""))
}
return(df)
}
cor2cov.bivariate(
TE1 = wvs94a$lifesat,
seTE1 = wvs94a$lifesat_var,
TE2 = wvs94a$lifecon,
seTE2 = wvs94a$lifecon_var,
cor = 0.23,
name1 = "lifesat",
name2 = "lifecon"
)
# Baseline model for simulataneous model test; fit model with null hypothesis that y=0
m3.baseline = meta(y = cbind(lifesat, lifecon), v = cbind(lifesat_var, inter_cov, lifecon_var),
data = wvs94a, intercept.constraints = c(0,0))
anova(m3, m3.baseline)
# Regress Effect size on Effect size
# Create A Matrix (regression coefficients)
A <- matrix(c(0, "0.1*beta1_2", 0, 0,
0, 0, 0, 0,
1, 0, 0, 0,
0, 1, 0, 0),
ncol=4, nrow=4, byrow=TRUE)
dimnames(A) <- list(c("f_lifesat","f_lifecon",
"lifesat","lifecon"),
c("f_lifesat","f_lifecon","lifesat",
"lifecon"))
A = as.mxMatrix(A)
# Sampling variance matrix S
S <- mxMatrix(type="Symm", nrow=4, ncol=4, byrow=TRUE,
free=c(TRUE,
FALSE,TRUE,
FALSE,FALSE,FALSE,
FALSE,FALSE,FALSE,FALSE),
values=c(0.1,
0,0.1,
0,0,0,
0,0,0,0),
labels=c("tau2_1_1",
NA,"tau2_2_2",
NA,NA,"data.lifesat_var",
NA,NA,"data.inter_cov","data.lifecon_var"),
name = "S")
# Selection matrix F (observed variables)
F <- matrix(c(0, 0, 1, 0,
0, 0, 0, 1), nrow = 2, ncol = 4, byrow = TRUE)
dimnames(F) <- list(c("lifesat","lifecon"),
c("f_lifesat","f_lifecon","lifesat",
"lifecon"))
F = as.mxMatrix(F)
# Mean Matrix (only latent f_x are estimated)
M <- matrix(c("0*beta1_0","0*beta2_0",0,0), nrow=1, ncol=4)
dimnames(M)[[2]] <- c("f_lifesat","f_lifecon",
"lifesat","lifecon")
M <- as.mxMatrix(M)
# Formula for R2
R2 = mxAlgebra(beta1_2^2*tau2_2_2/(beta1_2^2*tau2_2_2 + tau2_1_1), name="R2")
# Convert RAM
RAMexp = mxExpectationRAM(A="A", S="S",
F="F", M="M",
dimnames = c("f_lifesat","f_lifecon",
"lifesat","lifecon"))
# Data
my.df <- wvs94a[!is.na(wvs94a$gnp), -1]
my.df$gnp <- scale(my.df$gnp, scale=FALSE)/10000
mxdata = mxData(observed=my.df, type="raw")
# Build
reg = mxModel("Regression",
mxdata,
A, S, F, M, R2, mxCI("R2"), # Request CI on R2
RAMexp,
mxFitFunctionML())
# Run
reg.fit <- mxRun(reg, intervals=TRUE, silent=TRUE)
reg.fit@output$status[[1]]
summary(reg.fit)
library(semPlot)
# Mediation ###################################
# A Matrix
A <- matrix(c(0,0,0,0,0,
"0*gamma1",0,"0*beta1_2",0,0,
"0*gamma2",0,0,0,0,
0,1,0,0,0,
0,0,1,0,0), ncol=5, nrow=5, byrow=TRUE)
dimnames(A) <- list(c("gnp","f_lifesat","f_lifecon",
"lifesat","lifecon"),
c("gnp","f_lifesat","f_lifecon",
"lifesat","lifecon"))
A <- as.mxMatrix(A)
# S Matrix (Sampling Variance)
S = mxMatrix(type="Symm", nrow=5, ncol=5, byrow=TRUE,
free=c(TRUE,
FALSE,TRUE,
FALSE,FALSE,TRUE,
FALSE,FALSE,FALSE,FALSE,
FALSE,FALSE,FALSE,FALSE,FALSE),
values=c(1,
0,0.01,
0,0,0.1,
0,0,0,0,
0,0,0,0,0),
labels=c("sigma2_x",
NA,"tau2_1_1",
NA,NA,"tau2_2_2",
NA,NA,NA,"data.lifesat_var",
NA,NA,NA,"data.inter_cov","data.lifecon_var"),
name="S")
# Selection matrix F (observed variables)
F = matrix(c(1,0,0,0,0,
0,0,0,1,0,
0,0,0,0,1), nrow=3, ncol=5, byrow=TRUE)
dimnames(F) <- list(c("gnp","lifesat","lifecon"),
c("gnp","f_lifesat","f_lifecon",
"lifesat","lifecon"))
F <- as.mxMatrix(F)
# Mean Matrix (only latent f_x are estimated)
M <- matrix(c("0*mu_x","0*beta1_0","0*beta2_0",0,0),
nrow=1, ncol=5)
dimnames(M)[[2]] <- c("gnp", "f_lifesat","f_lifecon",
"lifesat","lifecon")
M <- as.mxMatrix(M)
# Define indirect, direct, total effect
direct <- mxAlgebra(gamma1, name="direct")
indirect <- mxAlgebra(gamma2*beta1_2, name="indirect")
total <- mxAlgebra(gamma1+gamma2*beta1_2, name="total")
med <- mxModel("Mediation",
mxData(observed=my.df, type="raw"),
A, S, F, M, direct, indirect, total,
mxCI(c("direct","indirect","total")),
mxExpectationRAM(A="A", S="S", F="F", M="M",
dimnames=c("gnp","f_lifesat","f_lifecon",
"lifesat","lifecon")),
mxFitFunctionML())
med.fit = mxRun(med, intervals = TRUE)
#### SEM full-blown #############
r1 = matrix(c( 1, 0.23, 0.56,
0.23, 1, 0.84,
0.56, 0.84, 1),
nrow=3, ncol=3, byrow = TRUE)
vechs(r1)
r2 = c(0.56)
r3 = c(0.23)
X1 = matrix(c(1, 0, 0,
0, 1, 0,
0, 0, 1),
nrow=3, ncol=3, byrow = TRUE)
X2 = matrix(c(0, 0, 0,
0, 1, 0,
0, 0, 0),
nrow=3, ncol=3, byrow = TRUE)
X3 = matrix(c(1, 0, 0,
0, 0, 0,
0, 0, 0),
nrow=3, ncol=3, byrow = TRUE)
# CFA #######################################
Digman97$data
# Stage 1: FEM #######
fixed = tssem1(Digman97$data, Digman97$n, method="FEM")
fixed$total.n
fixed$no.es
fixed$no.miss
fixed$mx.model
summary(fixed)
coef(fixed)
# Define Variables: starting with observed, then latent
vars = c("A","C","ES","E","I","f_Alpha","f_Beta")
# S Matrix (Sampling Variance)
Observed = Diag(c("0.2*var_1", "0.2*var_2", "0.2*var_3", "0.2*var_4", "0.2*var_5"))
Latent = matrix(c(1, "0.3*cor",
"0.3*cor", 1), nrow=2, ncol=2)
S = bdiagMat(list(Observed, Latent))
dimnames(S)[[1]] = vars
dimnames(S)[[2]] = vars
S = as.mxMatrix(S)
# A Matrix (Arrows, unidirectional)
A_1 = matrix(rep(0, 5*5), nrow=5, ncol=5)
A_2 = matrix(rep(0, 7*2), nrow=2, ncol=7)
A_3 = matrix(c("0.3*a_A", 0,
"0.3*a_C", 0,
"0.3*a_ES", 0,
0, "0.3*b_E",
0, "0.3*b_I"),
nrow=5, ncol=2, byrow=TRUE)
A = rbind(cbind(A_1, A_3), A_2)
dimnames(A)[[1]] = vars
dimnames(A)[[2]] = vars
A = as.mxMatrix(A)
# F matrix (observed variabled; fixed)
F_1 = diag(1, 5)
F_2 = matrix(rep(0, 5*2), nrow=5, ncol=2)
F = cbind(F_1, F_2)
dimnames(F)[[1]] = vars[1:5]
dimnames(F)[[2]] = vars
F = as.mxMatrix(F)
# Fit
fixed2 <- tssem2(fixed, Amatrix=A, Smatrix=S, Fmatrix=F,
diag.constraints=FALSE)
summary(fixed2)
# Plot
semplot = meta2semPlot(fixed2)
labels<-c("Ideal Life","Excellent","Satisfied","Important","Change","SWL\nScale", "No Change")
semPaths(semplot, whatLabels = "est", edge.color = "black", nodeLabels = labels)
# FEM with Clusters #################################
# Introduce NAs
Digman97$data$`Digman 1 (1994)`[,5] = NA
Digman97$data$`Digman 1 (1994)`[5,] = NA
random = tssem1(Digman97$data, Digman97$n, method = "REM", RE.type = "Diag")
summary(random)
fixed.mat = coef(random, "fixed")
fixed.mat = vec2symMat(fixed.mat, diag=FALSE)
random.mat = coef(random, "random")
random.mat = vec2symMat(random.mat, diag=FALSE)
random.mat[upper.tri(random.mat, diag=TRUE)] = NA
dimnames(random.mat)[[1]] = vars[1:5]
dimnames(random.mat)[[2]] = vars[1:5]
# Stage 2
random2 <- tssem2(random, Amatrix=A, Smatrix=S, Fmatrix=F,
diag.constraints=FALSE)
summary(random2)
semplot = meta2semPlot(random2)
labels<-c("Ideal Life","Excellent","Satisfied","Important","Change","SWL\nScale", "No\nChange")
semPaths(semplot, whatLabels = "est", edge.color = "black", nodeLabels = labels)
# Regression Models #######################################
# Mediation ###############################################
vars = c("SAT_Math", "Spatial", "SAT_Verbal")
fixed.cluster = tssem1(Becker94$data, Becker94$n, cluster = Becker94$gender, method = "FEM")
summary(fixed.cluster)
random = tssem1(Becker94$data, Becker94$n, method = "REM", RE.type = "Diag") # RE are independent
summary(random)
coef(random, "fixed")
mat.fe = vec2symMat(coef(random, "fixed"), diag = FALSE)
dimnames(mat.fe)[[1]] = vars
dimnames(mat.fe)[[2]] = vars
mat.i2 = summary(random)$I2.values[,2]
mat.i2 = vec2symMat(mat.i2, diag=FALSE)
dimnames(mat.i2)[[1]] = vars
dimnames(mat.i2)[[2]] = vars
# F Matrix
F = Diag(1, 3)
dimnames(F)[[1]] = vars
dimnames(F)[[2]] = vars
F = as.mxMatrix(F)
# S Matrix
S = matrix(c(1, 0, 0,
0, "0.2*var_spatial", 0,
0, 0, "0.2*var_verbal"),
nrow=3, ncol=3)
dimnames(S)[[1]] = vars
dimnames(S)[[2]] = vars
S = as.mxMatrix(S)
# A Matrix (directed arrows)
A = matrix(c(0, 0, 0,
"0.2*Math2Spatial", 0, 0,
"0.2*Math2Verbal", "0.2*Spatial2Verbal", 0),
nrow=3, ncol=3, byrow=TRUE)
dimnames(A)[[1]] = vars
dimnames(A)[[2]] = vars
A = as.mxMatrix(A)
# Model
random2 = tssem2(random, Amatrix = A, Smatrix = S,
diag.constraints = FALSE,
mx.algebras=list(indirect = mxAlgebra(Math2Spatial*Spatial2Verbal,
name = "indirect"),
total = mxAlgebra(Math2Verbal + Math2Spatial*Spatial2Verbal,
name = "total")),
intervals.type = "LB")
summary(random2)
semp = meta2semPlot(random2)
semPaths(semp)
### Mediation Model 2 ##########################
Hunter83
pattern.na(Hunter83$data, show.na=TRUE)
pattern.n(Hunter83$data, Hunter83$n)
# Are the matrices positive definitive? If not can
# lead to estimation problems
is.pd(Hunter83$data)
random = tssem1(Hunter83$data, Hunter83$n, method = "REM",
RE.type = "Diag")
summary(random)
rerun(random)
betas = vec2symMat(coef(random, "fixed"), diag=FALSE)
dimnames(betas)[[1]] = dimnames(betas)[[2]] = vars
# A Matrix
vars = dimnames(Hunter83$data$`Campbell et al. (1973)`)[[1]]
# Ability --> Job Knowledge --> Supervisor Rating
# | | A
# | V |
# L______>Work sample_____________|
i = matrix(rep(0, 4*4), nrow=4, ncol=4)
dimnames(i)[[1]] = dimnames(i)[[2]] = vars
A = matrix(c(0, 0, 0, 0,
"0.2*A2J", 0, 0, 0,
"0.2*A2W", "0.2*J2W", 0, 0,
0, "0.2*J2S", "0.2*W2S", 0),
ncol = 4, nrow=4, byrow=TRUE)
dimnames(A)[[1]] = dimnames(A)[[2]] = vars
A = as.mxMatrix(A)
# S Matrix
S = Diag(c(1, "0.1*ErrVarJ", "0.1*ErrVarW", "0.1*ErrVarS"))
dimnames(S)[[1]] = dimnames(S)[[2]] = vars
S = as.mxMatrix(S)
# Model
random2 = tssem2(random, Amatrix = A, Smatrix = S,
intervals.type = "LB", diag.constraints = TRUE,
mx.algebras = list(Ind=mxAlgebra(A2J*J2S+ A2J*J2W*W2S +A2W*W2S,
name="Ind")))
sem.path = meta2semPlot(random2)
labels = c("Resilience","Emotion\nRegulation","Coping","Depres-\nsion")
semPaths(sem.path, whatLabels = "est", edge.color = "black", layout="tree2", rotation=2,
nodeLabels = labels)
# Insomnia: Sleep Quality, Sleep Latency, Sleep Efficiency
# Lassitude: Daytime Dysfunction, Hypersomnia
# 7 studies
library(purrr)
for (i in 1:14){
dimnames(Digman97$data[[i]])[[1]] = dimnames(Digman97$data[[i]])[[2]] = c("Quality", "Latency",
"Efficiency",
"DTDysf", "HypSomnia")
}
names(Hunter83$data)
names = c("Guttman",
"McCaffrey",
"Loescher",
"O'Malley",
"Hay",
"Twiraga",
"Wanzer",
"Arthur",
"Frondel",
"Mill",
"Ilan",
"Severence",
"Devegvar",
"Matloff")
year = round(rnorm(14, 1999, 7))
name = paste(names, " et al. (", year, ")", sep="")
names(Hunter83$data) = name
Hunter83$data$`Guttman et al. (2003)`
for (i in 1:14){
dimnames(Hunter83$data[[i]])[[1]] = dimnames(Hunter83$data[[i]])[[2]] = c("Resilience",
"EmotReg",
"Coping",
"Depression")
}
mreg.multimodel.inference = function(TE,
seTE,
data,
predictors,
method="REML",
test="knha",
eval.criterion="aicc",
level=1){
# Parts of the computatations for this function are based on:
# http://www.metafor-project.org/doku.php/tips:model_selection_with_glmulti
library(metafor)
library(glmulti)
# Change supplied df to conform to glmulti
TE = data[TE]
seTE = data[seTE]
preds = data[predictors]
glm.data = data.frame("TE"=TE, "seTE"=seTE)
colnames(glm.data) = c("TE", "seTE")
glm.data = cbind(glm.data, preds)
# Build the formula
predictor.string = paste(predictors, collapse="+")
form = paste("TE ~", predictor.string, collapse = "")
# Set up function
rma.glmulti = function(formula, data, ...)
rma(formula, seTE, data=data, method=method, test=test)
# Loop over all possible models
result = glmulti(y="TE", xr=predictors, data=glm.data,
level=level,
fitfunction=rma.glmulti,
crit=eval.criterion,
confsetsize=1000)
# Save results for all models: all.models, top5.models
all.models = weightable(result)
top5.models = weightable(result)[1:5,]
# Create Multimodel Inference Coefficient Table and save: multimodel.coef
setOldClass("rma.uni")
setMethod('getfit', 'rma.uni', function(object, ...) {
if (object$test==test) {
cbind(estimate=coef(object), se=sqrt(diag(vcov(object))), df=Inf)
} else {
cbind(estimate=coef(object), se=sqrt(diag(vcov(object))), df=object$k-object$p)
}
})
multimodel.coef = coef(result)
# Create List with model results for all models: model.details
model.details = list()
for (x in 1:length(result@objects)){
model.details[[x]] = result@objects[x]
}
# Print out results
cat("\n", "Multimodel Inference: Final Results", "--------------------------", sep="\n")
cat("\n", "- Number of fitted models:", nrow(all.models))
cat("\n", "- Full formula:", form)
cat("\n", "- Coefficient significance test:", test)
cat("\n", "- Modeled interaction level:", level)
cat("\n", "- Evaluation criterion:", eval.criterion, "\n")
cat("\n", "Best 5 Models", "--------------------------", "\n", sep="\n")
print(top5.models)
cat("\n", "Multimodel Inference Coefficients", "--------------------------", "\n", sep="\n")
print(multimodel.coef)
# Print graph
plot(result, type="s")
# Return results
invisible(list("all.models"=all.models,
"top5.models"=top5.models,
"multimodel.coef"=multimodel.coef,
"model.details"=model.details,
"formula"=form,
"fitted.models"=nrow(all.models),
"eval.criterion"=eval.criterion))
}
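# Minimal usage sketch (hypothetical data frame 'dat' with an effect size column 'yi',
# a standard error column 'sei', and the moderator columns named in 'predictors'):
# mreg.multimodel.inference(TE = "yi", seTE = "sei", data = dat,
#                           predictors = c("pubyear", "quality"))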
## This is the function code for the subgroup.analysis.mixed.effects function
## Copy and paste the code underneath in its entirety into your console
## Then hit 'Enter ⏎'
subgroup.analysis.mixed.effects<-function(m, subgroups, exclude = "none", hakn=FALSE){
library(meta)
library(metafor)
# Define variables
m = m
subgroups = subgroups
value.hakn = hakn
exclude = exclude
# Levels of subgroup
subgroups = as.factor(subgroups)
k=as.vector(summary(subgroups))
levels = levels(subgroups)
k.level.df = data.frame("level"=levels, "k"=k)
# Check for wrong input
if (length(subgroups)!=length(m$studlab)){
stop("Subgroup variable does not contain the same number of cases as the 'meta' object. You need to define a variable which provides a subgroup value for each effect size included in your 'meta' results object.")
}
# get "Exclude" Subgroup level names
if (exclude[1]!="none"){
levels = levels[!levels %in% exclude]
k = k.level.df[(k.level.df$level %in% levels),]$k
}
# Create Loop for subgroups
list = list()
for (x in levels){
list[[x]] = which(subgroups %in% c(paste(x)))
}
# Loop over list to generate subgroup results
sg.results = list()
for (x in 1:length(list)){
sg.results[[x]] = update.meta(m, subset = list[[x]])
}
# Loop over sg.results to get effect size estimates
ES = vector()
SE = vector()
Qsg = vector()
I2sg = vector()
I2sg.lower = vector()
I2sg.upper = vector()
for (x in 1:length(sg.results)){
ES[x] = sg.results[[x]]$TE.random
SE[x] = sg.results[[x]]$seTE.random
Qsg[x] = sg.results[[x]]$Q
I2sg[x] = sg.results[[x]]$I2
I2sg.lower[x] = sg.results[[x]]$lower.I2
I2sg.upper[x] = sg.results[[x]]$upper.I2
}
me.data = data.frame("Subgroup"=levels, "TE"=ES, "seTE"=SE)
# Fixed Meta-Analysis betweens subgroups
meta = metagen(TE,
seTE,
data=me.data,
comb.fixed = TRUE,
comb.random = FALSE,
byvar = Subgroup,
hakn = value.hakn)
# Create full output dataset
subgroup.results = data.frame("Subgroup"=me.data$Subgroup,
"k"=k,
"TE"=me.data$TE,
"seTE"=me.data$seTE,
"LLCI"=round(meta$lower,3),
"ULCI"=round(meta$upper,3),
"p"=meta$pval,
"Q"=Qsg,
"I2"=round(I2sg,2),
"I2.lower"=round(I2sg.lower,2),
"I2.upper"=round(I2sg.upper,2))
mixedeffects.results = data.frame("Q"=meta$Q, "df"=meta$df.Q, "p"=meta$pval.Q, row.names = "Between groups")
res = list("within.subgroup.results"=subgroup.results, "subgroup.analysis.results"=mixedeffects.results)
cat("Subgroup Results:","--------------", sep="\n")
print(subgroup.results)
cat("","Test for subgroup differences (mixed/fixed-effects (plural) model):","--------------", sep="\n")
print(mixedeffects.results)
cat("", sep="\n")
cat("- Total number of studies included in subgroup analysis: ", sum(k))
cat("", sep="\n")
cat("- Tau estimator used for within-group pooling: ", m$method.tau)
invisible(res)
}
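## Minimal usage sketch (assuming 'm' is a 'meta' object and 'region' is a character vector
## with one subgroup label per study, in the same order as the studies in 'm'):
## subgroup.analysis.mixed.effects(m = m, subgroups = region)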
library(MASS)
library(purrr)
library(extraDistr)
library(magrittr)
library(randomNames)
library(tidyr)
library(dplyr)
library(stringr)
library(netmeta)
# Simulate Data
do.call(rbind, list(
# Individual vs. Group
rnorm(7, -0.32, sqrt(0.04)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "ind", treat2 = "grp"),
# Individual vs. GSH
rnorm(4, -0.12, sqrt(0.01)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "ind", treat2 = "gsh"),
# Individual vs. Telephone
rnorm(4, -0.04, sqrt(0.01)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "ind", treat2 = "tel"),
# Individual vs. WLC
rnorm(18, -1.08, sqrt(0.16)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "ind", treat2 = "wlc"),
# Individual vs. CAU
rnorm(30, -0.52, sqrt(0.07)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "ind", treat2 = "cau"),
# Individual vs. PLA
rnorm(2, -0.40, sqrt(0.01)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "ind", treat2 = "pla"),
# Group vs. gsh
rnorm(5, 0.20, sqrt(0.01)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "grp", treat2 = "gsh"),
# Group vs. ush
rnorm(1, -0.06, sqrt(0.01)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "grp", treat2 = "ush"),
# Group vs. wlc
rnorm(18, -1.32, sqrt(0.64)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "grp", treat2 = "wlc"),
# Group vs. cau
rnorm(21, -0.83, sqrt(0.39)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "grp", treat2 = "cau"),
# Gsh vs. ush
rnorm(5, -0.37, sqrt(0.01)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "gsh", treat2 = "ush"),
# gsh vs. wlc
rnorm(35, -0.81, sqrt(0.19)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "gsh", treat2 = "wlc"),
# gsh vs. cau
rnorm(8, -0.56, sqrt(0.1)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "gsh", treat2 = "cau"),
# tel vs. wlc
rnorm(1, -0.69, sqrt(0.01)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "tel", treat2 = "wlc"),
# tel vs. cau
rnorm(6, -0.63, sqrt(0.25)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "tel", treat2 = "cau"),
# ush vs. wlc
rnorm(11, -0.48, sqrt(0.01)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "ush", treat2 = "wlc"),
# ush vs. cau
rnorm(9, -0.14, sqrt(0.03)) %>%
map(function(x){rnorm(runif(1, 50, 300), x, runif(1, 0.05, 0.5))}) %>%
map(function(x) c(mean(x), sd(x))) %>%
do.call(rbind, .) %>%
data.frame() %>%
set_colnames(c("TE", "seTE")) %>%
cbind(., treat1 = "ush", treat2 = "cau")
)) -> data
# Sample mv normal for ind vs. gsh vs. wlc trial
# We assume a tau of sqrt(0.09) = 0.3
mu = c(-0.12, -1.08)
sigma = matrix(c(0.3, 0.3/2, 0.3/2, 0.3), 2, 2)
mvrnorm(1, mu, sigma) %>%
mvrnorm(100, ., sigma) %>%
data.frame() -> dat.multiarm
# Delete 2 individual ES
# Add study names
set.seed(123)
names = c(randomNames(nrow(data)-10, ethnicity = c(5), which.names = "last"),
randomNames(5, ethnicity = c(4), which.names = "last"),
randomNames(5, ethnicity = c(2), which.names = "last")) %>%
paste0(., ", ", round(runif(nrow(data), 1984, 2018)))
data = cbind(names, data)
data[data$treat1 == "ind" & data$treat2 == "gsh",][1,1:3] = c("Breiman, 2001", mean(dat.multiarm[,1]), sd(dat.multiarm[,1]))
data[data$treat1 == "ind" & data$treat2 == "wlc",][1,1:3] = c("Breiman, 2001", mean(dat.multiarm[,2]), sd(dat.multiarm[,2]))
within(data, {
TE = as.numeric(TE) %>% round(3)
seTE = as.numeric(seTE) %>% round(3)
}) -> data
rbind(data, data.frame(names = "Breiman, 2001", TE = -0.664, seTE = 0.514, treat1 = "gsh", treat2 = "wlc")) -> data
data$treat1.long = recode(data$treat1, ind = "Individual", grp = "Group",
gsh = "Guided Self-Help", tel = "Telephone", ush = "Unguided Self-Help")
data$treat2.long = recode(data$treat2, grp = "Group",
gsh = "Guided Self-Help", tel = "Telephone", wlc = "Waitlist",
cau = "Care As Usual", pla = "Pill Placebo", ush = "Unguided Self-Help")
m.netmeta <- netmeta(TE = TE,
seTE = seTE,
treat1 = treat1.long,
treat2 = treat2.long,
studlab = names,
data = data,
sm = "SMD",
comb.fixed = FALSE,
comb.random = TRUE,
reference.group = "Care As Usual",
details.chkmultiarm = TRUE,
sep.trts = " vs ")
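# Network graph, direct evidence proportions, and forest plot for the fitted model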
netgraph(m.netmeta, seq = c("Group", "Guided Self-Help", "Telephone",
"Unguided Self-Help", "Waitlist", "Care As Usual",
"Pill Placebo", "Individual"))
library(dmetar)
direct.evidence.plot(m.netmeta, random = TRUE)
forest(m.netmeta, sortvar = TE)
colnames(data)[1] = "author"
# Exploratory reshaping attempt (superseded by the working call further below)
data %>%
  select(1:5) %>%
  cbind(., "TE2" = NA, "seTE2" = NA) %>%
  pivot_longer(cols = c(TE, seTE, treat1, treat2, TE2, seTE2),
               names_to = "treat",
               values_to = "count",
               values_transform = list(count = as.character)) %>%
  arrange(author)
who %>% pivot_longer(
cols = new_sp_m014:newrel_f65,
names_to = c("diagnosis", "gender", "age"),
names_pattern = "new_?(.*)_(.)(.*)",
values_to = "count"
)
anscombe %>%
pivot_longer(everything(),
names_to = c(".value", "set"),
names_pattern = "(.)(.)"
)
data %>%
dplyr::select(1:5) %>%
pivot_longer(-author,
names_to = c(".value"),
names_pattern = "(..)"
) -> TherapyFormatsGeMTC
colnames(TherapyFormatsGeMTC) = c("study", "diff", "std.err", "treatment")
TherapyFormatsGeMTC = data.frame(TherapyFormatsGeMTC)
TherapyFormatsGeMTC[TherapyFormatsGeMTC$study == "Breiman, 2001",]
TherapyFormatsGeMTC[-c(15),] -> TherapyFormatsGeMTC
TherapyFormatsGeMTC = data.frame(TherapyFormatsGeMTC)
rownames(TherapyFormatsGeMTC) = 1:nrow(TherapyFormatsGeMTC)
# Further scratch attempts at the wide-to-long conversion
data %>%
  select(1:5) %>%
  pivot_longer(-author,
               names_to = c(".value", ".value"),
               names_pattern = "(.....)(....)"
  )
data %>%
  select(1:5) %>%
  set_colnames(c("author", "diff")) %>%
  pivot_longer(-author,
               names_to = c(".value", "arm"),
               names_sep = "\\."
  )
# More pivot_longer() scratch, based on reference examples (e.g. the 'family' data
# from the tidyr pivoting vignette)
family %>%
  pivot_longer(
    -family,
    names_to = c(".value", "child"),
    names_sep = "_",
    values_drop_na = FALSE
  )
longer <- pivot_longer(dat,
                       cols = -1,
                       names_pattern = "(.*)(..)$",
                       names_to = c("limit", "name"))
colnames(longer)
TherapyFormatsGeMTC %>% dplyr::arrange(diff) %>% dplyr::filter(!is.na(diff)) %>% dplyr::pull(study) -> RoB
# Note: leftover lines from an earlier draft; 'TherapyFormatsGeMTC2' is not created in this script
TherapyFormatsGeMTC <- TherapyFormatsGeMTC2
rbind(TherapyFormatsGeMTC2, TherapyFormatsGeMTC$data[TherapyFormatsGeMTC$data$study == "Breiman, 2001",]) -> TherapyFormatsGeMTC2
data.frame(study = RoB,
low.rob = c(rbinom(83, 1, 0.8), rbinom(100, 1, 0.2))) -> study.info
TherapyFormatsGeMTC = list(data = TherapyFormatsGeMTC,
study.info = study.info)
rbind(TherapyFormatsGeMTC$data[!TherapyFormatsGeMTC$data$study == "Breiman, 2001",],
TherapyFormatsGeMTC$data[TherapyFormatsGeMTC$data$study == "Breiman, 2001",]) -> TherapyFormatsGeMTC$data
TherapyFormatsGeMTC$data$treatment = recode(TherapyFormatsGeMTC$data$treatment, Group = "grp", "Individual" = "ind",
                                            GuidedSelfHelp = "gsh", Telephone = "tel", Waitlist = "wlc",
                                            CareAsUsual = "cau", UnguidedSelfHelp = "ush")
save(TherapyFormatsGeMTC, file = "data/TherapyFormatsGeMTC.rda")
treat.codes = data.frame(id = TherapyFormatsGeMTC$data$treatment %>% unique(),
description = TherapyFormatsGeMTC$data$treatment %>% unique() %>% dplyr::recode(grp = "Group", ind = "Individual",
gsh = "Guided Self-Help", tel = "Telephone", wlc = "Waitlist",
cau = "Care As Usual", pla = "Pill Placebo", ush = "Unguided Self-Help"))
TherapyFormatsGeMTC[["treat.codes"]] = treat.codes
str_replace_all(TherapyFormatsGeMTC$data$treatment, " ", "") %>%
str_replace_all("-", "") -> TherapyFormatsGeMTC$data$treatment
network <- mtc.network(data.re = TherapyFormatsGeMTC$data,
treatments = treat.codes)
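# Compile the Bayesian random-effects model (normal likelihood, identity link, 4 chains)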
model <- mtc.model(network,
likelihood = "normal",
link = "identity",
linearModel = "random",
n.chain = 4)
mcmc1 <- mtc.run(model, n.adapt = 50, n.iter = 1000, thin = 10)
mcmc2 <- mtc.run(model, n.adapt = 5000, n.iter = 100000, thin = 10)
gemtc::forest(relative.effect(mcmc2, t1 = "cau"), use.description = TRUE)
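# Network meta-regression: shared coefficient for the 'low.rob' covariate, with care as usual ('cau') as control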
regressor <- list(coefficient = 'shared',
variable = 'low.rob',
control = 'cau')
network.mr <- mtc.network(data.re = TherapyFormatsGeMTC$data,
studies = TherapyFormatsGeMTC$study.info)
model.mr <- mtc.model(network.mr,
type = "regression",
regressor = regressor)
mcmc3 <- mtc.run(model.mr,
n.adapt = 5000,
n.iter = 100000,
thin = 10)
summary(mcmc3)
forest(mcmc3)
# Scratch: simulate correlated effect estimates around the fixed-effect network
# estimates (the Sigma/var() lines below follow the examples in ?MASS::mvrnorm)
library(MASS)
Sigma <- matrix(c(10,3,3,2),2,2)
rep(0.64) # leftover from an earlier draft
theta = m.netmeta$TE.fixed
matrix(rep(0.15, 64), 8,8) -> tau
diag(tau) = 0.3
Sigma
var(mvrnorm(n = 1000, rep(0, 2), Sigma))
var(mvrnorm(n = 1000, rep(0, 2), Sigma, empirical = TRUE))
m.netmeta$TE.fixed
theta = c(-0.72, -0.63, -0.63, -0.47, -0.13, 0, 0.39)
matrix(rep(0.15, 64), 8,8) -> tau
diag(tau) = 0.3
theta = as.numeric(m.netmeta$TE.fixed)
matrix(rep(0.15, 4096), 64, 64) -> tau
diag(tau) = 0.3
mvrnorm(n = 186, theta, tau) -> dat
as.data.frame(dat) -> dat
cnames = dimnames(m.netmeta$TE.fixed)[[1]]
ls = list()
for (i in 1:8){
paste0(cnames[i], " vs ", cnames) -> ls[[i]]
}
unlist(ls) -> colnames
colnames(dat) = colnames # (assumed step) label the simulated draws by comparison so replacer() below can index them
TherapyFormats$versus = paste0(TherapyFormats$treat1, " vs ", TherapyFormats$treat2)
replacer = function(data, var){
  # Resample the standard errors and draw new effect sizes for one comparison
  n = nrow(data[data$versus == var,])
  data[data$versus == var, "seTE"] = runif(n, 0.09, 0.35)
  data[data$versus == var, "TE"] = sample(dat[, var], n)
  return(data)
}
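# Assumed usage (sketch): resample TE/seTE of one comparison from the simulated draws, e.g.
# TherapyFormats <- replacer(TherapyFormats, "grp vs wlc")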
## Node-splitting analysis of inconsistency
## ========================================
##
## comparison p.value CrI
##
## 9 d.ind.grp 0.00020
## 10 -> direct 0.34 (0.035, 0.64)
## 11 -> indirect -0.31 (-0.48, -0.14)
## 12 -> network -0.15 (-0.31, 0.0046)
## 13 d.grp.gsh 0.00185
## 14 -> direct -0.17 (-0.53, 0.19)
## 15 -> indirect 0.46 (0.28, 0.65)
## 16 -> network 0.33 (0.17, 0.50)
## 17 d.grp.wlc 0.00000
## 18 -> direct 1.5 (1.3, 1.7)
## 19 -> indirect 0.87 (0.69, 1.1)
## 20 -> network 1.1 (1.0, 1.3)
## 21 d.grp.ush 0.07020
## 22 -> direct -0.025 (-0.77, 0.71)
## 23 -> indirect 0.68 (0.46, 0.90)
## 24 -> network 0.62 (0.41, 0.84)
## 29 d.gsh.wlc 0.00125
## 30 -> direct 0.68 (0.54, 0.83)
## 31 -> indirect 1.1 (0.90, 1.4)
## 32 -> network 0.81 (0.69, 0.94)
## 41 d.ind.cau 0.00640
## 42 -> direct 0.52 (0.37, 0.68)
## 43 -> indirect 0.89 (0.68, 1.1)
## 44 -> network 0.66 (0.53, 0.78)
## 45 d.grp.cau 0.07560
## 46 -> direct 0.92 (0.73, 1.1)
## 47 -> indirect 0.66 (0.45, 0.88)
## 48 -> network 0.81 (0.66, 0.95)
##
data("TherapyFormats")
Breiman = TherapyFormats[TherapyFormats$author == "Breiman, 2001",]
TherapyFormats$versus = paste0(TherapyFormats$treat1, " vs ", TherapyFormats$treat2)
TherapyFormats[TherapyFormats$versus == "ind vs grp", "TE"] = TherapyFormats[TherapyFormats$versus == "ind vs grp", "TE"] + 0.2
TherapyFormats[TherapyFormats$versus == "ind vs grp", "seTE"] = TherapyFormats[TherapyFormats$versus == "ind vs grp", "seTE"] + 0.1
TherapyFormats[TherapyFormats$versus == "grp vs gsh", "TE"] = TherapyFormats[TherapyFormats$versus == "grp vs gsh", "TE"] - 0.4
TherapyFormats[TherapyFormats$versus == "grp vs wlc", "TE"] = TherapyFormats[TherapyFormats$versus == "grp vs wlc", "TE"] + 0.8
TherapyFormats[TherapyFormats$versus == "grp vs ush", "TE"] = TherapyFormats[TherapyFormats$versus == "grp vs ush", "TE"] - 0.7
TherapyFormats[TherapyFormats$versus == "gsh vs wlc", "TE"] = TherapyFormats[TherapyFormats$versus == "grp vs wlc", "TE"] - 0.2
TherapyFormats[TherapyFormats$versus == "ind vs cau", "TE"] = TherapyFormats[TherapyFormats$versus == "ind vs cau", "TE"] - 0.2
TherapyFormats[TherapyFormats$versus == "grp vs cau", "TE"] = TherapyFormats[TherapyFormats$versus == "grp vs cau", "TE"] + 0.5
TherapyFormats[TherapyFormats$author == "Breiman, 2001",] = Breiman
TherapyFormats[TherapyFormats$treat1 != "pla" & TherapyFormats$treat2 != "pla",] -> TherapyFormats
m.netmeta <- netmeta(TE = TE, seTE = seTE,
treat1 = treat1, treat2 = treat2, studlab = author,
data = TherapyFormats,
sm = "SMD",
comb.fixed = FALSE, comb.random = TRUE,
reference.group = "cau", details.chkmultiarm = TRUE,
sep.trts = " vs ")
netsplit(m.netmeta)
netheat(m.netmeta)
TherapyFormatsGeMTC$data
class(TherapyFormats)
# Leftover interactive check ('TFGMTC' is not defined in this script)
TFGMTC[TFGMTC$author == "Breiman, 2001",]
library(ggplot2)
power.analysis.bw = function (d, OR, k, n1, n2, p = 0.05, heterogeneity = "fixed")
{
odds = FALSE
if (missing(OR) & missing(d)) {
stop("Either 'd' or 'OR' must be provided.")
}
if (!(heterogeneity %in% c("fixed", "low", "moderate", "high"))) {
stop("'heterogeneity' must be either 'fixed', 'low', 'moderate', 'high'.")
}
if (missing(d)) {
odds = TRUE
d = log(OR) * (sqrt(3)/pi)
token1 = "log"
}
else {
token1 = "no.log"
}
es = d
if (heterogeneity == "fixed") {
het.factor = 1
v.d = ((n1 + n2)/(n1 * n2)) + ((d * d)/(2 * (n1 + n2)))
v.m = v.d/k
lambda = (d/sqrt(v.m))
plevel = 1 - (p/2)
zval = qnorm(p = plevel, 0, 1)
power = 1 - (pnorm(zval - lambda)) + (pnorm(-zval - lambda))
token2 = "fixed"
}
if (heterogeneity == "low") {
het.factor = 1.33
v.d = ((n1 + n2)/(n1 * n2)) + ((d * d)/(2 * (n1 + n2)))
v.m = v.d/k
v.m = 1.33 * v.m
lambda = (d/sqrt(v.m))
plevel = 1 - (p/2)
zval = qnorm(p = plevel, 0, 1)
power = 1 - (pnorm(zval - lambda)) + (pnorm(-zval - lambda))
token2 = "low"
}
if (heterogeneity == "moderate") {
het.factor = 1.67
v.d = ((n1 + n2)/(n1 * n2)) + ((d * d)/(2 * (n1 + n2)))
v.m = v.d/k
v.m = 1.67 * v.m
lambda = (d/sqrt(v.m))
plevel = 1 - (p/2)
zval = qnorm(p = plevel, 0, 1)
power = 1 - (pnorm(zval - lambda)) + (pnorm(-zval - lambda))
token2 = "moderate"
}
if (heterogeneity == "high") {
het.factor = 2
v.d = ((n1 + n2)/(n1 * n2)) + ((d * d)/(2 * (n1 + n2)))
v.m = v.d/k
v.m = 2 * v.m
lambda = (d/sqrt(v.m))
plevel = 1 - (p/2)
zval = qnorm(p = plevel, 0, 1)
power = 1 - (pnorm(zval - lambda)) + (pnorm(-zval - lambda))
token2 = "high"
}
dvec = (1:1000)/1000
if (d > 1) {
dvec = (1:(d * 1000))/1000
}
powvect = vector()
for (i in 1:length(dvec)) {
d = dvec[i]
v.d = ((n1 + n2)/(n1 * n2)) + ((d * d)/(2 * (n1 + n2)))
v.m = v.d/k
v.m = het.factor * v.m
lambda = (d/sqrt(v.m))
plevel = 1 - (p/2)
zval = qnorm(p = plevel, 0, 1)
powvect[i] = 1 - (pnorm(zval - lambda)) + (pnorm(-zval -
lambda))
}
if (odds == FALSE) {
plotdat = as.data.frame(cbind(dvec, powvect))
plot = ggplot(data = plotdat, aes(x = dvec, y = powvect)) +
geom_line(color = "gray70", size = 2) + geom_point(aes(x = es,
y = power), color = "gray30", size = 5) + theme_minimal() +
geom_hline(yintercept = 0.8, color = "black", linetype = "dashed") +
ylab("Power") + xlab("Effect size (SMD)")
}
else {
dvecs = exp(dvec * (pi/sqrt(3)))
dvec.inv = exp(-dvec * (pi/sqrt(3)))
dvec = as.vector(rbind(dvec.inv, dvecs))
powvect = as.vector(rbind(powvect, powvect))
plotdat = as.data.frame(cbind(dvec, powvect))
plot = ggplot(data = plotdat, aes(x = dvec, y = powvect)) +
geom_line(color = "gray70", size = 2) + geom_point(aes(x = exp(es *
(pi/sqrt(3))), y = power), color = "gray30", size = 5) +
theme_minimal() + geom_hline(yintercept = 0.8, color = "black",
linetype = "dashed") + ylab("Power") + xlab("Effect size (OR)") +
scale_x_log10()
}
return.list = list(Plot = plot, Power = power)
class(return.list) = c("power.analysis", token1, token2)
invisible(return.list)
return.list
}
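# Minimal usage sketch (hypothetical input values): power of a meta-analysis
# of k = 10 trials with n1 = n2 = 25 per arm, assuming SMD = 0.3, alpha = 0.05,
# and moderate between-study heterogeneity
res <- power.analysis.bw(d = 0.3, k = 10, n1 = 25, n2 = 25,
                         p = 0.05, heterogeneity = "moderate")
res$Power   # numeric power estimate
res$Plot    # black-and-white power curve (ggplot object)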
\@ref(XXX)
# (PART) Getting Started {-}
# Introduction {#intro}
## What are Meta-Analyses? {#what-are-mas}
## "Exercises in Mega-Silliness": A Historical Anecdote {#history}
## Apples and Oranges: A Quick Tour of Meta-Analysis Pitfalls {#pitfalls}
## Problem Specification, Study Search and Coding {#spec-search-coding}
### Defining the Research Question {#research-question}
### Analysis Plan and Preregistration {#analysis-plan}
### Study Search {#study-search}
### Study Selection {#study-selection}
### Data Extraction and Coding {#data-extraction}
# Discovering \textsf{R} {#discovering-R}
## Installing \textsf{R} and R Studio {#install-R}
## Packages {#packages}
## The _{dmetar}_ Package {#dmetar}
## Data Preparation & Import {#data-prep-R}
## Data Manipulation {#data-manip-R}
### Class Conversion {#class-conversion}
### Data Slicing {#data-slicing}
### Data Transformation {#data-transform}
### Saving Data {#saving-data}
# Effect Sizes {#effects}
## What is an Effect Size? {#what-is-es}
## Measures and Effect Sizes in Single Group Designs {#single-group-es}
### Means {#means}
### Proportions {#props}
### Correlations {#cors}
#### Pearson Product-Moment Correlation {#pearson-cors}
#### Point-Biserial Correlation {#pb-cors}
### (Standardized) Mean Differences {#s-md}
#### Between-Group Mean Difference {#b-group-md}
#### Between-Group Standardized Mean Difference {#b-group-smd}
#### Within-Group (Standardized) Mean Difference {#w-group-smd}
### Risk & Odds Ratios {#ratios}
#### Risk Ratio {#rr}
#### Odds Ratio {#or}
### Incidence Rate Ratios {#irr}
## Effect Size Correction {#es-correction}
### Small Sample Bias {#hedges-g}
### Unreliability {#unrealiable}
### Range Restriction {#range}
### Different Effect Size Data Formats {#es-formats-different}
### The Unit-of-Analysis Problem {#unit-of-analysis}
# Pooling Effect Sizes {#pooling-es}
## The Fixed-Effect and Random-Effects Model {#fem-rem}
### The Fixed-Effect Model {#fem}
### The Random-Effects Model {#rem}
#### Estimators of the Between-Study Heterogeneity {#tau-estimators}
#### Knapp-Hartung Adjustments {#knapp-hartung}
## Effect Size Pooling in \textsf{R} {#pooling-es-r}
### Pre-Calculated Effect Size Data {#pre-calculated-es}
### (Standardized) Mean Differences {#pooling-smd}
#### Risk & Odds Ratios {#pooling-or-rr}
##### The Mantel-Haenszel Method {#mantel-haenszel}
#### Incidence Rate Ratios {#pooling-irr}
### Correlations {#pooling-cor}
### Means {#pooling-mean}
### Proportions {#pooling-props}
# Between-Study Heterogeneity {#heterogeneity}
## Measures of Heterogeneity {#het-measures}
### Cochran's $Q$ {#cochran-q}
### Higgins' & Thompsons' $I^2$ Statistic {#i-squared}
### Heterogeneity Variance $\tau^2$ and Standard Deviation $\tau$ {#tau}
## Which Measure Should I Use? {#het-measure-which}
## Assessing Heterogeneity in \textsf{R} {#het-R}
## Outliers & Influential Cases {#outliers}
### Basic Outlier Removal {#basic-outlier}
### Influence Analysis {#influence-analysis}
#### Baujat Plot {#baujat}
#### Influence Diagnostics {#inf-diags}
#### Leave-One-Out Meta-Analysis Results {#loo-ma}
### GOSH Plot Analysis {#gosh}
# Forest Plots {#forest}
## Forest Plots in \textsf{R} {#forest-R}
## Drapery Plots {#drapery}
# Subgroup Analyses {#subgroup}
## The Fixed-Effects (Plural) Model {#fixed-effect-plural}
## Limitations & Pitfalls of Subgroup Analyses {#limits-subgroup}
## Subgroup Analysis in \textsf{R} {#subgroup-R}
# Meta-Regression {#metareg}
## The Meta-Regression Model {#the-metareg-model}
## Meta-Regression in \textsf{R} {#metareg-R}
## Multiple Meta-Regression {#multiple-metareg}
### Interactions {#interact}
### Common Pitfalls in Multiple Meta-Regression {#limits-metareg}
### Multiple Meta-Regression in \textsf{R} {#multiple-metareg-R}
#### Multi-Model Inference {#multimodel-inference}
# Publication Bias {#pub-bias}
## What is Publication Bias? {#types-of-pub-biases}
## Addressing Publication Bias in Meta-Analyses {#addressing-pubbias}
### Small-Study Effects Methods {#small-study-effects}
#### The Funnel Plot {#funnel-plot}
#### Egger's Regression Test {#eggers-test}
#### Peters' Regression Test {#peters-test}
#### Duval & Tweedie Trim and Fill Method {#duval-and-tweedie}
#### PET-PEESE {#pet-peese}
#### Rücker's Limit Meta-Analysis Method {#rucker-ma}
### P-Curve {#p-curve}
#### P-Curve Effect Size Estimation {#p-curve-es}
### Selection Models {#selection-models}
#### Step Function Selection Models {#step-function-selmodels}
##### Three-Parameter Selection Model {#three-param-selmodel}
##### Fixed Weights Selection Model {#fixed-weights-selmodel}
## Which Method Should I Use? {#pub-bias-which-method}
# "Multilevel" Meta-Analysis {#multilevel-ma}
## The Multilevel Nature of Meta-Analysis {#multilevel-nature}
## Fitting Three-Level Meta-Analysis Models in \textsf{R} {#multilevel-R}
# Structural Equation Modeling Meta-Analysis {#sem}
## What Is Meta-Analytic Structural Equation Modeling? {#what-is-meta-sem}
## Multivariate Meta-Analysis {#multivariate-ma}
## Confirmatory Factor Analysis {#cfa}
# Network Meta-Analysis {#netwma}
## What Are Network Meta-Analyses? {#what-is-net-ma}
### Direct and Indirect Evidence {#direct-indirect-evidence}
### Transitivity & Consistency {#transitivity-consistency}
### Network Meta-Analysis Models {#netw-which-model}
## Frequentist Network Meta-Analysis {#frequentist-ma}
##### The Net Heat Plot {#net-heat-plot}
## Bayesian Network Meta-Analysis {#bayesian-net-ma}
### Bayesian Inference {#bayesian-inference}
### The Bayesian Network Meta-Analysis Model {#bayesian-net-ma-model}
# Bayesian Meta-Analysis {#bayesian-ma}
## The Bayesian Hierarchical Model {#bayes-hierarchical-model}
## Setting Prior Distributions {#priors}
## Bayesian Meta-Analysis in \textsf{R} {#bayes-ma-R}
# Power Analysis {#power}
# Risk of Bias Plots {#rob-plots}
# Reporting & Reproducibility {#reporting-reproducibility}
## OSF Repositories {#osf}
### Collaboration, Open Access & Pre-Registration {#pre-registration}
# Effect Size Calculation & Conversion {#es-calc}
## Number Needed To Treat {#nnt}
## Multi-Arm Studies {#pool-groups}
## Correlations {#convert-corr}
power.analysis.random<-function(d,k,n1,n2,p,heterogeneity){
  # Power of a random-effects meta-analysis; between-study heterogeneity is
  # approximated by inflating the pooled variance (low: 1.33, moderate: 1.67, high: 2)
if(heterogeneity=="low"){
v.d<-((n1+n2)/(n1*n2))+((d*d)/(2*(n1+n2)))
v.m<-v.d/k
v.m<-1.33*v.m
lambda<-(d/sqrt(v.m))
plevel<-1-(p/2)
zval<-qnorm(p=plevel, 0,1)
power<-1-(pnorm(zval-lambda))+(pnorm(-zval-lambda))
return(power)
}
if(heterogeneity=="moderate"){
v.d<-((n1+n2)/(n1*n2))+((d*d)/(2*(n1+n2)))
v.m<-v.d/k
v.m<-1.67*v.m
lambda<-(d/sqrt(v.m))
plevel<-1-(p/2)
zval<-qnorm(p=plevel, 0,1)
power<-1-(pnorm(zval-lambda))+(pnorm(-zval-lambda))
return(power)
}
if(heterogeneity=="high"){
v.d<-((n1+n2)/(n1*n2))+((d*d)/(2*(n1+n2)))
v.m<-v.d/k
v.m<-2*v.m
lambda<-(d/sqrt(v.m))
plevel<-1-(p/2)
zval<-qnorm(p=plevel, 0,1)
power<-1-(pnorm(zval-lambda))+(pnorm(-zval-lambda))
return(power)
}
}
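# Minimal usage sketch (hypothetical input values): approximate power of a
# random-effects meta-analysis of k = 15 trials (n1 = n2 = 30 per arm),
# assuming d = 0.25, alpha = 0.05, and moderate heterogeneity
power.analysis.random(d = 0.25, k = 15, n1 = 30, n2 = 30,
                      p = 0.05, heterogeneity = "moderate")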
pcurve.bw = function(x, effect.estimation = FALSE, N, dmin = 0, dmax = 1){
# Rename x to metaobject, remove x
metaobject = x
rm(x)
# Stop if metaobject is not meta or does not contain TE or seTE column
if (!(class(metaobject)[1] %in% c("metagen", "metabin", "metacont", "metacor", "metainc", "meta", "metaprop"))){
for (i in 1:length(colnames(metaobject))){
te.exists = FALSE
if (colnames(metaobject)[i]=="TE"){
te.exists = TRUE
break
} else {
}
}
for (i in 1:length(colnames(metaobject))){
sete.exists = FALSE
if (colnames(metaobject)[i]=="seTE"){
sete.exists = TRUE
break
} else {
}
}
for (i in 1:length(colnames(metaobject))){
studlab.exists = FALSE
if (colnames(metaobject)[i]=="studlab"){
studlab.exists = TRUE
break
} else {
}
}
if(te.exists == FALSE | sete.exists ==FALSE | studlab.exists ==FALSE){
stop("x must be a meta-analysis object generated by meta functions or a data.frame with columns labeled studlab, TE, and seTE.")
}
}
#Disable scientific notation
options(scipen=999)
# Calculate Z
zvalues.input = abs(metaobject$TE/metaobject$seTE)
##############################################
# 1. Functions ###############################
##############################################
getncp.f =function(df1,df2, power) {
error = function(ncp_est, power, x, df1,df2) pf(x, df1 = df1, df2=df2, ncp = ncp_est) - (1-power)
xc=qf(p=.95, df1=df1,df2=df2)
return(uniroot(error, c(0, 1000), x = xc, df1 = df1,df2=df2, power=power)$root) }
getncp.c =function(df, power) {
xc=qchisq(p=.95, df=df)
error = function(ncp_est, power, x, df) pchisq(x, df = df, ncp = ncp_est) - (1-power)
return(uniroot(error, c(0, 1000), x = xc, df = df, power=power)$root) }
getncp=function(family,df1,df2,power) {
if (family=="f") ncp=getncp.f(df1=df1,df2=df2,power=power)
if (family=="c") ncp=getncp.c(df=df1,power=power)
return(ncp) }
percent <- function(x, digits = 0, format = "f", ...) {
paste(formatC(100 * x, format = format, digits = digits, ...), "%", sep = "")
}
pbound=function(p) pmin(pmax(p,2.2e-16),1-2.2e-16)
prop33=function(pc)
{
prop=ifelse(family=="f" & p<.05,1-pf(qf(1-pc,df1=df1, df2=df2),df1=df1, df2=df2, ncp=ncp33),NA)
prop=ifelse(family=="c" & p<.05,1-pchisq(qchisq(1-pc,df=df1), df=df1, ncp=ncp33),prop)
prop
}
stouffer=function(pp) sum(qnorm(pp),na.rm=TRUE)/sqrt(sum(!is.na(pp)))
###############################################################################
# 2. Process data ############################################################
###############################################################################
# Note: due to reliance on the pcurve-app function, z-scores are pasted into characters first
# and then screened to generate variables necessary for further computation
zvalues.input = paste("z=", zvalues.input, sep="")
filek = "input"
raw = zvalues.input
raw=tolower(raw)
ktot=length(raw)
k=seq(from=1,to=length(raw))
stat=substring(raw,1,1)
test=ifelse(stat=="r","t",stat)
# Create family
family=test
family=ifelse(test=="t","f",family)
family=ifelse(test=="z","c",family)
#family: f,c converting t-->f and z-->c
# Find comma,parentheses,equal sign
par1 =str_locate(raw,"\\(")[,1]
par2 =str_locate(raw,"\\)")[,1]
comma=str_locate(raw,",")[,1]
eq =str_locate(raw,"=")[,1]
# DF for t-tests
df=as.numeric(ifelse(test=="t",substring(raw,par1+1,par2 -1),NA))
# DF1
df1=as.numeric(ifelse(test=="f",substring(raw,par1+1,comma-1),NA))
df1=as.numeric(ifelse(test=="z",1,df1))
df1=as.numeric(ifelse(test=="t",1,df1))
df1=as.numeric(ifelse(test=="c",substring(raw,par1+1,par2 -1),df1))
# DF2
df2=as.numeric(ifelse(test=="f",substring(raw,comma+1,par2-1),NA))
df2=as.numeric(ifelse(test=="t",df,df2))
equal=abs(as.numeric(substring(raw,eq+1)))
value=ifelse((stat=="f" | stat=="c"),equal,NA)
value=ifelse(stat=="r", (equal/(sqrt((1-equal**2)/df2)))**2,value)
value=ifelse(stat=="t", equal**2 ,value)
value=ifelse(stat=="z", equal**2 ,value)
p=ifelse(family=="f",1-pf(value,df1=df1,df2=df2),NA)
p=ifelse(family=="c",1-pchisq(value,df=df1),p)
p=pbound(p) #Bound it to level of precision, see function 3 above
ksig= sum(p<.05,na.rm=TRUE) #significant studies
khalf=sum(p<.025,na.rm=TRUE) #half p-curve studies
if (ksig <= 2){
stop("Two or less significant (p<0.05) effect sizes were detected, so p-curve analysis cannot be conducted.")
}
##############################################################################
# 3. PP-values ###############################################################
##############################################################################
# Right Skew, Full p-curve
ppr=as.numeric(ifelse(p<.05,20*p,NA))
ppr=pbound(ppr)
# Right Skew, half p-curve
ppr.half=as.numeric(ifelse(p<.025,40*p,NA))
ppr.half=pbound(ppr.half)
# Power of 33%
ncp33=mapply(getncp,df1=df1,df2=df2,power=1/3,family=family)
# Full-p-curve
pp33=ifelse(family=="f" & p<.05,3*(pf(value, df1=df1, df2=df2, ncp=ncp33)-2/3),NA)
pp33=ifelse(family=="c" & p<.05,3*(pchisq(value, df=df1, ncp=ncp33)-2/3),pp33)
pp33=pbound(pp33)
# half p-curve
prop25=3*prop33(.025)
prop25.sig=prop25[p<.05]
#Compute pp-values for the half
pp33.half=ifelse(family=="f" & p<.025, (1/prop25)*(pf(value,df1=df1,df2=df2,ncp=ncp33)-(1-prop25)),NA)
pp33.half=ifelse(family=="c" & p<.025, (1/prop25)*(pchisq(value,df=df1, ncp=ncp33)-(1-prop25)),pp33.half)
pp33.half=pbound(pp33.half)
##############################################################################
# 4. Stouffer & Binomial test ################################################
##############################################################################
# Convert pp-values to Z scores, using Stouffer function above
Zppr = stouffer(ppr)
Zpp33 = stouffer(pp33)
Zppr.half = stouffer(ppr.half)
Zpp33.half = stouffer(pp33.half)
# Overall p-values from Stouffer test
p.Zppr = pnorm(Zppr)
p.Zpp33 = pnorm(Zpp33)
p.Zppr.half = pnorm(Zppr.half)
p.Zpp33.half = pnorm(Zpp33.half)
# Save results to file
main.results=as.numeric(c(ktot, ksig, khalf, Zppr,
p.Zppr, Zpp33, p.Zpp33, Zppr.half,
p.Zppr.half, Zpp33.half, p.Zpp33.half))
# BINOMIAL
# Observed share of p<.025
prop25.obs=sum(p<.025)/sum(p<.05)
# Flat null
binom.r=1-pbinom(q=prop25.obs*ksig- 1, prob=.5, size=ksig)
# Power of 33% null
binom.33=ppoibin(kk=prop25.obs*ksig,pp=prop25[p<.05])
# Save binomial results
binomial=c(mean(prop25.sig), prop25.obs, binom.r, binom.33)
# Beautifier function
cleanp=function(p)
{
p.clean=round(p,4) #Round it
p.clean=substr(p.clean,2,6) #Drop the 0
p.clean=paste0("= ",p.clean)
if (p < .0001) p.clean= " < .0001"
if (p > .9999) p.clean= " > .9999"
return(p.clean)
}
#If there are zero p<.025, change Stouffer values for half-p-curve tests for "N/A" messages
if (khalf==0) {
Zppr.half ="N/A"
p.Zppr.half ="=N/A"
Zpp33.half ="N/A"
p.Zpp33.half ="=N/A"
}
#If there is more than one p<.025, round the Z and beautify the p-values
if (khalf>0) {
Zppr.half =round(Zppr.half,2)
Zpp33.half =round(Zpp33.half,2)
p.Zppr.half=cleanp(p.Zppr.half)
p.Zpp33.half=cleanp(p.Zpp33.half)
}
#Clean results for full test
Zppr=round(Zppr,2)
Zpp33=round(Zpp33,2)
p.Zppr=cleanp(p.Zppr)
p.Zpp33=cleanp(p.Zpp33)
binom.r=cleanp(binom.r)
binom.33=cleanp(binom.33)
################################################
# 5. Power ####################################
################################################
powerfit=function(power_est)
{
ncp_est=mapply(getncp,df1=df1,df2=df2,power=power_est,family=family)
pp_est=ifelse(family=="f" & p<.05,(pf(value,df1=df1,df2=df2,ncp=ncp_est)-(1-power_est))/power_est,NA)
pp_est=ifelse(family=="c" & p<.05,(pchisq(value,df=df1,ncp=ncp_est)-(1-power_est))/power_est,pp_est)
pp_est=pbound(pp_est)
return(stouffer(pp_est))
}
fit=c()
fit=abs(powerfit(.051))
for (i in 6:99) fit=c(fit,abs(powerfit(i/100)))
mini=match(min(fit,na.rm=TRUE),fit)
hat=(mini+4)/100
x.power=seq(from=5,to=99)/100
get.power_pct =function(pct) {
#Function that finds power that gives p-value=pct for the Stouffer test
#for example, get.power_pct(.5) returns the level of power that leads to p=.5 for the stouffer test.
#half the time we would see p-curves more right skewed than the one we see, and half the time
#less right-skewed, if the true power were that get.power_pct(.5). So it is the median estimate of power
#similarly, get.power_pct(.1) gives the 10th percentile estimate of power...
#Obtain the normalized equivalent of pct, e.g., for 5% it is -1.64, for 95% it is 1.64
z=qnorm(pct) #convert to z because powerfit() outputs a z-score.
#Quantify gap between computed p-value and desired pct
error = function(power_est, z) powerfit(power_est) - z
#Find the value of power that makes that gap zero, (root)
return(uniroot(error, c(.0501, .99),z)$root) }
# Boundary conditions
p.power.05=pnorm(powerfit(.051)) #Probability p-curve would be at least as right-skewed if power=.051
p.power.99=pnorm(powerfit(.99)) #Probability p-curve would be at least as right-skewed if power=.99
# Lower end of ci
if (p.power.05<=.95) power.ci.lb=.05
if (p.power.99>=.95) power.ci.lb=.99
if (p.power.05>.95 && p.power.99<.95) power.ci.lb=get.power_pct(.95)
# Higher end of CI
if (p.power.05<=.05) power.ci.ub=.05
if (p.power.99>=.05) power.ci.ub=.99
if (p.power.05>.05 && p.power.99<.05) power.ci.ub=get.power_pct(.05)
# Save power fit
power_results=c(power.ci.lb,hat,power.ci.ub)
##############################################################################
# 6. Plot ###################################################################
##############################################################################
# Green line (Expected p-curve for 33% power)
gcdf1=prop33(.01)
gcdf2=prop33(.02)
gcdf3=prop33(.03)
gcdf4=prop33(.04)
green1=mean(gcdf1,na.rm=TRUE)*3
green2=mean(gcdf2-gcdf1,na.rm=TRUE)*3
green3=mean(gcdf3-gcdf2,na.rm=TRUE)*3
green4=mean(gcdf4-gcdf3,na.rm=TRUE)*3
green5=mean(1/3-gcdf4,na.rm=TRUE)*3
green=100*c(green1,green2,green3,green4,green5)
# Blue line (observed p-curve)
ps=ceiling(p[p<.05]*100)/100
blue=c()
for (i in c(.01,.02,.03,.04,.05)) blue=c(blue,sum(ps==i,na.rm=TRUE)/ksig*100)
# Red line
red=c(20,20,20,20,20)
# Make the graph
x = c(.01,.02,.03,.04,.05)
par(mar=c(6,5.5,1.5,3))
moveup=max(max(blue[2:5])-66,0)
ylim=c(0,105+moveup)
legend.top=100+moveup
plot(x,blue, type='l', col='black', main="",
lwd=2, xlab="", ylab="", xaxt="n",yaxt="n", xlim=c(0.01,0.051),
ylim=ylim, bty='L', las=1,axes=F)
x_=c(".01",".02",".03",".04",".05")
axis(1,at=x,labels=x_)
y_=c("0%","25%","50%","75%","100%")
y=c(0,25,50,75,100)
axis(2,at=y,labels=y_,las=1,cex.axis=1.2)
mtext("Percentage of test results",font=2,side=2,line=3.85,cex=1.25)
mtext("p ",font=4,side=1,line=2.3,cex=1.25)
mtext(" -value", font=2,side=1,line=2.3,cex=1.25)
points(x,blue,type="p",pch=20,bg="black",col="black")
text(x+.00075,blue+3.5,percent(round(blue)/100),col='black', cex=.75)
lines(x,red, type='l', col='gray80', lwd=1.5, lty=3)
lines(x,green, type='l', col='gray80', lwd=1.5, lty=5)
tab1=.017 #Labels for line at p=.023 in x-axis
tab2=tab1+.0015 #Test results and power esimates at tab1+.0015
gap1=9 #between labels
gap2=4 #between label and respective test (e.g., "Observed p-curve" and "power estimate")
font.col='gray'
text.blue=paste0("Power estimate: ",percent(hat),", CI(",
percent(power.ci.lb),",",
percent(power.ci.ub),")")
text(tab1,legend.top, adj=0,cex=.85,bquote("Observed "*italic(p)*"-curve"))
text(tab2,legend.top-gap2,adj=0,cex=.68,text.blue,col=font.col)
text.red=bquote("Tests for right-skewness: "*italic(p)*""[Full]~.(p.Zppr)*", "*italic(p)*""[Half]~.(p.Zppr.half))
#note: .() within bquote prints the value rather than the variable name
text(tab1,legend.top-gap1, adj=0,cex=.85, "Null of no effect" )
text(tab2,legend.top-gap1-gap2, adj=0,cex=.68, text.red, col=font.col )
text.green=bquote("Tests for flatness: "*italic(p)*""[Full]~.(p.Zpp33)*", "*italic(p)*""[half]~.(p.Zpp33.half)*", "*italic(p)*""[Binomial]~.(binom.33))
text(tab1,legend.top-2*gap1, adj=0,cex=.85,"Null of 33% power")
text(tab2,legend.top-2*gap1-gap2, adj=0,cex=.68,text.green,col=font.col)
segments(x0=tab1-.005,x1=tab1-.001,y0=legend.top,y1=legend.top, col='black',lty=1,lwd=1.5)
segments(x0=tab1-.005,x1=tab1-.001,y0=legend.top-gap1, y1=legend.top-gap1,col='gray',lty=3,lwd=1.5)
segments(x0=tab1-.005,x1=tab1-.001,y0=legend.top-2*gap1,y1=legend.top-2*gap1,col='gray',lty=2,lwd=1.5)
rect(tab1-.0065,legend.top-2*gap1-gap2-3,tab1+.032,legend.top+3,border='gray')
msgx=bquote("Note: The observed "*italic(p)*"-curve includes "*.(ksig)*
" statistically significant ("*italic(p)*" < .05) results, of which "*.(khalf)*
" are "*italic(p)*" < .025.")
mtext(msgx,side=1,line=4,cex=.65,adj=0)
kns=ktot-ksig
if (kns==0) ns_msg="There were no non-significant results entered."
if (kns==1) ns_msg=bquote("There was one additional result entered but excluded from "*italic(p)*"-curve because it was "*italic(p)*" > .05.")
if (kns>1) ns_msg=bquote("There were "*.(kns)*" additional results entered but excluded from "*italic(p)*"-curve because they were "*italic(p)*" > .05.")
mtext(ns_msg,side=1,line=4.75,cex=.65,adj=0)
##############################################################################
# 7 Save Calculations #######################################################
##############################################################################
# table_calc
table_calc=data.frame(raw, p, ppr, ppr.half, pp33, pp33.half,
qnorm(ppr), qnorm(ppr.half), qnorm(pp33), qnorm(pp33.half))
headers1=c("Entered statistic","p-value", "ppr", "ppr half", "pp33%","pp33 half",
"Z-R","Z-R half","Z-33","z-33 half")
table_calc=setNames(table_calc,headers1)
# table_figure
headers2=c("p-value","Observed (blue)","Power 33% (Green)", "Flat (Red)")
table_figure=setNames(data.frame(x,blue,green,red),headers2)
################################################
# 8. Cumulative p-curves (Deprecated) ##########
################################################
#7.1 FUNCTION THAT RECOMPUTES OVERALL STOUFFER TEST WITHOUT (K) MOST EXTREME VALUES, ADJUSTING THE UNIFORM TO ONLY INCLUDE RANGE THAT REMAINS
dropk=function(pp,k,droplow)
{
#Syntax:
#pp: set of pp-values to analyze sensitivity to most extremes
#k: # of most extreme values to exclude
#dropsmall: 1 to drop smallest, 0 to drop largest
pp=pp[!is.na(pp)] #Drop missing values
n=length(pp) #See how many studies are left
pp=sort(pp) #Sort the pp-value from small to large
if (k==0) ppk=pp #If k=0 do nothing for nothing is being dropped
#If we are dropping low values
if (droplow==1 & k>0)
{
#Eliminate lowest k from the vector of pp-values
ppk=(pp[(1+k):n])
ppmin=min(pp[k],k/(n+1)) #Boundary used to define possible range of values after exclusion
ppk=(ppk-ppmin)/(1-ppmin) #Take the k+1 smallest pp-value up to the highest, subtract from each the boundary value, divide by the range, ~U(0,1) under the null
#This is explained in Supplement 1 of Simonsohn, Simmons Nelson, JEPG 2016 "Better p-curves" paper. See https://osf.io/mbw5g/
}
#If we are dropping high values
if (droplow==0 & k>0)
{
#Eliminate lowest k from the vector of pp-values
ppk=pp[1:(n-k)]
ppmax=max(pp[n-k+1],(n-k)/(n+1)) #Find new boundary of range
ppk=ppk/ppmax #Redefine range to make U(0,1)
}
#In case of a tie with two identical values we would have the ppk be 0 or 1, let's replace that with almost 0 and almost 1
ppk=pmax(ppk,.00001) #Adds small constant to the smallest redefined p-value, avoids problem if dropped p-value is "equal" to next highest, then that pp-value becomes 0
ppk=pmin(ppk,.99999) #Subtract small constant to the largest redefined pp-value, same reason
Z=sum(qnorm(ppk))/sqrt(n-k)
return(pnorm(Z))
} #End function dropk
#7.2 Apply function, in loop with increasing number of exclusions, to full p-curve
#Empty vectors for results
droplow.r=droplow.33=drophigh.r=drophigh.33=c()
#Loop over full p-curves
for (i in 0:(round(ksig/2)-1))
{
#Drop the lowest k studies in terms of respective overall test
#Right skew
droplow.r= c(droplow.r, dropk(pp=ppr,k=i,droplow=1))
drophigh.r=c(drophigh.r, dropk(pp=ppr,k=i,droplow=0))
#Power of 33%
droplow.33=c(droplow.33, dropk(pp=pp33,k=i,droplow=1))
drophigh.33=c(drophigh.33, dropk(pp=pp33,k=i,droplow=0))
}
#Half p-curves
if (khalf>0)
{
droplow.halfr=drophigh.halfr=c()
for (i in 0:(round(khalf/2)-1))
{
#Drop the lowest k studies in terms of respective overall test
droplow.halfr= c(droplow.halfr, dropk(pp=ppr.half,k=i,droplow=1))
drophigh.halfr=c(drophigh.halfr, dropk(pp=ppr.half,k=i,droplow=0))
} #End loop
}#End if that runs calculations only if khalf>0
#7.3 FUNCTION THAT DOES THE PLOT OF RESULTS
plotdrop=function(var,col)
{
k=length(var)
#Plot the dots
plot(0:(k-1),var,xlab="",ylab="",type="b",yaxt="n",xaxt="n",main="",
cex.main=1.15,ylim=c(0,1),col=col)
#Add marker in results with 0 drops
points(0,var[1],pch=19,cex=1.6)
#Red line at p=.05
abline(h=.05,col="red")
#Y-axis value labels
axis(2,c(.05,2:9/10),labels=c('.05','.2','.3','.4','.5','.6','.7','.8','.9'),las=1,cex.axis=1.5)
axis(1,c(0:(k-1)),las=1,cex.axis=1.4)
}
######################################################################################
# 9. Effect Estimation ###############################################################
######################################################################################
if (effect.estimation == TRUE){
# Define ci.to.t function
ci.to.t = function(TE, lower, upper, n){
z.to.d = function(z, n){
d = (2*z)/sqrt(n)
return(abs(d))
}
ci.to.p = function(est, lower, upper){
SE = (upper-lower)/(2*1.96)
z = abs(est/SE)
p = exp(-0.717*z - 0.416*z^2)
return(p)
}
d.to.t = function(d, n){
df = n-2
t = (d*sqrt(df))/2
return(t)
}
p = ci.to.p(TE, lower, upper)
z = abs(qnorm(p/2))
d = z.to.d(z, n)
t = d.to.t(d, n)
return(t)
}
#Function 13 - loss function
loss=function(t_obs,df_obs,d_est) {
#1.Convert all ts to the same sign (for justification see Supplement 5)
t_obs=abs(t_obs)
#2 Compute p-values
p_obs=2*(1-pt(t_obs,df=df_obs))
#3 Keep significant t-values and corresponding df.
t.sig=subset(t_obs,p_obs<.05)
df.sig=subset(df_obs,p_obs<.05)
#4.Compute non-centrality parameter implied by d_est and df_obs
#df+2 is total N.
#Because the noncentrality parameter for the student distribution is ncp=sqrt(n/2)*d,
#we add 2 to d.f. to get N, divide by 2 to get n, and by 2 again for ncp, so -->df+2/4
ncp_est=sqrt((df.sig+2)/4)*d_est
#5.Find critical t-value for p=.05 (two-sided)
#this is used below to compute power, it is a vector as different tests have different dfs
#and hence different critical values
tc=qt(.975,df.sig)
#4.Find power for ncp given tc, again, this is a vector of implied power, for ncp_est, for each test
power_est=1-pt(tc,df.sig,ncp_est)
#5.Compute pp-values
#5.1 First get the overall probability of a t>tobs, given ncp
p_larger=pt(t.sig,df=df.sig,ncp=ncp_est)
#5.2 Now, condition on p<.05
ppr=(p_larger-(1-power_est))/power_est #this is the pp-value for right-skew
#6. Compute the gap between the distribution of observed pp-values and a uniform distribution 0,1
KSD=ks.test(ppr,punif)$statistic #this is the D statistic outputted by the KS test against uniform
return(KSD)
}
if(missing(N)){
stop("If 'effect.estimation=TRUE', argument 'N' must be provided.")
}
if (length(N) != length(metaobject$TE)){
stop("N must be of same length as the number of studies contained in x.")
}
lower = metaobject$TE - (metaobject$seTE*1.96)
upper = metaobject$TE + (metaobject$seTE*1.96)
t_obs = ci.to.t(metaobject$TE, lower, upper, N)
df_obs = N-2
#Results will be stored in these vectors, create them first
loss.all=c()
di=c()
#Compute loss for effect sizes between d=c(dmin,dmax) in steps of .01
for (i in 0:((dmax-dmin)*100))
{
d=dmin+i/100 #effect size being considered
di=c(di,d) #add it to the vector (kind of silly, but kept for symmetry)
options(warn=-1) #turn off warnings because R does not like its own pt() function!
loss.all=c(loss.all,loss(df_obs=df_obs,t_obs=t_obs,d_est=d))
#apply loss function so that effect size, store result
options(warn=0) #turn warnings back on
}
#find the effect leading to smallest loss in that set, that becomes the starting point in the optimize command
imin=match(min(loss.all),loss.all) #which i tested effect size lead to the overall minimum?
dstart=dmin+imin/100 #convert that i into a d.
#optimize around the global minimum
dhat=optimize(loss,c(dstart-.1,dstart+.1), df_obs=df_obs,t_obs=t_obs)
options(warn=-0)
#Plot results
plot(di,loss.all,xlab="Effect size\nCohen-d", ylab="Loss (D stat in KS test)",ylim=c(0,1), main="How well does each effect size fit? (lower is better)")
points(dhat$minimum,dhat$objective,pch=19,col="red",cex=2)
text(dhat$minimum,dhat$objective-.08,paste0("p-curve's estimate of effect size:\nd=",round(dhat$minimum,3)),col="red")
}
######################################################################################
# 10. Prepare Results for Return #####################################################
######################################################################################
# Get results
main.results = round(main.results, 3)
ktotal = round(main.results[1]) # Get the total number of inserted TEs
k.sign = round(main.results[2]) # Get the total number of significant TEs
k.025 = round(main.results[3]) # Get the number of p < .025 TEs
skew.full.z = main.results[4] # Get the Z-score for the full curve skewness test
skew.full.p = main.results[5] # Get the p-value for the full curve skewness test
flat.full.z = main.results[6] # Get the Z-score for the full curve flatness test
flat.full.p = main.results[7] # Get the p-value for the full curve flatness test
skew.half.z = main.results[8] # Get the Z-score for the half curve skewness test
skew.half.p = main.results[9] # Get the p-value for the half curve skewness test
flat.half.z = main.results[10] # Get the Z-score for the half curve flatness test
flat.half.p = main.results[11] # Get the p-value for the half curve flatness test
skew.binomial.p = round(binomial[3], 3) # Get the skewness binomial p-value
flat.binomial.p = round(binomial[4], 3) # Get the flatness binomial p-value
# Make data.frame
skewness = c(skew.binomial.p, skew.full.z, skew.full.p, skew.half.z, skew.half.p)
flatness = c(flat.binomial.p, flat.full.z, flat.full.p, flat.half.z, flat.half.p)
colnames.df = c("pBinomial", "zFull", "pFull", "zHalf", "pHalf")
rownames.df = c("Right-skewness test", "Flatness test")
pcurveResults = rbind(skewness, flatness)
colnames(pcurveResults) = colnames.df
rownames(pcurveResults) = rownames.df
# Power results
power_results = round(power_results, 3)
powerEstimate = power_results[2]
powerLower = power_results[1]
powerUpper = power_results[3]
Power = as.data.frame(cbind(powerEstimate, powerLower, powerUpper))
rownames(Power) = ""
# Presence and absence of evidential value
# - If the half p-curve test is right-skewed with p<.05 or both the half and full test
# are right-skewed with p<.1, then p-curve analysis indicates the presence of evidential value
# - Evidential value is inadequate or absent if the 33% power test is p<.05 for the full p-curve
# or both the half p-curve and binomial 33% power test are p<.1
if (skew.half.p < 0.05 | (skew.half.p < 0.1 & skew.full.p < 0.1)){
presence.ev = "yes"
} else {
presence.ev = "no"
}
if (flat.full.p < 0.05 | (flat.half.p < 0.1 & flat.binomial.p < 0.1)){
absence.ev = "yes"
} else {
absence.ev = "no"
}
# Plot Data
PlotData = round(table_figure, 3)
# Input Data
table_calc[,1] = NULL
colnames(table_calc) = c("p", "ppSkewFull", "ppSkewHalf", "ppFlatFull", "ppFlatHalf", "zSkewFull", "zSkewHalf",
"zFlatFull", "zFlatHalf")
Input = cbind(metaobject$TE, round(table_calc,3))
rownames(Input) = paste(1:length(metaobject$TE), metaobject$studlab)
colnames(Input)[1] = "TE"
if (effect.estimation==TRUE){
dEstimate = round(dhat$minimum, 3)
return.list = list("pcurveResults" = pcurveResults,
"Power" = Power,
"PlotData" = PlotData,
"Input" = Input,
"EvidencePresent" = presence.ev,
"EvidenceAbsent" = absence.ev,
"kInput" = ktot,
"kAnalyzed" = k.sign,
"kp0.25" = k.025,
"dEstimate" = dEstimate,
"I2" = metaobject$I2,
"class.meta.object" = class(metaobject)[1])
class(return.list) = c("pcurve", "effect.estimation")
} else {
return.list = list("pcurveResults" = pcurveResults,
"Power" = Power,
"PlotData" = PlotData,
"Input" = Input,
"EvidencePresent" = presence.ev,
"EvidenceAbsent" = absence.ev,
"kInput" = ktot,
"kAnalyzed" = k.sign,
"kp0.25" = k.025,
"I2" = metaobject$I2,
"class.meta.object" = class(metaobject)[1])
class(return.list) = c("pcurve", "no.effect.estimation")
}
cat(" ", "\n")
invisible(return.list)
return.list
}
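# Assumed usage (sketch; 'm.gen' is a hypothetical meta::metagen object and
# 'N.total' a hypothetical vector of total sample sizes, one per study).
# Note that the function relies on stringr::str_locate() and poibin::ppoibin().
# library(stringr); library(poibin)
# pc <- pcurve.bw(m.gen)
# pc <- pcurve.bw(m.gen, effect.estimation = TRUE, N = N.total, dmin = 0, dmax = 1)
# pc$pcurveResults; pc$Power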
mlm.variance.distribution.bw = function(x){
m = x
# Check class
if (!(class(m)[1] %in% c("rma.mv", "rma"))){
stop("x must be of class 'rma.mv'.")
}
# Check for three level model
if (m$sigma2s != 2){
stop("The model you provided does not seem to be a three-level model. This function can only be used for three-level models.")
}
# Check for right specification (nested model)
if (sum(grepl("/", as.character(m$random[[1]]))) < 1){
stop("Model must contain nested random effects. Did you use the '~ 1 | cluster/effect-within-cluster' notation in 'random'? See ?metafor::rma.mv for more details.")
}
# Get variance diagonal and calculate total variance
n = m$k.eff
vector.inv.var = 1/(diag(m$V))
sum.inv.var = sum(vector.inv.var)
sum.sq.inv.var = (sum.inv.var)^2
vector.inv.var.sq = 1/(diag(m$V)^2)
sum.inv.var.sq = sum(vector.inv.var.sq)
num = (n-1)*sum.inv.var
den = sum.sq.inv.var - sum.inv.var.sq
est.samp.var = num/den
# Calculate variance proportions
level1=((est.samp.var)/(m$sigma2[1]+m$sigma2[2]+est.samp.var)*100)
level2=((m$sigma2[2])/(m$sigma2[1]+m$sigma2[2]+est.samp.var)*100)
level3=((m$sigma2[1])/(m$sigma2[1]+m$sigma2[2]+est.samp.var)*100)
# Prepare df for return
Level=c("Level 1", "Level 2", "Level 3")
Variance=c(level1, level2, level3)
df.res=data.frame(Variance)
colnames(df.res) = c("% of total variance")
rownames(df.res) = Level
I2 = c("---", round(Variance[2:3], 2))
df.res = as.data.frame(cbind(df.res, I2))
totalI2 = Variance[2] + Variance[3]
# Generate plot
df1 = data.frame("Level" = c("Sampling Error", "Total Heterogeneity"),
"Variance" = c(df.res[1,1], df.res[2,1]+df.res[3,1]),
"Type" = rep(1,2))
df2 = data.frame("Level" = rownames(df.res),
"Variance" = df.res[,1],
"Type" = rep(2,3))
df = as.data.frame(rbind(df1, df2))
g = ggplot(df, aes(fill=Level, y=Variance, x=as.factor(Type))) +
coord_cartesian(ylim = c(0,1), clip = "off") +
geom_bar(stat="identity", position="fill", width = 1, color="black") +
scale_y_continuous(labels = scales::percent)+
theme(axis.title.x=element_blank(),
axis.text.y = element_text(color="black"),
axis.line.y = element_blank(),
axis.title.y=element_blank(),
axis.line.x = element_blank(),
axis.ticks.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.y = element_line(lineend = "round"),
legend.position = "none",
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
legend.background = element_rect(linetype="solid",
colour ="black"),
legend.title = element_blank(),
legend.key.size = unit(0.75,"cm"),
axis.ticks.length=unit(.25, "cm"),
plot.margin = unit(c(1,3,1,1), "lines")) +
scale_fill_manual(values = c("gray85", "gray90", "white", "gray85", "gray95")) +
# Add Annotation
# Total Variance
annotate("text", x = 1.5, y = 1.05,
label = paste("Total Variance:",
round(m$sigma2[1]+m$sigma2[2]+est.samp.var, 3))) +
# Sampling Error
annotate("text", x = 1, y = (df[1,2]/2+df[2,2])/100,
label = paste("Sampling Error Variance: \n", round(est.samp.var, 3)), size = 3) +
# Total I2
annotate("text", x = 1, y = ((df[2,2])/100)/2-0.02,
label = bquote("Total"~italic(I)^2*":"~.(round(df[2,2],2))*"%"), size = 3) +
annotate("text", x = 1, y = ((df[2,2])/100)/2+0.05,
label = paste("Variance not attributable \n to sampling error: \n", round(m$sigma2[1]+m$sigma2[2],3)), size = 3) +
# Level 1
annotate("text", x = 2, y = (df[1,2]/2+df[2,2])/100, label = paste("Level 1: \n",
round(df$Variance[3],2), "%", sep=""), size = 3) +
# Level 2
annotate("text", x = 2, y = (df[5,2]+(df[4,2]/2))/100,
label = bquote(italic(I)[Level2]^2*":"~.(round(df[4,2],2))*"%"), size = 3) +
# Level 3
annotate("text", x = 2, y = (df[5,2]/2)/100,
label = bquote(italic(I)[Level3]^2*":"~.(round(df[5,2],2))*"%"), size = 3)
print(df.res)
cat("Total I2: ", round(totalI2, 2), "% \n", sep="")
suppressWarnings(print(g))
invisible(df.res)
}
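# Assumed usage (sketch; 'm3' is a hypothetical three-level metafor::rma.mv model
# fitted with nested random effects, e.g. random = ~ 1 | cluster/es.id)
# m3 <- metafor::rma.mv(yi = d, V = var.d, random = ~ 1 | cluster/es.id, data = dat.3l)
# mlm.variance.distribution.bw(m3)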
library(ggplot2)
library(gridExtra)
direct.evidence.plot.bw <- function (x, random = FALSE, comparison.label.size = 2, numeric.label.size = 3,
subplot.ratio = c(5, 1.3, 1.3))
{
x = x
random = random
cts = comparison.label.size
nts = numeric.label.size
spr = subplot.ratio
if (class(x) != "netmeta") {
stop("Input to this function has to be an object of class 'netmeta' created by the 'netmeta::netmeta' function.")
}
measures = netmeasures(x, random = random)$proportion
indirect = 1 - measures
measures = data.frame(comparison = names(measures), direct = measures,
indirect = indirect)
rownames(measures) = c()
measures$direct = round(measures$direct, 4)
measures$indirect = round(measures$indirect, 4)
measures.reshape = with(measures, {
data.frame(comparison = rep(comparison, 2), variable = rep(c("direct",
"indirect"), each = nrow(measures)), value = c(direct,
indirect))
})
names = measures.reshape[measures.reshape$variable == "direct",
]$comparison
direct = measures.reshape[measures.reshape$variable == "direct",
]$value
names = names[order(match(names, direct))]
measures$comparison = factor(measures$comparison, levels = measures$comparison[rev(order(measures$direct))])
levels = levels(measures$comparison)
measures.reshape$comparison = factor(measures.reshape$comparison,
levels = levels)
PlotDirectEvidence = ggplot2::ggplot(measures.reshape, aes(x = factor(comparison,
levels = rev(levels(comparison))), fill = factor(variable,
levels = c("indirect", "direct")), y = value)) + geom_bar(stat = "identity",
position = "fill") + coord_flip() + theme_minimal() +
theme(legend.position = "left") + scale_y_continuous(labels = scales::percent) +
ylab("Percentage") + xlab("Network Estimate") + guides(fill = guide_legend(title = "Evidence")) +
scale_fill_manual(values = c("lightgray", "gray30")) +
geom_hline(aes(yintercept = 0.25), color = "white") +
geom_hline(aes(yintercept = 0.5), color = "white") +
geom_hline(aes(yintercept = 0.75), color = "white")
mpath = netmeasures(x, random = random)$meanpath
path.df = data.frame(comparison = names(mpath), mpath = mpath)
rownames(path.df) = c()
path.df$comparison = factor(path.df$comparison, levels = levels)
PlotMeanPathLength_s = ggplot2::ggplot(path.df, aes(x = factor(comparison,
levels = rev(levels(comparison))), y = mpath)) + geom_bar(stat = "identity",
fill = "lightgray") + coord_flip() + geom_hline(aes(yintercept = 2),
color = "gray30") + geom_text(aes(x = comparison, y = 0.4,
label = comparison), color = "gray23", size = cts) +
geom_text(aes(x = comparison, y = mpath + 0.1, label = round(mpath,
1)), size = nts) + ylab("Mean Path Length") + theme(axis.title.y = element_blank(),
axis.text.y = element_blank(), axis.ticks.y = element_blank(),
axis.ticks.x = element_blank(), panel.background = element_blank()) +
scale_x_discrete(position = "top")
mpar = netmeasures(x, random = random)$minpar
mpar.df = data.frame(comparison = names(mpar), mpar = mpar)
rownames(mpar.df) = c()
mpar.df$comparison = factor(mpar.df$comparison, levels = levels)
PlotMinimalParallelism_s = ggplot2::ggplot(mpar.df, aes(x = factor(comparison,
levels = rev(levels(comparison))), y = mpar)) + geom_bar(stat = "identity",
fill = "lightgray") + coord_flip() + geom_text(aes(x = comparison,
y = mpar + 0.1, label = round(mpar, 1)), size = nts) +
geom_text(aes(x = comparison, y = 0.4, label = comparison),
color = "gray23", size = cts) + ylab("Minimal Parallelism") +
theme(axis.ticks.y = element_blank(), axis.ticks.x = element_blank(),
axis.title.y = element_blank(), axis.text.y = element_blank(),
panel.background = element_blank())
data = data.frame(proportion.direct = measures$direct, proportion.indirect = measures$indirect,
meanpath = mpath, minpar = mpar)
if (random == FALSE) {
plot_title = "Direct evidence proportion for each network estimate (fixed-effect model)"
}
else {
plot_title = "Direct evidence proportion for each network estimate (random-effects model)"
}
grid = gridExtra::arrangeGrob(PlotDirectEvidence, PlotMinimalParallelism_s,
PlotMeanPathLength_s, ncol = 3, widths = spr, heights = c(4),
top = plot_title)
returnlist = list(data = data, plot = grid)
class(returnlist) = "direct.evidence.plot"
invisible(returnlist)
returnlist
}
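# Assumed usage (sketch): black-and-white version of the direct evidence plot for a
# netmeta object, mirroring the dmetar::direct.evidence.plot() call used earlier
# direct.evidence.plot.bw(m.netmeta, random = TRUE)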
1. Install:
# Restart R first
remotes::install_github("rstudio/bslib")
install.packages("downlit")
remotes::install_github("rstudio/bookdown")
# Afterwards: restart R again
2. Rules:
- _Italic_ text (used for emphasis) becomes **bold**
- \textsf{R} --> _R_
- _{package}_ --> **{package}**
- Boxes must be converted into blocks:
```{block, type='boxinfo'}.
- 'boxinfo': light bulb
- 'boximportant': exclamation mark
- 'boxquestion': question block
- 'boxdmetar': "dmetar" symbol
- 'boxreport': reporting
- Blocks spread over two lines: put everything into one block, remove the "!".
- Formulas do not work inside ^[footnotes]; where possible, use
  plain italic text instead (e.g. _p_ instead of $p$). If that is not possible,
  add the footnote as a new 'boxinfo' block directly below.
- Insert empty lines, where needed, with "<br></br>".
- New chapters: new Rmd, "empty document", then copy the contents in.
  The title must start with a number + "-"; keep the title as in the original and
  adjust the chapter number (differs from the book version).
- Images: here we use the colour versions. These are also located in "images".
  You only need to add "_col" to the file name.
- Cover images: every chapter gets a cover. It can be taken from pexels.com,
  or from the current online guide: https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/
  Crop the image to fit: 3342x1338 pixels. Save it in "_figs"!
- \vspace: remove it, this is only relevant for LaTeX!
- <br></br> before every major heading (# or ##)
- Tables: we have to do these with kableExtra, let me know when the first one comes up :)
  http://haozhu233.github.io/kableExtra/awesome_table_in_html.html#Overview
pcurve.bw.es = function(x, effect.estimation = FALSE, N, dmin = 0, dmax = 1){
# Rename x to metaobject, remove x
metaobject = x
rm(x)
# Stop if metaobject is not meta or does not contain TE or seTE column
if (!(class(metaobject)[1] %in% c("metagen", "metabin", "metacont", "metacor", "metainc", "meta", "metaprop"))){
for (i in 1:length(colnames(metaobject))){
te.exists = FALSE
if (colnames(metaobject)[i]=="TE"){
te.exists = TRUE
break
} else {
}
}
for (i in 1:length(colnames(metaobject))){
sete.exists = FALSE
if (colnames(metaobject)[i]=="seTE"){
sete.exists = TRUE
break
} else {
}
}
for (i in 1:length(colnames(metaobject))){
studlab.exists = FALSE
if (colnames(metaobject)[i]=="studlab"){
studlab.exists = TRUE
break
} else {
}
}
if(te.exists == FALSE | sete.exists ==FALSE | studlab.exists ==FALSE){
stop("x must be a meta-analysis object generated by meta functions or a data.frame with columns labeled studlab, TE, and seTE.")
}
}
#Disable scientific notation
options(scipen=999)
# Calculate Z
zvalues.input = abs(metaobject$TE/metaobject$seTE)
##############################################
# 1. Functions ###############################
##############################################
getncp.f =function(df1,df2, power) {
error = function(ncp_est, power, x, df1,df2) pf(x, df1 = df1, df2=df2, ncp = ncp_est) - (1-power)
xc=qf(p=.95, df1=df1,df2=df2)
return(uniroot(error, c(0, 1000), x = xc, df1 = df1,df2=df2, power=power)$root) }
getncp.c =function(df, power) {
xc=qchisq(p=.95, df=df)
error = function(ncp_est, power, x, df) pchisq(x, df = df, ncp = ncp_est) - (1-power)
return(uniroot(error, c(0, 1000), x = xc, df = df, power=power)$root) }
getncp=function(family,df1,df2,power) {
if (family=="f") ncp=getncp.f(df1=df1,df2=df2,power=power)
if (family=="c") ncp=getncp.c(df=df1,power=power)
return(ncp) }
percent <- function(x, digits = 0, format = "f", ...) {
paste(formatC(100 * x, format = format, digits = digits, ...), "%", sep = "")
}
pbound=function(p) pmin(pmax(p,2.2e-16),1-2.2e-16)
prop33=function(pc)
{
prop=ifelse(family=="f" & p<.05,1-pf(qf(1-pc,df1=df1, df2=df2),df1=df1, df2=df2, ncp=ncp33),NA)
prop=ifelse(family=="c" & p<.05,1-pchisq(qchisq(1-pc,df=df1), df=df1, ncp=ncp33),prop)
prop
}
stouffer=function(pp) sum(qnorm(pp),na.rm=TRUE)/sqrt(sum(!is.na(pp)))
###############################################################################
# 2. Process data ############################################################
###############################################################################
# Note: due to reliance on the pcurve-app function, z-scores are pasted into characters first
# and then screened to generate variables necessary for further computation
zvalues.input = paste("z=", zvalues.input, sep="")
filek = "input"
raw = zvalues.input
raw=tolower(raw)
ktot=length(raw)
k=seq(from=1,to=length(raw))
stat=substring(raw,1,1)
test=ifelse(stat=="r","t",stat)
# Create family
family=test
family=ifelse(test=="t","f",family)
family=ifelse(test=="z","c",family)
#family: f,c converting t-->f and z-->c
# Find comma,parentheses,equal sign
par1 =str_locate(raw,"\\(")[,1]
par2 =str_locate(raw,"\\)")[,1]
comma=str_locate(raw,",")[,1]
eq =str_locate(raw,"=")[,1]
# DF for t-tests
df=as.numeric(ifelse(test=="t",substring(raw,par1+1,par2 -1),NA))
# DF1
df1=as.numeric(ifelse(test=="f",substring(raw,par1+1,comma-1),NA))
df1=as.numeric(ifelse(test=="z",1,df1))
df1=as.numeric(ifelse(test=="t",1,df1))
df1=as.numeric(ifelse(test=="c",substring(raw,par1+1,par2 -1),df1))
# DF2
df2=as.numeric(ifelse(test=="f",substring(raw,comma+1,par2-1),NA))
df2=as.numeric(ifelse(test=="t",df,df2))
equal=abs(as.numeric(substring(raw,eq+1)))
value=ifelse((stat=="f" | stat=="c"),equal,NA)
value=ifelse(stat=="r", (equal/(sqrt((1-equal**2)/df2)))**2,value)
value=ifelse(stat=="t", equal**2 ,value)
value=ifelse(stat=="z", equal**2 ,value)
p=ifelse(family=="f",1-pf(value,df1=df1,df2=df2),NA)
p=ifelse(family=="c",1-pchisq(value,df=df1),p)
p=pbound(p) #Bound it to level of precision, see function 3 above
ksig= sum(p<.05,na.rm=TRUE) #significant studies
khalf=sum(p<.025,na.rm=TRUE) #half p-curve studies
if (ksig <= 2){
stop("Two or less significant (p<0.05) effect sizes were detected, so p-curve analysis cannot be conducted.")
}
##############################################################################
# 3. PP-values ###############################################################
##############################################################################
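  # pp-values express each significant p-value as its percentile under a given null:
  # under the null of no effect, significant p-values are uniform on (0, .05), so
  # pp = p/.05 = 20*p is uniform on (0, 1); for the half p-curve (p < .025),
  # pp = p/.025 = 40*p.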
# Right Skew, Full p-curve
ppr=as.numeric(ifelse(p<.05,20*p,NA))
ppr=pbound(ppr)
# Right Skew, half p-curve
ppr.half=as.numeric(ifelse(p<.025,40*p,NA))
ppr.half=pbound(ppr.half)
# Power of 33%
ncp33=mapply(getncp,df1=df1,df2=df2,power=1/3,family=family)
# Full-p-curve
pp33=ifelse(family=="f" & p<.05,3*(pf(value, df1=df1, df2=df2, ncp=ncp33)-2/3),NA)
pp33=ifelse(family=="c" & p<.05,3*(pchisq(value, df=df1, ncp=ncp33)-2/3),pp33)
pp33=pbound(pp33)
# half p-curve
prop25=3*prop33(.025)
prop25.sig=prop25[p<.05]
#Compute pp-values for the half
pp33.half=ifelse(family=="f" & p<.025, (1/prop25)*(pf(value,df1=df1,df2=df2,ncp=ncp33)-(1-prop25)),NA)
pp33.half=ifelse(family=="c" & p<.025, (1/prop25)*(pchisq(value,df=df1, ncp=ncp33)-(1-prop25)),pp33.half)
pp33.half=pbound(pp33.half)
##############################################################################
# 4. Stouffer & Binomial test ################################################
##############################################################################
# Convert pp-values to Z scores, using Stouffer function above
Zppr = stouffer(ppr)
Zpp33 = stouffer(pp33)
Zppr.half = stouffer(ppr.half)
Zpp33.half = stouffer(pp33.half)
# Overall p-values from Stouffer test
p.Zppr = pnorm(Zppr)
p.Zpp33 = pnorm(Zpp33)
p.Zppr.half = pnorm(Zppr.half)
p.Zpp33.half = pnorm(Zpp33.half)
# Save results to file
main.results=as.numeric(c(ktot, ksig, khalf, Zppr,
p.Zppr, Zpp33, p.Zpp33, Zppr.half,
p.Zppr.half, Zpp33.half, p.Zpp33.half))
# BINOMIAL
# Observed share of p<.025
prop25.obs=sum(p<.025)/sum(p<.05)
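  # Two binomial tests on the share of p < .025 results among significant ones:
  # binom.r  = P(observing at least this many p < .025 results | flat p-curve),
  #            where each significant p has probability .5 of falling below .025;
  # binom.33 = P(observing at most this many | 33% power), via the
  #            Poisson-binomial CDF with study-specific probabilities prop25.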
# Flat null
binom.r=1-pbinom(q=prop25.obs*ksig- 1, prob=.5, size=ksig)
# Power of 33% null
binom.33=ppoibin(kk=prop25.obs*ksig,pp=prop25[p<.05])
# Save binomial results
binomial=c(mean(prop25.sig), prop25.obs, binom.r, binom.33)
  # Beautifier function (formats p-values for display)
cleanp=function(p)
{
p.clean=round(p,4) #Round it
p.clean=substr(p.clean,2,6) #Drop the 0
p.clean=paste0("= ",p.clean)
if (p < .0001) p.clean= " < .0001"
if (p > .9999) p.clean= " > .9999"
return(p.clean)
}
  #If there are zero p<.025 results, replace the Stouffer values for the half-p-curve tests with "N/A" messages
if (khalf==0) {
Zppr.half ="N/A"
p.Zppr.half ="=N/A"
Zpp33.half ="N/A"
p.Zpp33.half ="=N/A"
}
  #If there is at least one p<.025 result, round the Z values and beautify the p-values
if (khalf>0) {
Zppr.half =round(Zppr.half,2)
Zpp33.half =round(Zpp33.half,2)
p.Zppr.half=cleanp(p.Zppr.half)
p.Zpp33.half=cleanp(p.Zpp33.half)
}
#Clean results for full test
Zppr=round(Zppr,2)
Zpp33=round(Zpp33,2)
p.Zppr=cleanp(p.Zppr)
p.Zpp33=cleanp(p.Zpp33)
binom.r=cleanp(binom.r)
binom.33=cleanp(binom.33)
################################################
# 5. Power ####################################
################################################
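  # powerfit: for a candidate level of power, recompute the pp-values of the
  # significant results under that power and return their Stouffer Z; a Z near
  # zero means that level of power is consistent with the observed p-curve.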
powerfit=function(power_est)
{
ncp_est=mapply(getncp,df1=df1,df2=df2,power=power_est,family=family)
pp_est=ifelse(family=="f" & p<.05,(pf(value,df1=df1,df2=df2,ncp=ncp_est)-(1-power_est))/power_est,NA)
pp_est=ifelse(family=="c" & p<.05,(pchisq(value,df=df1,ncp=ncp_est)-(1-power_est))/power_est,pp_est)
pp_est=pbound(pp_est)
return(stouffer(pp_est))
}
fit=c()
fit=abs(powerfit(.051))
for (i in 6:99) fit=c(fit,abs(powerfit(i/100)))
mini=match(min(fit,na.rm=TRUE),fit)
hat=(mini+4)/100
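  # hat: point estimate of the average power, i.e. the grid value (5% to 99% in
  # 1% steps) with the smallest absolute Stouffer Z.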
x.power=seq(from=5,to=99)/100
get.power_pct =function(pct) {
#Function that finds power that gives p-value=pct for the Stouffer test
#for example, get.power_pct(.5) returns the level of power that leads to p=.5 for the stouffer test.
#half the time we would see p-curves more right skewed than the one we see, and half the time
#less right-skewed, if the true power were that get.power_pct(.5). So it is the median estimate of power
#similarliy, get.power_pct(.1) gives the 10th percentile estimate of power...
#Obtain the normalized equivalent of pct, e.g., for 5% it is -1.64, for 95% it is 1.64
z=qnorm(pct) #convert to z because powerfit() outputs a z-score.
#Quantify gap between computed p-value and desired pct
error = function(power_est, z) powerfit(power_est) - z
#Find the value of power that makes that gap zero, (root)
return(uniroot(error, c(.0501, .99),z)$root) }
# Boundary conditions
  p.power.05=pnorm(powerfit(.051)) #Probability the p-curve would be at least as right-skewed as observed if power=.051
  p.power.99=pnorm(powerfit(.99))  #Probability the p-curve would be at least as right-skewed as observed if power=.99
# Lower end of ci
if (p.power.05<=.95) power.ci.lb=.05
if (p.power.99>=.95) power.ci.lb=.99
if (p.power.05>.95 && p.power.99<.95) power.ci.lb=get.power_pct(.95)
# Higher end of CI
if (p.power.05<=.05) power.ci.ub=.05
if (p.power.99>=.05) power.ci.ub=.99
if (p.power.05>.05 && p.power.99<.05) power.ci.ub=get.power_pct(.05)
# Save power fit
power_results=c(power.ci.lb,hat,power.ci.ub)
##############################################################################
# 6. Plot ###################################################################
##############################################################################
# Green line (Expected p-curve for 33% power)
# gcdf1=prop33(.01)
# gcdf2=prop33(.02)
# gcdf3=prop33(.03)
# gcdf4=prop33(.04)
#
# green1=mean(gcdf1,na.rm=TRUE)*3
# green2=mean(gcdf2-gcdf1,na.rm=TRUE)*3
# green3=mean(gcdf3-gcdf2,na.rm=TRUE)*3
# green4=mean(gcdf4-gcdf3,na.rm=TRUE)*3
# green5=mean(1/3-gcdf4,na.rm=TRUE)*3
# green=100*c(green1,green2,green3,green4,green5)
#
#
# # Blue line (observed p-curve)
# ps=ceiling(p[p<.05]*100)/100
# blue=c()
# for (i in c(.01,.02,.03,.04,.05)) blue=c(blue,sum(ps==i,na.rm=TRUE)/ksig*100)
#
#
# # Red line
# red=c(20,20,20,20,20)
#
# # Make the graph
# x = c(.01,.02,.03,.04,.05)
# par(mar=c(6,5.5,1.5,3))
# moveup=max(max(blue[2:5])-66,0)
# ylim=c(0,105+moveup)
# legend.top=100+moveup
# plot(x,blue, type='l', col='black', main="",
# lwd=2, xlab="", ylab="", xaxt="n",yaxt="n", xlim=c(0.01,0.051),
# ylim=ylim, bty='L', las=1,axes=F)
# x_=c(".01",".02",".03",".04",".05")
# axis(1,at=x,labels=x_)
# y_=c("0%","25%","50%","75%","100%")
# y=c(0,25,50,75,100)
# axis(2,at=y,labels=y_,las=1,cex.axis=1.2)
# mtext("Percentage of test results",font=2,side=2,line=3.85,cex=1.25)
# mtext("p ",font=4,side=1,line=2.3,cex=1.25)
# mtext(" -value", font=2,side=1,line=2.3,cex=1.25)
# points(x,blue,type="p",pch=20,bg="black",col="black")
# text(x+.00075,blue+3.5,percent(round(blue)/100),col='black', cex=.75)
# lines(x,red, type='l', col='gray80', lwd=1.5, lty=3)
# lines(x,green, type='l', col='gray80', lwd=1.5, lty=5)
# tab1=.017 #Labels for line at p=.023 in x-axis
  #   tab2=tab1+.0015    #Test results and power estimates at tab1+.0015
# gap1=9 #between labels
  #   gap2=4             #between label and respective test (e.g., "Observed p-curve" and "power estimate")
# font.col='gray'
# text.blue=paste0("Power estimate: ",percent(hat),", CI(",
# percent(power.ci.lb),",",
# percent(power.ci.ub),")")
# text(tab1,legend.top, adj=0,cex=.85,bquote("Observed "*italic(p)*"-curve"))
# text(tab2,legend.top-gap2,adj=0,cex=.68,text.blue,col=font.col)
# text.red=bquote("Tests for right-skewness: "*italic(p)*""[Full]~.(p.Zppr)*", "*italic(p)*""[Half]~.(p.Zppr.half))
# #note: .() within bquote prints the value rather than the variable name
# text(tab1,legend.top-gap1, adj=0,cex=.85, "Null of no effect" )
# text(tab2,legend.top-gap1-gap2, adj=0,cex=.68, text.red, col=font.col )
# text.green=bquote("Tests for flatness: "*italic(p)*""[Full]~.(p.Zpp33)*", "*italic(p)*""[half]~.(p.Zpp33.half)*", "*italic(p)*""[Binomial]~.(binom.33))
# text(tab1,legend.top-2*gap1, adj=0,cex=.85,"Null of 33% power")
# text(tab2,legend.top-2*gap1-gap2, adj=0,cex=.68,text.green,col=font.col)
# segments(x0=tab1-.005,x1=tab1-.001,y0=legend.top,y1=legend.top, col='black',lty=1,lwd=1.5)
# segments(x0=tab1-.005,x1=tab1-.001,y0=legend.top-gap1, y1=legend.top-gap1,col='gray',lty=3,lwd=1.5)
# segments(x0=tab1-.005,x1=tab1-.001,y0=legend.top-2*gap1,y1=legend.top-2*gap1,col='gray',lty=2,lwd=1.5)
# rect(tab1-.0065,legend.top-2*gap1-gap2-3,tab1+.032,legend.top+3,border='gray')
# msgx=bquote("Note: The observed "*italic(p)*"-curve includes "*.(ksig)*
# " statistically significant ("*italic(p)*" < .05) results, of which "*.(khalf)*
# " are "*italic(p)*" < .025.")
# mtext(msgx,side=1,line=4,cex=.65,adj=0)
# kns=ktot-ksig
# if (kns==0) ns_msg="There were no non-significant results entered."
# if (kns==1) ns_msg=bquote("There was one additional result entered but excluded from "*italic(p)*"-curve because it was "*italic(p)*" > .05.")
# if (kns>1) ns_msg=bquote("There were "*.(kns)*" additional results entered but excluded from "*italic(p)*"-curve because they were "*italic(p)*" > .05.")
# mtext(ns_msg,side=1,line=4.75,cex=.65,adj=0)
##############################################################################
# 7 Save Calculations #######################################################
##############################################################################
# # table_calc
# table_calc=data.frame(raw, p, ppr, ppr.half, pp33, pp33.half,
# qnorm(ppr), qnorm(ppr.half), qnorm(pp33), qnorm(pp33.half))
# headers1=c("Entered statistic","p-value", "ppr", "ppr half", "pp33%","pp33 half",
# "Z-R","Z-R half","Z-33","z-33 half")
# table_calc=setNames(table_calc,headers1)
#
# # table_figure
# headers2=c("p-value","Observed (blue)","Power 33% (Green)", "Flat (Red)")
# table_figure=setNames(data.frame(x,blue,green,red),headers2)
################################################
# 8. Cumulative p-curves (Deprecated) ##########
################################################
  #8.1 FUNCTION THAT RECOMPUTES OVERALL STOUFFER TEST WITHOUT (K) MOST EXTREME VALUES, ADJUSTING THE UNIFORM TO ONLY INCLUDE RANGE THAT REMAINS
dropk=function(pp,k,droplow)
{
#Syntax:
#pp: set of pp-values to analyze sensitivity to most extremes
#k: # of most extreme values to exclude
    #droplow: 1 to drop smallest, 0 to drop largest
pp=pp[!is.na(pp)] #Drop missing values
n=length(pp) #See how many studies are left
pp=sort(pp) #Sort the pp-value from small to large
if (k==0) ppk=pp #If k=0 do nothing for nothing is being dropped
#If we are dropping low values
if (droplow==1 & k>0)
{
#Eliminate lowest k from the vector of pp-values
ppk=(pp[(1+k):n])
ppmin=min(pp[k],k/(n+1)) #Boundary used to define possible range of values after exclusion
ppk=(ppk-ppmin)/(1-ppmin) #Take the k+1 smallest pp-value up to the highest, subtract from each the boundary value, divide by the range, ~U(0,1) under the null
#This is explained in Supplement 1 of Simonsohn, Simmons Nelson, JEPG 2016 "Better p-curves" paper. See https://osf.io/mbw5g/
}
#If we are dropping high values
if (droplow==0 & k>0)
{
      #Eliminate highest k from the vector of pp-values
ppk=pp[1:(n-k)]
ppmax=max(pp[n-k+1],(n-k)/(n+1)) #Find new boundary of range
ppk=ppk/ppmax #Redefine range to make U(0,1)
}
#In case of a tie with two identical values we would have the ppk be 0 or 1, let's replace that with almost 0 and almost 1
    ppk=pmax(ppk,.00001) #Floor the redefined pp-values at .00001; avoids a pp-value of exactly 0 when the dropped p-value is "equal" to the next highest
    ppk=pmin(ppk,.99999) #Cap the redefined pp-values at .99999 for the same reason
Z=sum(qnorm(ppk))/sqrt(n-k)
return(pnorm(Z))
} #End function dropk
  #8.2 Apply function, in loop with increasing number of exclusions, to full p-curve
#Empty vectors for results
droplow.r=droplow.33=drophigh.r=drophigh.33=c()
#Loop over full p-curves
for (i in 0:(round(ksig/2)-1))
{
#Drop the lowest k studies in terms of respective overall test
#Right skew
droplow.r= c(droplow.r, dropk(pp=ppr,k=i,droplow=1))
drophigh.r=c(drophigh.r, dropk(pp=ppr,k=i,droplow=0))
#Power of 33%
droplow.33=c(droplow.33, dropk(pp=pp33,k=i,droplow=1))
drophigh.33=c(drophigh.33, dropk(pp=pp33,k=i,droplow=0))
}
#Half p-curves
if (khalf>0)
{
droplow.halfr=drophigh.halfr=c()
for (i in 0:(round(khalf/2)-1))
{
#Drop the lowest k studies in terms of respective overall test
droplow.halfr= c(droplow.halfr, dropk(pp=ppr.half,k=i,droplow=1))
drophigh.halfr=c(drophigh.halfr, dropk(pp=ppr.half,k=i,droplow=0))
} #End loop
}#End if that runs calculations only if khalf>0
  #8.3 FUNCTION THAT DOES THE PLOT OF RESULTS
plotdrop=function(var,col)
{
k=length(var)
#Plot the dots
plot(0:(k-1),var,xlab="",ylab="",type="b",yaxt="n",xaxt="n",main="",
cex.main=1.15,ylim=c(0,1),col=col)
#Add marker in results with 0 drops
points(0,var[1],pch=19,cex=1.6)
#Red line at p=.05
abline(h=.05,col="red")
#Y-axis value labels
    axis(2,c(.05,2:9/10),labels=c('.05','.2','.3','.4','.5','.6','.7','.8','.9'),las=1,cex.axis=1.5)
axis(1,c(0:(k-1)),las=1,cex.axis=1.4)
}
######################################################################################
# 9. Effect Estimation ###############################################################
######################################################################################
if (effect.estimation == TRUE){
# Define ci.to.t function
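    # ci.to.t converts each study's effect size and 95% CI into an approximate
    # t-value: CI width -> standard error -> z -> two-sided p (via the
    # approximation p = exp(-0.717*z - 0.416*z^2)) -> Cohen's d -> t, given the
    # sample size n.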
ci.to.t = function(TE, lower, upper, n){
z.to.d = function(z, n){
d = (2*z)/sqrt(n)
return(abs(d))
}
ci.to.p = function(est, lower, upper){
SE = (upper-lower)/(2*1.96)
z = abs(est/SE)
p = exp(-0.717*z - 0.416*z^2)
return(p)
}
d.to.t = function(d, n){
df = n-2
t = (d*sqrt(df))/2
return(t)
}
p = ci.to.p(TE, lower, upper)
z = abs(qnorm(p/2))
d = z.to.d(z, n)
t = d.to.t(d, n)
return(t)
}
#Function 13 - loss function
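    # loss(): assume d_est is the true effect, recompute the pp-values that the
    # significant t-values would then have, and return the Kolmogorov-Smirnov D
    # statistic of those pp-values against the uniform distribution; the d that
    # minimizes this distance is p-curve's effect size estimate.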
loss=function(t_obs,df_obs,d_est) {
#1.Convert all ts to the same sign (for justification see Supplement 5)
t_obs=abs(t_obs)
#2 Compute p-values
p_obs=2*(1-pt(t_obs,df=df_obs))
#3 Keep significant t-values and corresponding df.
t.sig=subset(t_obs,p_obs<.05)
df.sig=subset(df_obs,p_obs<.05)
#4.Compute non-centrality parameter implied by d_est and df_obs
      #df+2 is the total N.
      #Because the noncentrality parameter for the t distribution is ncp=sqrt(n/2)*d,
      #we add 2 to the df to get N, divide by 2 to get n, and by 2 again inside the square root, so ncp_est=sqrt((df+2)/4)*d_est
ncp_est=sqrt((df.sig+2)/4)*d_est
#5.Find critical t-value for p=.05 (two-sided)
#this is used below to compute power, it is a vector as different tests have different dfs
#and hence different critical values
tc=qt(.975,df.sig)
      #6.Find power for ncp given tc; again, this is a vector of implied power, for ncp_est, for each test
      power_est=1-pt(tc,df.sig,ncp_est)
      #7.Compute pp-values
      #7.1 First get the overall probability of a t>tobs, given ncp
      p_larger=pt(t.sig,df=df.sig,ncp=ncp_est)
      #7.2 Now, condition on p<.05
      ppr=(p_larger-(1-power_est))/power_est #this is the pp-value for right-skew
      #8. Compute the gap between the distribution of observed pp-values and a uniform distribution 0,1
KSD=ks.test(ppr,punif)$statistic #this is the D statistic outputted by the KS test against uniform
return(KSD)
}
if(missing(N)){
stop("If 'effect.estimation=TRUE', argument 'N' must be provided.")
}
if (length(N) != length(metaobject$TE)){
stop("N must be of same length as the number of studies contained in x.")
}
lower = metaobject$TE - (metaobject$seTE*1.96)
upper = metaobject$TE + (metaobject$seTE*1.96)
t_obs = ci.to.t(metaobject$TE, lower, upper, N)
df_obs = N-2
#Results will be stored in these vectors, create them first
loss.all=c()
di=c()
#Compute loss for effect sizes between d=c(dmin,dmax) in steps of .01
for (i in 0:((dmax-dmin)*100))
{
d=dmin+i/100 #effect size being considered
di=c(di,d) #add it to the vector (kind of silly, but kept for symmetry)
      options(warn=-1) #turn off warnings because R does not like its own pt() function!
      loss.all=c(loss.all,loss(df_obs=df_obs,t_obs=t_obs,d_est=d))
      #apply the loss function to that effect size and store the result
options(warn=0) #turn warnings back on
}
#find the effect leading to smallest loss in that set, that becomes the starting point in the optimize command
    imin=match(min(loss.all),loss.all)    #which tested effect size led to the overall minimum?
dstart=dmin+imin/100 #convert that i into a d.
#optimize around the global minimum
dhat=optimize(loss,c(dstart-.1,dstart+.1), df_obs=df_obs,t_obs=t_obs)
    options(warn=0) #turn warnings back on
#Plot results
plot(di,loss.all,xlab="Effect size\nCohen-d", ylab="Loss (D stat in KS test)",ylim=c(0,1), main="How well does each effect size fit? (lower is better)")
points(dhat$minimum,dhat$objective,pch=19,col="black",cex=2)
text(dhat$minimum,dhat$objective-.08,paste0("p-curve's estimate of effect size:\nd=",round(dhat$minimum,3)),col="black")
}
######################################################################################
# 10. Prepare Results for Return #####################################################
######################################################################################
# Get results
main.results = round(main.results, 3)
ktotal = round(main.results[1]) # Get the total number of inserted TEs
k.sign = round(main.results[2]) # Get the total number of significant TEs
  k.025 = round(main.results[3]) # Get the number of p<0.025 TEs
skew.full.z = main.results[4] # Get the Z-score for the full curve skewness test
skew.full.p = main.results[5] # Get the p-value for the full curve skewness test
flat.full.z = main.results[6] # Get the Z-score for the full curve flatness test
flat.full.p = main.results[7] # Get the p-value for the full curve flatness test
skew.half.z = main.results[8] # Get the Z-score for the half curve skewness test
skew.half.p = main.results[9] # Get the p-value for the half curve skewness test
flat.half.z = main.results[10] # Get the Z-score for the half curve flatness test
flat.half.p = main.results[11] # Get the p-value for the half curve flatness test
skew.binomial.p = round(binomial[3], 3) # Get the skewness binomial p-value
flat.binomial.p = round(binomial[4], 3) # Get the flatness binomial p-value
# Make data.frame
skewness = c(skew.binomial.p, skew.full.z, skew.full.p, skew.half.z, skew.half.p)
flatness = c(flat.binomial.p, flat.full.z, flat.full.p, flat.half.z, flat.half.p)
colnames.df = c("pBinomial", "zFull", "pFull", "zHalf", "pHalf")
rownames.df = c("Right-skewness test", "Flatness test")
pcurveResults = rbind(skewness, flatness)
colnames(pcurveResults) = colnames.df
rownames(pcurveResults) = rownames.df
# Power results
power_results = round(power_results, 3)
powerEstimate = power_results[2]
powerLower = power_results[1]
powerUpper = power_results[3]
Power = as.data.frame(cbind(powerEstimate, powerLower, powerUpper))
rownames(Power) = ""
# Presence and absence of evidential value
# - If the half p-curve test is right-skewed with p<.05 or both the half and full test
# are right-skewed with p<.1, then p-curve analysis indicates the presence of evidential value
# - Evidential value is inadequate or absent if the 33% power test is p<.05 for the full p-curve
# or both the half p-curve and binomial 33% power test are p<.1
if (skew.half.p < 0.05 | (skew.half.p < 0.1 & skew.full.p < 0.1)){
presence.ev = "yes"
} else {
presence.ev = "no"
}
if (flat.full.p < 0.05 | (flat.half.p < 0.1 & flat.binomial.p < 0.1)){
absence.ev = "yes"
} else {
absence.ev = "no"
}
# Plot Data
# PlotData = round(table_figure, 3)
# Input Data
# table_calc[,1] = NULL
# colnames(table_calc) = c("p", "ppSkewFull", "ppSkewHalf", "ppFlatFull", "ppFlatHalf", "zSkewFull", "zSkewHalf",
# "zFlatFull", "zFlatHalf")
# Input = cbind(metaobject$TE, round(table_calc,3))
# rownames(Input) = paste(1:length(metaobject$TE), metaobject$studlab)
# colnames(Input)[1] = "TE"
if (effect.estimation==TRUE){
dEstimate = round(dhat$minimum, 3)
return.list = list("pcurveResults" = pcurveResults,
"Power" = Power,
#"PlotData" = PlotData,
#"Input" = Input,
"EvidencePresent" = presence.ev,
"EvidenceAbsent" = absence.ev,
"kInput" = ktot,
"kAnalyzed" = k.sign,
"kp0.25" = k.025,
"dEstimate" = dEstimate,
"I2" = metaobject$I2,
"class.meta.object" = class(metaobject)[1])
class(return.list) = c("pcurve", "effect.estimation")
} else {
return.list = list("pcurveResults" = pcurveResults,
"Power" = Power,
#"PlotData" = PlotData,
#"Input" = Input,
"EvidencePresent" = presence.ev,
"EvidenceAbsent" = absence.ev,
"kInput" = ktot,
"kAnalyzed" = k.sign,
"kp0.25" = k.025,
"I2" = metaobject$I2,
"class.meta.object" = class(metaobject)[1])
class(return.list) = c("pcurve", "no.effect.estimation")
}
cat(" ", "\n")
invisible(return.list)
return.list
}
| {
"repo_name": "MathiasHarrer/Doing-Meta-Analysis-in-R",
"stars": "272",
"repo_language": "R",
"file_name": "pcurve.bw.es.R",
"mime_type": "text/plain"
} |
# Returns nixpkgs with the overlay from this repo applied.
import <nixpkgs> { overlays = [ (import ./overlay.nix) ]; }
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
final: prev:
rec {
esp-idf-full = prev.callPackage ./pkgs/esp-idf { };
esp-idf-esp32 = esp-idf-full.override {
toolsToInclude = [
"xtensa-esp32-elf"
"esp32ulp-elf"
"openocd-esp32"
"xtensa-esp-elf-gdb"
];
};
esp-idf-riscv = esp-idf-full.override {
toolsToInclude = [
"riscv32-esp-elf"
"openocd-esp32"
"riscv32-esp-elf-gdb"
];
};
esp-idf-esp32c3 = esp-idf-riscv;
esp-idf-esp32s2 = esp-idf-full.override {
toolsToInclude = [
"xtensa-esp32s2-elf"
"esp32ulp-elf"
"openocd-esp32"
"xtensa-esp-elf-gdb"
];
};
esp-idf-esp32s3 = esp-idf-full.override {
toolsToInclude = [
"xtensa-esp32s3-elf"
"esp32ulp-elf"
"openocd-esp32"
"xtensa-esp-elf-gdb"
];
};
esp-idf-esp32c6 = esp-idf-riscv;
esp-idf-esp32h2 = esp-idf-riscv;
# ESP8266
gcc-xtensa-lx106-elf-bin = prev.callPackage ./pkgs/esp8266-rtos-sdk/esp8266-toolchain-bin.nix { };
esp8266-rtos-sdk = prev.callPackage ./pkgs/esp8266-rtos-sdk/esp8266-rtos-sdk.nix { };
}
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
{
description = "ESP8266/ESP32 development tools";
inputs = {
nixpkgs.url = "nixpkgs/nixpkgs-unstable";
flake-utils.url = "github:numtide/flake-utils";
};
outputs = { self, nixpkgs, flake-utils }: {
overlays.default = import ./overlay.nix;
} // flake-utils.lib.eachSystem [ "x86_64-linux" ] (system:
let
pkgs = import nixpkgs { inherit system; overlays = [ self.overlays.default ]; };
in
{
packages = {
inherit (pkgs)
esp-idf-full
esp-idf-esp32c3
esp-idf-esp32s2
esp-idf-esp32s3
esp-idf-esp32c6
esp-idf-esp32h2
gcc-xtensa-lx106-elf-bin
esp8266-rtos-sdk;
};
devShells = {
esp-idf-full = import ./shells/esp-idf-full.nix { inherit pkgs; };
esp32-idf = import ./shells/esp32-idf.nix { inherit pkgs; };
esp32c3-idf = import ./shells/esp32c3-idf.nix { inherit pkgs; };
esp32s2-idf = import ./shells/esp32s2-idf.nix { inherit pkgs; };
esp32s3-idf = import ./shells/esp32s3-idf.nix { inherit pkgs; };
esp32c6-idf = import ./shells/esp32c6-idf.nix { inherit pkgs; };
esp32h2-idf = import ./shells/esp32h2-idf.nix { inherit pkgs; };
esp8266-rtos-sdk = import ./shells/esp8266-rtos-sdk.nix { inherit pkgs; };
};
checks = (import ./tests/build-idf-examples.nix { inherit pkgs; }) // (import ./tests/build-esp8266-example.nix { inherit pkgs; });
});
}
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
CC0 1.0 Universal
Statement of Purpose
The laws of most jurisdictions throughout the world automatically confer
exclusive Copyright and Related Rights (defined below) upon the creator and
subsequent owner(s) (each and all, an "owner") of an original work of
authorship and/or a database (each, a "Work").
Certain owners wish to permanently relinquish those rights to a Work for the
purpose of contributing to a commons of creative, cultural and scientific
works ("Commons") that the public can reliably and without fear of later
claims of infringement build upon, modify, incorporate in other works, reuse
and redistribute as freely as possible in any form whatsoever and for any
purposes, including without limitation commercial purposes. These owners may
contribute to the Commons to promote the ideal of a free culture and the
further production of creative, cultural and scientific works, or to gain
reputation or greater distribution for their Work in part through the use and
efforts of others.
For these and/or other purposes and motivations, and without any expectation
of additional consideration or compensation, the person associating CC0 with a
Work (the "Affirmer"), to the extent that he or she is an owner of Copyright
and Related Rights in the Work, voluntarily elects to apply CC0 to the Work
and publicly distribute the Work under its terms, with knowledge of his or her
Copyright and Related Rights in the Work and the meaning and intended legal
effect of CC0 on those rights.
1. Copyright and Related Rights. A Work made available under CC0 may be
protected by copyright and related or neighboring rights ("Copyright and
Related Rights"). Copyright and Related Rights include, but are not limited
to, the following:
i. the right to reproduce, adapt, distribute, perform, display, communicate,
and translate a Work;
ii. moral rights retained by the original author(s) and/or performer(s);
iii. publicity and privacy rights pertaining to a person's image or likeness
depicted in a Work;
iv. rights protecting against unfair competition in regards to a Work,
subject to the limitations in paragraph 4(a), below;
v. rights protecting the extraction, dissemination, use and reuse of data in
a Work;
vi. database rights (such as those arising under Directive 96/9/EC of the
European Parliament and of the Council of 11 March 1996 on the legal
protection of databases, and under any national implementation thereof,
including any amended or successor version of such directive); and
vii. other similar, equivalent or corresponding rights throughout the world
based on applicable law or treaty, and any national implementations thereof.
2. Waiver. To the greatest extent permitted by, but not in contravention of,
applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and
unconditionally waives, abandons, and surrenders all of Affirmer's Copyright
and Related Rights and associated claims and causes of action, whether now
known or unknown (including existing as well as future claims and causes of
action), in the Work (i) in all territories worldwide, (ii) for the maximum
duration provided by applicable law or treaty (including future time
extensions), (iii) in any current or future medium and for any number of
copies, and (iv) for any purpose whatsoever, including without limitation
commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes
the Waiver for the benefit of each member of the public at large and to the
detriment of Affirmer's heirs and successors, fully intending that such Waiver
shall not be subject to revocation, rescission, cancellation, termination, or
any other legal or equitable action to disrupt the quiet enjoyment of the Work
by the public as contemplated by Affirmer's express Statement of Purpose.
3. Public License Fallback. Should any part of the Waiver for any reason be
judged legally invalid or ineffective under applicable law, then the Waiver
shall be preserved to the maximum extent permitted taking into account
Affirmer's express Statement of Purpose. In addition, to the extent the Waiver
is so judged Affirmer hereby grants to each affected person a royalty-free,
non transferable, non sublicensable, non exclusive, irrevocable and
unconditional license to exercise Affirmer's Copyright and Related Rights in
the Work (i) in all territories worldwide, (ii) for the maximum duration
provided by applicable law or treaty (including future time extensions), (iii)
in any current or future medium and for any number of copies, and (iv) for any
purpose whatsoever, including without limitation commercial, advertising or
promotional purposes (the "License"). The License shall be deemed effective as
of the date CC0 was applied by Affirmer to the Work. Should any part of the
License for any reason be judged legally invalid or ineffective under
applicable law, such partial invalidity or ineffectiveness shall not
invalidate the remainder of the License, and in such case Affirmer hereby
affirms that he or she will not (i) exercise any of his or her remaining
Copyright and Related Rights in the Work or (ii) assert any associated claims
and causes of action with respect to the Work, in either case contrary to
Affirmer's express Statement of Purpose.
4. Limitations and Disclaimers.
a. No trademark or patent rights held by Affirmer are waived, abandoned,
surrendered, licensed or otherwise affected by this document.
b. Affirmer offers the Work as-is and makes no representations or warranties
of any kind concerning the Work, express, implied, statutory or otherwise,
including without limitation warranties of title, merchantability, fitness
for a particular purpose, non infringement, or the absence of latent or
other defects, accuracy, or the present or absence of errors, whether or not
discoverable, all to the greatest extent permissible under applicable law.
c. Affirmer disclaims responsibility for clearing rights of other persons
that may apply to the Work or any use thereof, including without limitation
any person's Copyright and Related Rights in the Work. Further, Affirmer
disclaims responsibility for obtaining any necessary consents, permissions
or other rights required for any use of the Work.
d. Affirmer understands and acknowledges that Creative Commons is not a
party to this document and has no duty or obligation with respect to this
CC0 or use of the Work.
For more information, please see
<http://creativecommons.org/publicdomain/zero/1.0/>
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
# nixpkgs-esp-dev
ESP8266 and ESP32(-C3, -S2, -S3, -C6, -H2) packages and development environments for Nix.
This repo contains derivations for ESP-IDF, and most of the toolchains and tools it depends on (compilers for all supported targets, custom OpenOCD for Espressif chips, etc.).
Released into the public domain via CC0 (see `COPYING`).
## Getting started
### `nix develop`
The easiest way to get started is to run one of these commands to get a development shell, without even needing to download the repository (requires Nix 2.4 or later):
- `nix --experimental-features 'nix-command flakes' develop github:mirrexagon/nixpkgs-esp-dev#esp32-idf`: for ESP32 development with [esp-idf](https://github.com/espressif/esp-idf).
- Includes the ESP32 toolchain, and downloads and sets up ESP-IDF with everything ready to use `idf.py`.
- `nix --experimental-features 'nix-command flakes' develop github:mirrexagon/nixpkgs-esp-dev#esp8266-rtos-sdk`: for ESP8266 development with [ESP8266_RTOS_SDK](https://github.com/espressif/ESP8266_RTOS_SDK).
- Includes the ESP8266 toolchain, ESP8266_RTOS_SDK, and esptool.
The list of available shells (to go after the `#` in the command) are:
- `esp-idf-full`: Includes toolchains for _all_ supported ESP32 chips (no ESP8266).
- `esp32-idf`: Includes toolchain for the ESP32.
- `esp32c3-idf`: Includes toolchain for the ESP32-C3.
- `esp32s2-idf`: Includes toolchain for the ESP32-S2.
- `esp32s3-idf`: Includes toolchain for the ESP32-S3.
- `esp32c6-idf`: Includes toolchain for the ESP32-C6.
- `esp32h2-idf`: Includes toolchain for the ESP32-H2.
- `esp8266-rtos-sdk`: Includes toolchain for ESP8266 and esptool.
### `nix-shell`
If you're not using Nix 2.4+ or prefer not to enable flakes, you can clone the repo and use one of:
- `nix-shell shells/esp-idf-full.nix`
- `nix-shell shells/esp32-idf.nix`
- `nix-shell shells/esp32c3-idf.nix`
- `nix-shell shells/esp32s2-idf.nix`
- `nix-shell shells/esp32s3-idf.nix`
- `nix-shell shells/esp32c6-idf.nix`
- `nix-shell shells/esp32h2-idf.nix`
- `nix-shell shells/esp8266-rtos-sdk.nix`
to get the same shells as with `nix develop`.
Note: `nix develop` will use the nixpkgs revision specified in `flake.nix`/`flake.lock`, while using `nix-shell` will use your system nixpkgs by default.
## Creating a custom shell environment
You can create a standalone `shell.nix` for your project that downloads `nixpkgs-esp-dev` automatically and creates a shell with the necessary packages and environment setup to use ESP-IDF.
See `examples/shell-standalone.nix` for an example.
## Overriding ESP-IDF and ESP32 toolchain versions
There is a default version of ESP-IDF specified in `pkgs/esp-idf/default.nix`. To use a different version of ESP-IDF or to pin the version, override one of the `esp-idf-*` derivations with the desired version and the hash for it. The correct version of the tools will be downloaded automatically.
See `examples/shell-override-versions.nix` for an example.
## Overlay
This repo contains an overlay in `overlay.nix` containing all the packages defined by this repo. If you clone the repo into `~/.config/nixpkgs/overlays/`, nixpkgs will automatically pick up the overlay and effectively add the packages to your system nixpkgs.
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
{ pkgs ? import ../default.nix }:
pkgs.mkShell {
name = "esp-idf-esp32h2-shell";
buildInputs = with pkgs; [
esp-idf-esp32h2
];
}
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
{ pkgs ? import ../default.nix }:
pkgs.mkShell {
name = "esp-idf-full-shell";
buildInputs = with pkgs; [
esp-idf-full
];
}
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
{ pkgs ? import ../default.nix }:
pkgs.mkShell {
name = "esp-idf-esp32-shell";
buildInputs = with pkgs; [
esp-idf-esp32
];
}
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
{ pkgs ? import ../default.nix }:
pkgs.mkShell {
name = "esp-idf-esp32s2-shell";
buildInputs = with pkgs; [
esp-idf-esp32s2
];
}
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
{ pkgs ? import ../default.nix }:
pkgs.mkShell {
name = "esp8266-rtos-sdk-shell";
buildInputs = with pkgs; [
esp8266-rtos-sdk
];
}
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
{ pkgs ? import ../default.nix }:
pkgs.mkShell {
name = "esp-idf-esp32s3-shell";
buildInputs = with pkgs; [
esp-idf-esp32s3
];
}
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
{ pkgs ? import ../default.nix }:
pkgs.mkShell {
name = "esp-idf-esp32c6-shell";
buildInputs = with pkgs; [
esp-idf-esp32c6
];
}
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
{ pkgs ? import ../default.nix }:
pkgs.mkShell {
name = "esp-idf-esp32c3-shell";
buildInputs = with pkgs; [
esp-idf-esp32c3
];
}
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
# Versions based on
# https://dl.espressif.com/dl/esp-idf/espidf.constraints.v5.1.txt
# on 2023-07-05.
{ stdenv
, lib
, fetchPypi
, fetchFromGitHub
, pythonPackages
}:
with pythonPackages;
rec {
idf-component-manager = buildPythonPackage rec {
pname = "idf-component-manager";
version = "1.3.2";
src = fetchFromGitHub {
owner = "espressif";
repo = pname;
rev = "v${version}";
sha256 = "sha256-rHZHlvRKMZvvjf3S+nU2lCDXt0Ll4Ek04rdhtfIQ1R0=";
};
# For some reason, this 404s.
/*
src = fetchPypi {
inherit pname version;
sha256 = "sha256-12ozmQ4Eb5zL4rtNHSFjEynfObUkYlid1PgMDVmRkwY=";
};
*/
doCheck = false;
propagatedBuildInputs = [
cachecontrol
cffi
click
colorama
contextlib2
packaging
pyyaml
requests
urllib3
requests-file
requests-toolbelt
schema
six
tqdm
] ++ cachecontrol.optional-dependencies.filecache;
meta = {
homepage = "https://github.com/espressif/idf-component-manager";
};
};
esp-coredump = buildPythonPackage rec {
pname = "esp-coredump";
version = "1.5.2";
src = fetchPypi {
inherit pname version;
sha256 = "sha256-hQkXnGoAXCLk/PV7Y+C0hOgXGRY77zbIp2ZDC0cxfLo=";
};
doCheck = false;
propagatedBuildInputs = [
construct
pygdbmi
esptool
];
meta = {
homepage = "https://github.com/espressif/esp-coredump";
};
};
esptool = buildPythonPackage rec {
pname = "esptool";
version = "4.6.2";
src = fetchPypi {
inherit pname version;
sha256 = "sha256-VJ75Pu9C7n6UYs5aU8Ft96DHHZGz934Z7BV0mATN8wA=";
};
doCheck = false;
propagatedBuildInputs = [
bitstring
cryptography
ecdsa
pyserial
reedsolo
pyyaml
];
meta = {
homepage = "https://github.com/espressif/esptool";
};
};
esp-idf-kconfig = buildPythonPackage rec {
pname = "esp-idf-kconfig";
version = "1.1.0";
src = fetchPypi {
inherit pname version;
sha256 = "sha256-s8ZXt6cf5w2pZSxQNIs/SODAUvHNgxyQ+onaCa7UbFA=";
};
doCheck = false;
propagatedBuildInputs = [
kconfiglib
];
meta = {
homepage = "https://github.com/espressif/esp-idf-kconfig";
};
};
esp-idf-monitor = buildPythonPackage rec {
pname = "esp-idf-monitor";
version = "1.1.1";
src = fetchPypi {
inherit pname version;
sha256 = "sha256-c62X3ZHRShhbAFmuPc/d2keqE9T9SXYIlJTyn32LPaE=";
};
doCheck = false;
propagatedBuildInputs = [
pyserial
esp-coredump
pyelftools
];
meta = {
homepage = "https://github.com/espressif/esp-idf-monitor";
};
};
esp-idf-size = buildPythonPackage rec {
pname = "esp-idf-size";
version = "0.3.1";
src = fetchPypi {
inherit pname version;
sha256 = "sha256-OzthhzKGjyqDJrmJWs4LMkHz0rAwho+3Pyc2BYFK0EU=";
};
doCheck = false;
propagatedBuildInputs = [
pyyaml
];
meta = {
homepage = "https://github.com/espressif/esp-idf-size";
};
};
freertos_gdb = buildPythonPackage rec {
pname = "freertos-gdb";
version = "1.0.2";
src = fetchPypi {
inherit pname version;
sha256 = "sha256-o0ZoTy7OLVnrhSepya+MwaILgJSojs2hfmI86D9C3cs=";
};
doCheck = false;
propagatedBuildInputs = [
];
meta = {
homepage = "https://github.com/espressif/freertos-gdb";
};
};
}
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
{ rev ? "v5.1"
, sha256 ? "sha256-IEa9R9VCWvbRjZFRPb2Qq2Qw1RFxsnVALFVgQlBCXMw="
, toolsToInclude ? [
"xtensa-esp-elf-gdb"
"riscv32-esp-elf-gdb"
"xtensa-esp32-elf"
"xtensa-esp32s2-elf"
"xtensa-esp32s3-elf"
"esp-clang"
"riscv32-esp-elf"
"esp32ulp-elf"
"openocd-esp32"
]
, stdenv
, lib
, fetchFromGitHub
, makeWrapper
, callPackage
, python3
# Tools for using ESP-IDF.
, git
, wget
, gnumake
, flex
, bison
, gperf
, pkgconfig
, cmake
, ninja
, ncurses5
, dfu-util
}:
let
src = fetchFromGitHub {
owner = "espressif";
repo = "esp-idf";
rev = rev;
sha256 = sha256;
fetchSubmodules = true;
};
allTools = callPackage (import ./tools.nix) {
toolSpecList = (builtins.fromJSON (builtins.readFile "${src}/tools/tools.json")).tools;
versionSuffix = "esp-idf-${rev}";
};
toolDerivationsToInclude = builtins.map (toolName: allTools."${toolName}") toolsToInclude;
customPython =
(python3.withPackages
(pythonPackages:
let
customPythonPackages = callPackage (import ./python-packages.nix) { inherit pythonPackages; };
in
with pythonPackages;
with customPythonPackages;
[
# This list is from `tools/requirements/requirements.core.txt` in the
# ESP-IDF checkout.
setuptools
click
pyserial
cryptography
pyparsing
pyelftools
idf-component-manager
esp-coredump
esptool
esp-idf-kconfig
esp-idf-monitor
esp-idf-size
freertos_gdb
]));
in
stdenv.mkDerivation rec {
pname = "esp-idf";
version = rev;
inherit src;
# This is so that downstream derivations will have IDF_PATH set.
setupHook = ./setup-hook.sh;
nativeBuildInputs = [ makeWrapper ];
propagatedBuildInputs = [
# This is in propagatedBuildInputs so that downstream derivations will run
# the Python setup hook and get PYTHONPATH set up correctly.
customPython
# Tools required to use ESP-IDF.
git
wget
gnumake
flex
bison
gperf
pkgconfig
cmake
ninja
ncurses5
dfu-util
] ++ toolDerivationsToInclude;
# We are including cmake and ninja so that downstream derivations (eg. shells)
# get them in their environment, but we don't actually want any of their build
# hooks to run, since we aren't building anything with them right now.
dontUseCmakeConfigure = true;
dontUseNinjaBuild = true;
dontUseNinjaInstall = true;
dontUseNinjaCheck = true;
installPhase = ''
mkdir -p $out
cp -rv . $out/
# Link the Python environment in so that:
# - The setup hook can set IDF_PYTHON_ENV_PATH to it.
# - In shell derivations, the Python setup hook will add the site-packages
# directory to PYTHONPATH.
ln -s ${customPython} $out/python-env
ln -s ${customPython}/lib $out/lib
'';
}
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
{ toolSpecList # The `tools` entry in `tools/tools.json` in an ESP-IDF checkout.
, versionSuffix # A string to use in the version of the tool derivations.
, stdenv
, lib
, fetchurl
, buildFHSUserEnv
, makeWrapper
# Dependencies for the various binary tools.
, zlib
, libusb1
}:
let
toolFhsEnvTargetPackages = {
xtensa-esp-elf-gdb = pkgs: (with pkgs; [ ]);
riscv32-esp-elf-gdb = pkgs: (with pkgs; [ ]);
xtensa-esp32-elf = pkgs: (with pkgs; [ ]);
xtensa-esp32s2-elf = pkgs: (with pkgs; [ ]);
xtensa-esp32s3-elf = pkgs: (with pkgs; [ ]);
esp-clang = pkgs: (with pkgs; [ zlib libxml2 ]);
riscv32-esp-elf = pkgs: (with pkgs; [ ]);
esp32ulp-elf = pkgs: (with pkgs; [ ]);
openocd-esp32 = pkgs: (with pkgs; [ zlib libusb1 ]);
};
toolSpecToDerivation = toolSpec:
let
targetVersionSpec = (builtins.elemAt toolSpec.versions 0).linux-amd64;
in
mkToolDerivation {
pname = toolSpec.name;
# NOTE: tools.json does not separately specify the versions of tools,
# so short of extracting the versions from the tarball URLs, we will
# just put the ESP-IDF version as the tool version.
version = versionSuffix;
description = toolSpec.description;
homepage = toolSpec.info_url;
license = { spdxId = toolSpec.license; };
url = targetVersionSpec.url;
sha256 = targetVersionSpec.sha256;
targetPkgs = toolFhsEnvTargetPackages."${toolSpec.name}";
exportVars = toolSpec.export_vars;
};
mkToolDerivation =
{ pname
, version
, description
, homepage
, license
, url
, sha256
, targetPkgs
, exportVars
}:
let
fhsEnv = buildFHSUserEnv {
name = "${pname}-env";
inherit targetPkgs;
runScript = "";
};
exportVarsWrapperArgsList = lib.attrsets.mapAttrsToList (name: value: "--set \"${name}\" \"${value}\"") exportVars;
in
assert stdenv.system == "x86_64-linux";
stdenv.mkDerivation rec {
inherit pname version;
src = fetchurl {
inherit url sha256;
};
buildInputs = [ makeWrapper ];
phases = [ "unpackPhase" "installPhase" ];
installPhase = ''
cp -r . $out
# For setting exported variables (see exportVarsWrapperArgsList).
TOOL_PATH=$out
for FILE in $(ls $out/bin); do
FILE_PATH="$out/bin/$FILE"
if [[ -x $FILE_PATH ]]; then
mv $FILE_PATH $FILE_PATH-unwrapped
makeWrapper ${fhsEnv}/bin/${pname}-env $FILE_PATH --add-flags "$FILE_PATH-unwrapped" ${lib.strings.concatStringsSep " " exportVarsWrapperArgsList}
fi
done
'';
meta = with lib; {
inherit description homepage license;
};
};
in
builtins.listToAttrs (builtins.map (toolSpec: lib.attrsets.nameValuePair toolSpec.name (toolSpecToDerivation toolSpec)) toolSpecList)
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
# Export the necessary environment variables to use ESP-IDF.
addIdfEnvVars() {
# Crude way to detect if $1 is the ESP-IDF derivation.
if [ -e "$1/tools/idf.py" ]; then
export IDF_PATH="$1"
export IDF_PYTHON_CHECK_CONSTRAINTS=no
export IDF_PYTHON_ENV_PATH="$IDF_PATH/python-env"
addToSearchPath PATH "$IDF_PATH/tools"
fi
}
addEnvHooks "$hostOffset" addIdfEnvVars
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
{ stdenv, lib, fetchurl, makeWrapper, buildFHSUserEnv }:
let
fhsEnv = buildFHSUserEnv {
name = "esp8266-toolchain-env";
targetPkgs = pkgs: with pkgs; [ ];
runScript = "";
};
in
assert stdenv.system == "x86_64-linux";
stdenv.mkDerivation rec {
pname = "esp8266-toolchain";
version = "2020r3";
src = fetchurl {
url = "https://dl.espressif.com/dl/xtensa-lx106-elf-gcc8_4_0-esp-${version}-linux-amd64.tar.gz";
hash = "sha256-ChgEteIjHG24tyr2vCoPmltplM+6KZVtQSZREJ8T/n4=";
};
buildInputs = [ makeWrapper ];
phases = [ "unpackPhase" "installPhase" ];
installPhase = ''
cp -r . $out
for FILE in $(ls $out/bin); do
FILE_PATH="$out/bin/$FILE"
if [[ -x $FILE_PATH ]]; then
mv $FILE_PATH $FILE_PATH-unwrapped
makeWrapper ${fhsEnv}/bin/esp8266-toolchain-env $FILE_PATH --add-flags "$FILE_PATH-unwrapped"
fi
done
'';
meta = with lib; {
description = "ESP8266 compiler toolchain";
homepage = "https://docs.espressif.com/projects/esp8266-rtos-sdk/en/latest/get-started/linux-setup.html";
license = licenses.gpl3;
};
}
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
{ rev ? "v3.4"
, sha256 ? "sha256-WhGVo4NDOkOlu9tsLFhOcZYthmVfxquOibZ+nGetbuo="
, stdenv
, lib
, fetchFromGitHub
, python3
, fetchPypi
# Tools for using ESP8266_RTOS_SDK.
, git
, wget
, gnumake
, flex
, bison
, gperf
, pkgconfig
, ncurses5
, cmake
, ninja
, gcc-xtensa-lx106-elf-bin
, esptool
}:
let
customPython =
(python3.withPackages
(pythonPackages:
with pythonPackages;
[
# This list is from `requirements.txt` in the ESP8266_RTOS_SDK
# checkout.
setuptools
click
pyserial
future
cryptography
(pyparsing.overrideAttrs (oldAttrs: {
src = fetchPypi {
pname = "pyparsing";
version = "2.3.1";
sha256 = "sha256-ZskmiGJkGrysSpa6dFBuWUyITj9XaQppbSGtghDtZno=";
};
buildInputs = [ setuptools ];
}))
pyelftools
]));
in
stdenv.mkDerivation rec {
pname = "esp8266-rtos-sdk";
version = rev;
src = fetchFromGitHub {
owner = "espressif";
repo = "ESP8266_RTOS_SDK";
rev = rev;
sha256 = sha256;
fetchSubmodules = true;
};
setupHook = ./setup-hook.sh;
propagatedBuildInputs = [
# This is in propagatedBuildInputs so that downstream derivations will run
# the Python setup hook and get PYTHONPATH set up correctly.
customPython
gcc-xtensa-lx106-elf-bin
esptool
# Tools required to use ESP8266_RTOS_SDK.
git
wget
gnumake
flex
bison
gperf
pkgconfig
cmake
ninja
ncurses5
];
# We are including cmake and ninja so that downstream derivations (eg. shells)
# get them in their environment, but we don't actually want any of their build
# hooks to run, since we aren't building anything with them right now.
dontUseCmakeConfigure = true;
dontUseNinjaBuild = true;
dontUseNinjaInstall = true;
dontUseNinjaCheck = true;
installPhase = ''
mkdir -p $out
cp -rv . $out/
# Link the Python environment in so that:
# - In shell derivations, the Python setup hook will add the site-packages
# directory to PYTHONPATH.
ln -s ${customPython} $out/python-env
ln -s ${customPython}/lib $out/lib
'';
}
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
# Export the necessary environment variables to use ESP8266_RTOS_SDK.
addIdfEnvVars() {
# Crude way to detect if $1 is the ESP8266_RTOS_SDK derivation.
if [ -e "$1/tools/idf.py" ]; then
export IDF_PATH="$1"
addToSearchPath PATH "$IDF_PATH/tools"
fi
}
addEnvHooks "$hostOffset" addIdfEnvVars
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
{ pkgs }:
let
build-idf-example =
{ target, example, esp-idf, suffix }:
(pkgs.stdenv.mkDerivation {
name = "test-build-${target}-${builtins.replaceStrings [ "/" ] [ "-" ] example}-${suffix}";
buildInputs = [
esp-idf
];
phases = [ "buildPhase" ];
buildPhase = ''
cp -r $IDF_PATH/examples/${example}/* .
chmod -R +w .
# The build system wants to create a cache directory somewhere in the home
# directory, so we make up a home for it.
mkdir temp-home
export HOME=$(readlink -f temp-home)
# idf-component-manager wants to access the network, so we disable it.
export IDF_COMPONENT_MANAGER=0
idf.py set-target ${target}
idf.py build
mkdir $out
cp -r * $out
'';
});
buildsNameList = pkgs.lib.attrsets.cartesianProductOfSets {
target = [ "esp32" "esp32c3" "esp32s2" "esp32s3" "esp32c6" "esp32h2" ];
example = [ "get-started/hello_world" ];
};
buildsList = pkgs.lib.lists.flatten (builtins.map
(spec:
let
# Build each of these with both esp-idf-full and the appropriate esp-idf-esp32xx.
buildFull = build-idf-example (spec // { esp-idf = pkgs.esp-idf-full; suffix = "full"; });
buildSpecific = build-idf-example (spec // { esp-idf = pkgs."esp-idf-${spec.target}"; suffix = "specific"; });
in
[
(pkgs.lib.attrsets.nameValuePair buildFull.name buildFull)
(pkgs.lib.attrsets.nameValuePair buildSpecific.name buildSpecific)
])
buildsNameList);
in
builtins.listToAttrs buildsList
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
{ pkgs }:
let
build-esp8266-example =
{ example }:
(pkgs.stdenv.mkDerivation {
name = "test-build-esp8266-${builtins.replaceStrings [ "/" ] [ "-" ] example}";
buildInputs = with pkgs; [
esp8266-rtos-sdk
];
phases = [ "buildPhase" ];
buildPhase = ''
cp -r $IDF_PATH/examples/${example}/* .
chmod -R +w .
# The build system wants to create a cache directory somewhere in the home
# directory, so we make up a home for it.
mkdir temp-home
export HOME=$(readlink -f temp-home)
idf.py build
mkdir $out
cp -r * $out
'';
});
examplesToBuild = [ "get-started/hello_world" ];
buildsList = builtins.map
(example:
let
build = build-esp8266-example { inherit example; };
in
pkgs.lib.attrsets.nameValuePair build.name build)
examplesToBuild;
in
builtins.listToAttrs buildsList
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
# A standalone shell definition that overrides the versions of ESP-IDF and the ESP32 toolchain.
let
nixpkgs-esp-dev = builtins.fetchGit {
url = "https://github.com/mirrexagon/nixpkgs-esp-dev.git";
};
pkgs = import <nixpkgs> { overlays = [ (import "${nixpkgs-esp-dev}/overlay.nix") ]; };
in
pkgs.mkShell {
name = "esp-project";
buildInputs = with pkgs; [
(esp-idf-esp32.override {
rev = "cf7e743a9b2e5fd2520be4ad047c8584188d54da";
sha256 = "sha256-tqWUTJlOWk4ayfQIxgiLkTrrTFU0ZXuh76xEZWKRZ/s=";
})
];
}
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
# A standalone shell definition that downloads and uses packages from `nixpkgs-esp-dev` automatically.
let
nixpkgs-esp-dev = builtins.fetchGit {
url = "https://github.com/mirrexagon/nixpkgs-esp-dev.git";
# Optionally pin to a specific commit of `nixpkgs-esp-dev`.
# rev = "<commit hash>";
};
pkgs = import <nixpkgs> { overlays = [ (import "${nixpkgs-esp-dev}/overlay.nix") ]; };
in
pkgs.mkShell {
name = "esp-project";
buildInputs = with pkgs; [
esp-idf-full
];
}
| {
"repo_name": "mirrexagon/nixpkgs-esp-dev",
"stars": "63",
"repo_language": "Nix",
"file_name": "shell-standalone.nix",
"mime_type": "text/plain"
} |
[tool.poetry]
name = "ytdl-nfo"
version = "0.2.3"
description = "Utility to convert youtube-dl/yt-dlp json metadata to .nfo"
authors = ["Owen <[email protected]>"]
license = "Unlicense"
repository = "https://github.com/owdevel/ytdl-nfo"
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.8"
PyYAML = "^6.0"
[tool.poetry.dev-dependencies]
flake8 = "^5.0.4"
autopep8 = "^1.6.0"
[tool.poetry.scripts]
ytdl-nfo = "ytdl_nfo:main"
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
| {
"repo_name": "owdevel/ytdl-nfo",
"stars": "35",
"repo_language": "Python",
"file_name": "youtube.yaml",
"mime_type": "text/plain"
} |
# ytdl-nfo : youtube-dl NFO generator
[youtube-dl](https://github.com/ytdl-org/youtube-dl) is an incredibly useful resource to download and archive footage from across the web. Viewing and organising these files however can be a bit of a hassle.
**ytdl-nfo** takes the `--write-info-json` output from youtube-dl and parses it into Kodi-compatible .nfo files. The aim is to prepare and move files so as to be easily imported into media centers such as Plex, Emby, Jellyfin, etc.
**Warning**
This package is still in early stages and breaking changes may be introduced.
### NOTE: youtube-dl derivatives
This package was originally built for youtube-dl; however, the aim is to be compatible with related forks as well. Currently these are:
- [youtube-dl](https://github.com/ytdl-org/youtube-dl)
- [yt-dlp](https://github.com/yt-dlp/yt-dlp)
## Installation
Requirements: Python 3.8
### Python 3 pipx (recommended)
[pipx](https://github.com/pipxproject/pipx) is a tool that installs a package and its dependencies in an isolated environment.
1. Ensure Python 3.8 and [pipx](https://github.com/pipxproject/pipx) are installed
2. Install with `pipx install ytdl-nfo`
### Python 3 pip
1. Ensure Python 3.8 is installed
2. Install with `pip install ytdl-nfo`
### Package from source
1. Ensure Python 3.8 and [Python Poetry](https://python-poetry.org/) are installed
2. Clone the repo using `git clone https://github.com/owdevel/ytdl_nfo.git`
3. Create a dev environment with `poetry install`
4. Build with `poetry build`
5. Install from the `dist` directory with `pip install ./dist/ytdl_nfo-x.x.x.tar.gz`
### Development Environment
1. Perform steps 1-3 of package from source
2. Run using `poetry run ytdl-nfo` or use `poetry shell` to enter the virtual env
## Usage
### Automatic
Run `ytdl-nfo JSON_FILE` replacing `JSON_FILE` with either the path to the file you wish to convert, or a folder containing files to convert. The tool will automatically take any files ending with `.json` and convert them to `.nfo` using the included extractor templates.
#### Examples
Convert a single file
```bash
ytdl-nfo great_video.info.json
```
Convert a directory and all sub directories with `.info.json` files
```bash
ytdl-nfo video_folder
```
### Manual
ytdl-nfo uses a set of YAML configs to determine the output format and which data is carried across. The config is chosen based on the extractor name that youtube-dl records in the JSON metadata. Should this be missing, or if a custom extractor is wanted, there is the `--extractor` flag. ytdl-nfo will then use the extractor with the given name, as long as it is in the config directory with the format `custom_extractor_name.yaml`.
```bash
ytdl-nfo --extractor custom_extractor_name great_video.info.json
```
#### Config Location
Run the following command to get the configuration location.
```bash
ytdl-nfo --config
```
## Extractors
Issues/Pull Requests are welcome to add more youtube-dl supported extractors to the repo.
### Custom Extractors
Coming Soon...
## Todo
- [ ] Add try/except blocks to pretty-print errors
- [ ] Documentation and templates for creating custom extractors
- [ ] Documentation of CLI arguments
- [x] Recursive folder searching
- [x] Add package to pypi
## Authors Note
This is a small project I started to learn how to use the Python packaging system whilst providing some useful functionality for my home server setup.
Issues/pull requests and constructive criticism are welcome.
| {
"repo_name": "owdevel/ytdl-nfo",
"stars": "35",
"repo_language": "Python",
"file_name": "youtube.yaml",
"mime_type": "text/plain"
} |
import os
import json

from .nfo import get_config


class Ytdl_nfo:
    def __init__(self, file_path, extractor=None):
        self.path = file_path
        self.dir = os.path.dirname(file_path)

        # Read json data
        with open(self.path, "rt", encoding="utf-8") as f:
            self.data = json.load(f)

        # Use the extractor recorded in the JSON unless one was given explicitly
        self.extractor = extractor
        if extractor is None:
            self.extractor = self.data['extractor'].lower()

        # Derive the output base name: strip '.info.json' if present,
        # otherwise fall back to the '_filename' field from the JSON
        if file_path.endswith(".info.json"):
            self.filename = file_path[:-10]
        else:
            self.filename = os.path.splitext(self.data['_filename'])[0]

        self.nfo = get_config(self.extractor)

    def process(self):
        self.nfo.generate(self.data)
        self.write_nfo()
        return True

    def write_nfo(self):
        self.nfo.write_nfo(f'{self.filename}.nfo')

    def print_data(self):
        print(json.dumps(self.data, indent=4, sort_keys=True))

    def get_nfo(self):
        return self.nfo.get_nfo()
| {
"repo_name": "owdevel/ytdl-nfo",
"stars": "35",
"repo_language": "Python",
"file_name": "youtube.yaml",
"mime_type": "text/plain"
} |
import argparse
import os
import re

from .Ytdl_nfo import Ytdl_nfo


def main():
    parser = argparse.ArgumentParser(
        description='ytdl_nfo, a youtube-dl helper to convert the output of \'youtube-dl --write-info-json\' to an NFO for use in kodi/plex/etc')
    parser.add_argument('input', metavar='JSON_FILE', type=str,
                        help='Json file to convert or folder to convert in')
    parser.add_argument('-e', '--extractor', help='Specify specific extractor')
    parser.add_argument('-w', '--overwrite', action="store_true",
                        help='Overwrite existing NFO files')
    parser.add_argument(
        '--regex', type=str, help='Specify regex search string to match files', default=r".json$")
    parser.add_argument('--config', help='Prints the path to the config directory',
                        action='version', version=f'{get_config_path()}')
    args = parser.parse_args()

    extractor_str = args.extractor if args.extractor is not None else "file specific"
    if os.path.isfile(args.input):
        print(f'Processing {args.input} with {extractor_str} extractor')
        file = Ytdl_nfo(args.input, args.extractor)
        file.process()
    else:
        # Walk the directory tree and convert every file matching the regex
        for root, dirs, files in os.walk(args.input):
            for file_name in files:
                file_path = os.path.join(root, file_name)
                if re.search(args.regex, file_name):
                    # Strip a trailing '.info' so 'video.info.json' maps to 'video.nfo'
                    path_no_ext = os.path.splitext(file_path)[0]
                    info_re = r".info$"
                    if re.search(info_re, path_no_ext):
                        path_no_ext = re.sub(info_re, '', path_no_ext)

                    # Skip files whose NFO already exists unless --overwrite was given
                    if args.overwrite or not os.path.exists(path_no_ext + ".nfo"):
                        print(
                            f'Processing {file_path} with {extractor_str} extractor')
                        file = Ytdl_nfo(file_path, args.extractor)
                        file.process()


def get_config_path():
    return os.path.join(os.path.dirname(__file__), 'configs')


__all__ = ['main', 'Ytdl_nfo', 'nfo']
| {
"repo_name": "owdevel/ytdl-nfo",
"stars": "35",
"repo_language": "Python",
"file_name": "youtube.yaml",
"mime_type": "text/plain"
} |
import yaml
import ast
import datetime as dt
import xml.etree.ElementTree as ET
import pkg_resources
from xml.dom import minidom


class Nfo:
    def __init__(self, extractor):
        # Load the YAML template that matches this extractor
        with pkg_resources.resource_stream("ytdl_nfo", f"configs/{extractor}.yaml") as f:
            self.data = yaml.load(f, Loader=yaml.FullLoader)

    def generate(self, raw_data):
        # There should only be one top level node
        top_name = list(self.data.keys())[0]
        self.top = ET.Element(top_name)

        # Recursively generate the rest of the NFO
        self.__create_child(self.top, self.data[top_name], raw_data)

    def __create_child(self, parent, subtree, raw_data):
        # Some .info.json files may not include an upload_date.
        if raw_data.get("upload_date") is None:
            date = dt.datetime.fromtimestamp(raw_data["epoch"])
            raw_data["upload_date"] = date.strftime("%Y%m%d")

        # Check if current node is a list
        if isinstance(subtree, list):
            # Process individual nodes
            for child in subtree:
                self.__create_child(parent, child, raw_data)
            return

        # Process data in child node
        child_name = list(subtree.keys())[0]
        table = child_name[-1] == '!'
        attributes = {}
        children = []

        # Check if attributes are present
        if isinstance(subtree[child_name], dict):
            attributes = subtree[child_name]
            value = subtree[child_name]['value']

            # Set children if value flag
            if table:
                children = ast.literal_eval(value.format(**raw_data))
            else:
                children = [value.format(**raw_data)]

            if 'convert' in attributes.keys():
                target_type = attributes['convert']
                input_f = attributes['input_f']
                output_f = attributes['output_f']
                for i in range(len(children)):
                    if target_type == 'date':
                        date = dt.datetime.strptime(children[i], input_f)
                        children[i] = date.strftime(output_f)

        # Value only
        else:
            if table:
                children = ast.literal_eval(
                    subtree[child_name].format(**raw_data))
            else:
                children = [subtree[child_name].format(**raw_data)]

        # Add the child node(s)
        child_name = child_name.rstrip('!')
        for value in children:
            child = ET.SubElement(parent, child_name)
            child.text = value

            # Add attributes
            if 'attr' in attributes.keys():
                for attribute, attr_value in attributes['attr'].items():
                    child.set(attribute, attr_value.format(**raw_data))

    def print_nfo(self):
        xmlstr = minidom.parseString(ET.tostring(
            self.top, 'utf-8')).toprettyxml(indent=" ")
        print(xmlstr)

    def write_nfo(self, filename):
        xmlstr = minidom.parseString(ET.tostring(
            self.top, 'utf-8')).toprettyxml(indent=" ")
        with open(filename, 'wt', encoding="utf-8") as f:
            f.write(xmlstr)

    def get_nfo(self):
        xmlstr = minidom.parseString(ET.tostring(
            self.top, 'utf-8')).toprettyxml(indent=" ")
        return xmlstr


def get_config(extractor):
    # Strip a ':tab' suffix (e.g. 'youtube:tab') so the base extractor template is used
    return Nfo(extractor.replace(":tab", ""))
| {
"repo_name": "owdevel/ytdl-nfo",
"stars": "35",
"repo_language": "Python",
"file_name": "youtube.yaml",
"mime_type": "text/plain"
} |
#!/usr/bin/env python
import ytdl_nfo

if __name__ == '__main__':
    ytdl_nfo.main()
| {
"repo_name": "owdevel/ytdl-nfo",
"stars": "35",
"repo_language": "Python",
"file_name": "youtube.yaml",
"mime_type": "text/plain"
} |
episodedetails:
  - title: '{title}'
  - showtitle: '{uploader}'
  - uniqueid:
      attr:
        type: 'youtube'
        default: "true"
      value: '{id}'
  - plot: '{description}'
  - premiered:
      convert: 'date'
      input_f: '%Y%m%d'
      output_f: '%Y-%m-%d'
      value: '{upload_date}'
| {
"repo_name": "owdevel/ytdl-nfo",
"stars": "35",
"repo_language": "Python",
"file_name": "youtube.yaml",
"mime_type": "text/plain"
} |
# opendotnet
Documentation on cross-platform .NET, targeting the CentOS 6 and 7 Linux distributions. The focus is cross-platform .NET development on CentOS, with the aim of helping .NET developers on Windows move smoothly into Mono development on Linux; it should also be a useful reference for Mono developers already working on Linux.
The material is collected from the WeChat official account opendotnet, the articles gathered at http://www.cnblogs.com/shanyou/archive/2012/07/28/2612919.html, the resources at http://www.linuxdot.net, and discussions in the QQ groups 102732979 and 103810355. Many thanks to everyone in China actively contributing to cross-platform .NET practice.
You are welcome to follow the WeChat official account "dotNET跨平台" (WeChat ID: opendotnet).
| {
"repo_name": "geffzhang/opendotnet",
"stars": "492",
"repo_language": "None",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Linux in Brief
A brief introduction to Linux, targeting the CentOS 6 and 7 distributions. It aims to help .NET developers on Windows move smoothly into Mono development on Linux by building up Linux fundamentals from scratch.
| {
"repo_name": "geffzhang/opendotnet",
"stars": "492",
"repo_language": "None",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# google-music-electron changelog
2.20.0 - Moved to fake user agent to trick Google auth, via https://github.com/MarshallOfSound/Google-Play-Music-Desktop-Player-UNOFFICIAL-/commit/a6065183e767e48a34a3f57eea852d69d13bc007
2.19.0 - Upgraded to [email protected] to fix playback issues
2.18.0 - Upgraded to [email protected] to fix GitHub vulnerability warning
2.17.1 - Replaced Gratipay with support me page
2.17.0 - Added macOS hide controls via @RickyRomero in #46
2.16.1 - Fixed Node.js supported versions for Travis CI
2.16.0 - Repaired arrow bindings timings
2.15.0 - Upgraded to `[email protected]` to attempt to finally repair 2FA
2.14.0 - Fixed missing and small arrows
2.13.0 - Fixed navigation arrow size for new Google Music UI
2.12.1 - Corrected forward/back enabled/disabled after page reload
2.12.0 - Added enabling/disabling of forward/back buttons when available/not
2.11.0 - Followed back rename of `gmusic.js` to `google-music`
2.10.1 - Repaired `google-music-electron` failing to launch due to a lack of `window-info`
2.10.0 - Added window size/location preservation via @JordanRobinson in #42
2.9.0 - Updated deprecated requires and Electron methods
2.8.0 - Added Edit menu for OS X editing support via @chushao in #36
2.7.0 - Upgraded to `[email protected]` to patch OS specific crashes. Fixes #29
2.6.0 - Relocated `install-mpris` command from running inside `electron` to `node` launcher (part of #25)
2.5.1 - Followed renamed `google-music` to `gmusic.js`
2.5.0 - Upgraded to `[email protected]` to receive error noise patches
2.4.0 - Increased `min-width` of arrow container to prevent shrinking arrows. Fixes #26
2.3.0 - Added truncation to tooltip to stop Windows crashes. Fixes #24
2.2.1 - Corrected license to SPDX format by @execat in #23
2.2.0 - Added support for `paper-icon-button` navigation
2.1.1 - Upgraded `electron-rebuild` to fix `[email protected]` (in Electron) issues
2.1.0 - Upgraded to `[email protected]` for cross-version selectors and added `setTimeout` loop for binding initialization
2.0.1 - Added `Node.js` version to about window
2.0.0 - Moved to using single instance by default. Fixes #19
1.23.1 - Repaired respecting CLI overrides
1.23.0 - Added CLI options to preferences
1.22.0 - Added configuration bindings for shortcuts
1.21.0 - Upgraded to `[email protected]` to pick up Windows hide patches. Fixes #16
1.20.0 - Added `icon` to browser window. Fixes #17
1.19.1 - Added `foundry` for release
1.19.0 - Repaired missing forward/back buttons
1.18.1 - Added newsletter subscription to README.md
1.18.0 - Upgraded to `[email protected]` to repair duplicate playback events and detect stops
1.17.2 - Repaired lint error
1.17.1 - Updated MPRIS screenshot
1.17.0 - Added playback time tracking for MPRIS
1.16.0 - Added album art, duration, exit, and raise events/actions to MPRIS
1.15.0 - Added MPRIS support via @jck in #10
1.14.1 - Added documentation on how to upgrade via @Q11x in #9
1.14.0 - Added "Forward/Back" navigation buttons. Fixed #6
1.13.0 - Added `--minimize-to-tray` via @kempniu in #8
1.12.0 - Added `--hide-via-tray` CLI option
1.11.0 - Upgraded to `[email protected]` and added tray click for minimization
1.10.1 - Added documentation for development
1.10.0 - Repaired separator menu bug for OSX via @arboleya in #5. Fixes #4
1.9.0 - Added support for Chromium flags
1.8.0 - Added debug repl option
1.7.0 - Refactored again to keep all application state/methods under one roof
1.6.0 - Repaired bug with restoring minimized window from tray
1.5.1 - Updated CLI documentation
1.5.0 - Added `winston` as our logger
1.4.0 - Repaired electron PATH issues
1.3.0 - Added `--version` and `--skip-taskbar` support
1.2.0 - Added menu item for show/hide application window
1.1.0 - Abstracted menu/tray/shortcut hooks into separate modules
1.0.1 - Added missing bin script
1.0.0 - Initial release
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
# google-music-electron [](https://travis-ci.org/twolfson/google-music-electron)
Desktop app for [Google Music][] on top of [Electron][]
**Features:**
- Google Music as a standalone application
- Tray for quick play/pause/quit and tooltip with information
- Media key shortcuts
- MPRIS integration (for GNU/Linux desktop environments)

This was written as a successor to [google-music-webkit][]. When upgrading between versions of [nw.js][], there were regressions with taskbar and shortcut bindings. We wrote this as an alternative.
[Google Music]: https://play.google.com/music/listen
[Electron]: http://electron.atom.io/
[google-music-webkit]: https://github.com/twolfson/google-music-webkit
[nw.js]: https://github.com/nwjs/nw.js
## Requirements
- [npm][], usually installed with [node][]
[npm]: http://npmjs.org/
[node]: http://nodejs.org/
## Getting Started
`google-music-electron` can be installed globally via `npm`:
```bash
# Install google-music-electron via npm
npm install -g google-music-electron
# Run google-music-electron
google-music-electron
```
When the application has launched, it will appear in your taskbar and via a tray icon.

## Newsletter
Interested in hearing about updates and new releases of `google-music-electron`?
[Subscribe to our newsletter!](https://groups.google.com/forum/#!forum/google-music-electron)
## MPRIS integration
If you are on GNU/Linux and your desktop environment supports [MPRIS][], you can install our [MPRIS][] integration via:
```bash
google-music-electron install-mpris
# Once this succeeds, MPRIS will be integrated on `google-music-electron` restart
```

[MPRIS]: http://specifications.freedesktop.org/mpris-spec/latest/
## Updating
`google-music-electron` can be updated via `npm`:
```bash
# Update google-music-electron to a newer version via npm
npm update -g google-music-electron
# Alternatively, the following can be used as well to specify a version
# npm install -g google-music-electron@latest
```
## Documentation
### CLI
We have a few CLI options available for you:
```
Usage: google-music-electron [options] [command]
Commands:
install-mpris Install integration with MPRIS (Linux only)
Options:
-h, --help output usage information
-V, --version output the version number
-S, --skip-taskbar Skip showing the application in the taskbar
--minimize-to-tray Hide window to tray instead of minimizing
--hide-via-tray Hide window to tray instead of minimizing (only for tray icon)
--allow-multiple-instances Allow multiple instances of `google-music-electron` to run
--verbose Display verbose log output in stdout
--debug-repl Starts a `replify` server as `google-music-electron` for debugging
```
## Development
### Running locally
To get a local development copy running, you will need:
- [npm][], usually installed with [node][]. Same `npm` that is used during installation
- [git][], version control tool
[git]: http://git-scm.com/
Follow the steps below to get a development copy set up:
```bash
# Clone our repository
git clone https://github.com/twolfson/google-music-electron.git
cd google-music-electron/
# Install our dependencies and dev dependencies
npm install
# Start up `google-music-electron`
npm start
```
After running the above steps, a copy of `google-music-electron` should begin running.

#### Adding local setup as a global installation
After getting our local development set up, we can go one step further and get `google-music-electron` working on our CLI as if it were installed via `npm install -g`.
```bash
# Link local copy as a global copy
# WARNING: Make sure that `npm install` has been run before this point
# or your local copy's permissions may get messed up
npm link
# Run `google-music-electron` for local copy
google-music-electron
```
More information on `npm link` can be found in `npm`'s documentation:
https://docs.npmjs.com/cli/link
### Icons
Source images are kept in the `resources/` folder. Icons are maintained via Inkscape and the `play/pause` buttons are isolated in layers.
To generate icons:
1. Export each of the play/pause/clean variants as a `.svg` file
2. Load the icons via GIMP as a 32x32 SVG
3. Export via GIMP as a `.png`
At the time of writing, Inkscape and ImageMagick seemed to be generating non-transparent backgrounds upon converting SVG to PNG.
## Contributing
In lieu of a formal styleguide, take care to maintain the existing coding style. Add unit tests for any new or changed functionality. Lint via `npm run lint` and test via `npm test`.
## Donating
Support this project and [others by twolfson][twolfson-projects] via [donations][twolfson-support-me].
<http://twolfson.com/support-me>
[twolfson-projects]: http://twolfson.com/projects
[twolfson-support-me]: http://twolfson.com/support-me
## Attribution
Headphones designed by Jake Dunham from [the Noun Project][headphones-icon]
[headphones-icon]: http://thenounproject.com/term/headphones/16097/
## Unlicense
As of May 16 2015, Todd Wolfson has released this repository and its contents to the public domain.
It has been released under the [UNLICENSE][].
[UNLICENSE]: UNLICENSE
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
custom: https://twolfson.com/support-me
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
#!/usr/bin/env node
// Load in our dependencies
var path = require('path');
var spawn = require('child_process').spawn;
var electronPath = require('electron');
var parseCli = require('../lib/cli-parser').parse;
// Process our arguments (catches any `--help` and `install-mpris` commands)
var program = parseCli(process.argv);
// If didn't match a command (e.g. `install-mpris`), then launch our application
if (program.args.length === 0) {
// Find our application
var googleMusicElectronPath = path.join(__dirname, '..');
var args = [googleMusicElectronPath];
// Append all arguments after our node invocation
// e.g. `node bin/google-music-electron.js --version` -> `--version`
args = args.concat(process.argv.slice(2));
// Run electron on our application and forward all stdio
spawn(electronPath, args, {stdio: [0, 1, 2]});
}
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
// Load in our dependencies
var app = require('electron').app;
var path = require('path');
var winston = require('winston');
// Load in our constants
// e.g. ~/.config/google-music-electron/verbose.log
var logPath = path.join(app.getPath('userData'), 'verbose.log');
// Define our logger setup
module.exports = function (options) {
// Create our logger
// https://github.com/winstonjs/winston/blob/v1.0.0/lib/winston/config/npm-config.js
// https://github.com/winstonjs/winston/tree/v1.0.0#using-logging-levels
var logger = new winston.Logger({
transports: [
// https://github.com/winstonjs/winston/tree/v1.0.0#console-transport
new winston.transports.Console({
level: options.verbose ? 'silly' : 'info',
colorize: true,
timestamp: true
}),
// https://github.com/winstonjs/winston/tree/v1.0.0#file-transport
new winston.transports.File({
level: 'silly',
filename: logPath,
colorize: false,
timestamp: true
})
]
});
// Log for sanity
logger.info('Logger initialized. Writing info/warnings/errors to stdout. ' +
'Writing all logs to "%s"', logPath);
// Return our logger
return logger;
};
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
// Load in our dependencies
var ipcMain = require('electron').ipcMain;
var _ = require('underscore');
var Configstore = require('configstore');
var pkg = require('../package.json');
// Define config constructor
function GmeConfig(cliOverrides, cliInfo) {
// Create our config
this.config = new Configstore(pkg.name, {
'playpause-shortcut': 'mediaplaypause',
'next-shortcut': 'medianexttrack',
'previous-shortcut': 'mediaprevioustrack'
});
this.cliOverrides = cliOverrides;
// Generate IPC bindings for config and its info
var that = this;
ipcMain.on('get-config-sync', function handleGetConfigSync (evt) {
evt.returnValue = JSON.stringify(that.getAll());
});
ipcMain.on('get-config-info-sync', function handleGetConfigInfoSync (evt) {
evt.returnValue = JSON.stringify(cliInfo);
});
ipcMain.on('get-config-overrides-sync', function handleGetConfigInfoSync (evt) {
evt.returnValue = JSON.stringify(cliOverrides);
});
ipcMain.on('set-config-item-sync', function handleSetConfigItemSync (evt, key, val) {
that.set(key, val);
evt.returnValue = JSON.stringify({success: true});
});
}
// DEV: We need to define our own `getAll` since we can't subclass `Configstore#all`
// Also, since the `setAll` behavior is confusing because we don't want cliOverrides to contaminate anything
// so we don't ever allow setting it =_=
// https://github.com/yeoman/configstore/blob/v1.2.1/index.js
GmeConfig.prototype = {
getAll: function () {
return _.defaults({}, this.cliOverrides, this.config.all);
},
get: function (key) {
var all = this.getAll();
return all[key];
},
set: function (key, val) {
return this.config.set(key, val);
},
del: function (key) {
return this.config.del(key);
},
clear: function () {
return this.config.clear();
}
};
// Export our constructor
module.exports = GmeConfig;
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
// Load in our dependencies
var ipcRenderer = require('electron').ipcRenderer;
var webContents = require('electron').remote.getCurrentWebContents();
var GoogleMusic = require('google-music');
// Overload `window.addEventListener` to prevent `unload` bindings
var _addEventListener = window.addEventListener;
window.addEventListener = function (eventName, fn, bubbles) {
// If we received an unload binding, ignore it
if (eventName === 'unload' || eventName === 'beforeunload') {
return;
}
// Otherwise, run our normal addEventListener
return _addEventListener.apply(window, arguments);
};
// When the page loads
// DEV: We originally used `DOMContentLoaded` but Google decided to stop eagerly rendering navigation
window.addEventListener('load', function handleNavigationLoad () {
// Find our attachment point for nav buttons
var leftNavContainer = document.querySelector('#topBar #material-one-left');
var navOpenEl = leftNavContainer ? leftNavContainer.querySelector('#left-nav-open-button') : null;
// If there is one
if (navOpenEl) {
// Generate our buttons
// https://github.com/google/material-design-icons
// Match aria info for existing "back" button (role/tabindex given by Chrome/Polymer)
// DEV: We use `nodeName` to guarantee `sj-icon-button` or `paper-icon-button` on their respective pages
var nodeName = navOpenEl.nodeName;
var backEl = document.createElement(nodeName);
backEl.setAttribute('aria-label', 'Back');
backEl.setAttribute('icon', 'arrow-back');
backEl.setAttribute('id', 'gme-back-button');
var forwardEl = document.createElement(nodeName);
forwardEl.setAttribute('aria-label', 'Forward');
forwardEl.setAttribute('icon', 'arrow-forward');
forwardEl.setAttribute('id', 'gme-forward-button');
// Apply one-off styles to repair positioning and padding
// DEV: Taken from CSS styles on hidden "back" button
var cssFixes = [
'align-self: center;',
'min-width: 24px;'
].join('');
backEl.style.cssText = cssFixes;
forwardEl.style.cssText = cssFixes;
// Determine the current size of the menu button
// 40px -> 40
var navOpenElWidthStr = window.getComputedStyle(navOpenEl).width;
var navOpenElWidthPx = parseInt(navOpenElWidthStr.replace(/px$/, ''), 10);
// Increase the `min-width` for our leftNavContainer
// 226px -> 226 -> 306px
var leftNavContainerMinWidthStr = window.getComputedStyle(leftNavContainer).minWidth;
var leftNavContainerMinWidthPx = parseInt(leftNavContainerMinWidthStr.replace(/px$/, ''), 10);
leftNavContainer.style.minWidth = (leftNavContainerMinWidthPx + (navOpenElWidthPx * 2)) + 'px';
// Strip away `min-width` from breadcrumbs as it leads to issues
// See: "New releases"
// calc(100% - 80px) -> N/A
var breadcrumbsEl = leftNavContainer.querySelector('#material-breadcrumbs');
if (breadcrumbsEl) {
breadcrumbsEl.style.minWidth = '0px';
}
// Attach event listeners
backEl.addEventListener('click', function onBackClick () {
window.history.back();
});
forwardEl.addEventListener('click', function onBackClick () {
window.history.forward();
});
// When our page changes, update enabled/disabled navigation
var updateNavigation = function () {
if (webContents.canGoBack()) {
// DEV: Google Music automatically sets `aria-disabled` as well
backEl.removeAttribute('disabled');
} else {
backEl.setAttribute('disabled', true);
}
if (webContents.canGoForward()) {
forwardEl.removeAttribute('disabled');
} else {
forwardEl.setAttribute('disabled', true);
}
};
window.addEventListener('hashchange', updateNavigation);
// Update navigation immediately
// DEV: On initial page load/reload, we won't have disabled our arrows otherwise
// DEV: We might still show "back" as navigable on reload; it is, but the page will refresh
// This is caused by the page not being in push state (same behavior in Chrome)
updateNavigation();
// Expose our buttons adjacent to the hidden back element
navOpenEl.parentNode.insertBefore(forwardEl, navOpenEl.nextSibling);
navOpenEl.parentNode.insertBefore(backEl, forwardEl);
console.info('Added navigation buttons');
// Watch our music logo for hide/show events
// DEV: Logo is hidden on non-root page but its container isn't (yet)
// DEV: This occupies dead whitespace that shrinks our arrows otherwise
// https://github.com/twolfson/google-music-electron/issues/43
var musicLogoContainer = leftNavContainer.querySelector('.music-logo-link');
var musicLogoEl = musicLogoContainer.querySelector('.music-logo');
var updateLogoDisplay = function () {
var displayVal = window.getComputedStyle(musicLogoEl).display;
musicLogoContainer.style.display = displayVal === 'none' ? 'none' : 'initial';
};
var musicLogoObserver = new MutationObserver(function handleMutations (mutations) {
mutations.forEach(function handleMutation (mutation) {
var targetEl = mutation.target;
if (targetEl === musicLogoEl) {
updateLogoDisplay();
}
});
});
musicLogoObserver.observe(musicLogoEl, {
attributes: true
});
// Update music logo immediately as it starts as `display: none` on page refresh
updateLogoDisplay();
// Notify user of our changes
console.info('Added monitor for music logo visibility');
} else {
console.error('Failed to find navigation button');
}
});
// When we finish loading
// DEV: We must wait until the UI fully loads otherwise mutation observers won't bind
// DEV: Even with the `onload` event, we still could not have JS fully loaded so use a setTimeout loop
var loadAttempts = 0;
function handleLoad() {
// Try to bind GoogleMusic to the UI
var googleMusic;
try {
googleMusic = new GoogleMusic(window);
console.info('Successfully initialized `GoogleMusic`');
// If there was an error
} catch (err) {
// If this is our 60th attempt (i.e. 1 minute of failures), then throw the error
if (loadAttempts > 60) {
throw err;
// Otherwise, try again in 1 second
} else {
console.info('Failed to initialize `GoogleMusic`. Trying again in 1 second');
loadAttempts += 1;
return setTimeout(handleLoad, 1000);
}
}
// Forward events over `ipc`
var events = ['change:song', 'change:playback', 'change:playback-time'];
events.forEach(function bindForwardEvent (event) {
googleMusic.on(event, function forwardEvent (data) {
// Send same event with data (e.g. `change:song` `GoogleMusic.Playback.PLAYING`)
ipcRenderer.send(event, data);
});
});
// When we receive requests to control playback, run them
ipcRenderer.on('control:play-pause', function handlePlayPause (evt) {
googleMusic.playback.playPause();
});
ipcRenderer.on('control:next', function handleNext (evt) {
googleMusic.playback.forward();
});
ipcRenderer.on('control:previous', function handlePrevious (evt) {
googleMusic.playback.rewind();
});
}
window.addEventListener('load', handleLoad);
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
// Load in our dependencies
var ipcMain = require('electron').ipcMain;
var _ = require('underscore');
var GoogleMusic = require('google-music');
var MprisService = require('mpris-service');
// Define a function to set up mpris
exports.init = function (gme) {
// https://github.com/emersion/mpris-service/tree/a245730635b55c8eb06c605f4ece61e251f04e20
// https://github.com/emersion/mpris-service/blob/a245730635b55c8eb06c605f4ece61e251f04e20/index.js
// http://www.freedesktop.org/wiki/Specifications/mpris-spec/metadata/
// http://specifications.freedesktop.org/mpris-spec/latest/Player_Interface.html
var mpris = new MprisService({
name: 'google-music-electron'
});
mpris.on('next', gme.controlNext);
mpris.on('playpause', gme.controlPlayPause);
mpris.on('previous', gme.controlPrevious);
mpris.on('quit', gme.quitApplication);
mpris.on('raise', gme.onRaise);
// Currently position and seek aren't supported due to not receiving events in Cinnamon =(
// DEV: Stop isn't supported in Google Music (unless it's pause + set position 0)
// DEV: We choose to let the OS volume be controlled by MPRIS
var songInfo = {};
ipcMain.on('change:song', function handleSongChange (evt, _songInfo) {
mpris.metadata = songInfo = {
'mpris:artUrl': _songInfo.art,
// Convert milliseconds to microseconds (1s = 1e3ms = 1e6µs)
'mpris:length': _songInfo.duration * 1e3,
'xesam:album': _songInfo.album,
'xesam:artist': _songInfo.artist,
'xesam:title': _songInfo.title
};
});
ipcMain.on('change:playback-time', function handlePlaybackUpdate (evt, playbackInfo) {
// Convert milliseconds to microseconds (1s = 1e3ms = 1e6µs)
var newPosition = playbackInfo.current * 1e3;
var newTotal = playbackInfo.total * 1e3;
// If the total has been updated, update our songInfo cache
// DEV: This is due to `google-music.js` not always having an up to date length upon song change
if (songInfo['mpris:length'] !== newTotal) {
mpris.metadata = _.extend(songInfo, {
'mpris:length': newTotal
});
}
// If our position varies by 2 seconds, consider it a seek
// DEV: Seeked takes the delta (positive/negative depending on position)
var delta = newPosition - mpris.position;
if (Math.abs(delta) > 2e6) {
mpris.seeked(delta);
}
});
var playbackStrings = {};
playbackStrings[GoogleMusic.Playback.PLAYING] = 'Playing';
playbackStrings[GoogleMusic.Playback.PAUSED] = 'Paused';
playbackStrings[GoogleMusic.Playback.STOPPED] = 'Stopped';
ipcMain.on('change:playback', function handlePlaybackChange (evt, playbackState) {
mpris.playbackStatus = playbackStrings[playbackState];
});
};
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
// Load in our dependencies
var Menu = require('electron').Menu;
// Load in JSON for our menus (e.g. `./menus/linux.json`)
// https://github.com/atom/electron-starter/blob/96f6117b4c1f33c0881d504d655467fc049db433/src/browser/appmenu.coffee#L15
var menuTemplate = require('./menus/' + process.platform + '.json');
// Define a function to set up our application menu
exports.init = function (gme) {
// Parse and set up our menu
// https://github.com/atom/electron-starter/blob/96f6117b4c1f33c0881d504d655467fc049db433/src/browser/appmenu.coffee#L27-L41
function bindMenuItems(menuItems) {
menuItems.forEach(function bindMenuItemFn (menuItem) {
// If there is a role, continue
if (menuItem.role !== undefined) {
return;
}
// If there is a separator, continue
if (menuItem.type === 'separator') {
return;
}
// If there is a submenu, recurse it
if (menuItem.submenu) {
bindMenuItems(menuItem.submenu);
return;
}
// Otherwise, find the function for our command
var cmd = menuItem.command;
if (cmd === 'application:about') {
menuItem.click = gme.openAboutWindow;
} else if (cmd === 'application:show-settings') {
menuItem.click = gme.openConfigWindow;
} else if (cmd === 'application:quit') {
menuItem.click = gme.quitApplication;
} else if (cmd === 'window:reload') {
menuItem.click = gme.reloadWindow;
} else if (cmd === 'window:toggle-dev-tools') {
menuItem.click = gme.toggleDevTools;
} else if (cmd === 'window:toggle-full-screen') {
menuItem.click = gme.toggleFullScreen;
} else {
throw new Error('Could not find function for menu command "' + cmd + '" ' +
'under label "' + menuItem.label + '"');
}
});
}
bindMenuItems(menuTemplate.menu);
Menu.setApplicationMenu(Menu.buildFromTemplate(menuTemplate.menu));
};
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
// Load in our dependencies
var ipcRenderer = require('electron').ipcRenderer;
// When the DOM loads
window.addEventListener('DOMContentLoaded', function handleDOMLoad () {
// Request our config
var config = JSON.parse(ipcRenderer.sendSync('get-config-sync'));
var configInfo = JSON.parse(ipcRenderer.sendSync('get-config-info-sync'));
var configOverrides = JSON.parse(ipcRenderer.sendSync('get-config-overrides-sync'));
// Find and bind all known shortcuts
var $shortcutContainers = document.querySelectorAll('[data-save-shortcut]');
[].slice.call($shortcutContainers).forEach(function bindShortcut ($shortcutContainer) {
// Fill in our existing value
var shortcutName = $shortcutContainer.dataset.saveShortcut;
var $input = $shortcutContainer.querySelector('input[type=text]');
var $output = $shortcutContainer.querySelector('.output');
$input.value = config[shortcutName];
// Add change binding for our shortcut
$input.addEventListener('change', function handleShortcutChange (evt) {
// Register our new handler
var result = JSON.parse(ipcRenderer.sendSync('set-shortcut-sync', shortcutName, $input.value));
// Reset output state
$output.classList.remove('success');
$output.classList.remove('error');
// Provide feedback to user
if (result.success === false) {
$output.classList.add('error');
$output.textContent = 'Failed to bind shortcut "' + result.accelerator + '". ' +
'Keeping current shortcut "' + result.previousAccelerator + '".';
} else if (result.previousAccelerator === result.accelerator) {
$output.textContent = '';
} else {
$output.classList.add('success');
$output.textContent = 'Successfully moved from "' + result.previousAccelerator + '" ' +
'to "' + result.accelerator + '"!';
}
});
});
// Find and bind all known checkboxes
var $checkboxContainers = document.querySelectorAll('[data-save-checkbox]');
[].slice.call($checkboxContainers).forEach(function bindCheckbox ($checkboxContainer) {
// Fill in our existing value
var configItemName = $checkboxContainer.dataset.saveCheckbox;
var $input = $checkboxContainer.querySelector('input[type=checkbox]');
$input.checked = config[configItemName];
// If our config item is overridden, then disable it
if (configOverrides[configItemName] !== undefined) {
$checkboxContainer.classList.add('muted');
$input.disabled = true;
var $overriddenSpan = $checkboxContainer.querySelector('.overridden');
$overriddenSpan.classList.remove('hidden');
}
// If we have config information, fill out that content as well
if (configInfo[configItemName]) {
var $cliFlags = $checkboxContainer.querySelector('.cli-flags');
// e.g. Overridden by `-S, --skip-taskbar` in CLI
$cliFlags.textContent = configInfo[configItemName].flags;
var $description = $checkboxContainer.querySelector('.description');
// e.g. Skip showing the application in the taskbar
$description.textContent += configInfo[configItemName].description;
}
// If the container is mutually exclusive
var $unsetTarget;
if ($checkboxContainer.dataset.unsetCheckbox) {
$unsetTarget = document.querySelector($checkboxContainer.dataset.unsetCheckbox);
}
// Add change binding for our setting
$input.addEventListener('change', function handleCheckboxChange (evt) {
// Update our setting
var result = JSON.parse(ipcRenderer.sendSync('set-config-item-sync', configItemName, $input.checked));
// If there was an error, complain about it
if (result.success === false) {
window.alert('Attempted to set "' + configItemName + '" to "' + $input.checked + '" but failed. ' +
'Please see console output for more info.');
}
// If there is a target to unset and we are truthy, unset them and trigger a change
if ($unsetTarget && $input.checked) {
// http://youmightnotneedjquery.com/#trigger_native
$unsetTarget.checked = false;
var triggerEvt = document.createEvent('HTMLEvents');
triggerEvt.initEvent('change', true, false);
$unsetTarget.dispatchEvent(triggerEvt);
}
});
});
});
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
// Load in our dependencies
var _ = require('underscore');
var program = require('commander');
var installMpris = require('./install-mpris');
// Load in package info
var pkg = require('../package.json');
// Define our CLI parser
exports.parse = function (argv) {
// Handle CLI arguments
program
.version(pkg.version)
.option('-S, --skip-taskbar', 'Skip showing the application in the taskbar')
.option('--minimize-to-tray', 'Hide window to tray instead of minimizing')
.option('--hide-via-tray', 'Hide window to tray instead of minimizing (only for tray icon)')
.option('--allow-multiple-instances', 'Allow multiple instances of `google-music-electron` to run')
.option('--verbose', 'Display verbose log output in stdout')
.option('--debug-repl', 'Starts a `replify` server as `google-music-electron` for debugging')
// Allow unknown Chromium flags
// https://github.com/atom/electron/blob/v0.26.0/docs/api/chrome-command-line-switches.md
.allowUnknownOption();
// Specify keys that can be used by config if CLI isn't provided
var cliConfigKeys = ['skip-taskbar', 'minimize-to-tray', 'hide-via-tray', 'allow-multiple-instances'];
var cliInfo = _.object(cliConfigKeys.map(function generateCliInfo (key) {
return [key, _.findWhere(program.options, {long: '--' + key})];
}));
// Define our commands
program
.command('install-mpris')
.description('Install integration with MPRIS (Linux only)')
.action(function handleInstallMpris () {
// If we are in Electron, then raise an error
// https://github.com/twolfson/google-music-electron/issues/25#issuecomment-167368775
if (process.versions.electron) {
throw new Error('`install-mpris` command should be handled by `bin/google-music-electron.js`');
}
// Otherwise, run our installer
installMpris();
});
// Process our arguments
program.parse(argv);
// Amend cliConfigKeys and cliInfo as attributes
program._cliConfigKeys = cliConfigKeys;
program._cliInfo = cliInfo;
// Return our parsed info
return program;
};
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
// Load in our dependencies
var globalShortcut = require('electron').globalShortcut;
var ipcMain = require('electron').ipcMain;
// Define a function to bind shortcuts
exports.init = function (gme) {
// Set up media keys
var shortcutCallbacks = {
'playpause-shortcut': gme.controlPlayPause,
'next-shortcut': gme.controlNext,
'previous-shortcut': gme.controlPrevious
};
var playpauseShortcut = gme.config.get('playpause-shortcut');
if (playpauseShortcut && !globalShortcut.register(playpauseShortcut, shortcutCallbacks['playpause-shortcut'])) {
gme.logger.warn('Failed to bind `' + playpauseShortcut + '` shortcut');
}
var nextShortcut = gme.config.get('next-shortcut');
if (nextShortcut && !globalShortcut.register(nextShortcut, shortcutCallbacks['next-shortcut'])) {
gme.logger.warn('Failed to bind `' + nextShortcut + '` shortcut');
}
var previousShortcut = gme.config.get('previous-shortcut');
if (previousShortcut && !globalShortcut.register(previousShortcut, shortcutCallbacks['previous-shortcut'])) {
gme.logger.warn('Failed to bind `' + previousShortcut + '` shortcut');
}
// When a shortcut change is requested
ipcMain.on('set-shortcut-sync', function handleShortcutChange (evt, shortcutName, accelerator) {
// Prepare common set of results
var previousAccelerator = gme.config.get(shortcutName);
var retVal = {
success: false,
previousAccelerator: previousAccelerator,
accelerator: accelerator
};
// If the accelerator is the same as the current one, exit with success
if (previousAccelerator === accelerator) {
retVal.success = true;
evt.returnValue = JSON.stringify(retVal);
return;
}
// If the accelerator is nothing, then consider it a success
if (accelerator === '') {
retVal.success = true;
// Otherwise, attempt to register the new shortcut
} else {
gme.logger.info('Attempting to register shortcut "' + shortcutName + '" under "' + accelerator + '"');
try {
retVal.success = globalShortcut.register(accelerator, shortcutCallbacks[shortcutName]);
gme.logger.info('Registration successful');
} catch (err) {
// Catch any unrecognized accelerators
}
}
// If we were successful, remove the last binding and update our config
if (retVal.success) {
if (previousAccelerator) {
gme.logger.info('Unregistering shortcut "' +
shortcutName + '" from "' + previousAccelerator + '"');
globalShortcut.unregister(previousAccelerator);
}
gme.logger.info('Updating config...');
gme.config.set(shortcutName, accelerator);
// Otherwise, log failure
} else {
gme.logger.info('Registration failed. Couldn\'t register shortcut "' +
shortcutName + '" to "' + accelerator + '"');
}
// In any event, return with our success status
evt.returnValue = JSON.stringify(retVal);
});
};
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
// Load in our dependencies
var assert = require('assert');
var path = require('path');
var spawn = require('child_process').spawn;
// Load in package info
var pkg = require('../package.json');
// Define our installer
module.exports = function () {
// Resolve our mpris dependencies
var installArgs = Object.keys(pkg.mprisDependencies).map(function getInstallArg (dependencyName) {
return dependencyName + '@' + pkg.mprisDependencies[dependencyName];
});
// Run our install command
// DEV: We are inside of `io.js` of Electron which allows us to use the latest hotness
var child = spawn(
'npm',
// Use `--ignore-scripts` to avoid compiling against system's node
// Use `--save false` to prevent saving to `package.json` during development
['install', '--ignore-scripts', '--save', 'false'].concat(installArgs),
{cwd: path.join(__dirname, '..'), stdio: 'inherit'});
// If there is an error, throw it
child.on('error', function handleError (err) {
throw err;
});
// When the child exits
child.on('exit', function handleExit (code, signal) {
// Verify we received a zero exit code
assert.strictEqual(code, 0, 'Expected "npm install" exit code to be "0" but it was "' + code + '"');
// Rebuild electron with our new `mpris-service`
var electronRebuildCmd = require.resolve('electron-rebuild/lib/cli.js');
child = spawn(electronRebuildCmd, {cwd: path.join(__dirname, '..'), stdio: 'inherit'});
// If there is an error, throw it
child.on('error', function handleError (err) {
throw err;
});
// When the child exits
child.on('exit', function handleExit (code, signal) {
// Verify we received a zero exit code
assert.strictEqual(code, 0, 'Expected "electron-rebuild" exit code to be "0" but it was "' + code + '"');
// Log our success and exit
console.log('MPRIS integration successfully installed! ' +
'Please start `google-music-electron` to see it in action!');
process.exit();
});
});
};
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
// Load in our dependencies
var app = require('electron').app;
var BrowserWindow = require('electron').BrowserWindow;
var monogamous = require('monogamous');
var _ = require('underscore');
var replify = require('replify');
var assets = require('./assets');
var appMenu = require('./app-menu');
var appTray = require('./app-tray');
var Config = require('./config');
var getLogger = require('./logger');
var shortcuts = require('./shortcuts');
var mpris;
try {
mpris = require('./mpris');
} catch (err) {
// Optionally allow `mpris` to be installed
}
// Set our User Agent to trick Google auth
app.userAgentFallback = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:73.0) Gecko/20100101 Firefox/73.0';
// Load in package info and process our CLI
var pkg = require('../package.json');
var program = require('./cli-parser').parse(process.argv);
// Generate a logger
var logger = getLogger({verbose: program.verbose});
// Log our CLI arguments
logger.debug('CLI arguments received', {argv: process.argv});
// When all Windows are closed
app.on('window-all-closed', function handleWindowsClosed () {
// If we are not on OSX, exit
// DEV: OSX requires users to quit via the menu/cmd+q
if (process.platform !== 'darwin') {
logger.debug('All windows closed. Exiting application');
app.quit();
} else {
logger.debug('All windows closed but not exiting because OSX');
}
});
// Generate a config based on our CLI arguments
// DEV: We need to build cliConfig off of options since we are using `camelCase` from `commander`
// https://github.com/tj/commander.js/blob/v2.9.0/index.js#L1046-L1050
function camelcase(flag) {
return flag.split('-').reduce(function (str, word) {
return str + word[0].toUpperCase() + word.slice(1);
});
}
var cliConfig = _.object(program._cliConfigKeys.map(function getCliValue (dashCaseKey) {
var camelCaseKey = camelcase(dashCaseKey);
return [dashCaseKey, program[camelCaseKey]];
}));
logger.debug('CLI options overriding config', cliConfig);
var config = new Config(cliConfig, program._cliInfo);
logger.debug('Generated starting config options', config.getAll());
// Define helpers for controlling/sending messages to our window
// https://github.com/atom/electron-starter/blob/96f6117b4c1f33c0881d504d655467fc049db433/src/browser/application.coffee#L87-L104
// DEV: We are choosing to dodge classes to avoid `.bind` calls
// DEV: This must be in the top level scope, otherwise our window gets GC'd
var gme = {
browserWindow: null,
config: config,
controlPlayPause: function () {
if (gme.browserWindow && gme.browserWindow.webContents) {
logger.debug('Sending `control:play-pause` to browser window');
gme.browserWindow.webContents.send('control:play-pause');
} else {
logger.debug('`control:play-pause` requested but couldn\'t find browser window');
}
},
controlNext: function () {
if (gme.browserWindow && gme.browserWindow.webContents) {
logger.debug('Sending `control:next` to browser window');
gme.browserWindow.webContents.send('control:next');
} else {
logger.debug('`control:next` requested but couldn\'t find browser window');
}
},
controlPrevious: function () {
if (gme.browserWindow && gme.browserWindow.webContents) {
logger.debug('Sending `control:previous` to browser window');
gme.browserWindow.webContents.send('control:previous');
} else {
logger.debug('`control:previous` requested but couldn\'t find browser window');
}
},
logger: logger,
openAboutWindow: function () {
logger.debug('Showing `about` window for `google-music-electron`');
var info = [
// https://github.com/corysimmons/typographic/blob/2.9.3/scss/typographic.scss#L34
'<div style="text-align: center; font-family: \'Helvetica Neue\', \'Helvetica\', \'Arial\', \'sans-serif\'">',
'<h1>google-music-electron</h1>',
'<p>',
'Version: ' + pkg.version,
'<br/>',
'Electron version: ' + process.versions.electron,
'<br/>',
'Node.js version: ' + process.versions.node,
'<br/>',
'Chromium version: ' + process.versions.chrome,
'</p>',
'</div>'
].join('');
// DEV: aboutWindow will be garbage collected automatically
var aboutWindow = new BrowserWindow({
height: 180,
icon: assets['icon-32'],
width: 400
});
aboutWindow.loadURL('data:text/html,' + info);
},
openConfigWindow: function () {
logger.debug('Showing `config` window for `google-music-electron`');
// DEV: configWindow will be garbage collected automatically
var configWindow = new BrowserWindow({
height: 440,
icon: assets['icon-32'],
width: 620
});
configWindow.loadURL('file://' + __dirname + '/views/config.html');
},
quitApplication: function () {
logger.debug('Exiting `google-music-electron`');
app.quit();
},
reloadWindow: function () {
logger.debug('Reloading focused browser window');
BrowserWindow.getFocusedWindow().reload();
},
showMinimizedWindow: function () {
// DEV: Focus is necessary when there is no taskbar and we have lost focus for the app
gme.browserWindow.restore();
gme.browserWindow.focus();
},
showInvisibleWindow: function () {
gme.browserWindow.show();
},
toggleDevTools: function () {
logger.debug('Toggling developer tools in focused browser window');
BrowserWindow.getFocusedWindow().toggleDevTools();
},
toggleFullScreen: function () {
var focusedWindow = BrowserWindow.getFocusedWindow();
// Move to other full screen state (e.g. true -> false)
var wasFullScreen = focusedWindow.isFullScreen();
var toggledFullScreen = !wasFullScreen;
logger.debug('Toggling focused browser window full screen', {
wasFullScreen: wasFullScreen,
toggledFullScreen: toggledFullScreen
});
focusedWindow.setFullScreen(toggledFullScreen);
},
toggleMinimize: function () {
if (gme.browserWindow) {
var isMinimized = gme.browserWindow.isMinimized();
logger.debug('Toggling browser window minimization', {
isMinimized: isMinimized
});
if (isMinimized) {
gme.showMinimizedWindow();
} else {
gme.browserWindow.minimize();
}
} else {
logger.debug('Browser window minimization toggling requested but browser window was not found');
}
},
toggleVisibility: function () {
if (gme.browserWindow) {
var isVisible = gme.browserWindow.isVisible();
logger.debug('Toggling browser window visibility', {
isVisible: isVisible
});
if (isVisible) {
gme.browserWindow.hide();
} else {
gme.showInvisibleWindow();
}
} else {
logger.debug('Browser window visibility toggling requested but browser window was not found');
}
}
};
// Assign tray click behavior
gme.onTrayClick = (config.get('hide-via-tray') || config.get('minimize-to-tray')) ?
gme.toggleVisibility : gme.toggleMinimize;
gme.onRaise = (config.get('hide-via-tray') || config.get('minimize-to-tray')) ?
gme.showInvisibleWindow : gme.showMinimizedWindow;
// Define our launch handler
function launchGme() {
// Create our browser window for Google Music
var windowInfo = config.get('window-info') || {};
var windowOpts = {
height: windowInfo.height || 920,
icon: assets['icon-32'],
skipTaskbar: config.get('skip-taskbar'),
// Load in our Google Music bindings on the page
webPreferences: {
preload: __dirname + '/browser.js'
},
width: windowInfo.width || 1024,
x: windowInfo.x || null,
y: windowInfo.y || null
};
logger.info('App ready. Opening Google Music window', {
options: windowOpts,
processVersions: process.versions,
version: pkg.version
});
gme.browserWindow = new BrowserWindow(windowOpts);
gme.browserWindow.loadURL('https://play.google.com/music/listen');
// If hiding to tray was requested, trigger a visibility toggle when the window is minimized
if (config.get('minimize-to-tray')) {
gme.browserWindow.on('minimize', gme.toggleVisibility);
}
// Save the window position after moving
function saveWindowInfo() {
config.set('window-info', gme.browserWindow.getBounds());
}
gme.browserWindow.on('move', _.debounce(function handleWindowMove () {
logger.debug('Browser window moved, saving window info in config.');
saveWindowInfo();
}, 250));
// Save the window size after resizing
gme.browserWindow.on('resize', _.debounce(function handleWindowResize () {
logger.debug('Browser window resized, saving window info in config.');
saveWindowInfo();
}, 250));
// When our window is closed, clean up the reference to our window
gme.browserWindow.on('closed', function handleWindowClose () {
logger.debug('Browser window closed, garbage collecting `browserWindow`');
gme.browserWindow = null;
});
// Save browser window context to replify
// http://dshaw.github.io/2012-10-nodedublin/#/
if (program.debugRepl) {
var replServer = replify('google-music-electron', null, {gme: gme});
replServer.on('listening', function handleReplServerListen () {
var socketPath = replServer.address();
logger.info('Debug repl opened at "%s". This should be accessible via `npm run debug-repl`', socketPath);
});
}
// Set up our application menu, tray, and shortcuts
appMenu.init(gme);
appTray.init(gme);
shortcuts.init(gme);
if (mpris) {
mpris.init(gme);
}
}
// If we are only allowing single instances
var booter;
if (!config.get('allow-multiple-instances')) {
// Start up/connect to a monogamous server (detects other instances)
booter = monogamous({sock: pkg.name});
// If we are the first instance, start up gme
booter.on('boot', launchGme);
// Otherwise, focus it
booter.on('reboot', gme.onRaise);
// If we encounter an error, log it and start anyway
booter.on('error', function handleError (err) {
logger.error('Error while starting/connecting to monogamous server', err);
logger.info('Ignoring monogamous error, starting google-music-electron');
launchGme();
});
}
// When Electron is done loading
app.on('ready', function handleReady () {
// If we have a booter, invoke it
if (booter) {
booter.boot();
// Otherwise, launch immediately
} else {
launchGme();
}
});
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
// Load in our dependencies
var Tray = require('electron').Tray;
var ipcMain = require('electron').ipcMain;
var Menu = require('electron').Menu;
var MenuItem = require('electron').MenuItem;
var GoogleMusic = require('google-music');
var assets = require('./assets');
// Define a truncation utility for tooltip
function truncateStr(str, len) {
// If the string is over the length, then truncate it
// DEV: We go 1 under length so we have room for ellipses
if (str.length > len) {
return str.slice(0, len - 2) + '…';
}
// Otherwise, return the string
return str;
}
// Define a function to set up our tray icon
exports.init = function (gme) {
// Set up our tray
var trayMenu = new Menu();
trayMenu.append(new MenuItem({
label: 'Show/hide window',
click: gme.onTrayClick
}));
trayMenu.append(new MenuItem({
type: 'separator'
}));
trayMenu.append(new MenuItem({
label: 'Play/Pause',
click: gme.controlPlayPause
}));
trayMenu.append(new MenuItem({
label: 'Next',
click: gme.controlNext
}));
trayMenu.append(new MenuItem({
label: 'Previous',
click: gme.controlPrevious
}));
trayMenu.append(new MenuItem({
type: 'separator'
}));
trayMenu.append(new MenuItem({
label: 'Quit',
click: gme.quitApplication
}));
var tray = new Tray(assets['icon-32']);
tray.setContextMenu(trayMenu);
// When our tray is clicked, toggle visibility of the window
tray.on('click', gme.onTrayClick);
// When the song changes, update our tooltip
ipcMain.on('change:song', function handleSongChange (evt, songInfo) {
gme.logger.debug('Song has changed. Updating tray tooltip', {
songInfo: songInfo
});
// We have a max length of 127 characters on Windows
// so divvy up 47, 31, 47 (with 2 characters for line breaks)
// https://github.com/twolfson/google-music-electron/issues/24
var infoStr = [
truncateStr('Title: ' + songInfo.title, 47),
truncateStr('Artist: ' + songInfo.artist, 31),
truncateStr('Album: ' + songInfo.album, 47)
].join('\n');
tray.setToolTip(infoStr);
});
// When the playback state changes, update the icon
ipcMain.on('change:playback', function handlePlaybackChange (evt, playbackState) {
// Determine which icon to display based on state
// By default, render the clean icon (stopped state)
gme.logger.debug('Playback state has changed. Updating tray icon', {
playbackState: playbackState
});
var icon = assets['icon-32'];
if (playbackState === GoogleMusic.Playback.PLAYING) {
icon = assets['icon-playing-32'];
} else if (playbackState === GoogleMusic.Playback.PAUSED) {
icon = assets['icon-paused-32'];
}
// Update the icon
tray.setImage(icon);
});
};
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
module.exports = {
'icon-32': __dirname + '/icon-32.png',
'icon-paused-32': __dirname + '/icon-paused-32.png',
'icon-playing-32': __dirname + '/icon-playing-32.png'
};
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
<!doctype html>
<html>
<head>
<title>google-music-electron config</title>
<style type="text/css">
/* TODO: Use common CSS file with `about` */
/* https://github.com/corysimmons/typographic/blob/2.9.3/scss/typographic.scss#L34 */
body {
font-family: 'Helvetica Neue', 'Helvetica', 'Arial', 'sans-serif';
}
.hidden {
display: none;
}
.muted {
opacity: 0.8;
}
.error {
color: red;
}
.success {
color: limegreen;
}
</style>
<script src="../config-browser.js"></script>
</head>
<body>
<h1>google-music-electron config</h1>
<p><em>Shortcut examples: mediaplaypause, ctrl+shift+p, alt+p</em></p>
<p data-save-shortcut="playpause-shortcut">
<label>
<span>Play/Pause shortcut:</span>
<br />
<input type="text" placeholder="Enter a shortcut" />
<br />
<span class="output"></span>
</label>
</p>
<p data-save-shortcut="next-shortcut">
<label>
<span>Next shortcut:</span>
<br />
<input id="next-shortcut" name="next-shortcut" type="text" placeholder="Enter a shortcut" />
<br />
<span class="output"></span>
</label>
</p>
<p data-save-shortcut="previous-shortcut">
<label>
<span>Previous shortcut:</span>
<br />
<input type="text" placeholder="Enter a shortcut" />
<br />
<span class="output"></span>
</label>
</p>
<p data-save-checkbox="skip-taskbar">
<label>
<span>Skip taskbar<span class="hidden overridden"> (overridden)</span>:</span>
<input type="checkbox" />
</label>
<span> Requires restart (</span>
<code class="cli-flags"></code>
<span>)</span>
<br/>
<em class="description"></em>
</p>
<p data-save-checkbox="minimize-to-tray" data-unset-checkbox="#hide-via-tray">
<label>
<span>Minimize to tray<span class="hidden overridden"> (overridden)</span>:</span>
<input type="checkbox" id="minimize-to-tray" />
</label>
<span> Requires restart (</span>
<code class="cli-flags"></code>
<span>)</span>
<br/>
<em class="description"></em>
</p>
<p data-save-checkbox="hide-via-tray" data-unset-checkbox="#minimize-to-tray">
<label>
<span>Hide via tray<span class="hidden overridden"> (overridden)</span>:</span>
<input type="checkbox" id="hide-via-tray" />
</label>
<span> Requires restart (</span>
<code class="cli-flags"></code>
<span>)</span>
<br/>
<em class="description"></em>
</p>
<p data-save-checkbox="allow-multiple-instances">
<label>
<span>Allow multiple instances<span class="hidden overridden"> (overridden)</span>:</span>
<input type="checkbox" id="allow-multiple-instances" />
</label>
<span> Requires restart (</span>
<code class="cli-flags"></code>
<span>)</span>
<br/>
<em class="description"></em>
</p>
</body>
</html>
| {
"repo_name": "twolfson/google-music-electron",
"stars": "266",
"repo_language": "JavaScript",
"file_name": "config.html",
"mime_type": "text/html"
} |
<html>
<head>
<link rel="stylesheet" href="/steam_resource/css/2.css">
<link rel="stylesheet" href="/steam_resource/css/39.css">
<link rel="stylesheet" href="/steam_resource/css/library.css">
<script src="/static/library.js"></script>
</head>
<body>
<h2>Hello World</h2>
</body>
</html> | {
"repo_name": "SteamDeckHomebrew/Plugin-Template",
"stars": "29",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
class Plugin:
    # A normal method. It can be called from JavaScript using call_plugin_function("method_1", argument1, argument2)
    async def method_1(self, *args):
        pass

    # A normal method. It can be called from JavaScript using call_plugin_function("method_2", argument1, argument2)
    async def method_2(self, *args):
        pass

    # Asyncio-compatible long-running code, executed in a task when the plugin is loaded
    async def _main(self):
        pass
| {
"repo_name": "SteamDeckHomebrew/Plugin-Template",
"stars": "29",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Plugin-Template
This is the template plugin for developing plugins for the [SteamOS Plugin Loader](https://github.com/SteamDeckHomebrew/PluginLoader).
## Usage
1. Click on the green `Use this template` button to create a new repository for your plugin
2. Rename the `plugin_template.py` file to something unique
3. Add your code to the plugin's python and html files.
4. To use it, simply `git clone` the repo into the `/home/deck/homebrew/plugins` folder on your Steam Deck
## License
This Template Project is under The Unlicense. You may license your own plugin under whatever license you prefer.
| {
"repo_name": "SteamDeckHomebrew/Plugin-Template",
"stars": "29",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |