# Mathematics > Fundamentals > Filling Jars
# Perform the multiple queries on the list. And print average. - 20 Points
#
# https://www.hackerrank.com/challenges/filling-jars/problem
#
# in this challenge there is no need to compute the content of each jar:
# a single global sum is enough
# anything else is too slow
n, m = map(int, input().split())
sum_k = 0
for _ in range(m):
a, b, k = map(int, input().split())
sum_k += k * (b - a + 1)
print(sum_k // n)
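# Illustrative cross-check (not part of the submitted solution): the naive
# simulation below fills every jar explicitly. It gives the same average on
# small inputs but is O(n*m) and would be far too slow for the real limits.
def average_bruteforce(n, queries):
    jars = [0] * n
    for a, b, k in queries:
        for j in range(a - 1, b):
            jars[j] += k
    return sum(jars) // n
# e.g. average_bruteforce(5, [(1, 2, 10), (3, 5, 10)]) == 10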
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Reverse Game
#
# https://www.hackerrank.com/challenges/reverse-game/problem
t = int(input())
for _ in range(t):
n, k = map(int, input().split())
    # only the final position of k needs to be computed
for i in range(n):
if k < i:
break
k = n - 1 - k + i
print(k)
""" méthode triviale (imite le jeu): trop longue
a = [i for i in range(n)]
for i in range(n):
b= list(reversed(a[i:]))
a = a[:i] + b
print(a.index(k))
""" | {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# https://www.hackerrank.com/domains/mathematics?filters%5Bsubdomains%5D%5B%5D=fundamentals
add_hackerrank_py(handshake.py)
add_hackerrank_py(connecting-towns.py)
add_hackerrank_py(russian-peasant-exponentiation.py)
add_hackerrank_py(maximum-draws.py)
add_hackerrank_py(special-multiple.py)
add_hackerrank_py(lowest-triangle.py)
add_hackerrank_py(harry-potter-and-the-floating-rocks.py)
add_hackerrank_py(sherlock-and-divisors.py)
add_hackerrank(find-point find-point.cpp)
add_hackerrank_py(summing-the-n-series.py)
add_hackerrank_py(best-divisor.py)
add_hackerrank_py(reverse-game.py)
add_hackerrank_py(restaurant.py)
add_hackerrank_py(diwali-lights.py)
add_hackerrank_py(sherlock-and-permutations.py)
add_hackerrank_py(die-hard-3.py)
add_hackerrank(special-multiple special-multiple.c)
add_hackerrank_py(sherlock-and-moving-tiles.py)
add_hackerrank_py(most-distant.py)
add_hackerrank_py(strange-grid.py)
add_hackerrank_py(matrix-tracing.py)
add_hackerrank_py(leonardo-and-prime.py)
add_hackerrank_py(game-with-cells.py)
add_hackerrank(even-odd-query even-odd-query.cpp)
add_hackerrank_py(even-odd-query.py)
add_hackerrank_py(p1-paper-cutting.py)
add_hackerrank_py(halloween-party.py)
add_hackerrank_py(is-fibo.py)
add_hackerrank_py(filling-jars.py)
add_hackerrank_py(k-candy-store.py)
add_hackerrank_py(bus-station.py)
add_hackerrank_py(jim-and-the-jokes.py)
add_hackerrank_py(possible-path.py)
add_hackerrank_py(mutual-recurrences.py)
add_hackerrank(mutual-recurrences mutual-recurrences.cpp)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Die Hard 3
# Help Bruce and Samuel save the city by solving their puzzle
#
# https://www.hackerrank.com/challenges/die-hard-3/problem
#
from math import gcd
for _ in range(int(input())):
a, b, c = map(int, input().split())
print("YES" if c <= max(a, b) and c % gcd(a, b) == 0 else "NO")
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
// Mathematics > Fundamentals > Mutual Recurrences
// Compute terms in a mutual recurrence.
//
// https://www.hackerrank.com/challenges/mutual-recurrences/problem
// https://www.hackerrank.com/contests/infinitum14/challenges/mutual-recurrences
// challenge id: 15898
//
#include <bits/stdc++.h>
using namespace std;
#pragma GCC diagnostic ignored "-Wsign-conversion"
constexpr long MOD = 1e9;
class Vecteur
{
vector<long> a;
size_t n;
public:
Vecteur(size_t n)
{
this->n = n;
a.resize(n, 0L);
}
size_t dim() const
{
return n;
}
long operator()(size_t x) const
{
assert(x < n);
return a[x];
}
long &operator()(size_t x)
{
assert(x < n);
return a[x];
}
};
class Matrice
{
vector<long> a;
size_t n;
public:
Matrice(size_t n)
{
this->n = n;
a.resize(n * n, 0L);
}
static Matrice I(size_t n)
{
Matrice M(n);
for (size_t i = 0; i < n; ++i)
M(i, i) = 1;
return M;
}
long operator()(size_t x, size_t y) const
{
assert(x < n && y < n);
return a[x + y * n];
}
long &operator()(size_t x, size_t y)
{
assert(x < n && y < n);
return a[x + y * n];
}
Matrice &operator%=(long m)
{
for (auto &&i : a)
i %= m;
return *this;
}
Matrice &operator*=(const Matrice &B)
{
assert(n == B.n);
Matrice &A = *this;
Matrice C(n);
for (size_t x = 0; x < n; ++x)
{
for (size_t y = 0; y < n; ++y)
{
long s = 0;
for (size_t i = 0; i < n; ++i)
s += (A(i, y) * B(x, i)) % MOD;
C(x, y) = s % MOD;
}
}
a = C.a;
return *this;
}
Vecteur operator*(const Vecteur &V) const
{
assert(n == V.dim());
const Matrice &A = *this;
Vecteur C(n);
for (size_t y = 0; y < n; ++y)
{
long s = 0;
for (size_t i = 0; i < n; ++i)
s += (A(i, y) * V(i)) % MOD;
C(y) = s % MOD;
}
return C;
}
Matrice &operator^=(unsigned long k)
{
if (k == 1)
return *this;
Matrice &M = *this;
Matrice P = Matrice::I(n);
while (k != 0)
{
if (k % 2 == 1)
P *= M;
M *= M;
k /= 2;
}
a = P.a;
return *this;
}
void print() const
{
const Matrice &A = *this;
for (size_t y = 0; y < n; ++y)
{
cout << ((y == 0) ? '[' : ' ');
for (size_t x = 0; x < n; ++x)
{
cout << A(x, y);
if (x == n - 1)
{
if (y == n - 1)
cout << "]\n";
else
cout << "\n";
}
else
{
cout << " ";
}
}
}
}
};
long fibonacci_easy(long a, long b, unsigned long n)
{
Matrice A(2);
A(0, 0) = 1;
A(0, 1) = 1;
A(1, 0) = 1;
A(1, 1) = 0;
A ^= n;
Vecteur F(2);
F(0) = b;
F(1) = a;
F = A * F;
return F(1);
}
// Complete the solve function below.
void solve(int a, int b, int c, int d, int e, int f, int g, int h, unsigned long n)
{
Matrice A(20 + 4);
A(a - 1, 0) += 1; // x(n) = x(n - a)
A(10 + b - 1, 0) += 1; // + y(n - b)
A(10 + c - 1, 0) += 1; // + y(n - c)
A(20 + 2, 0) += 1; // + n * d ** n
A(10 + e - 1, 10) += 1; // y(n) = y(n - e)
A(f - 1, 10) += 1; // + x(n - f)
A(g - 1, 10) += 1; // + x(n - g)
A(20, 10) += 1; // + n * h ** n
for (size_t i = 1; i < 10; ++i)
{
A(i - 1, i) += 1;
A(10 + i - 1, 10 + i) += 1;
}
A(20, 20) = h;
A(20 + 1, 20) = h;
A(20 + 1, 20 + 1) = h;
A(20 + 2, 20 + 2) = d;
A(20 + 3, 20 + 2) = d;
A(20 + 3, 20 + 3) = d;
//A.print();
Vecteur F(20 + 4);
for (size_t i = 0; i < 20 + 4; ++i)
F(i) = 1;
F(20) = 0;
F(20 + 2) = 0;
A ^= (n + 1);
F = A * F;
cout << F(0) << " " << F(10) << endl;
}
int main()
{
// cerr << fibonacci_easy(2, 4, 9) << endl; // [55, 34, 34, 21] 178
int t;
cin >> t;
cin.ignore(numeric_limits<streamsize>::max(), '\n');
while (t--)
{
int a, b, c, d, e, f, g, h;
unsigned long n;
cin >> a >> b >> c >> d >> e >> f >> g >> h >> n;
solve(a, b, c, d, e, f, g, h, n);
}
return 0;
}
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Weekly Challenges - Week 5 > Even Odd Query
# Is the number odd or even?
#
# https://www.hackerrank.com/contests/w5/challenges/even-odd-query
#
input()
A = list(map(int, input().split()))
for _ in range(int(input())):
x, y = map(int, input().split())
if A[x - 1] % 2 == 1 or x > y or (x < y and A[x] == 0):
print("Odd")
else:
print("Even")
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Cutting Paper Squares
# Determine the number of cuts needed to cut a paper into $1 \times 1$ squares.
#
# https://www.hackerrank.com/challenges/p1-paper-cutting/problem
#
import sys
def solve(n, m):
    # each straight cut splits exactly one piece into two, so going from one
    # whole sheet to n * m unit squares always takes n * m - 1 cuts
    return n * m - 1
n, m = input().strip().split(' ')
n, m = [int(n), int(m)]
result = solve(n, m)
print(result)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Connecting Towns
# Find the number of ways in which one can travel from one town to another.
#
# https://www.hackerrank.com/challenges/connecting-towns/problem
#
# Python handles big integers natively; it is enough to read the statement carefully
for _ in range(int(input())):
n = int(input())
r = list(map(int, input().split()))
p = 1
for i in r:
p = (p * i) % 1234567
print(p)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Restaurant
# Help Martha with her interview at Subway
#
# https://www.hackerrank.com/challenges/restaurant/problem
#
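# The largest square that tiles an l x b slice exactly has side g = gcd(l, b),
# so the number of squares is (l // g) * (b // g) = l * b // g**2.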
from math import gcd
for _ in range(int(input())):
l, b = map(int, input().split())
if l == b:
print(1)
else:
print((l * b) // gcd(l, b) ** 2)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Leonardo's Prime Factors
# Find the maximum number of prime factors for any number in the inclusive range from 1 to n.
#
# https://www.hackerrank.com/challenges/leonardo-and-prime/problem
#
class bitset:
"""
    Bitset implementation with optimized storage
"""
def __init__(self, taille):
self.taille = taille
self.bits = bytearray((taille + 7) // 8)
def set(self, pos, val):
""" poitionne le bit `pos` à `val` """
assert pos >= 0 and pos < self.taille
if val is True:
self.bits[pos // 8] = self.bits[pos // 8] | (1 << (pos % 8))
else:
self.bits[pos // 8] = self.bits[pos // 8] & ~(1 << (pos % 8))
def is_set(self, pos):
""" lit l'état du bit `pos` """
assert pos >= 0 and pos < self.taille
return (self.bits[pos // 8] & (1 << (pos % 8))) != 0
def __getitem__(self, key):
return self.is_set(key)
def __setitem__(self, key, value):
return self.set(key, value)
class Crible:
""" Crible d'Eratosthène optimisé """
def __init__(self, n_max):
self.n_max = n_max
self.maximum = n = (n_max - 3) // 2 + 1
self.crible = crible = bitset(n)
self._premiers = None
self._phi = None
i = 0
while i < n:
while i < n:
if not crible.is_set(i):
break
i += 1
k = 3
while True:
j = k * i + 3 * (k - 1) // 2
if j >= n:
break
crible.set(j, True)
k += 2
i += 1
def liste(self):
if self._premiers is None:
premiers = [2]
for i in range(1, self.maximum + 1):
if not self.crible.is_set(i - 1):
premiers.append(2 * i + 1)
self._premiers = premiers
return self._premiers
primes = Crible(1000).liste()
for _ in range(int(input())):
n = int(input())
r = 1
nb = 0
for p in primes:
if p * r > n:
break
r *= p
nb += 1
print(nb) | {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Fundamentals > Possible Path
# Help Adam in reaching a particular point.
#
# https://www.hackerrank.com/challenges/possible-path/problem
#
from math import gcd
for _ in range(int(input())):
a, b, x, y = map(int, input().split())
print(["NO", "YES"][gcd(a, b) == gcd(x, y)])
    # the wording of the statement has to be read very carefully...
    # (a,b) changes at every move
    # print(["NO", "YES"][(x - a) % b == 0 and (y - b) % a == 0])
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Matrix Tracing
# How many ways can you trace a given matrix? - 30 Points
#
# https://www.hackerrank.com/challenges/matrix-tracing/problem
#
MOD = 1000000007
def fact(n):
""" calcule n! mod MOD """
f = 1
for i in range(2, n + 1):
f = (f * i) % MOD
return f
def pow(a, b):
""" calcule a^b mod MOD """
a %= MOD
    # fast exponentiation
p = 1
while b > 0:
if b % 2 == 1:
p = (p * a) % MOD
a = (a * a) % MOD
b //= 2
return p
for _ in range(int(input())):
m, n = map(int, input().split())
    # the answer is C(m+n-2, m-1), which equals C(m+n-2, n-1)
    # i.e. (m+n-2)! / (m-1)! / (n-1)!
    # given the bounds on m and n, computing this directly is impossible,
    # so we use Fermat's little theorem:
    # a^(p-1) = 1 mod p if p is prime and gcd(a,p)=1
    # => a^(p-2) = a^(-1) mod p
    # => a^(-1) = a^(p-2) mod p
    # the result modulo MOD is therefore:
    # (m+n-2)! * [ (m-1)! * (n-1)! ] ^ (MOD-2)
r = fact(n + m - 2) * pow(fact(n - 1) * fact(m - 1), MOD - 2)
print(r % MOD)
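# Illustrative cross-check (not part of the submitted solution): on small
# grids the closed form C(m+n-2, m-1) can be verified by explicitly
# enumerating the monotone paths.
def count_paths_bruteforce(m, n):
    # number of right/down paths from the top-left to the bottom-right corner
    if m == 1 or n == 1:
        return 1
    return count_paths_bruteforce(m - 1, n) + count_paths_bruteforce(m, n - 1)
# e.g. count_paths_bruteforce(2, 3) == 3, matching the formula above for m=2, n=3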
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
// https://www.hackerrank.com/challenges/even-odd-query/problem
#include <iostream>
#include <vector>
using namespace std;
#pragma GCC diagnostic ignored "-Wsign-conversion"
int main()
{
vector<int> a;
int n, q, x, y, t;
cin >> n;
while (n--)
{
cin >> t;
a.push_back(t);
}
cin >> q;
while (q--)
{
cin >> x >> y;
if (a[x - 1] % 2 == 1 || x > y || (x < y && a[x] == 0))
cout << "Odd" << endl;
else
cout << "Even" << endl;
}
return 0;
} | {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Sherlock and Moving Tiles
# Help Sherlock in identifying the overlapping area.
#
# https://www.hackerrank.com/challenges/sherlock-and-moving-tiles/problem
#
L, S1, S2 = map(int, input().split())
if S1 > S2:
S1, S2 = S2, S1
for _ in range(int(input())):
q = int(input())
    # along the line y = x:
    # upper corner of the slow square = S1 * t + √2 * L
    # lower corner of the fast square = S2 * t
    # diagonal of a square of area q: √(2 * q)
    # the overlap area equals q when S1 * t + √2 * L - S2 * t = √(2 * q),
    # hence t = (√(2 * q) - √2 * L) / (S1 - S2)
t = ((q * 2) ** 0.5 - L * 2 ** 0.5) / (S1 - S2)
if t <= 0: t = 0
print("%.20f" % t)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Best Divisor
# Find the best divisor of the number!
#
# https://www.hackerrank.com/challenges/best-divisor/problem
#
def diviseurs(n):
div = [1]
i = 2
while i * i <= n:
q, r = divmod(n, i)
if r == 0:
div.append(i)
if i != q:
div.append(q)
i += 1
if n != 1:
div.append(n)
return div
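# pick the divisor with the largest digit sum; storing -d in the key makes
# ties resolve to the smallest divisor, and the leading minus recovers d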
n = int(input())
r = -max((sum(int(i) for i in str(d)), -d) for d in diviseurs(n))[1]
print(r)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Fundamentals > K Candy Store
# In how many ways can you select K candies out of N different types of candies when each of the N candies are infinite in number?
#
# https://www.hackerrank.com/challenges/k-candy-store/problem
#
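# Stars and bars: choosing k candies from n types with repetition allowed
# gives C(n + k - 1, k) = C(n + k - 1, n - 1) selections.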
from math import factorial
def C(n, k):
return factorial(n) // factorial(n - k) // factorial(k)
t = int(input())
for _ in range(t):
n = int(input())
k = int(input())
print(C(n + k - 1, n - 1) % 10 ** 9)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Maximum Draws
# Count the minimum Draws
#
# https://www.hackerrank.com/challenges/maximum-draws/problem
#
for _ in range(int(input())):
n = int(input())
    # in the worst case we draw one sock from every single pair;
    # the next draw then necessarily completes a matching pair
print(n + 1)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
// Find the Point
//
// https://www.hackerrank.com/challenges/find-point/problem
#include <iostream>
using namespace std;
int main()
{
int n;
cin >> n;
for (auto i = 0; i < n; ++i)
{
int px, py, qx, qy;
cin >> px >> py >> qx >> qy;
cout << (qx + qx - px) << " " << (qy + qy - py) << endl;
}
return 0;
}
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
### [Mathematics](https://www.hackerrank.com/domains/mathematics)
Without mathematics, there's nothing you can do. Everything around you is mathematics. Everything around you is numbers.
#### [Fundamentals](https://www.hackerrank.com/domains/mathematics/fundamentals)
Name | Preview | Code | Difficulty
---- | ------- | ---- | ----------
[Find the Point](https://www.hackerrank.com/challenges/find-point)|Given two points P and Q, output the symmetric point of point P about Q.|[C++](find-point.cpp)|Easy
[Maximum Draws](https://www.hackerrank.com/challenges/maximum-draws)|Count the minimum Draws|[Python](maximum-draws.py)|Easy
[Handshake](https://www.hackerrank.com/challenges/handshake)|Count the number of Handshakes in a board meeting.|[Python](handshake.py)|Easy
[Minimum Height Triangle](https://www.hackerrank.com/challenges/lowest-triangle)|Find the smallest height of a triangle preserving the given constraints.|[Python](lowest-triangle.py)|Easy
[Army Game](https://www.hackerrank.com/challenges/game-with-cells)|Find the minimum number of supply packages Luke must drop to supply all of his army bases.|[Python](game-with-cells.py)|Easy
[Leonardo's Prime Factors](https://www.hackerrank.com/challenges/leonardo-and-prime)|Find the maximum number of prime factors for any number in the inclusive range from 1 to n.|[Python](leonardo-and-prime.py)|Easy
[Connecting Towns](https://www.hackerrank.com/challenges/connecting-towns)|Find the number of ways in which one can travel from one town to another.|[Python](connecting-towns.py)|Easy
[Cutting Paper Squares](https://www.hackerrank.com/challenges/p1-paper-cutting)|Determine the number of cuts needed to cut a paper into $1 \times 1$ squares.|[Python](p1-paper-cutting.py)|Easy
[Summing the N series ](https://www.hackerrank.com/challenges/summing-the-n-series)|Sum the N series.|[Python](summing-the-n-series.py)|Medium
[Sherlock and Moving Tiles](https://www.hackerrank.com/challenges/sherlock-and-moving-tiles)|Help Sherlock in identifying the overlapping area.|[Python](sherlock-and-moving-tiles.py)|Easy
[Best Divisor](https://www.hackerrank.com/challenges/best-divisor)|Find the best divisor of the number!|[Python](best-divisor.py)|Easy
[Restaurant](https://www.hackerrank.com/challenges/restaurant)|Help Martha with her interview at Subway|[Python](restaurant.py)|Easy
[Reverse Game](https://www.hackerrank.com/challenges/reverse-game)|Print the final position of the index.|[Python](reverse-game.py)|Easy
[Strange Grid Again](https://www.hackerrank.com/challenges/strange-grid)|find the integer in c-th column in r-th row of the grid.|[Python](strange-grid.py)|Easy
[Diwali Lights](https://www.hackerrank.com/challenges/diwali-lights)|Number of ways to light the room|[Python](diwali-lights.py)|Medium
[Sherlock and Divisors](https://www.hackerrank.com/challenges/sherlock-and-divisors)|Help Sherlock in Counting Divisors.|[Python](sherlock-and-divisors.py)|Easy
[Sherlock and Permutations](https://www.hackerrank.com/challenges/sherlock-and-permutations)|Help Sherlock in counting permutations.|[Python](sherlock-and-permutations.py)|Hard
[Even Odd Query](https://www.hackerrank.com/challenges/even-odd-query)|Is the number odd or even?|[C++](even-odd-query.cpp) [Python](even-odd-query.py)|Hard
[Special Multiple](https://www.hackerrank.com/challenges/special-multiple)|Can you find the least positive integer that is made of only 0s and 9s? - 30 Points|[C](special-multiple.c) [Python](special-multiple.py)|Medium
[Matrix Tracing](https://www.hackerrank.com/challenges/matrix-tracing)|How many ways can you trace a given matrix? - 30 Points|[Python](matrix-tracing.py)|Hard
[Die Hard 3](https://www.hackerrank.com/challenges/die-hard-3)|Help Bruce and Samuel save the city by solving their puzzle|[Python](die-hard-3.py)|Medium
[Halloween party](https://www.hackerrank.com/challenges/halloween-party)|Help Alex give Silvia the maximum number of chocolates|[Python](halloween-party.py)|Easy
[Filling Jars](https://www.hackerrank.com/challenges/filling-jars)|Perform the multiple queries on the list. And print average. - 20 Points|[Python](filling-jars.py)|Easy
[Is Fibo](https://www.hackerrank.com/challenges/is-fibo)|Find out if a number is a Fibonacci Number or not.|[Python](is-fibo.py)|Medium
[K Candy Store](https://www.hackerrank.com/challenges/k-candy-store)|In how many ways can you select K candies out of N different types of candies when each of the N candies are infinite in number?|[Python](k-candy-store.py)|Medium
[Sumar and the Floating Rocks](https://www.hackerrank.com/challenges/harry-potter-and-the-floating-rocks)|Count the number of integral rocks between Harry and Hermione|[Python](harry-potter-and-the-floating-rocks.py)|Easy
[Russian Peasant Exponentiation](https://www.hackerrank.com/challenges/russian-peasant-exponentiation)|The only correct way to raise numbers in powers.|[Python](russian-peasant-exponentiation.py)|Easy
[Bus Station](https://www.hackerrank.com/challenges/bus-station)|Find all suitable bus sizes|[Python](bus-station.py)|Medium
[Most Distant](https://www.hackerrank.com/challenges/most-distant)|Measure the gap between the two most distant coordinates.|[Python](most-distant.py)|Easy
[Jim and the Jokes](https://www.hackerrank.com/challenges/jim-and-the-jokes)|Jim is running out of jokes! Help him finding new jokes.|[Python](jim-and-the-jokes.py)|Medium
[Possible Path](https://www.hackerrank.com/challenges/possible-path)|Help Adam in reaching a particular point.|[Python](possible-path.py)|Easy
[Mutual Recurrences](https://www.hackerrank.com/challenges/mutual-recurrences)|Compute terms in a mutual recurrence.|[C++](mutual-recurrences.cpp) [Python](mutual-recurrences.py)|Medium
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Strange Grid Again
# find the integer in c-th column in r-th row of the grid.
#
# https://www.hackerrank.com/challenges/strange-grid/problem
#
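# The closed form below treats the grid as repeating every two rows: the first
# row of each pair holds the even values 0,2,4,6,8, the second the odd values
# 1,3,5,7,9, and every pair of rows adds 10 (r and c made 0-based first).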
r, c = map(int, input().split())
r -= 1
c -= 1
print((r // 2) * 10 + c * 2 + r % 2)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
// Mathematics > Combinatorics > nCr table
// Help Jim calculating nCr values
//
// https://www.hackerrank.com/challenges/ncr-table/problem
// challenge id: 1232
//
#include <bits/stdc++.h>
using namespace std;
constexpr unsigned MOD = 1000000000;
constexpr unsigned MAX = 1001;
unsigned C[MAX][MAX];
int main()
{
    // Calculate value of Binomial Coefficient in bottom up manner
//https://www.geeksforgeeks.org/dynamic-programming-set-9-binomial-coefficient/
memset(C, 0, sizeof(C));
for (unsigned i = 0; i < MAX; i++)
{
for (unsigned j = 0; j <= i; j++)
{
// Base Cases
if (j == 0 || j == i)
C[i][j] = 1;
            // Calculate value using previously stored values
else
C[i][j] = (C[i-1][j-1] + C[i-1][j]) % MOD;
}
}
int t;
cin >> t;
while (t--)
{
unsigned n;
cin >> n;
for (unsigned i = 0; i <= n; ++i)
cout << C[n][i] << " ";
cout << endl;
}
return 0;
}
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# nCr table
# Help Jim calculating nCr values
#
# https://www.hackerrank.com/challenges/ncr-table/problem
#
from math import factorial
def C(n, r):
return factorial(n) // factorial(r) // factorial(n - r)
for i in range(int(input())):
n = int(input())
print(' '.join(str(C(n, r) % 1000000000) for r in range(n + 1)))
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
add_hackerrank_py(merge-list.py)
add_hackerrank_py(a-chocolate-fiesta.py)
add_hackerrank_py(ncr-table.py)
add_hackerrank_py(building-a-list.py)
add_hackerrank_py(coinage.py)
add_hackerrank(ncr-table ncr-table.cpp)
add_hackerrank_py(sherlock-and-pairs.py)
add_hackerrank_py(picking-cards.py)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Combinatorics > Sherlock and Pairs
# Count the number of pairs that satisfy a given constraint.
#
# https://www.hackerrank.com/challenges/sherlock-and-pairs/problem
# https://www.hackerrank.com/contests/101feb14/challenges/sherlock-and-pairs
# challenge id: 1932
#
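# A value occurring c times yields c * (c - 1) ordered pairs (i, j), i != j,
# with A[i] == A[j]; summing this over the value counts gives the answer.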
for _ in range(int(input())):
input()
a = list(map(int, input().split()))
s = {}
for i in a:
if i in s:
s[i] += 1
else:
s[i] = 1
print(sum(i * (i - 1) for i in s.values()))
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Merge List
# Help Shashank in merging two list.
#
# https://www.hackerrank.com/challenges/merge-list/problem
#
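# Both lists keep their relative order, so a merged arrangement is entirely
# determined by choosing which n of the n + m positions receive the first
# list: C(n + m, n) possibilities.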
from math import factorial
def C(n, r):
return factorial(n) // factorial(r) // factorial(n - r)
for i in range(int(input())):
n, m = map(int, input().split())
print(C(n + m, n) % 1000000007)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Combinatorics > Coinage
# Find the number of ways to pay a given amount, given a set of coins with prescribed denominations.
#
# https://www.hackerrank.com/challenges/coinage/problem
#
import sys
def subcase(t, c1, c2, n1, n2):
minC2 = (t - c1 * min(n1, t // c1) +c2-1) // c2
maxC2 = min(t // c2, n2)
result = max(maxC2 - minC2 + 1, 0)
return result
def coinage(n, _, q):
a, b, c, d = q
result = 0
for i in range(0, n // 5 + 1):
result += subcase(5 * i, 5, 10, c, d) * subcase(n - 5 * i, 1, 2, a, b)
return result
for _ in range(int(input())):
N = int(input())
quantite = list(map(int, input().split()))
print(N, quantite, file=sys.stderr)
print(coinage(N, 0, quantite))
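# Illustrative brute force (assumption: far too slow for the real constraints,
# shown only to make the pairing of the {1,2} and {5,10} sub-counts concrete).
def coinage_bruteforce(n, qty):
    a, b, c, d = qty  # available 1-, 2-, 5- and 10-unit coins
    ways = 0
    for z in range(min(d, n // 10) + 1):
        for y in range(min(c, (n - 10 * z) // 5) + 1):
            for x in range(min(b, (n - 10 * z - 5 * y) // 2) + 1):
                rest = n - 10 * z - 5 * y - 2 * x  # paid with 1-unit coins
                if rest <= a:
                    ways += 1
    return ways
# for small inputs, coinage(N, 0, qty) == coinage_bruteforce(N, qty)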
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# A Chocolate Fiesta
# Find the number of even subsets in the given set of numbers.
#
# https://www.hackerrank.com/challenges/a-chocolate-fiesta/problem
# https://www.hackerrank.com/contests/infinitum-mar14/challenges/a-chocolate-fiesta
#
input()
a = list(map(int, input().split()))
n = len(a)
impairs = sum(1 for i in a if i % 2 == 1)
if impairs == 0:
    # no odd number: every non-empty combination of the n numbers has an even sum
resultat = 2 ** n - 1
else:
    # an even count of odd numbers must be added,
    # which amounts to having one odd number less,
    # i.e. one element less overall
resultat = 2 ** (n - 1) - 1
print(resultat % 1000000007)
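# Illustrative cross-check (not part of the submitted solution): enumerate the
# non-empty subsets directly; exponential, so small inputs only.
from itertools import combinations
def even_subsets_bruteforce(values):
    return sum(1 for r in range(1, len(values) + 1)
               for subset in combinations(values, r) if sum(subset) % 2 == 0)
# e.g. even_subsets_bruteforce([2, 4]) == 3 and even_subsets_bruteforce([1, 2]) == 1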
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
### [Mathematics](https://www.hackerrank.com/domains/mathematics)
Without mathematics, there's nothing you can do. Everything around you is mathematics. Everything around you is numbers.
#### [Combinatorics](https://www.hackerrank.com/domains/mathematics/combinatorics)
Name | Preview | Code | Difficulty
---- | ------- | ---- | ----------
[nCr table](https://www.hackerrank.com/challenges/ncr-table)|Help Jim calculating nCr values|[C++](ncr-table.cpp) [Python](ncr-table.py)|Medium
[Coinage](https://www.hackerrank.com/challenges/coinage)|Find the number of ways to pay a given amount, given a set of coins with prescribed denominations.|[Python](coinage.py)|Medium
[Building a List](https://www.hackerrank.com/challenges/building-a-list)|Generate all possible combinations of a string|[Python](building-a-list.py)|Medium
[Merge List](https://www.hackerrank.com/challenges/merge-list)|Help Shashank in merging two list.|[Python](merge-list.py)|Medium
[A Chocolate Fiesta](https://www.hackerrank.com/challenges/a-chocolate-fiesta)|Find the number of even subsets in the given set of numbers.|[Python](a-chocolate-fiesta.py)|Easy
[Sherlock and Pairs](https://www.hackerrank.com/challenges/sherlock-and-pairs)|Count the number of pairs that satisfy a given constraint.|[Python](sherlock-and-pairs.py)|Medium
[Picking Cards](https://www.hackerrank.com/challenges/picking-cards)|How many ways can you pick up all the cards from a table?|[Python](picking-cards.py)|Easy
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Combinatorics > Picking Cards
# How many ways can you pick up all the cards from a table?
#
# https://www.hackerrank.com/challenges/picking-cards/problem
# challenge id: 69
#
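# Reading of the formula below: process the cards in decreasing order of value
# c; such a card may be placed in any of the turns with 0-based index >= c,
# i.e. n - c turns, minus the i turns already taken by the cards handled
# before it. The answer is the product of these counts.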
MOD = 1000000007
for _ in range(int(input())):
n = int(input())
cards = list(map(int, input().split()))
cards.sort(reverse=True)
res = 1
for i, c in enumerate(cards):
val = n - c
if val <= 0:
print(0)
break
res = (res * (val - i)) % MOD
else:
print(res)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Building a List
# Generate all possible combinations of a string
#
# https://www.hackerrank.com/challenges/building-a-list/problem
#
import itertools
def combo(s):
n = len(s)
def lex():
for i in range(1, 2 ** n):
j = i
w = ''
k = 0
while j != 0:
j, r = divmod(j, 2)
if r:
w += s[k]
k += 1
yield w
for w in sorted(lex()):
print(w)
for _ in range(int(input())):
input()
s = input()
combo(s) | {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Probability > Binomial Distribution #2
# Problems based on basic statistical distributions.
#
# https://www.hackerrank.com/challenges/binomial-distribution-2/problem
# challenge id: 12840
#
from __future__ import print_function
from math import factorial
# b(x,n,p) = C(n,x) * p^x * (1-p)^(n-x)
# x: number of successes
# n: total number of trials
# p: probability of success of 1 trial
def b(n, x, p):
return factorial(n) // factorial(n - x) // factorial(x) * (p ** x) * (1 - p) ** (n - x)
boys, girls = 1.09, 1.00
p = boys / (boys + girls)
r = b(6, 3, p) + b(6, 4, p) + b(6, 5, p) + b(6, 6, p)
print("%.3f" % r)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Probability > Binomial Distribution #3
# Problems based on basic statistical distributions.
#
# https://www.hackerrank.com/challenges/binomial-distribution-3/problem
# challenge id: 12841
#
from __future__ import print_function
from math import factorial
def b(n, x, p):
return factorial(n) / factorial(n - x) / factorial(x) * (p ** x) * (1 - p) ** (n - x)
p = 0.12 # proba piston is rejected
n = 10 # number of trials
r = b(n, 0, p) + b(n, 1, p) + b(n, 2, p) # 0, 1 or 2 rejections
print("%.3f" % r)
r = sum(b(n, i, p) for i in range(2, n + 1)) # at least 2 rejections (2 to n)
print("%.3f" % r)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Probability > Random number generator
# what's the probability that x + y is less than C?
#
# https://www.hackerrank.com/challenges/random-number-generator/problem
#
# the probability is the ratio between the area of the intersection of the
# right isosceles triangle (0,c,c) with the rectangle (0,0,a,b)
# and the area of the rectangle
from fractions import Fraction
for _ in range(int(input())):
a, b, c = map(int, input().split())
if a + b <= c:
        # the rectangle is entirely contained in the triangle
print("1/1")
else:
if a > b:
a, b = b, a
if c < a:
            # the triangle is entirely contained in the rectangle
# ___________
# | |
# a|\ |
# |_\________|
# c b
p = Fraction(c * c, 2 * a * b)
elif a <= c < b:
            # intersection: a trapezoid
p = Fraction((2 * c - a) * a, 2 * a * b)
# _______
# | \
# a| \
# |________\
# c
else:
            # intersection: complement of the isosceles triangle with side a+b-c
# _________
# | \
# a| |
# |_________|
# b
p = 1 - Fraction((a + b - c) ** 2, 2 * a * b)
print(p)
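# Rough empirical check (illustrative only, assuming x ~ U(0,a) and y ~ U(0,b)
# as in the statement): a Monte Carlo estimate should approach the exact
# fraction printed above.
import random
def monte_carlo(a, b, c, trials=100000):
    hits = sum(1 for _ in range(trials)
               if random.uniform(0, a) + random.uniform(0, b) < c)
    return hits / trials
# e.g. monte_carlo(1, 1, 1) is close to 0.5 == Fraction(1, 2)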
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Probability > Day 4: Normal Distribution #2
# Problems based on basic statistical distributions.
#
# https://www.hackerrank.com/challenges/normal-distribution-2/problem
# https://www.hackerrank.com/contests/intro-to-statistics/challenges/normal-distribution-2
# challenge id: 12845
#
from __future__ import print_function
import math
def phi(x, m, s):
""" Cumulative Probability """
return 1. / 2 * (1 + math.erf((x - m) / s / math.sqrt(2)))
m, s = 20., 2.
print("{:.3f}".format(phi(19.5, m, s)))
print("{:.3f}".format(phi(22, m, s) - phi(20, m, s)))
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
add_hackerrank_py(bday-gift.py)
add_hackerrank_py(random-number-generator.py)
add_hackerrank_py(normal-distribution-1.py)
add_hackerrank_py(normal-distribution-2.py)
add_hackerrank_py(normal-distribution-3.py)
add_hackerrank_py(binomial-distribution-1.py)
add_hackerrank_py(binomial-distribution-2.py)
add_hackerrank_py(binomial-distribution-3.py)
add_hackerrank_py(sherlock-and-probability.py)
add_hackerrank_py(extremely-dangerous-virus.py)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Probability > Binomial Distribution #1
# Problems based on basic statistical distributions.
#
# https://www.hackerrank.com/challenges/binomial-distribution-1/problem
# challenge id: 12839
#
from __future__ import print_function
from math import factorial
# b(x,n,p) = C(n,x) * p^x * (1-p)^(n-x)
# x: number of successes
# n: total number of trials
# p: probability of success of 1 trial
def b(n, x, p):
return factorial(n) / factorial(n - x) / factorial(x) * (p ** x) * (1 - p) ** (n - x)
print("%.3f" % sum(b(4, i, 4. / 5) for i in range(3, 5)))
print("%.3f" % sum(b(4, i, 4. / 5) for i in range(0, 2)))
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Probability > Sherlock and Probability
# Help Sherlock in finding the probability.
#
# https://www.hackerrank.com/challenges/sherlock-and-probability/problem
# https://www.hackerrank.com/contests/infinitum-jul14/challenges/sherlock-and-probability
# challenge id: 2534
#
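# counter[] is a prefix sum of the 1-bits; for every index i holding a 1, the
# window [i-k, i+k] (clamped to the array) counts the 1-bits j with |i-j| <= k,
# so p is the number of favourable ordered index pairs out of the n*n possible.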
from fractions import Fraction
for _ in range(int(input())):
n, k = map(int, input().split())
bits = input()
counter = [0] * (n + 1)
for i, bit in enumerate(bits):
counter[i + 1] = counter[i] + int(bit)
p = 0
for i, bit in enumerate(bits):
if bit == "1":
p += counter[min(n, i + k + 1)] - counter[max(0, i - k)]
r = Fraction(p, n * n)
print("{}/{}".format(r.numerator, r.denominator))
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Probability > Day 4: Normal Distribution #1
# Problems based on basic statistical distributions.
#
# https://www.hackerrank.com/challenges/normal-distribution-1/problem
# https://www.hackerrank.com/contests/intro-to-statistics/challenges/normal-distribution-1
# challenge id: 12844
#
from __future__ import print_function
import math
def phi(x, m, s):
""" Cumulative Probability """
return 1. / 2 * (1 + math.erf((x - m) / s / math.sqrt(2)))
m, s = 30., 4.
print("{:.3f}".format(phi(40, m, s)))
print("{:.3f}".format(1 - phi(21, m, s)))
print("{:.3f}".format(phi(35, m, s) - phi(30, m, s)))
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Probability > B'day Gift
# What's the price Isaac has to pay for HackerPhone
#
# https://www.hackerrank.com/challenges/bday-gift/problem
# https://www.hackerrank.com/contests/nov13/challenges/bday-gift
#
# each ball has a probability 0.5 of being picked, so the expected price is half the total
n = int(input())
e = sum(int(input()) for _ in range(n))
print(e / 2)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Probability > Extremely Dangerous Virus
# Estimate how large the virus will grow.
#
# https://www.hackerrank.com/challenges/extremely-dangerous-virus/problem
# https://www.hackerrank.com/contests/rookierank/challenges/extremely-dangerous-virus
# challenge id: 22940
#
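# the code prints ((a + b) // 2) ** t modulo 1e9+7: the average growth factor
# per second raised to the t-th power, via the built-in three-argument pow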
a, b, t = map(int, input().split())
print(pow((a + b) // 2, t, 1000000007))
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Probability > Normal Distribution #3
# Problems based on basic statistical distributions.
#
# https://www.hackerrank.com/challenges/normal-distribution-3/problem
# challenge id: 12846
#
from __future__ import print_function
try:
from scipy.stats import norm
f = norm(70, 10).cdf
print("{:.2f}".format(100 - f(80) * 100))
print("{:.2f}".format(100 - f(60) * 100))
print("{:.2f}".format(f(60) * 100))
except ImportError:
import math
def phi(x, m, s):
""" Cumulative Probability """
return 1. / 2 * (1 + math.erf((x - m) / s / math.sqrt(2)))
print("{:.2f}".format(100 - phi(80, 70, 10) * 100))
print("{:.2f}".format(100 - phi(60, 70, 10) * 100))
print("{:.2f}".format(phi(60, 70, 10) * 100))
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
### [Mathematics](https://www.hackerrank.com/domains/mathematics)
Without mathematics, there's nothing you can do. Everything around you is mathematics. Everything around you is numbers.
#### [Probability](https://www.hackerrank.com/domains/mathematics/probability)
Name | Preview | Code | Difficulty
---- | ------- | ---- | ----------
[Random number generator](https://www.hackerrank.com/challenges/random-number-generator)|what's the probability that x + y is less than C?|[Python](random-number-generator.py)|Easy
[Sherlock and Probability](https://www.hackerrank.com/challenges/sherlock-and-probability)|Help Sherlock in finding the probability.|[Python](sherlock-and-probability.py)|Hard
[Day 4: Normal Distribution #1](https://www.hackerrank.com/challenges/normal-distribution-1)|Problems based on basic statistical distributions.|[Python](normal-distribution-1.py)|Medium
[Day 4: Normal Distribution #2](https://www.hackerrank.com/challenges/normal-distribution-2)|Problems based on basic statistical distributions.|[Python](normal-distribution-2.py)|Medium
[Normal Distribution #3](https://www.hackerrank.com/challenges/normal-distribution-3)|Problems based on basic statistical distributions.|[Python](normal-distribution-3.py)|Hard
[B'day Gift](https://www.hackerrank.com/challenges/bday-gift)|What's the price Isaac has to pay for HackerPhone|[Python](bday-gift.py)|Easy
[Extremely Dangerous Virus](https://www.hackerrank.com/challenges/extremely-dangerous-virus)|Estimate how large the virus will grow.|[Python](extremely-dangerous-virus.py)|Medium
[Binomial Distribution #1](https://www.hackerrank.com/challenges/binomial-distribution-1)|Problems based on basic statistical distributions.|[Python](binomial-distribution-1.py)|Medium
[Binomial Distribution #2](https://www.hackerrank.com/challenges/binomial-distribution-2)|Problems based on basic statistical distributions.|[Python](binomial-distribution-2.py)|Hard
[Binomial Distribution #3](https://www.hackerrank.com/challenges/binomial-distribution-3)|Problems based on basic statistical distributions.|[Python](binomial-distribution-3.py)|Hard
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Algebra > Pythagorean Triple
# Find the Pythagorean triple for the given side a.
#
# https://www.hackerrank.com/challenges/pythagorean-triple/problem
#
def pythagoreanTriple(a):
if a == 4:
        # special case for the recursion when a is even
return 4, 3, 5
if a % 2 == 1:
        # a odd: we can use the identities given in the statement
k = (a - 1) // 2
m = k + 1
n = k
assert a == m * m - n * n
b = 2 * m * n
c = m * m + n * n
return (a, b, c)
else:
        # a even: keep "simplifying" down to at most 4 (otherwise we would end up with b=0 and c=a)
e = 1
while a % 2 == 0 and a > 4:
e *= 2
a //= 2
return map(lambda x: x * e, pythagoreanTriple(a))
a = int(input().strip())
a, b, c = pythagoreanTriple(a)
print(a, b, c)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Algebra > Little Gaurav and Sequence
# Help Gaurav in calculating last digit of a sequence.
#
# https://www.hackerrank.com/challenges/little-gaurav-and-sequence/problem
#
def S_brutforce(n):
s = 0
i = 0
while 2 ** i <= n:
for j in range(0, n + 1):
s += 2 ** (2 ** i + 2 * j)
i += 1
return s
def S(n):
# s1 = 0
# i = 0
# while 2 ** i <= n:
# s1 += 2 ** (2 ** i)
# i += 1
i = n
k = 0
while i != 0:
i //= 2
k += 1
if k == 1:
s1 = 2
else:
s1 = [6, 2, 8, 4, 0][(k - 2) % 5]
# s2 = (4 ** (n + 1) - 1) // (4 - 1)
    # s2 = 1 mod 10 if n is even
    # s2 = 5 mod 10 if n is odd
s2 = 1 if n % 2 == 0 else 5
return (s1 * s2) % 10
def test():
for n in range(1, 100):
s = S(n)
assert (s % 10) == (S_brutforce(n) % 10)
print("{:4} {}".format(n, s % 10))
for _ in range(int(input())):
n = int(input())
if n % 2 == 1:
print(0)
else:
s = S(n)
print(s)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Algebra > Triangle Numbers
# Given a triangle numbers where each number is equal to the sum of the three top numbers, find the first even number in a row.
#
# https://www.hackerrank.com/challenges/triangle-numbers/problem
#
def v(row, col):
# print("v", row, col)
if abs(col) == row:
return 1
elif abs(col) > row:
return 0
else:
return v(row - 1, col) + v(row-1, col - 1) + v(row-1, col + 1)
def test():
N = 14
for n in range(1, N):
s = " " * (N - n)
for i in range(-n , n + 1):
x = v(n, i)
s += "{:5d} ".format(x)
#if x % 2 == 0: break
print("{:4} {}".format(n + 1, s[:100]))
for _ in range(int(input())):
n = int(input())
    if n <= 2:
        print(-1) # no even number in the first two rows
    elif n % 2 == 1:
        print(2) # odd row: position 2
    elif n % 4 == 0:
        print(3) # row divisible by 4: position 3
    else:
        print(4) # row even but not divisible by 4: position 4
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Algebra > Number Groups
# Find the sum of consecutive odd number groups.
#
# https://www.hackerrank.com/challenges/number-groups/problem
#
#!/bin/python3
import sys
def sumOfGroup(k):
# Return the sum of the elements of the k'th group.
    # first term of the group: n
    # k=1: 1 = 2*0+1
    # k=2: 3,5 = 2*1+1 = 2*k*(k-1)/2+1
    # k=3: 7,9,11 = 2*3+1 = 2*k*(k-1)/2+1
    # k=4: 13,15,17,19 = 2*6+1 = 2*k*(k-1)/2+1
    # n = k * (k - 1) + 1
    # sum of the k odd numbers starting at n: s
    # n + n+2 + n+4 + n+6 + ... + n+2(k-1)
    # k * n + 2*(0+1+2+...+k-1)
    # k * n + 2*(k * (k-1) / 2)
    # s = k * n + k * (k - 1)
    # which simplifies to:
return k ** 3
if __name__ == "__main__":
k = int(input().strip())
answer = sumOfGroup(k)
print(answer)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Algebra > Manasa and Sub-sequences
# Help Manasa in getting candies
#
# https://www.hackerrank.com/challenges/manasa-and-sub-sequences/problem
#
def manasa_simpliste(n):
d = []
for i in n:
d.append(int(i))
result = 0
for i in range(1, 2 ** len(d)):
x = 0
j = i
k = 0
while j != 0:
j, r = divmod(j, 2)
if r == 1:
x = x * 10 + d[k]
k += 1
result += x
return result
def powmod(x, k, MOD):
""" fast exponentiation x^k % MOD """
p = 1
if k == 0:
return p
if k == 1:
return x
while k != 0:
if k % 2 == 1:
p = (p * x) % MOD
x = (x * x) % MOD
k //= 2
return p
def manasa(n):
MOD = 1000000007
result = 0
m = len(n) - 1
q = 1
for c in n[::-1]:
c = int(c)
p = powmod(2, m, MOD)
x = (c * p) % MOD
result = (result + x * q) % MOD
m -= 1
q = (q * 11) % MOD
return result % MOD
assert manasa("111") == 147
assert manasa("123") == 177
print(manasa(input()))
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
add_hackerrank_py(combo-meal.py)
add_hackerrank_py(difference-and-product.py)
add_hackerrank_py(pythagorean-triple.py)
add_hackerrank_py(stepping-stones-game.py)
add_hackerrank_py(little-gaurav-and-sequence.py)
add_hackerrank_py(triangle-numbers.py)
add_hackerrank_py(easy-sum.py)
add_hackerrank_py(manasa-and-sub-sequences.py)
add_hackerrank_py(number-groups.py)
add_hackerrank_py(shashank-and-list.py)
add_hackerrank_py(tell-the-average.py)
add_hackerrank_py(wet-shark-and-42.py)
add_hackerrank_py(sherlock-and-square.py)
add_hackerrank_py(simple-one.py)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Algebra > Stepping Stones Game
# Can you tell Bob, if he should play Stepping Stones or not ?
#
# https://www.hackerrank.com/challenges/stepping-stones-game/problem
# https://www.hackerrank.com/contests/infinitum-aug14/challenges/stepping-stones-game
#
# we must check whether n is a triangular number,
# i.e. n = 1+2+...+x = x(x+1)/2 for some integer x
from math import sqrt
# n = x * (x + 1) / 2
# x^2 + x - 2 * n = 0
# x = (-1 ± sqrt(1 + 8n)) / 2
for _ in range(int(input())):
n = int(input())
d = 1 + 8 * n
r = int(sqrt(d))
if r ** 2 == d and (r - 1) % 2 == 0:
print("Go On Bob", (r - 1) // 2)
else:
print("Better Luck Next Time")
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Algebra > Wet Shark and 42
# Help Wet Shark escape the gods of 42.
#
# https://www.hackerrank.com/challenges/wet-shark-and-42/problem
# https://www.hackerrank.com/contests/infinitum9/challenges/wet-shark-and-42
#
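# Every 21st even number is a multiple of 42 (42 = 2 * 21), so each block of
# 21 even numbers contains exactly 20 valid ones; the closed form below maps n
# directly to the n-th even number that is not a multiple of 42.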
def distance(n):
return (n * 21 - 1) // 20 * 2
for _ in range(int(input())):
n = int(input())
print(distance(n) % 1000000007)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Algebra > Simple One
# Calculate the tan function of a given equation.
#
# https://www.hackerrank.com/challenges/simple-one/problem
# https://www.hackerrank.com/contests/infinitum14/challenges/simple-one
# challenge id: 9549
#
from fractions import Fraction
MOD = 1000000007
def egcd(b, a):
""" algortihme d'Euclide étendu: (g, x, y) tel que ax + by = g = gcd(a, b) """
# https://fr.wikipedia.org/wiki/Algorithme_d%27Euclide_étendu
x0, x1, y0, y1 = 1, 0, 0, 1
while a != 0:
q, b, a = b // a, a, b % a
x0, x1 = x1, x0 - q * x1
y0, y1 = y1, y0 - q * y1
return b, x0, y0
def modinv(a, m):
""" modular inverse avec Bachet-Bézout """
# https://fr.wikipedia.org/wiki/Théorème_de_Bachet-Bézout
_, x, _ = egcd(a, m)
return x % m
def ntan(a, k):
# tan(a+b) = (tan(a) + tan(b)) / (1 - tan(a) * tan(b))
# tan(2a) = 2 * tan(a) / (1 - tan(a)^2)
b = Fraction(0)
if k == 0:
return b
if k == 1:
return a
while k != 0:
if k % 2 == 1:
b = (a + b) / (1 - a * b)
b = Fraction(b.numerator % MOD, b.denominator % MOD)
a = 2 * a / (1 - a * a)
a = Fraction(a.numerator % MOD, a.denominator % MOD)
k //= 2
return b
for _ in range(int(input())):
p, q, n = map(int, input().split())
a = ntan(Fraction(p, q), n)
print((a.numerator * modinv(a.denominator, MOD)) % MOD)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Algebra > Sherlock and Square
# Help Sherlock in finding the total side lengths of squares.
#
# https://www.hackerrank.com/challenges/sherlock-and-square/problem
# https://www.hackerrank.com/contests/w11/challenges/sherlock-and-square
# challenge id: 4429
#
MOD = 1000000007
for _ in range(int(input())):
n = int(input())
print((pow(2, n + 1, MOD) + 2) % MOD)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Algebra > Tell the Average
# Tell me average of all list value.
#
# https://www.hackerrank.com/challenges/tell-the-average/problem
# https://www.hackerrank.com/contests/infinitum8/challenges/tell-the-average
#
# fortunately, the order of the elements of L does not change the result
# a + a*b + b = (a+1)(b+1) - 1
# by induction, S = ∏(Lᵢ+1) - 1
# another solution derived from the identity above:
# a = 1
# for b in L: a = (a * (b + 1)) % 1000000007
# print((a - 1) % 1000000007)
#
_, L = input(), list(map(int, input().split()))
a = 0
for b in L:
a = a + b + a * b
print(a % 1000000007)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Algebra > Shashank and List
# Help Shashank in Huge calculations.
#
# https://www.hackerrank.com/challenges/shashank-and-list/problem
# https://www.hackerrank.com/contests/infinitum-may14/challenges/shashank-and-list
#
MOD = 1000000007
def powmod(x, k, MOD):
""" fast exponentiation x^k % MOD """
p = 1
if k == 0:
return p
if k == 1:
return x
while k != 0:
if k % 2 == 1:
p = (p * x) % MOD
x = (x * x) % MOD
k //= 2
return p
n = int(input())
answer = 1
for i in map(int, input().split()):
answer *= powmod(2, i, MOD) + 1
answer %= MOD
print(answer - 1) | {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Algebra > Easy sum
# Find the mod sum
#
# https://www.hackerrank.com/challenges/easy-sum/problem
#
def s_brutforce(n, m):
x = 0
for i in range(1, n + 1):
x += i % m
return x
def s(n, m):
if m > n:
        # modulus > n: the result is simply the sum of i from 1 to n
return n * (n + 1) // 2
else:
        # otherwise:
        # it is the sum 0..m-1 repeated as many times as m fits into n,
        # plus the sum from 1 to whatever remains
q, r = divmod(n, m)
s = q * (m * (m - 1) // 2)
s += r * (r + 1) // 2
return s
for _ in range(int(input())):
n, m = map(int, input().split())
print(s(n, m)) | {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Algebra > Difference and Product
# Answer a question about Difference and Product
#
# https://www.hackerrank.com/challenges/difference-and-product/problem
#
from math import sqrt
for _ in range(int(input())):
d, p = map(int, input().split())
    # an absolute value cannot be negative
if d < 0:
print(0)
continue
nb = 0
# d = a - b
# p = a * b
# a = d + b
# p = (d + b) * b
# b * b + d * b - p = 0
# b = (-d +- sqrt(d * d + 4 * p)) / 2
D = d * d + 4 * p
    # negative discriminant: no solution
if D < 0:
print(0)
continue
b = int((-d - sqrt(D)) / 2)
a = d + b
if a * b == p:
nb += 1
    # a and b different: 2 solutions (a and b can be swapped)
if a != b:
nb += 1
    # zero discriminant: a single root; otherwise check the second root
if D != 0:
b = int((-d + sqrt(D)) / 2)
a = d + b
if a * b == p:
nb += 1
if a != b:
nb += 1
print(nb)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# Mathematics > Algebra > Combo Meal
# Find the profit that a fast-food chain earns at each purchase.
#
# https://www.hackerrank.com/challenges/combo-meal/problem
#
def profit(b, s, c):
# Return the fixed profit.
# b = b0 + profit
# s = s0 + profit
# c = b0 + s0 + profit
    # hence b + s - c = b0 + profit + s0 + profit - (b0 + s0 + profit)
# = b0 + profit + s0 + profit - b0 - s0 - profit
# = profit
return b + s - c
if __name__ == "__main__":
t = int(input().strip())
for a0 in range(t):
b, s, c = input().strip().split(' ')
b, s, c = [int(b), int(s), int(c)]
result = profit(b, s, c)
print(result)
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
### [Mathematics](https://www.hackerrank.com/domains/mathematics)
Without mathematics, there's nothing you can do. Everything around you is mathematics. Everything around you is numbers.
#### [Algebra](https://www.hackerrank.com/domains/mathematics/algebra)
Name | Preview | Code | Difficulty
---- | ------- | ---- | ----------
[Combo Meal](https://www.hackerrank.com/challenges/combo-meal)|Find the profit that a fast-food chain earns at each purchase.|[Python](combo-meal.py)|Easy
[Stepping Stones Game](https://www.hackerrank.com/challenges/stepping-stones-game)|Can you tell Bob, if he should play Stepping Stones or not ?|[Python](stepping-stones-game.py)|Medium
[Shashank and List](https://www.hackerrank.com/challenges/shashank-and-list)|Help Shashank in Huge calculations.|[Python](shashank-and-list.py)|Medium
[Triangle Numbers](https://www.hackerrank.com/challenges/triangle-numbers)|Given a triangle numbers where each number is equal to the sum of the three top numbers, find the first even number in a row.|[Python](triangle-numbers.py)|Medium
[Little Gaurav and Sequence](https://www.hackerrank.com/challenges/little-gaurav-and-sequence)|Help Gaurav in calculating last digit of a sequence.|[Python](little-gaurav-and-sequence.py)|Medium
[Easy sum](https://www.hackerrank.com/challenges/easy-sum)|Find the mod sum|[Python](easy-sum.py)|Hard
[Difference and Product](https://www.hackerrank.com/challenges/difference-and-product)|Answer a question about Difference and Product|[Python](difference-and-product.py)|Easy
[Pythagorean Triple](https://www.hackerrank.com/challenges/pythagorean-triple)|Find the Pythagorean triple for the given side a.|[Python](pythagorean-triple.py)|Easy
[Number Groups](https://www.hackerrank.com/challenges/number-groups)|Find the sum of consecutive odd number groups.|[Python](number-groups.py)|Easy
[Tell the Average](https://www.hackerrank.com/challenges/tell-the-average)|Tell me average of all list value.|[Python](tell-the-average.py)|Medium
[Wet Shark and 42](https://www.hackerrank.com/challenges/wet-shark-and-42)|Help Wet Shark escape the gods of 42.|[Python](wet-shark-and-42.py)|Easy
[Sherlock and Square](https://www.hackerrank.com/challenges/sherlock-and-square)|Help Sherlock in finding the total side lengths of squares.|[Python](sherlock-and-square.py)|Hard
[Manasa and Sub-sequences ](https://www.hackerrank.com/challenges/manasa-and-sub-sequences)|Help Manasa in getting candies|[Python](manasa-and-sub-sequences.py)|Medium
[Simple One](https://www.hackerrank.com/challenges/simple-one)|Calculate the tan function of a given equation.|[Python](simple-one.py)|Easy
| {
"repo_name": "rene-d/hackerrank",
"stars": "65",
"repo_language": "Python",
"file_name": "README.md",
"mime_type": "text/plain"
} |
# ino-hardware-package-list
A list of all known [Arduino](http://arduino.cc) hardware packages.
Hardware packages provide the [boards platform](https://arduino.github.io/arduino-cli/latest/platform-specification) (AKA "core") and toolchain needed to add support for a board to the Arduino development software (e.g., Arduino IDE).
The list can be viewed online [here](ino-hardware-package-list.tsv).
### Columns
- **Name**: The package index `packages[].platforms[].name` value, the platform.txt `name` property value, or an arbitrary name determined from looking at the repository content.
- **Vendor**: The package index `packages[].name` value or the name of the platform's vendor folder. The machine-friendly name of the package is `{vendor}:{architecture}`.
- **Architecture**: The package index `packages[].platforms[].architecture` value or the name of the platform's architecture folder. The machine-friendly name of the package is `{vendor}:{architecture}`.
- **Repository**: The website where the platform files are stored.
- **Boards Manager URL**: The URL for the [package index](https://arduino.github.io/arduino-cli/latest/package_index_json-specification/) file that provides [Boards Manager](https://docs.arduino.cc/learn/starting-guide/cores) installation support. This URL must be added to the Arduino IDE's **File > Preferences > Additional Boards Manager URLs**.
- **Repository Data Folder**: The folder in the repository that contains [boards.txt](https://arduino.github.io/arduino-cli/latest/platform-specification/#boardstxt).
- **Branch Name**: The branch of the repository that contains the platform files.
- **Notes**: Additional information.
### Related
- https://github.com/arduino/Arduino/wiki/Unofficial-list-of-3rd-party-boards-support-urls
- The "Unofficial list of 3rd party boards support urls" differs from ino-hardware-package-list in:
- Less comprehensive due to consisting of submissions from boards platform authors.
- Is not in a machine-readable form.
### Contributing
Additions/corrections/updates to the list are welcome! Please submit a pull request or issue.
| {
"repo_name": "per1234/ino-hardware-package-list",
"stars": "26",
"repo_language": "None",
"file_name": "spell-check.yml",
"mime_type": "text/plain"
} |
# See: https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#about-the-dependabotyml-file
version: 2
updates:
# Configure check for outdated GitHub Actions actions in workflows.
# Source: https://github.com/arduino/tooling-project-assets/blob/main/workflow-templates/assets/dependabot/README.md
# See: https://docs.github.com/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot
- package-ecosystem: github-actions
directory: / # Check the repository's workflows under /.github/workflows/
schedule:
interval: daily
labels:
- "topic: infrastructure"
| {
"repo_name": "per1234/ino-hardware-package-list",
"stars": "26",
"repo_language": "None",
"file_name": "spell-check.yml",
"mime_type": "text/plain"
} |
name: Check Prettier Formatting
# See: https://docs.github.com/en/free-pro-team@latest/actions/reference/events-that-trigger-workflows
on:
push:
pull_request:
schedule:
# Run every Tuesday at 8 AM UTC to catch breakage caused by changes to Prettier.
- cron: "0 8 * * TUE"
workflow_dispatch:
repository_dispatch:
jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install Prettier
run: sudo npm install --global prettier
- name: Format with Prettier
run: prettier --write .
- name: Check formatting
run: git diff --color --exit-code
| {
"repo_name": "per1234/ino-hardware-package-list",
"stars": "26",
"repo_language": "None",
"file_name": "spell-check.yml",
"mime_type": "text/plain"
} |
name: Check Links
# See: https://docs.github.com/en/free-pro-team@latest/actions/reference/events-that-trigger-workflows
on:
push:
paths:
- ".github/workflows/check-links.yml"
- "**.md"
pull_request:
paths:
- ".github/workflows/check-links.yml"
- "**.md"
schedule:
# Run every Tuesday at 8 AM UTC to catch breakage caused by changes to the linked sites.
- cron: "0 8 * * TUE"
workflow_dispatch:
repository_dispatch:
jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Check links
uses: gaurav-nelson/github-action-markdown-link-check@v1
with:
use-quiet-mode: yes
| {
"repo_name": "per1234/ino-hardware-package-list",
"stars": "26",
"repo_language": "None",
"file_name": "spell-check.yml",
"mime_type": "text/plain"
} |
# https://github.com/per1234/formatting-checks
name: General Formatting Checks
# See: https://docs.github.com/en/actions/reference/events-that-trigger-workflows
on:
pull_request:
push:
workflow_dispatch:
repository_dispatch:
jobs:
utf-8-bom:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Check for UTF-8 BOM file encoding
run: |
find . \
-path './.git' -prune -or \
-type f \
-exec \
grep \
--files-with-matches \
--binary-files=without-match $'\xEF\xBB\xBF' \
'{}' \; \
-exec \
echo 'UTF-8 BOM encoding detected.' \; \
-exec false \
'{}' +
blank-first-line:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Check for files starting with a blank line
run: |
find . \
-path './.git' -prune -or \
-print0 \
| \
xargs \
-0 \
-L1 \
bash -c \
' \
head \
-1 \
"$0" \
| \
grep \
--binary-files=without-match \
--regexp="^$" \
; \
if [[ "$?" == "0" ]]; then \
echo "Blank line found at start of $0."; \
false; \
fi \
'
tabs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Check for unnecessary use of true tabs
run: |
find . \
-path './.git' -prune -or \
\( -not -path './ino-hardware-package-list.tsv' -and -type f \) \
-exec \
grep \
--with-filename \
--line-number \
--binary-files=without-match \
--regexp=$'\t' \
'{}' \
\; \
-exec \
echo 'Tab found.' \; \
-exec \
false \
'{}' +
trailing:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Check for trailing whitespace
run: |
find . \
-path './.git' -prune -or \
\( -not -path './ino-hardware-package-list.tsv' -and -type f \) \
-exec \
grep \
--with-filename \
--line-number \
--binary-files=without-match \
--regexp='[[:blank:]]$' \
'{}' \
\; \
-exec \
echo 'Trailing whitespace found.' \; \
-exec \
false \
'{}' +
line-endings:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Check for non-Unix line endings
run: |
find . \
-path './.git' -prune -or \
-exec \
grep \
--files-with-matches \
--binary-files=without-match \
--regexp=$'\r$' \
'{}' \
\; \
-exec \
echo 'Non-Unix EOL detected.' \; \
-exec \
false \
'{}' +
blank-last-line:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Check for blank lines at end of files
run: |
find . \
-path './.git' -prune -or \
-print0 \
| \
xargs \
-0 \
-L1 \
bash -c \
' \
tail -1 "$0" \
| \
grep \
--binary-files=without-match \
--regexp="^$" \
; \
if [[ "$?" == "0" ]]; then \
echo "Blank line found at end of $0."; \
false; \
fi \
'
no-last-newline:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Check for files that don't end in a newline
# https://stackoverflow.com/a/25686825
run: |
find . \
-path './.git' -prune -or \
-type f \
-print0 \
| \
xargs \
-0 \
-L1 \
bash -c \
' \
if \
test \
"$( \
grep \
--files-with-matches \
--binary-files=without-match \
--max-count=1 \
--regexp='.*' \
"$0" \
)" \
&& \
test \
"$( \
tail \
--bytes=1 \
"$0" \
)"; \
then \
echo "No new line at end of $0."; \
false; \
fi \
'
| {
"repo_name": "per1234/ino-hardware-package-list",
"stars": "26",
"repo_language": "None",
"file_name": "spell-check.yml",
"mime_type": "text/plain"
} |
name: Check TSV
# See: https://docs.github.com/en/free-pro-team@latest/actions/reference/events-that-trigger-workflows
on:
push:
paths:
- ".github/workflows/check-tsv.yml"
- "**.tsv"
pull_request:
paths:
- ".github/workflows/check-tsv.yml"
- "**.tsv"
schedule:
    # Run every Tuesday at 8 AM UTC to catch breakage caused by changes to csvlint
- cron: "0 8 * * TUE"
workflow_dispatch:
repository_dispatch:
jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Install Go
uses: actions/setup-go@v4
with:
# `actions/setup-go` only adds the `go install` target path to PATH when you specify a `go-version` input.
go-version: ">=1.x"
      - name: Install csvlint
run: go install github.com/Clever/csvlint/cmd/csvlint@latest
- name: Check TSV format
run: |
# No idea why this only works with quadruple escaping on the delimiter (specific to bash -c though).
find . \
-path './.git' -prune -or \
\( -name '*.tsv' -and -type f \) \
-print0 | \
xargs \
--null \
--max-lines=1 \
bash \
-c \
' \
echo "Checking: $0";
csvlint \
--delimiter='\\\\t' \
"$0" \
'
line-leading:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Check for leading whitespace on line
run: |
find . \
-path './.git' -prune -or \
\( -name '*.tsv' -and -type f \) \
-exec \
grep \
--with-filename \
--line-number \
--binary-files=without-match \
--regexp='^ ' \
'{}' \
\; \
-exec \
echo 'Leading whitespace found on line.' \; \
-exec \
false \
'{}' +
cell-leading:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Check for leading space in cells
run: |
find . \
-path './.git' -prune -or \
\( -name '*.tsv' -and -type f \) \
-exec \
grep \
--with-filename \
--line-number \
--binary-files=without-match \
--regexp=$'\t ' \
'{}' \
\; \
-exec \
echo 'Leading whitespace found in cell.' \; \
-exec \
false \
'{}' +
cell-trailing:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Check for trailing space in cells
run: |
find . \
-path './.git' -prune -or \
\( -name '*.tsv' -and -type f \) \
-exec \
grep \
--with-filename \
--line-number \
--binary-files=without-match \
--regexp=$' \t' \
'{}' \
\; \
-exec \
echo 'Trailing whitespace found in cell.' \; \
-exec \
false \
'{}' +
line-trailing:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Check for trailing whitespace on line
run: |
find . \
-path './.git' -prune -or \
\( -name '*.tsv' -and -type f \) \
-exec \
grep \
--with-filename \
--line-number \
--binary-files=without-match \
--regexp=' $' \
'{}' \
\; \
-exec \
echo 'Trailing whitespace found on line.' \; \
-exec \
false \
'{}' +
| {
"repo_name": "per1234/ino-hardware-package-list",
"stars": "26",
"repo_language": "None",
"file_name": "spell-check.yml",
"mime_type": "text/plain"
} |
name: Check License
env:
EXPECTED_LICENSE_FILENAME: LICENSE
# SPDX identifier: https://spdx.org/licenses/
EXPECTED_LICENSE_TYPE: CC0-1.0
# See: https://docs.github.com/en/free-pro-team@latest/actions/reference/events-that-trigger-workflows
on:
push:
paths:
- ".github/workflows/check-license.yml"
# See: https://github.com/licensee/licensee/blob/master/docs/what-we-look-at.md#detecting-the-license-file
- "COPYING*"
- "LICENCE*"
- "LICENSE*"
pull_request:
paths:
- ".github/workflows/check-license.yml"
- "COPYING*"
- "LICENCE*"
- "LICENSE*"
workflow_dispatch:
repository_dispatch:
jobs:
check-license:
runs-on: ubuntu-latest
steps:
- name: Checkout local repository
uses: actions/checkout@v3
- name: Install Ruby
uses: ruby/setup-ruby@v1
with:
ruby-version: ruby # Install latest version
- name: Install licensee
run: gem install licensee
- name: Check license file
run: |
# See: https://github.com/licensee/licensee
LICENSEE_OUTPUT="$(licensee detect --json --confidence=100)"
DETECTED_LICENSE_FILE="$(echo "$LICENSEE_OUTPUT" | jq .matched_files[0].filename | tr --delete '\r')"
echo "Detected license file: $DETECTED_LICENSE_FILE"
if [ "$DETECTED_LICENSE_FILE" != "\"$EXPECTED_LICENSE_FILENAME\"" ]; then
echo "ERROR: detected license file doesn't match expected: $EXPECTED_LICENSE_FILENAME"
exit 1
fi
DETECTED_LICENSE_TYPE="$(echo "$LICENSEE_OUTPUT" | jq .matched_files[0].matched_license | tr --delete '\r')"
echo "Detected license type: $DETECTED_LICENSE_TYPE"
if [ "$DETECTED_LICENSE_TYPE" != "\"$EXPECTED_LICENSE_TYPE\"" ]; then
echo "ERROR: detected license type doesn't match expected $EXPECTED_LICENSE_TYPE"
exit 1
fi
| {
"repo_name": "per1234/ino-hardware-package-list",
"stars": "26",
"repo_language": "None",
"file_name": "spell-check.yml",
"mime_type": "text/plain"
} |
name: Check YAML
# See: https://docs.github.com/en/free-pro-team@latest/actions/reference/events-that-trigger-workflows
on:
push:
paths:
- ".github/workflows/check-yaml.yml"
- ".yamllint*"
- "**.yaml"
- "**.yml"
pull_request:
paths:
- ".github/workflows/check-yaml.yml"
- ".yamllint*"
- "**.yaml"
- "**.yml"
schedule:
# Run every Tuesday at 8 AM UTC to catch breakage caused by changes to yamllint.
- cron: "0 8 * * TUE"
workflow_dispatch:
repository_dispatch:
jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Check YAML
run: yamllint --config-file "${{ github.workspace }}/.github/.yamllint.yml" .
| {
"repo_name": "per1234/ino-hardware-package-list",
"stars": "26",
"repo_language": "None",
"file_name": "spell-check.yml",
"mime_type": "text/plain"
} |
# Source: https://github.com/arduino/tooling-project-assets/blob/main/workflow-templates/sync-labels.md
name: Sync Labels
# See: https://docs.github.com/actions/using-workflows/events-that-trigger-workflows
on:
push:
paths:
- ".github/workflows/sync-labels.ya?ml"
- ".github/label-configuration-files/*.ya?ml"
pull_request:
paths:
- ".github/workflows/sync-labels.ya?ml"
- ".github/label-configuration-files/*.ya?ml"
schedule:
# Run daily at 8 AM UTC to sync with changes to shared label configurations.
- cron: "0 8 * * *"
workflow_dispatch:
repository_dispatch:
env:
CONFIGURATIONS_FOLDER: .github/label-configuration-files
CONFIGURATIONS_ARTIFACT: label-configuration-files
jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Download JSON schema for labels configuration file
id: download-schema
uses: carlosperate/download-file-action@v2
with:
file-url: https://raw.githubusercontent.com/arduino/tooling-project-assets/main/workflow-templates/assets/sync-labels/arduino-tooling-gh-label-configuration-schema.json
location: ${{ runner.temp }}/label-configuration-schema
- name: Install JSON schema validator
run: |
sudo npm install \
--global \
ajv-cli \
ajv-formats
- name: Validate local labels configuration
run: |
# See: https://github.com/ajv-validator/ajv-cli#readme
ajv validate \
--all-errors \
-c ajv-formats \
-s "${{ steps.download-schema.outputs.file-path }}" \
-d "${{ env.CONFIGURATIONS_FOLDER }}/*.{yml,yaml}"
download:
needs: check
runs-on: ubuntu-latest
strategy:
matrix:
filename:
# Filenames of the shared configurations to apply to the repository in addition to the local configuration.
# https://github.com/arduino/tooling-project-assets/blob/main/workflow-templates/assets/sync-labels
- universal.yml
steps:
- name: Download
uses: carlosperate/download-file-action@v2
with:
file-url: https://raw.githubusercontent.com/arduino/tooling-project-assets/main/workflow-templates/assets/sync-labels/${{ matrix.filename }}
- name: Pass configuration files to next job via workflow artifact
uses: actions/upload-artifact@v3
with:
path: |
*.yaml
*.yml
if-no-files-found: error
name: ${{ env.CONFIGURATIONS_ARTIFACT }}
sync:
needs: download
runs-on: ubuntu-latest
steps:
- name: Set environment variables
run: |
# See: https://docs.github.com/actions/using-workflows/workflow-commands-for-github-actions#setting-an-environment-variable
echo "MERGED_CONFIGURATION_PATH=${{ runner.temp }}/labels.yml" >> "$GITHUB_ENV"
- name: Determine whether to dry run
id: dry-run
if: >
github.event_name == 'pull_request' ||
(
(
github.event_name == 'push' ||
github.event_name == 'workflow_dispatch'
) &&
github.ref != format('refs/heads/{0}', github.event.repository.default_branch)
)
run: |
# Use of this flag in the github-label-sync command will cause it to only check the validity of the
# configuration.
echo "::set-output name=flag::--dry-run"
- name: Checkout repository
uses: actions/checkout@v3
- name: Download configuration files artifact
uses: actions/download-artifact@v3
with:
name: ${{ env.CONFIGURATIONS_ARTIFACT }}
path: ${{ env.CONFIGURATIONS_FOLDER }}
- name: Remove unneeded artifact
uses: geekyeggo/delete-artifact@v2
with:
name: ${{ env.CONFIGURATIONS_ARTIFACT }}
- name: Merge label configuration files
run: |
# Merge all configuration files
shopt -s extglob
cat "${{ env.CONFIGURATIONS_FOLDER }}"/*.@(yml|yaml) > "${{ env.MERGED_CONFIGURATION_PATH }}"
- name: Install github-label-sync
run: sudo npm install --global github-label-sync
- name: Sync labels
env:
GITHUB_ACCESS_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# See: https://github.com/Financial-Times/github-label-sync
github-label-sync \
--labels "${{ env.MERGED_CONFIGURATION_PATH }}" \
${{ steps.dry-run.outputs.flag }} \
${{ github.repository }}
| {
"repo_name": "per1234/ino-hardware-package-list",
"stars": "26",
"repo_language": "None",
"file_name": "spell-check.yml",
"mime_type": "text/plain"
} |
name: Check Workflows
# See: https://docs.github.com/en/free-pro-team@latest/actions/reference/events-that-trigger-workflows
on:
push:
paths:
- ".github/workflows/*.yaml"
- ".github/workflows/*.yml"
pull_request:
paths:
- ".github/workflows/*.yaml"
- ".github/workflows/*.yml"
schedule:
# Run every Tuesday at 8 AM UTC to catch breakage resulting from changes to the JSON schema.
- cron: "0 8 * * TUE"
workflow_dispatch:
repository_dispatch:
jobs:
validate:
runs-on: ubuntu-latest
env:
JSON_SCHEMA_FOLDER: etc/github-workflow-json-schema
JSON_SCHEMA_FILENAME: github-workflow.json
steps:
- name: Checkout local repository
uses: actions/checkout@v3
- name: Download JSON schema for GitHub Actions workflows
uses: carlosperate/download-file-action@v2
with:
# See: https://github.com/SchemaStore/schemastore/blob/master/src/schemas/json/github-workflow.json
file-url: https://json.schemastore.org/github-workflow
location: ${{ env.JSON_SCHEMA_FOLDER }}
file-name: ${{ env.JSON_SCHEMA_FILENAME }}
- name: Install JSON schema validator
run: sudo npm install --global ajv-cli
- name: Validate GitHub Actions workflows
run: |
# See: https://github.com/ajv-validator/ajv-cli#readme
ajv \
--strict=false \
-s "${{ env.JSON_SCHEMA_FOLDER }}/${{ env.JSON_SCHEMA_FILENAME }}" \
-d "./.github/workflows/*.{yml,yaml}"
| {
"repo_name": "per1234/ino-hardware-package-list",
"stars": "26",
"repo_language": "None",
"file_name": "spell-check.yml",
"mime_type": "text/plain"
} |
name: Spell Check
# See: https://docs.github.com/en/free-pro-team@latest/actions/reference/events-that-trigger-workflows
on:
push:
pull_request:
schedule:
# Run every Tuesday at 8 AM UTC to catch new misspelling detections resulting from dictionary updates.
- cron: "0 8 * * TUE"
workflow_dispatch:
repository_dispatch:
jobs:
spellcheck:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Spell check
uses: codespell-project/actions-codespell@master
| {
"repo_name": "per1234/ino-hardware-package-list",
"stars": "26",
"repo_language": "None",
"file_name": "spell-check.yml",
"mime_type": "text/plain"
} |
Package: placeholder
Type: Book
Title: Does not matter.
Version: 0.0.1
Imports: bookdown
Remotes: rstudio/bookdown
| {
"repo_name": "MathiasHarrer/Doing-Meta-Analysis-in-R",
"stars": "272",
"repo_language": "R",
"file_name": "pcurve.bw.es.R",
"mime_type": "text/plain"
} |
`r if (knitr:::is_html_output()) '
# References {-}
'`
---
<style>
p { margin-top: 0; margin-bottom: 20px; }
</style>
<br></br>
| {
"repo_name": "MathiasHarrer/Doing-Meta-Analysis-in-R",
"stars": "272",
"repo_language": "R",
"file_name": "pcurve.bw.es.R",
"mime_type": "text/plain"
} |
# Bayesian Meta-Analysis {#bayesian-ma}
---
<img src="_figs/waves.jpg" />
<br></br>
<span class="firstcharacter">I</span>
n the last chapters, we have delved into somewhat more sophisticated extensions of meta-analysis, such as "multilevel" models (Chapter \@ref(multilevel-ma)), meta-analytic structural equation modeling (Chapter \@ref(sem)), and network meta-analysis (Chapter \@ref(netwma)). Now, we will take one step back and revisit "conventional" meta-analysis again--but this time from another angle. In this chapter, we deal with **Bayesian meta-analysis**.
\index{gemtc Package}
\index{Frequentist Statistics}
We already covered a Bayesian model in the last chapter on network meta-analysis. There, we discussed the main ideas behind Bayesian statistics, including Bayes' theorem and the idea of prior distributions (see Chapter \@ref(bayesian-inference)). In the present chapter, we build on this knowledge and try to get a more thorough understanding of the "Bayesian way" to do meta-analysis. When we set up our Bayesian network meta-analysis model, for example, the **{gemtc}** package specified the priors automatically for us. Here, we will do this ourselves.
While its background is slightly more involved, we will see that Bayesian meta-analysis essentially aims to do the same thing as any "conventional" meta-analysis: it pools observed effect sizes into one overall (true) effect. Using a Bayesian model, however, also comes with several practical advantages compared to frequentist approaches. This makes it worthwhile to learn how we can implement such models using _R_.
<br></br>
## The Bayesian Hierarchical Model {#bayes-hierarchical-model}
---
\index{Bayesian Hierarchical Model}
To perform a Bayesian meta-analysis, we employ a so-called **Bayesian hierarchical model** [@rover2017bayesian; @higgins2009re]. We already briefly covered this type of model in the network meta-analysis chapter (Chapter \@ref(bayesian-net-ma-model)).
In Chapter \@ref(multilevel-ma), we learned that every meta-analytic model comes with an inherent "multilevel", and thus **hierarchical**, structure. On the first level, we have the individual participants. Data on this level usually reaches us in the form of calculated effect sizes $\hat\theta_k$ of each study $k$. We assume that participants are nested within studies on the second level and that the true effect sizes $\theta_k$ of different studies in our meta-analysis follow their own distribution. This distribution of true effects has a mean $\mu$ (the pooled “true” effect we want to estimate) and variance $\tau^2$, representing the between-study heterogeneity.
Let us try to formalize this. On the first level, we assume that the observed effect size $\hat\theta_k$ reported in study $k$ is an estimate of the "true" effect $\theta_k$ in this study. The observed effect $\hat\theta_k$ deviates from $\theta_k$ due to the sampling error $\epsilon_k$. This is because we assume that $\hat\theta_k$ was drawn (sampled) from the population underlying $k$. This population can be seen as a distribution with mean $\theta_k$, the "true" effect of the study, and a variance $\sigma^2$.
In the second step, we assume that the true effect sizes $\theta_k$ themselves are only samples of an overarching distribution of true effect sizes. The mean of this distribution $\mu$ is the pooled effect size we want to estimate. The study-specific true effects $\theta_k$ deviate from $\mu$ because the overarching distribution also has a variance $\tau^2$, signifying the between-study heterogeneity. Taken together, this gives these two equations:
\begin{align}
\hat\theta_k &\sim \mathcal{N}(\theta_k,\sigma_k^2) \notag \\
\theta_k &\sim \mathcal{N}(\mu,\tau^2) (\#eq:by1)
\end{align}
Here, we use $\mathcal{N}$ to indicate that parameters to the left were sampled from a **normal** distribution. Some may argue that this is an unnecessarily strict assumption for the second equation [@higgins2009re], but the formulation as shown here is the one that is used most of the time. As covered before, the fixed-effect model is simply a special case of this model in which we assume that $\tau^2 = 0$, meaning that there is no between-study heterogeneity, and that all studies share one single true effect size (i.e. that for all studies $k$: $\theta_k = \mu$).
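To make these two sampling levels more concrete, here is a minimal simulation sketch in _R_. All values ($\mu$, $\tau$, the standard errors, and the number of studies) are made up purely for illustration and are not part of the model we fit later in this chapter.
```{r, eval=F}
set.seed(123)
k <- 18                        # number of studies (arbitrary)
mu <- 0.5                      # assumed "true" pooled effect
tau <- 0.3                     # assumed between-study heterogeneity (SD)
sigma_k <- runif(k, 0.1, 0.4)  # made-up standard errors of each study
# Level 2: draw each study's true effect from the overarching distribution
theta_k <- rnorm(k, mean = mu, sd = tau)
# Level 1: draw the observed effect of each study around its true effect
theta_hat_k <- rnorm(k, mean = theta_k, sd = sigma_k)
```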
We can also simplify this formula by using the marginal form:
\begin{equation}
\hat\theta_k \sim \mathcal{N}(\mu,\sigma_k^2 + \tau^2)
(\#eq:by2)
\end{equation}
You may have already detected that these formulas look a lot like the ones we defined when discussing the random-effects (Chapter \@ref(rem)) and three-level meta-analysis (Chapter \@ref(multilevel-nature)) model. Indeed, there is nothing particularly "Bayesian" about this formulation. This changes, however, when we add the following equations [@williams2018bayesian]:
\begin{align}
(\mu, \tau^2) &\sim p(.) \notag \\
\tau^2 &> 0 (\#eq:by3)
\end{align}
\index{Prior Distribution}
The first line is particularly important. It defines **prior distributions** for the parameters $\mu$ and $\tau^2$. This allows us to specify **a priori** what we think the true pooled effect size $\mu$ and the between-study heterogeneity $\tau^2$ may look like, and how certain we are about this. The second equation adds the constraint that the between-study heterogeneity variance must be larger than zero. However, this formula does not specify the exact **type** of prior distribution used for $\mu$ and $\tau^2$. It only tells us that **some** prior distribution is assumed. We will cover reasonable, specific priors for Bayesian meta-analysis models in more detail later.
\index{Markov Chain Monte Carlo}
\index{Gibbs Sampler}
\index{brms Package}
\index{No-U-Turn Sampler (NUTS)}
In the chapter on network meta-analysis, we already covered the method through which Bayesian approaches estimate model parameters. To recap, this involves using **Markov Chain Monte Carlo**-based sampling procedures, for example **Gibbs sampling**. In the **{brms}** package, which we will be using in this chapter, so-called **No-U-Turn** sampling [NUTS, @hoffman2014no] is used^[NUTS is an extension of so-called **Hamiltonian Monte Carlo** (HMC), with the latter being another type of Markov Chain Monte Carlo method. Compared to other approaches (e.g. Gibbs sampling), HMC can provide a more efficient solution to estimate hierarchical models [such as those used for meta-analysis, @betancourt2015hamiltonian]. A brief description of HMC and NUTS can be found in the **Stan** <a href="https://mc-stan.org/docs/2_26/reference-manual/hamiltonian-monte-carlo.html" target="_blank">reference manual</a> (Stan is the low-level programming language on which **{brms}** is based, see Chapter \@ref(bayes-ma-R)).].
\index{meta Package}
\index{metafor Package}
\index{Posterior Distribution}
In the previous chapters, we primarily used the **{meta}** and **{metafor}** packages. These packages allow us to conduct meta-analyses based on a non-Bayesian, or **frequentist**, framework. Therefore, you might be wondering why one should start using Bayesian methods, given that we can already resort to such powerful tools using "conventional" approaches. The reason is that Bayesian meta-analysis comes with a few distinct advantages [@williams2018bayesian; @mcneish2016using; @chung2013nondegenerate]:
* Bayesian methods allow us to directly model the **uncertainty** in our estimate of $\tau^2$. They can also be superior in estimating pooled effects, particularly when the number of included studies is small (which is very often the case in practice).
* Bayesian methods produce full **posterior distributions** for both $\mu$ and $\tau^2$. This allows us to calculate the exact **probability** that $\mu$ or $\tau^2$ is smaller or larger than some specified value. This is in contrast to frequentist methods, where we only calculate confidence intervals. However, (95%) confidence intervals only state that, if data sampling were repeated many, many times, the true value of a population parameter (such as $\mu$ or $\tau^2$) would fall into the range of the confidence interval in 95% of the samples. They do not tell us the **probability** that the true parameter lies between two specified values.
* Bayesian methods allow us to integrate **prior knowledge** and assumptions when calculating meta-analyses.
<br></br>
## Setting Prior Distributions {#priors}
---
Before, we formalized the hierarchical model we can use to pool effects in a Bayesian meta-analysis. However, to run such a model, we have to specify the prior distributions of $\mu$ and $\tau^2$. Particularly when the number of studies is small, priors can have a considerable impact on the results, so we should choose them wisely.
\index{Uninformative Prior}
\index{Weakly Informative Prior}
Generally, a good approach is to use **weakly informative** priors [@williams2018bayesian]. Weakly informative priors can be contrasted with **non-informative** priors. Non-informative priors are the simplest form of a prior distribution. They are usually based on **uniform** distributions, and are used to represent that all values are equally credible.
Weakly informative priors, on the other hand, are a little more sophisticated. They rely on distributions which represent that we have a **weak** belief that some values are more credible than others. However, they are still not making any specific statements concerning the value of the parameter to be estimated from our data.
\index{Standardized Mean Difference}
Intuitively, this makes a lot of sense. In many meta-analyses, for example, it seems reasonable to assume that the true effect lies somewhere between SMD = -2.0 and 2.0, but will unlikely be SMD = 50. Based on this rationale, a good starting point for our $\mu$ prior may therefore be a normal distribution with mean 0 and variance 1. This means that we grant an approximate 95% prior probability that the true pooled effect size $\mu$ lies between −2.0 and 2.0:
\begin{equation}
\mu \sim \mathcal{N}(0,1)
(\#eq:by4)
\end{equation}
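We can quickly verify this claim ourselves using the cumulative distribution function of the standard normal distribution. This is just a sanity check of the prior; it is not needed for the actual analysis.
```{r, eval=F}
pnorm(2, mean = 0, sd = 1) - pnorm(-2, mean = 0, sd = 1)
```
```
## [1] 0.9544997
```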
The next prior we have to specify is the one for $\tau^2$. This one is a little more difficult since we know that $\tau^2$ should always be non-negative, but can be (close to) zero. A recommended distribution for this case, and one which is often used for variances such as $\tau^2$, is the **Half-Cauchy** prior. The Half-Cauchy distribution is a special case of a Cauchy distribution, which is only defined for one "half" (i.e. the positive side) of the distribution^[The standard Cauchy distribution itself is a special case of the $t$ distribution with $\text{d.f.}=1$.].
The Half-Cauchy distribution is controlled by two parameters. The first one is the location parameter $x_0$, which specifies the peak of the distribution. The second one is $s$, the scaling parameter. It controls how **heavy-tailed** the distribution is (i.e. how much it "spreads out" to higher values). The Half-Cauchy distribution is denoted with $\mathcal{HC}(x_0,s)$.
The graph below visualizes the Half-Cauchy distribution for varying values of $s$, with the value of $x_0$ fixed at 0.
\vspace{2mm}
```{r, echo=F, fig.width=5, fig.height=4, fig.align='center', out.width="55%"}
library(ggplot2)
hc_03 = function(x) {(0.3^2/(x^2+0.3^2)*(1/(pi*0.3)))}
hc_05 = function(x) {(0.5^2/(x^2+0.5^2)*(1/(pi*0.5)))}
hc_1 = function(x) {(1/(x^2+1)*(1/(pi)))}
ggplot(data = data.frame(x = 0), mapping = aes(x = x)) +
stat_function(fun = hc_03,fill = "gray80",color = "black", alpha = 0.3, geom="area", size = 0.1) +
stat_function(fun = hc_05,fill = "gray50",color = "black", alpha = 0.3, geom="area", size = 0.1) +
stat_function(fun = hc_1, fill= "gray20", color = "black", alpha = 0.3, geom="area", size = 0.1) +
ylab(bquote(italic(y))) +
xlab(bquote(italic(x))) +
scale_x_continuous(expand = c(0, 0), limits = c(0, 1.5)) +
scale_y_continuous(expand = c(0, 0), limits = c(0, 1.1)) +
theme_classic() +
annotate("text", x = 0.4, y = 0.9,
label = "atop(bold(HC(0,0.3)), y==(frac(0.3^2, x^2+0.3^2)) (frac(1,pi~0.3)))",
hjust = "left", parse = TRUE, color = "black") +
annotate("text", x = 0.75, y = 0.6,
label = "atop(bold(HC(0,0.5)), y==(frac(0.5^2, x^2+0.5^2)) (frac(1,pi~0.5)))",
hjust = "left", parse = TRUE, color = "black") +
annotate("text", x = 1.2, y = 0.4,
label = "atop(bold(HC(0,1)), y==(frac(1, x^2+1)) (frac(1,pi)))",
hjust = "left", parse = TRUE, color = "black") +
annotate(geom = "curve", color = "black", x = 0.38, y = 0.85, xend = 0.2, yend = hc_03(0.2),
curvature = .1, arrow = arrow(length = unit(2, "mm"))) +
annotate(geom = "curve", color = "black", x = 0.73, y = 0.55, xend = 0.51, yend = hc_05(0.51),
curvature = .1, arrow = arrow(length = unit(2, "mm"))) +
annotate(geom = "curve", color = "black", x = 1.18, y = 0.35, xend = 1.1, yend = hc_1(1.1),
curvature = .1, arrow = arrow(length = unit(2, "mm"))) +
theme(panel.background = element_rect(fill = "#FFFEFA", size = 0),
plot.background = element_rect(fill = "#FFFEFA", size = 0))
```
\vspace{2mm}
The Half-Cauchy distribution typically has rather heavy tails, which makes it particularly useful as a prior distribution for $\tau$. The heavy tails ensure that we still give very high values of $\tau$ **some** probability, while at the same time assuming that lower values are more likely.
In many meta-analyses, $\tau$ (the square root of $\tau^2$) lies somewhere around 0.3, or at least in the same ballpark. To specify the Half-Cauchy prior, we may therefore use $s=$ 0.3. This ensures that values of $\tau$ smaller than 0.3 receive a prior probability of 50% [@williams2018bayesian]. We can confirm this using the Half-Cauchy cumulative distribution function implemented in the `phcauchy` function of the **{extraDistr}** package [@extradistr].
\index{extraDistr Package}
```{r, message=F, warning=F}
library(extraDistr)
phcauchy(0.3, sigma = 0.3)
```
However, this is already a quite specific assumption concerning the true value of $\tau$. A more conservative approach, which we will follow in our hands-on example, is to set $s$ to 0.5; this makes the distribution flatter. In general, it is advised to always conduct sensitivity analyses with different prior specifications to check if they affect the results substantially. Using $s=$ 0.5 as our parameter of the Half-Cauchy distribution, we can write down our $\tau$ prior like this:
\begin{equation}
\tau \sim \mathcal{HC}(0,0.5)
(\#eq:by5)
\end{equation}
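To see how much flatter this prior is, we can repeat the check from above (using the same `phcauchy` function) with $s=$ 0.5. The prior probability of values below $\tau=$ 0.3 then drops to roughly 34%, reflecting our more conservative stance:
```{r, eval=F}
phcauchy(0.3, sigma = 0.5)
```
```
## [1] 0.3440417
```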
We can now put together the formulas of the hierarchical model, and our prior specifications. This leads to the complete model we can use for our Bayesian meta-analysis:
\begin{align}
\hat\theta_k &\sim \mathcal{N}(\theta_k,\sigma_k^2) \notag \\
\theta_k &\sim \mathcal{N}(\mu,\tau^2) \notag \\
\mu &\sim \mathcal{N}(0,1) \notag \\
\tau &\sim \mathcal{HC}(0,0.5) (\#eq:by5)
\end{align}
<br></br>
## Bayesian Meta-Analysis in _R_ {#bayes-ma-R}
---
\index{brms Package}
\index{STAN}
\index{Generalized Additive Model}
Now that we have defined the Bayesian model for our meta-analysis, it is time to implement it in _R_. Here, we use the **{brms}** package [@burknerJSS; @burkner2017advanced] to fit our model. The **{brms}** package is a very versatile and powerful tool to fit Bayesian regression models. It can be used for a wide range of applications, including multilevel (mixed-effects) models, generalized linear models, multivariate models, and generalized additive models, to name just a few. Most of these models require person-level data but **{brms}** can also be used for meta-analysis, where we deal with (weighted) study-level data^[The **{brms}** package is based on **Stan**, a low-level programming language for Bayesian modeling. The Stan project has its own, actively maintained online forum (https://discourse.mc-stan.org/), where issues pertaining to **{brms}** can also be discussed. The forum also has a "meta-analysis" tag, which allows to filter out potentially relevant threads.].
Before we start fitting the model, we first have to install and load the **{brms}** package.
```{r, message=F, warning=F, eval=F}
library(brms)
```
<br></br>
### Fitting the Model
---
In our hands-on example, we again use the `ThirdWave` data set, which contains information from a meta-analysis investigating the effects of "third wave" psychotherapies in college students (Chapter \@ref(pre-calculated-es)). Before we fit the model, let us first specify the prior distribution of the overall effect size $\mu$ and the between-study heterogeneity $\tau$. Previously, we defined that $\mu \sim \mathcal{N}(0,1)$ and $\tau \sim \mathcal{HC}(0,0.5)$.
We can use the `prior` function to specify the distributions. The function takes two arguments. In the first argument, we specify the distribution we want to assume for our prior, including the distribution parameters. In the second argument, we have to define the `class` of the prior. For $\mu$, the appropriate class is `Intercept`, since it is a fixed population-level effect. For $\tau$, the class is `sd`, because it is a variance (or, to be more precise, a **standard deviation**). We can define both priors using the `prior` function, then concatenate them, and save the resulting object as `priors`.
```{r, eval=F}
priors <- c(prior(normal(0,1), class = Intercept),
prior(cauchy(0,0.5), class = sd))
```
Now, we can proceed and fit the model. To do this, we use the `brm` function in **{brms}**. The function has many arguments, but only a few are relevant for us.
In the **`formula`** argument, the formula for the model is specified. The **{brms}** package uses a regression formula notation, in which an outcome (in our case, an observed effect size) `y` is predicted by one or more predictors `x`. A tilde (`~`) is used to specify that there is a predictive relationship: `y ~ x`.
Meta-analyses are somewhat special, because we do not have a variable predicting the effect size (unless when we perform a meta-regression). This means that `x` has to be replaced with `1`, indicating an **intercept-only** model. Furthermore, we cannot simply use the effect size of each study in `y` **as is**. We also have to give studies with higher precision (i.e. sample size) a greater weight. This can be done by using `y|se(se_y)` instead of only `y`, where the `se(se_y)` part stands for the standard error of each effect size `y` in our data set.
If we want to use a random-effects model, the last step is to add a random-effects term `(1|study)` to the right side of the formula. This specifies that the effect sizes in `y` are assumed to be nested within studies, the true effects of which are themselves random draws from an overarching population of true effect sizes. If we want to use a fixed-effect model, we can simply omit this term. The generic full formula for a random-effects model therefore looks like this: `y|se(se_y) ~ 1 + (1|random)`. To learn more about the formula setup in `brm` models, you can type `?brmsformula` in your console to open the documentation.
The other arguments are fairly straightforward. In `prior`, we specify the priors we want to define for our model. In our example, we can simply plug in the `priors` object we created previously. The `iter` argument specifies the number of iterations of the MCMC algorithm. The more complex your model, the higher this number should be. However, more iterations also mean that the function will take longer to finish. Lastly, we also have to specify `data`, where we simply provide the name of our data set.
We save our fitted Bayesian meta-analysis model under the name `m.brm`. The code looks like this:
```{r, eval=F}
m.brm <- brm(TE|se(seTE) ~ 1 + (1|Author),
data = ThirdWave,
prior = priors,
iter = 4000)
```
Please be aware that Bayesian methods are much more computationally expensive compared to standard meta-analytic techniques we covered before. It may therefore take a few minutes until the sampling is completed.
<br></br>
### Assessing Convergence & Model Validity
---
\index{Markov Chain Monte Carlo}
Before we start analyzing the results, we have to make sure that the model has **converged** (i.e. that the MCMC algorithm found the optimal solution). If this is not the case, the parameters are not trustworthy and should not be interpreted. Non-convergence happens frequently in Bayesian models and can often be resolved by re-running the model with more iterations (`iter`). To assess the convergence and overall validity of our model, we should always do two things. First, check the $\hat{R}$ values of the parameter estimates, and secondly, conduct **posterior predictive checks**.
The $\hat{R}$ value represents the **Potential Scale Reduction Factor** (PSRF) we already covered when discussing Bayesian network meta-analysis (Chapter \@ref(bayesian-model-convergence)). The $\hat{R}$ value of our estimates should be smaller than 1.01. To check this, we can produce a `summary` of our `m.brm` object.
```{r, eval=F}
summary(m.brm)
```
```
## Family: gaussian
## Links: mu = identity; sigma = identity
## Formula: TE | se(seTE) ~ 1 + (1 | Author)
## Data: ThirdWave (Number of observations: 18)
## Samples: 4 chains, each with iter = 4000; warmup = 2000; thin = 1;
## total post-warmup samples = 8000
##
## Group-Level Effects:
## ~Author (Number of levels: 18)
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## sd(Intercept) 0.29 0.10 0.11 0.51 1.00 2086
##
## Population-Level Effects:
## Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
## Intercept 0.57 0.09 0.39 0.76 1.00 3660
##
##
## [...]
##
## Samples were drawn using sampling(NUTS). For each parameter,
## Bulk_ESS and Tail_ESS are effective sample size measures,
## and Rhat is the potential scale reduction factor on split
## chains (at convergence, Rhat = 1).
```
As we can see, the `Rhat` value for both parameters is 1, signifying convergence. This means that the results can be interpreted.
In a posterior predictive check, on the other hand, data are simulated through random draws from the posterior predictive distribution and then compared to the observed data. If a model has converged and captures the data well, we can expect that the densities of the replications are roughly similar to the one of the observed data. This can easily be checked using the output of the `pp_check` function.
\vspace{2mm}
```{r, eval=F}
pp_check(m.brm)
```
```{r, message=F, warning=F, echo=F, fig.width=5, fig.height=4, fig.align='center', out.width="50%", eval = F}
load("data/m.brm.rda")
library(bayesplot)
library(brms)
color_scheme_set(scheme = "darkgray")
set.seed(123)
pp_check(m.brm)
```
```{r, message = F, out.width = '50%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/ppcheck_sep.png')
```
\index{Potential Scale Reduction Factor}
<br></br>
### Interpreting the Results
---
We can begin to interpret the results by looking at the `Group-Level Effects` in our summary output first. This section is reserved for the random effect we defined in our formula. Since we fitted a random-effects meta-analysis model, the variable `~Author`, signifying the individual studies, has been modeled with a random intercept. As we described before, this represents our assumption on level 2 that each study has its own "true" effect size, which has been sampled from an overarching distribution of true effect sizes. We also see that our group-level effect has 18 levels, corresponding with the $K=$ 18 studies in our data.
The estimate of the between-study heterogeneity, `sd(Intercept)`, is $\tau=$ 0.29, thus closely resembling our initial "best guess" when setting the priors. Using the `ranef` function, we can also extract the estimated deviation of each study's "true" effect size from the pooled effect:
```{r, eval=F}
ranef(m.brm)
```
```
## $Author
## , , Intercept
## Estimate Est.Error Q2.5 Q97.5
## Call et al. 0.06836636 0.1991649 -0.327463365 0.47663987
## Cavanagh et al. -0.14151644 0.1767123 -0.510165576 0.18799272
## DanitzOrsillo 0.48091338 0.2829719 -0.003425284 1.08636421
## de Vibe et al. -0.31923470 0.1454819 -0.612269461 -0.03795683
## Frazier et al. -0.11388029 0.1497128 -0.417029387 0.17085917
## [...]
```
The next part of the output we can interpret are the `Population-Level Effects`. This section represents the "fixed" population parameters we modeled. In our case, this is $\mu$, the overall effect size of our meta-analysis.
\index{Credible Interval}
In the output, we see that the estimate is a (bias-corrected) SMD of 0.57, with a 95% credible interval (CrI) ranging from 0.39 to 0.76. This indicates that the interventions studied in this meta-analysis have a moderate-sized overall effect.
Because this is a Bayesian model, we do not find any $p$-values here. But our example should underline that we can also make reasonable inferences without having to resort to classical significance testing. A great thing we can do in Bayesian, but not in frequentist meta-analysis, is to model the parameters we want to estimate **probabilistically**. The Bayesian model not only estimates the parameters of interest but a whole posterior distribution for $\tau^2$ and $\mu$, which we can access quite easily. We only have to use the `posterior_samples` function.
```{r, eval=F}
post.samples <- posterior_samples(m.brm, c("^b", "^sd"))
names(post.samples)
```
```
## [1] "b_Intercept" "sd_Author__Intercept"
```
The resulting data frame contains two columns: `b_Intercept`, the posterior sample data for the pooled effect size, and `sd_Author__Intercept`, the one for the between-study heterogeneity $\tau$. We rename the columns to `smd` and `tau` to make the names more informative.
```{r, eval=F}
names(post.samples) <- c("smd", "tau")
```
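As an optional sanity check, we can also reproduce the pooled estimate and its 95% credible interval directly from these posterior samples. Since MCMC sampling is stochastic, the values you obtain may differ slightly from the summary output shown earlier.
```{r, eval=F}
mean(post.samples$smd)                               # posterior mean of the pooled effect
quantile(post.samples$smd, probs = c(0.025, 0.975))  # 95% credible interval
```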
\vspace{2mm}
\index{Posterior Distribution}
Using the data in `post.samples`, we can now generate a **density plot** of the posterior distributions. We use the **{ggplot2}** package for plotting.
```{r, eval=F}
ggplot(aes(x = smd), data = post.samples) +
geom_density(fill = "lightblue", # set the color
color = "lightblue", alpha = 0.7) +
geom_point(y = 0, # add point at mean
x = mean(post.samples$smd)) +
labs(x = expression(italic(SMD)),
y = element_blank()) +
theme_minimal()
ggplot(aes(x = tau), data = post.samples) +
geom_density(fill = "lightgreen", # set the color
color = "lightgreen", alpha = 0.7) +
geom_point(y = 0,
x = mean(post.samples$tau)) + # add point at mean
labs(x = expression(tau),
y = element_blank()) +
theme_minimal()
```
```{r, echo=F, fig.width = 4, fig.height=3, fig.align="center", out.width="30%", fig.show='hold', eval=F}
ggplot(aes(x = smd), data = post.samples) +
geom_density(fill = "gray70", # set the color
color = "gray70", alpha = 0.7) +
geom_point(y = 0, # add point at mean
x = mean(post.samples$smd)) +
labs(x = expression(italic(SMD)),
y = element_blank()) +
theme_minimal()
ggplot(aes(x = tau), data = post.samples) +
geom_density(fill = "gray20", # set the color
color = "gray20", alpha = 0.7) +
geom_point(y = 0,
x = mean(post.samples$tau)) + # add point at mean
labs(x = expression(tau),
y = element_blank()) +
theme_minimal()
```
```{r, message = F, out.width = '49%', echo = F, fig.show='hold'}
library(OpenImageR)
knitr::include_graphics('images/posterior1_sep.png')
knitr::include_graphics('images/posterior2_sep.png')
```
We see that the posterior distributions follow a unimodal, and roughly normal distribution, peaking around the estimated values for $\mu$ and $\tau$.
The fact that Bayesian methods create an actual sampling distribution for our parameters of interest means that we can calculate **exact probabilities** that $\mu$ or $\tau$ is larger or smaller than some specific value. Imagine that we found in previous literature that effects of an intervention below SMD = 0.30 are no longer meaningful. We could therefore calculate the probability that the true overall effect in our meta-analysis is smaller than SMD = 0.30, based on our model.
\index{Empirical Cumulative Distribution Function (ECDF)}
This can be done by looking at the **empirical cumulative distribution function** (ECDF). The ECDF lets us select one specific value $X$, and returns the probability of some value $x$ being smaller than $X$, based on provided data. The ECDF of $\mu$'s posterior distribution in our example can be seen below.
\vspace{2mm}
```{r, warning=F, message=F, fig.width=4, fig.height=3, fig.align='center', echo=F, out.width="50%", eval=F}
load("data/post.samples.rda")
library(ggplot2)
smd.ecdf = ecdf(post.samples$smd)
ecdf.dat = data.frame(smd = 1:1000/1000,
p = smd.ecdf(1:1000/1000))
ggplot(aes(x = smd, y = p), data = ecdf.dat) +
geom_vline(xintercept = mean(post.samples$smd), color = "grey") +
geom_line(size = 2, color = "black") +
theme_minimal() +
labs(x = "SMD", y = "Cumulative Probability") +
ggtitle("ECDF: Posterior Distribution of the Pooled Effect Size")
```
```{r, message = F, out.width = '50%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/ecdf_sep.png')
```
We can use the `ecdf` function to define an ECDF in _R_, and then check the probability of our pooled effect being smaller than 0.30. The code looks like this:
```{r, eval = F}
smd.ecdf <- ecdf(post.samples$smd)
smd.ecdf(0.3)
```
```
## [1] 0.002125
```
We see that the probability of our pooled effect being smaller than 0.30 is very low: roughly 0.21%. Assuming the cut-off is valid, this would mean that the overall effect of the intervention we find in this meta-analysis is very likely to be meaningful.
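The same logic can be applied to the between-study heterogeneity. As a small sketch (reusing the `post.samples` object from before; the exact value depends on your sampling run), we could compute the probability that $\tau$ is larger than 0.3:
```{r, eval=F}
tau.ecdf <- ecdf(post.samples$tau)
1 - tau.ecdf(0.3)
```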
<br></br>
### Generating a Forest Plot
---
\index{Forest Plot}
\index{tidybayes Package}
As you have seen, Bayesian models allow us to extract their sampled posterior distribution. This can be extremely helpful to directly assess the probability of specific values given our model. We can also exploit this feature to create enhanced **forest plots** (Chapter \@ref(forest)), which are both very informative and pleasing to the eye^[Some of the code we are about to present here was inspired by a [blog post](https://mvuorre.github.io/posts/2016-09-29-bayesian-meta-analysis/) written by Matti Vuorre [-@vuorre2016bayesian].].
Unfortunately, there is currently no maintained package to directly create forest plots from **{brms}** models. But it is possible to build them ourselves using functions of the **{tidybayes}** package [@tidybayes]. So, let us first load this package, along with a few other ones, before we proceed.
```{r, message=F, warning=F, eval=F}
library(tidybayes)
library(dplyr)
library(ggplot2)
library(ggridges)
library(glue)
library(stringr)
library(forcats)
```
\vspace{2mm}
Before we can generate the plot, we have to prepare the data. In particular, we need to extract the posterior distribution for **each study individually** (since forest plots also depict the specific effect size of each study). To achieve this, we can use the `spread_draws` function in the **{tidybayes}** package. The function needs three arguments as input: our fitted **{brms}** model, the random-effects factor by which the results should be indexed, and the parameter we want to extract (here `b_Intercept`, since we want to extract the fixed term: the effect size).
Using the pipe operator, we can directly manipulate the output. Using the `mutate` function in **{dplyr}**, we calculate the actual effect size of each study by adding the pooled effect size `b_Intercept` to the estimated deviation of each study. We save the result as `study.draws`.
```{r, eval=F}
study.draws <- spread_draws(m.brm, r_Author[Author,], b_Intercept) %>%
mutate(b_Intercept = r_Author + b_Intercept)
```
\vspace{2mm}
Next, we want to generate the distribution of the pooled effect in a similar way (since in forest plots, the summary effect is usually displayed in the last row). We therefore slightly adapt the code from before, dropping the second argument to only get the pooled effect. The call to `mutate` only adds an extra column called `"Author"`. We save the result as `pooled.effect.draws`.
```{r, eval=F}
pooled.effect.draws <- spread_draws(m.brm, b_Intercept) %>%
mutate(Author = "Pooled Effect")
```
\vspace{2mm}
Next, we bind `study.draws` and `pooled.effect.draws` together in one data frame. We then start a pipe again, calling `ungroup` first, and then use `mutate` to (1) clean the study labels (i.e. replace dots with spaces), and (2) reorder the study factor levels by effect size (high to low). The result is the data we need for plotting, which we save as `forest.data`.
```{r, eval=F}
forest.data <- bind_rows(study.draws,
pooled.effect.draws) %>%
ungroup() %>%
mutate(Author = str_replace_all(Author, "[.]", " ")) %>%
mutate(Author = reorder(Author, b_Intercept))
```
Lastly, the forest plot should also display the effect size (SMD and credible interval) of each study. To do this, we use our newly generated `forest.data` data set, group it by `Author`, and then use the `mean_qi` function to calculate these values. We save the output as `forest.data.summary`.
```{r, eval = F}
forest.data.summary <- group_by(forest.data, Author) %>%
mean_qi(b_Intercept)
```
\vspace{2mm}
We are now ready to generate the forest plot using the **{ggplot2}** package. The code to generate the plot looks like this:
```{r, message=F, warning=F, fig.width=5, fig.height=4, eval = F}
ggplot(aes(b_Intercept,
relevel(Author, "Pooled Effect",
after = Inf)),
data = forest.data) +
# Add vertical lines for pooled effect and CI
geom_vline(xintercept = fixef(m.brm)[1, 1],
color = "grey", size = 1) +
geom_vline(xintercept = fixef(m.brm)[1, 3:4],
color = "grey", linetype = 2) +
geom_vline(xintercept = 0, color = "black",
size = 1) +
# Add densities
geom_density_ridges(fill = "blue",
rel_min_height = 0.01,
col = NA, scale = 1,
alpha = 0.8) +
geom_pointintervalh(data = forest.data.summary,
size = 1) +
# Add text and labels
geom_text(data = mutate_if(forest.data.summary,
is.numeric, round, 2),
aes(label = glue("{b_Intercept} [{.lower}, {.upper}]"),
x = Inf), hjust = "inward") +
labs(x = "Standardized Mean Difference", # summary measure
y = element_blank()) +
theme_minimal()
```
```{r, message=F, warning=F, fig.width=5, fig.height=4, echo = F, fig.align='center', out.width="75%", eval=F}
load("data/forest.data.rda")
load("data/forest.data.summary.rda")
png("images/tidybayes.png", 4000, 3000, res = 600)
ggplot(aes(b_Intercept,
relevel(Author, "Pooled Effect",
after = Inf)),
data = forest.data) +
geom_vline(xintercept = fixef(m.brm)[1, 1],
color = "grey", size = 1) +
geom_vline(xintercept = fixef(m.brm)[1, 3:4],
color = "grey", linetype = 2) +
geom_vline(xintercept = 0, color = "black",
size = 1) +
geom_density_ridges(fill = "gray40",
rel_min_height = 0.01,
col = NA, scale = 1,
alpha = 0.8) +
tidybayes::geom_pointintervalh(data = forest.data.summary,
size = 1) +
geom_text(data = mutate_if(forest.data.summary,
is.numeric, round, 2),
aes(label = glue("{b_Intercept} [{.lower}, {.upper}]"),
x = Inf), hjust = "inward") +
labs(x = "Standardized Mean Difference", # summary measure
y = element_blank()) +
theme_minimal()
dev.off()
```
\vspace{4mm}
```{r, message = F, out.width = '80%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/tidybayes_sep.png')
```
```{block2, type='boximportant'}
**Observed Versus Model-Based Effect Sizes**
\vspace{4mm}
One thing is very important to mention here. The effect sizes displayed in the forest plot do **not** represent the **observed** effect sizes of the original studies, but the estimate of the "true" effect size ($\theta_k$) of a study **based on the Bayesian model**. The dots shown in the forest plot are equivalent to the study-wise estimates we saw when extracting the random effects using `ranef` (except that these values were centered around the pooled effect).
Furthermore, looking at the values of studies with very high effect sizes (e.g. outliers such as "DanitzOrsillo" and "Shapiro et al."), we see that the model-based effect sizes are **closer to the overall effect** $\hat\mu$ than their initially observed values^[e.g. the observed effect in "DanitzOrsillo" was 1.79, while the estimated effect within the model is 1.05].
This **shrinkage to the mean** is typical for hierarchical models with a common overarching distribution, such as meta-analytic random-effects models. During the estimation process, the Bayesian model "supplements" information on the effect of **one** study $k$ with information on the **overall** distribution of true effect sizes as jointly estimated by all effect sizes $K$ in the meta-analysis.
Such **"borrowing of strength"** means that the values of studies with extreme effects are pulled towards the mean [@lunn2012bugs, chapter 10.1]. This behavior is more pronounced for studies which provide relatively few information (i.e. studies with large standard errors).
```
$$\tag*{$\blacksquare$}$$
<br></br>
## Questions & Answers
```{block, type='boxinfo'}
**Test your knowledge!**
\vspace{4mm}
1. What are the differences and similarities between the "conventional" random-effects model and a Bayesian hierarchical model?
\vspace{-2mm}
2. Name three advantages of Bayesian meta-analyses compared to their frequentist counterpart.
\vspace{-2mm}
3. Explain the difference between a weakly informative and uninformative prior.
\vspace{-2mm}
4. What is a Half-Cauchy distribution, and why is it useful for Bayesian meta-analysis?
\vspace{-2mm}
5. What is an ECDF, and how can it be used in Bayesian meta-analyses?
\vspace{4mm}
**Answers to these questions are listed in [Appendix A](https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/qanda.html#qanda13) at the end of this book.**
```
<br></br>
## Summary
* While meta-analysis is usually conducted using frequentist statistics, it is also possible to conduct Bayesian meta-analyses.
* Bayesian meta-analysis is based on the Bayesian hierarchical model. The core tenets of this model are identical to the "conventional" random-effects model. The difference, however, is that (informative, weakly informative or uninformative) **prior distributions** are assumed for $\mu$ and $\tau^2$.
* For Bayesian meta-analysis models, it is usually a good idea to assume **weakly informative** priors. Weakly informative priors are used to represent a **weak** belief that some values are more credible than others.
* To specify the prior distribution for the between-study heterogeneity variance $\tau^2$, the Half-Cauchy distribution can be used. Half-Cauchy distributions are particularly suited for this task because they are only defined for positive values, and possess heavier tails. This can be used to represent that very high values of $\tau^2$ are less likely, but still very much possible.
* When fitting Bayesian meta-analysis models, it is important to (1) always check if the model included **enough iterations** to converge (for example by checking the $\hat{R}$ values), and to (2) conduct **sensitivity analyses** with different prior specifications to evaluate the impact on the results.
| {
"repo_name": "MathiasHarrer/Doing-Meta-Analysis-in-R",
"stars": "272",
"repo_language": "R",
"file_name": "pcurve.bw.es.R",
"mime_type": "text/plain"
} |
#!/bin/sh
set -ev
Rscript -e "bookdown::render_book('index.Rmd', 'bookdown::gitbook')"
Rscript -e "bookdown::render_book('index.Rmd', 'bookdown::pdf_book')"
Rscript -e "bookdown::render_book('index.Rmd', 'bookdown::epub_book')"
| {
"repo_name": "MathiasHarrer/Doing-Meta-Analysis-in-R",
"stars": "272",
"repo_language": "R",
"file_name": "pcurve.bw.es.R",
"mime_type": "text/plain"
} |
# Risk of Bias Plots {#risk-of-bias-plots}
**by Luke A. McGuinness**
---
<img src="_figs/traffic_light.jpg" />
```{block2, type='boxempty'}
**Please cite this chapter as:**
McGuinness, L. A. (2021). Risk of Bias Plots. In Harrer, M., Cuijpers, P., Furukawa, T.A., & Ebert, D.D., _Doing Meta-Analysis with R: A Hands-On Guide_ (online version). bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/rob-plots.html.
```
<br></br>
<span class="firstcharacter">I</span>
n this chapter, we will describe how to create risk of bias plots in _R_, using the **{robvis}** package.
<br></br>
## Introduction
---
As part of a systematic review and meta-analysis, you may also want to examine the internal validity (risk of bias) of included studies using the relevant [domain-based risk of bias assessment tool](https://handbook-5-1.cochrane.org/chapter_8/8_3_1_types_of_tools.htm), and present the results of this assessment in a graphical format.
The Cochrane Handbook recommends two types of figure: a summary barplot figure showing the proportion of studies with a given risk of bias judgement within each domain, and a traffic light plot which presents the domain level judgments for each study.
However, the options available to researchers when creating these figures are limited. While RevMan has the functionality to create the plots, many researchers do not use it to conduct their systematic review and so copying the relevant data into the system is an inefficient solution.
Similarly, producing the graphs by hand, using software such as MS PowerPoint, is time consuming and means that the figures have to be manually updated if changes are needed. Additionally, journals usually require figures to be of publication quality (above ~300-400 dpi), which can be hard to achieve when exporting the risk of bias figures from RevMan or creating them by hand.
```{r rob-revman, fig.cap="Example RevMan output.", out.width='75%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/robsummaryrevman.jpg')
```
To avoid all of this, you can now easily plot the risk of bias figures yourself within R Studio, using the **{robvis}** package [@mcguinness2020risk; @robvis] which provides functions to convert a risk of bias assessment summary table into a summary plot or a traffic-light plot.
<br></br>
### Load **{robvis}**
---
Assuming that you have already installed the **{dmetar}** package (see Chapter \@ref(dmetar)), load the **{robvis}** package using:
```{r, message=F}
library(robvis)
```
<br></br>
### Importing Your Risk of Bias Summary Table Data
---
To produce our plots, we first have to import the results of our risk of bias assessment from **Excel** into _R_. Please note that **{robvis}** expects certain facts about the data you provide it, so be sure to follow the guidance below when setting up your table in Excel:
1. The first column is labelled “Study” and contains the study identifier (e.g. **Anthony et al, 2019**)
2. The second-to-last column is labelled “Overall” and contains the overall risk-of-bias judgments
3. The last column is labelled “Weight” and contains some measure of study precision (e.g. the weight assigned to each study in the meta-analysis, or, if no meta-analysis was performed, the sample size of each study). See Chapter \@ref(fem) for more details.
4. All other columns contain the results of the risk-of-bias assessment for a specific domain.
To elaborate on the above guidance, consider as an example the ROB2 tool which has 5 domains. The resulting data set that **{robvis}** would expect for this tool would have 8 columns:
* **Column 1**: Study identifier
* **Column 2-6**: One RoB2 domain per column
* **Column 7**: Overall risk-of-bias judgments
* **Column 8**: Weight.
In Excel, this risk of bias summary table would look like this:
```{r rob-example-data, out.width='75%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/rob_excel.png')
```
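Alternatively, if you prefer to assemble this summary table directly in _R_ rather than in _Excel_, a minimal sketch for the ROB2 template could look like this (the study names, judgments and weights are purely illustrative):

```{r, eval=F}
# A minimal ROB2-style summary table built directly in R
# (study names, judgments and weights are illustrative)
my_rob_data <- data.frame(
  Study = c("Anthony et al, 2019", "Barnes et al, 2020"),
  D1 = c("Low", "Some concerns"),
  D2 = c("Low", "Low"),
  D3 = c("Some concerns", "High"),
  D4 = c("Low", "Low"),
  D5 = c("Low", "Some concerns"),
  Overall = c("Low", "High"),
  Weight = c(12.3, 8.7))
```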
```{block, type='boximportant'}
**Column Names**
For three of the four tool templates (ROB2, ROBINS-I, QUADAS-2), what you name the columns containing the domain-level judgments is not important, as the templates within robvis will relabel each domain with the correct tool-specific heading.
```
Once you have saved the table you created in Excel to the working directory as a comma-separated-file (e.g. “robdata.csv”), you can either read the file into _R_ programmatically using the command below or via the “import assistant” method as described in Chapter \@ref(data-prep-R).
```{r, eval=F}
my_rob_data <- read.csv("robdata.csv", header = TRUE)
```
<br></br>
### Templates
---
**{robvis}** produces the risk of bias figures by using the data you provide to populate a template figure specific to the risk of bias assessment tool you used. At present, **{robvis}** contains templates for the following three tools:
* **ROB2**, the new Cochrane risk of bias tool for randomized controlled trials;
* **ROBINS-I**, the Risk of Bias In Non-randomized Studies - of Interventions tool;
* **QUADAS-2**, the Quality Assessment of Diagnostic Accuracy Studies tool, Version 2.
**{robvis}** also contains a special generic template, labeled as ROB1. Designed for use with the original Cochrane risk of bias tool for randomized controlled trials, it can also be used to visualize the results of assessments performed with other domain-based tools not included in the list above. See Chapter \@ref(rob1-template) for more information on the additional steps required when using this template.
<br></br>
### Example Data Sets
---
The **{robvis}** package contains an example data set for each template outlined above. These are stored in the following objects:
* `data_rob2`: Example data for the ROB2 tool
* `data_robins`: Example data for the ROBINS-I tool
* `data_quadas`: Example data for the QUADAS-2 tool
* `data_rob1`: Example data for the RoB-1 tool.
You can explore these data sets using the `glimpse` function (see Chapter \@ref(class-conversion)). For example, once you have loaded the package using `library(robvis)`, viewing the ROBINS-I example data set can be achieved by running the following command:
```{r, message=F, eval=F}
glimpse(data_robins)
```
```{r, message=F, echo=F}
dplyr::glimpse(data_robins)
```
These example data sets are used to create the plots presented through the remainder of this guide.
<br></br>
## Summary Plots
---
### Basics
---
Once we have successfully imported the risk of bias summary table into _R_, creating the risk of bias figures is quite straightforward.
To get started, a simple weighted summary bar plot using the ROB2 example data set (`data_rob2`) is created by running the following code:
```{r, fig.width=9, fig.height=2.5, fig.align='center', out.width='90%'}
rob_summary(data = data_rob2,
tool = "ROB2")
```
<br></br>
### Modifying the Plot
---
The `rob_summary` function has the following parameters:
* `data`. A data frame containing summary (domain) level risk-of-bias assessments, with the first column containing the study details, the second column containing the first domain of your assessments, and the final column containing a weight to assign to each study. The function assumes that the data includes a column for overall risk-of-bias. For example, a ROB2.0 dataset would have 8 columns (1 for study details, 5 for domain level judgments, 1 for overall judgments, and 1 for weights, in that order).
* `tool`. The risk of bias assessment tool used. RoB2.0 (`"ROB2"`), `"ROBINS-I"`, and `"QUADAS-2"` are currently supported.
* `overall`. An option to include an additional bar for overall risk-of-bias in the figure. Default is `FALSE`.
* `weighted`. An option to specify whether weights should be used in the bar plot. Default is `TRUE`, in line with current Cochrane Collaboration guidance.
* `colour`. An argument to specify the colour scheme for the plot. Default is `"cochrane"`, which uses the ubiquitous Cochrane colours, while a preset option for a colour-blind friendly palette is also available (`colour = "colourblind"`).
* `quiet`. A logical option to quietly produce the plot without displaying it. Default is `FALSE`.
Examples of the functionality of each argument are described below.
<br></br>
#### Tool
---
An argument to define the tool template you wish to use. In the example above, the ROB2 template is used. The two other primary templates - the ROBINS-I and QUADAS-2 templates - are demonstrated below:
```{r, fig.width=9, fig.height=3, fig.align='center', out.width='90%'}
rob_summary(data = data_robins,
tool = "ROBINS-I")
```
```{r, fig.width=9, fig.height=2.5, fig.align='center', out.width='90%'}
rob_summary(data = data_quadas,
tool = "QUADAS-2")
```
<br></br>
#### Overall
---
By default, an additional bar representing the overall risk of bias judgments is not included in the plot. If you would like to include this, set `overall = TRUE`. For example:
```{r, fig.width=9, fig.height=2.5, fig.align='center', out.width='90%'}
rob_summary(data = data_rob2,
tool = "ROB2",
overall = TRUE)
```
<br></br>
#### Weighted or Unweighted Bar Plots
---
By default, the bar plot is weighted by some measure of study precision, so that the bar plot shows the proportion of information rather than the proportion of studies that is at a particular risk of bias. This approach is in line with the [Cochrane Handbook](https://training.cochrane.org/handbook/current/chapter-07#section-7-4).
You can turn off this option by setting `weighted = FALSE` to create an unweighted bar plot. For example, compare the following two plots:
```{r, fig.width=9, fig.height=2.5, fig.align='center', out.width='90%'}
rob_summary(data = data_rob2,
tool = "ROB2")
```
```{r, fig.width=9, fig.height=2.5, fig.align='center', out.width='90%'}
rob_summary(data = data_rob2,
tool = "ROB2",
weighted = FALSE)
```
<br></br>
#### Colour Scheme
---
```{block2, type='boximportant'}
**British English Spelling**
Please note the non-US English spelling of **colour**!
```
The `colour` argument of both plotting functions allows users to select from two predefined colour schemes, `"cochrane"` (default) or `"colourblind"`, or to define their own palette by providing a vector of **hex codes**. For example, to use the predefined `"colourblind"` palette:
```{r, fig.width=9, fig.height=2.5, fig.align='center', out.width='90%'}
rob_summary(data = data_rob2,
tool = "ROB2",
colour = "colourblind")
```
And to define your own colour scheme:
```{r, fig.width=9, fig.height=2.5, fig.align='center', out.width='90%'}
rob_summary(data = data_rob2,
tool = "ROB2",
colour = c("#f442c8","#bef441","#000000"))
```
When defining your own colour scheme, you must ensure that the number of discrete judgments (e.g. “Low”, “Moderate”, “High”, “Critical”) and the number of colours specified are the same. Additionally, colours must be specified in order of ascending risk-of-bias (e.g. “Low” to “Critical”), with the first hex corresponding to “Low” risk of bias.
<br></br>
## Traffic Light Plots
---
Frequently, researchers will want to present the risk of bias in each domain for each study assessed. The resulting plots are commonly called traffic light plots, and can be produced with **{robvis}** via the `rob_traffic_light` function.
<br></br>
### Basics
---
To get started, a traffic light plot using the ROB2 example dataset (`data_rob2`) is created by running the following code:
```{r, fig.width=8, fig.height=10, fig.align='center', out.width='65%'}
rob_traffic_light(data = data_rob2,
tool = "ROB2")
```
<br></br>
### Modifying the Plot
---
The `rob_traffic_light` function has the following parameters:
* `data`. A data frame containing summary (domain) level risk-of-bias assessments, with the first column containing the study details, the second column containing the first domain of your assessments, and the final column containing a weight to assign to each study. The function assumes that the data includes a column for overall risk-of-bias. For example, a ROB2.0 data set would have 8 columns (1 for study details, 5 for domain level judgments, 1 for overall judgments, and 1 for weights, in that order).
* `tool`. The risk of bias assessment tool used. RoB2.0 (`"ROB2"`), `"ROBINS-I"`, and `"QUADAS-2"` are currently supported.
* `colour`. An argument to specify the colour scheme for the plot. Default is `"cochrane"`, which uses the ubiquitous Cochrane colours, while a preset option for a colour-blind friendly palette is also available (`"colourblind"`).
* `psize`. An option to change the size of the "traffic light" points. Default is `20`.
* `quiet`. A logical option to quietly produce the plot without displaying it. Default is `FALSE`.
<br></br>
#### Tool
---
An argument to define the tool template you wish to use. The ROB2 template was demonstrated above; the two other primary templates - the ROBINS-I and QUADAS-2 templates - are displayed below:
```{r, fig.width=10, fig.height=11, fig.align='center', out.width='65%'}
rob_traffic_light(data = data_robins,
tool = "ROBINS-I")
```
```{r, fig.width=8, fig.height=11, fig.align='center', out.width='65%'}
rob_traffic_light(data = data_quadas,
tool = "QUADAS-2")
```
<br></br>
#### Colour Scheme
---
```{block2, type='boximportant'}
**British English Spelling**
Please note the non-US English spelling of **colour**!
```
The `colour` argument of both plotting functions allows users to select from two predefined colour schemes, `"cochrane"` (default) or `"colourblind"`, or to define their own palette by providing a vector of hex codes.
For example, to use the predefined `"colourblind"` palette:
```{r, fig.width=8, fig.height=9, fig.align='center', out.width='65%'}
rob_traffic_light(data = data_rob2,
tool = "ROB2",
colour = "colourblind")
```
And to define your own colour scheme:
```{r, fig.width=8, fig.height=9, fig.align='center', out.width='65%'}
rob_traffic_light(data = data_rob2,
tool = "ROB2",
colour = c("#f442c8","#bef441","#000000"))
```
When defining your own colour scheme, you must ensure that the number of discrete judgments (e.g. “Low”, “Moderate”, “High”, “Critical”) and the number of colours specified are the same. Additionally, colours must be specified in order of ascending risk-of-bias (e.g. “Low” to “Critical”), with the first hex corresponding to “Low” risk of bias.
<br></br>
#### Point Size
---
Occasionally, when a large number of risk of bias assessment have been performed, the resulting traffic light plot may be too long to be useful. Users can address this by modifying the `psize` argument of the `rob_traffic_light` function to a smaller number (default is `20`). For example:
```{r, fig.width=5.5, fig.height=13, fig.align='center', out.width='45%'}
# Create bigger dataset (18 studies)
new_rob2_data <- rbind(data_rob2, data_rob2)
new_rob2_data$Study <- paste("Study", seq_len(nrow(new_rob2_data)))
# Plot bigger dataset, reducing the psize argument from 20 to 8
rob_traffic_light(data = new_rob2_data,
tool = "ROB2",
psize = 8)
```
<br></br>
## "ROB1" Generic Template {#rob1-template}
---
### Motivation
---
This template offers increased flexibility in the domains that are included in the plot. It can handle any number of domains (in contrast to the other tool templates, which have a set number of domains) and uses the user-defined column headings as domain titles in the resulting figures.
<br></br>
### Varying Numbers of Domains
---
The "ROB1" template (`tool = "ROB1"`) can handle varying numbers of columns. This was originally designed for use with the ROB1 assessment tool, to which frequently added or removed domains. **While this template could be used to present the result of assessments performed using adjusted versions of the other tools (ROB2, QUADAS-2, ROBINS-I), we would strongly discourage authors from doing so**. Authors using other published tools should use the stricter templates presented in the previous chapters to ensure they conform with the guidance.
<br></br>
### Domain Names
---
For the other tools listed in the previous sections, the names of the columns containing the domain-level risk of bias judgments are not important. For example, they are commonly named _D1_, _D2_, _D3_, etc. However, this is not the case when using the `"ROB1"` template.
Compare the column headings of the `data_rob2` and the `data_rob1` data sets (presented side by side here for ease of comparison):
```{r, echo=F, message=FALSE}
library(kableExtra)
# Collect the column names of both example datasets for a side-by-side table
k <- c(colnames(robvis::data_rob2), ".", ".")
kk <- colnames(robvis::data_rob1)
kkk <- 1:10
ms3 <- data.frame(colnum = kkk, data_rob2 = k, data_rob1 = kk)
kableExtra::kable(
list(
ms3[,c(1,2)],
ms3[,c(1,3)]
), col.names = c("No.", "Column name"),
longtable = T,
booktabs = T,
caption = 'Comparison of column names in the `data_rob2` (left) and `data_rob1` (right) datasets.'
) %>%
kable_styling()
```
The domain columns (Columns 2-6) in the ROB2 example dataset have been given arbitrary names of _D1_ - _D5_, as they will be overwritten by the tool to correspond to the correct domain titles given by the ROB2 guidance.
In contrast, the domain columns (Columns 2-8) in the ROB1 example dataset are labelled correctly, as these will be used in the figures produced by `rob_summary` and `rob_traffic_light`.
As an example, suppose we change the name of the “Random.sequence.generation” column to “This is a test”. In the `rob_summary` figure, the title of the first bar is changed, while in the `rob_traffic_light` figure, the caption is updated to reflect this change.
```{r, fig.width=9, fig.height=2.5, fig.align='center', out.width='90%'}
# Create copy of the data_rob1 dataset
new_rob1_data <- data_rob1
# Change the column heading for the first domain
colnames(new_rob1_data)[2] <- "This is a test"
# Create the summary barplot
rob_summary(data = new_rob1_data, tool = "ROB1")
```
```{r, fig.width=10, fig.height=11, fig.align='center', out.width='65%'}
# Create the traffic light plot
rob_traffic_light(data = new_rob1_data,
tool = "ROB1")
```
<br></br>
## Customizing and Saving
---
### The **{ggplot2}** Package
---
Both **{robvis}** functions (`rob_summary` and `rob_traffic_light`) produce a `ggplot` object, and so can be customized and saved using functions from the **{ggplot2}** package. Use the following code to load this package:
```{r, message=F}
library(ggplot2)
```
<br></br>
### Modifying Your Plots
---
There are a range of post-production modifications you can make to your plots using **{ggplot2}** functions. A useful example is adding a title to the plot:
```{r, fig.width=9, fig.height=2.5, fig.align='center', out.width='90%'}
# Make sure you have the ggplot2 package installed and loaded
rob_summary(data_rob2, "ROB2") +
ggtitle("Your custom title")
```
<br></br>
### Saving the Plot
---
In order to save a risk of bias plot, we first assign it to an object using the `<-` operator and then save it using the `ggsave` function of the **{ggplot2}** package.
When saving the summary bar plot, we recommend using the following code, with the default height and width values.
```{r, eval=F}
# Create your plot, and assign it to an object
rob_barplot <- rob_summary(data_rob2, "ROB2")
# Save your plot
ggsave(plot = rob_barplot, # Plot object to save
filename = "robplot2.png", # Destination file
width = 8, # Width of image (recommended)
height = 2.41, # Height of image (recommended)
dpi = 1000) # Resolution of image
```
When saving the traffic light plots, the approach is the same. However, there are no recommended values for the `width` and `height` parameters, as the best values for these parameters will vary from plot to plot as the number and names of included studies change.
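A rough sketch is shown below; the `width` and `height` values are purely illustrative starting points and should be adjusted to your own plot:

```{r, eval=F}
# Create the traffic light plot, and assign it to an object
rob_trafficplot <- rob_traffic_light(data_rob2, "ROB2")

# Save your plot (width and height are illustrative starting values only)
ggsave(plot = rob_trafficplot,
       filename = "robtrafficplot.png",
       width = 8,
       height = 9,
       dpi = 1000)
```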
<br></br>
### Saving in a Different Format
---
The plots can be saved in a range of formats using the function outlined above, simply by changing the extension of the file-name (e.g. from ".png" to ".pdf"). Acceptable formats include .png, .pdf, .tiff and .svg^[This format requires you to install and load the **{svglite}** package: `install.packages("svglite")`; `library(svglite)`.].
For example, to save the bar plot created above (`rob_barplot`) as a PDF:
```{r, eval=F}
# Save your plot
ggsave(plot = rob_barplot,
filename = "robplot2.pdf", # File extension now ".pdf"
width = 8,
height = 2.41,
dpi = 1000)
```
<br></br>
## Web App
---
In an effort to allow users to quickly explore the functionality of **{robvis}**, a web application was created, which provides a graphical interface to the **{robvis}** package.
The web-app is available [here](https://mcguinlu.shinyapps.io/robvis). A brief guided walk-through is presented below.
<br></br>
### Landing Page
---
```{r, out.width='90%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/robvis-app-landingpage.png')
```
The page presents a concise version of the guidance found in the previous chapters, specifically relating to setting up your dataset. More importantly, users can download the example datasets for each tool as a CSV file and use these to interact with the app and explore its functionality.
<br></br>
### Traffic Light Plot Page
---
Clicking on the second tab will bring you to the screen displayed below.
```{r, out.width='45%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/robvis-app-traffic-light.png')
```
This menu acts as a graphical interface for the `rob_traffic_light` function:
* Upload your risk of bias summary table by clicking “Browse…” and navigating to where you stored your CSV file.
* Use the drop-down box to select the tool used to perform your risk of bias assessments.
The basic traffic light plot should now appear on the right hand side of the window. You can customize the plot using the following options:
* Select the colour scheme you wish to use (either “Cochrane” or “Colour-blind friendly”)
* Modify the point size (useful when you wish to plot a large number of studies on a single traffic light plot)
* Modify the text size.
Once you are happy with the plot, you can download it by selecting the required format (.png, .jpg, .tiff, .eps) and clicking the “Download plot” button. Note that if you do not select a format first, you will get a download error.
<br></br>
### Summary Plot Page
---
Clicking on the third tab will bring you to the screen displayed below.
```{r, out.width='45%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/robvis-app-summary-plot.png')
```
**This menu acts as a graphical interface for the `rob_summary` function:**
* Upload your risk of bias summary table by clicking “Browse…” and navigating to where you stored your CSV file.
* Use the drop-down box to select the tool used to perform your risk of bias assessments.
The basic weighted summary bar plot should now appear on the right hand side of the window.
**You can customize the plot using the following options:**
* Choose whether or not to use weights when creating the figure
* Include an additional bar representing distribution of overall risk of bias judgments
* Select the colour scheme you wish to use (either “Cochrane” or “Colour-blind friendly”)
As with the traffic light plot tab, you can download your plot by selecting the required format and clicking the “Download plot” button.
$$\tag*{$\blacksquare$}$$
# Discovering R {#discovering-R}
---
<img src="_figs/discover.jpg" />
<br></br>
<span class="firstcharacter">I</span>
n this chapter, we begin our journey into the _R_ universe. Possibly, this is your first-ever exposure to programming, and you might feel a little anxious. That is understandable, but there is no reason to worry. Over the last two decades, thousands of intelligent people all around the world have contributed ways to make working with _R_ easier and more convenient for its users. We will also get to know an extremely powerful computer program that we can use to make writing and running _R_ code much less cumbersome.
Nevertheless, it is still harder to work with _R_ compared to other data analysis programs you may have used before. Hadley Wickham, one of the most important figures in the _R_ community, once stressed that _R_ is fundamentally different to **Graphical User Interface** (GUI) based software for statistics [@grolemund2014hands, Foreword]. GUIs allow you to perform data analyses just by clicking a few buttons, but you are ultimately limited to the functionality that the developers have deemed important.
_R_, on the other hand, has none of these limitations but can require more background knowledge. Like any language, _R_ has to be learned, and it needs practice to become a proficient user. Frustration is an unpleasant, but natural part of this process. In the preface, you can find an entire section in which we describe a few things you can do if you get stuck.
We want to assure you that learning _R_ **is** worth it. _R_ is the most versatile, comprehensive and most frequently used statistical programming language. The _R_ community is rapidly expanding each and every year, and _R_ 's appeal is so large that even the New York Times found it newsworthy to report on it [@vance2009data].
Whether you are working in academia or at a company, the things you can do in _R_ will often seem like a superpower to others. But it is a superpower that everyone can learn, provided some time and effort. That being said, it is time to get started.
<br></br>
## Installing _R_ and R Studio {#install-R}
---
\index{R Studio}
Before we can begin, we have to download and prepare a computer program which allows us to use _R_ in a convenient way for our statistical analyses. Probably the best option for this at the moment is [**R Studio**](https://rstudio.com/). This program gives us a user interface which makes it easier to handle our data, packages and output. The best part is that R Studio is completely free and can be downloaded anytime on the Internet. Recently, an online version of R Studio has been [released](https://rstudio.cloud/), which provides you with largely the same interface and functionality through your web browser. In this book, however, we will focus on the R Studio version we install directly on our computer.
```{block, type='boxinfo'}
In this chapter, we will focus on how you can **install** _R_ and R Studio **on your computer**. If you have installed R Studio on your computer already, and if you are an experienced _R_ user, **none of this might be new for you**. You may skip this chapter then. If you have never used _R_ before, bear with us.
```
\index{Comprehensive _R_ Archive Network (CRAN)}
Let us go through the necessary steps to set up _R_ and R Studio for our first coding endeavors.
1. R Studio is an interface which allows us to write _R_ code and run it in an easy way. But R Studio is not identical with _R_; it requires that _R_ software is already installed on your computer. First, we therefore have to install the latest _R_ version. Like R Studio, _R_ is completely free. It can be downloaded from the **Comprehensive `R` Archive Network**, or CRAN, website. The type of _R_ you have to download depends on whether you are using a [Windows PC](https://cran.r-project.org/bin/windows/base/) or a [Mac](https://cran.r-project.org/bin/macosx/). An important detail about _R_ is its **version**. _R_ is regularly updated, meaning that new versions become available. When your _R_ version becomes too outdated, it may happen that some things will not work anymore. It is therefore helpful to update the _R_ version regularly, maybe roughly every year, by re-installing _R_. For this book, we are using _R_ version 4.0.3. By the time you install _R_, there may already be a higher version available, and it is advised to always install the latest version.
2. After you have downloaded and installed _R_, you can download "R Studio Desktop" from the [R Studio website](https://rstudio.com/products/rstudio/download/). There are also versions of R Studio for which you have to buy a license, but this is definitely not required for our purposes. Simply download and install the free version of R Studio Desktop.
3. The first time you open R Studio, it will probably look a lot like in Figure \@ref(fig:rstudio-1). There are three panes in R Studio. In the upper right corner, we have the **Environment** pane, which displays objects that we defined (i.e. saved) internally in _R_. In the bottom right corner, you can find the **Files, Plots, Packages and Help** pane. This pane serves several functions; for example, it is used to display files on your computer, show plots and installed packages, and to access the help pages. The heart of R Studio, however, is the left side, where you can find the **Console**. The console is where all the _R_ code is entered and then run.
```{r rstudio-1, fig.cap='Panes in R Studio.', out.width='100%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/rstudio_1_col_sep.png')
```
4. There is a fourth pane in R Studio that is usually not shown in the beginning, the **source** pane. You can open the source pane by clicking on **File** > **New File** > **R Script** in the menu. This will open a new pane in the upper left corner, containing an empty _R_ **script**. _R_ scripts are a great way to collect your code in one place; you can also save them as a file with the extension ".R" (e.g. **myscript.R**) on your computer. To run code in an _R_ script, select it by dragging your cursor across all relevant lines, and then click on the "Run" button to the right. This will send the code to the console, where it is evaluated. A shortcut for this is Ctrl + Enter (Windows) or Cmd + Enter (Mac).
<br></br>
## Packages {#packages}
---
\index{Package, _R_}
\index{Function, _R_}
\index{Function Argument}
We will now install a few **packages** using _R_ code. Packages are one of the main reasons why _R_ is so powerful. They allow experts all around the world to develop sets of **functions** that others can download and then use in _R_. Functions are the core elements of _R_ ; they allow us to perform a pre-defined type of operation, usually on our own data.
There is a parallel between the mathematical formulation of a function, $f(x)$, and the way functions are defined in _R_. In _R_, a function is coded by first writing down its name, followed by brackets which contain the input and/or specifications for the function (so-called **arguments**).
Say that we want to know what the square root of 9 is. In _R_, we can use the `sqrt` function for this. We simply have to provide `9` as the input to the function to get the result. You can try this yourself. Next to the little arrow (`>`) in your console, write down `sqrt(9)` and then hit Enter. Let us see what happens.
```{r}
sqrt(9)
```
We now received our first **output** from _R_. It tells us that the square root of 9 is 3. Although there are much more complex functions in _R_ than this one, they are all governed by the same principle: you provide information on parameters that a function requires, the function uses this information to do its calculations, and in the end, it provides you with the output.
\index{tidyverse Package}
\index{meta Package}
\index{metafor Package}
In _R_, we also use a function called `install.packages` to **install packages**. The only thing we have to tell this function is the name of the package we want to install. For now, there are three packages we should install because they will be helpful later on.
* **{tidyverse}**. The **{tidyverse}** package [@tidyverse] is not a single package, but actually a bundle of packages which make it easy to manipulate and visualize data in _R_. When we install the **{tidyverse}** package, this provides us with the **{ggplot2}**, **{dplyr}**, **{tidyr}**, **{readr}**, **{purrr}**, **{stringr}** and **{forcats}** package at the same time. Functions included in the tidyverse have become very popular in the _R_ community in recent years, and are used by many researchers, programmers and data scientists. If you want to learn more about the tidyverse, you can visit its [website](https://www.tidyverse.org/).
* **{meta}**. This package contains functions which make it easy to run different types of meta-analyses [@meta]. We will primarily focus on this package in the guide, because it is easy to use, well documented, and very versatile. More info on the **{meta}** package can be found on its [website](http://www.imbi.uni-freiburg.de/lehre/lehrbuecher/meta-analysis-with-r).
* **{metafor}**. The **{metafor}** package [@urviecht] is also dedicated to conducting meta-analyses, and a true powerhouse in terms of functionality. Since we will use this package at times in later chapters, and because **{metafor}** is used by the **{meta}** package for many applications, it is best to have it installed. The **{metafor}** package also has an excellent [documentation](http://www.metafor-project.org/doku.php/metafor) for various meta-analysis-related topics.
The `install.packages` function only requires the name of the package we want to install as input. One package after another, our code should look like this:
```{r, eval=F}
install.packages("tidyverse")
install.packages("meta")
install.packages("metafor")
```
Simply type the code above into the console; then hit Enter to start the installation (see Figure \@ref(fig:rstudio-2)).
```{r rstudio-2, fig.cap='Installing a package.', out.width='75%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/rstudio_2_col.png')
```
```{block2, type='boximportant'}
Do not forget to put the package names into **quotation marks** (`""`). Otherwise, you will get an error message.
```
After you hit Enter, _R_ will start to install the package and print some information on the progress of the installation. When the `install.packages` function is finished, the package is ready to be used. Installed packages are added to _R_ 's **system library**. This system library can be accessed in the **Packages** pane in the bottom right corner of your R Studio screen. Whenever we want to use the installed package, we can load it from our library using the `library` function. Let us try this out, and load the **{tidyverse}** package.
```{r, eval=F}
library(tidyverse)
```
<br></br>
## The **{dmetar}** Package {#dmetar}
---
\index{dmetar Package}
In this guide, we want to make conducting meta-analyses as accessible and easy as possible for you as a researcher. Although there are fantastic packages like the **{meta}** and **{metafor}** package which do most of the heavy lifting, there are still some aspects of meta-analyses which we consider important, but not easy to do in _R_ currently, particularly if you do not have a programming or statistics background.
To fill this gap, we developed the **{dmetar}** package, which serves as the companion _R_ package for this book. The **{dmetar}** package has its own documentation, which can be found [online](https://dmetar.protectlab.org/). Functions of the **{dmetar}** package provide additional functionality for the **{meta}** and **{metafor}** packages (and a few other, more advanced packages), which we will be using frequently in this guide. Most of the functions included in the **{dmetar}** package and how they can improve your meta-analysis work flow will be described in detail throughout the book. Most of the example data sets we are using in this guide are also contained in **{dmetar}**.
```{block2, type='boxinfo'}
Although highly recommended, it is **not essential** to have the **{dmetar}** package installed to work through the guide. For each function of the package, we will also provide the source code, which can be used to save the function locally on your computer, and the additional _R_ packages those functions rely on. We will also provide supplementary download links for data sets included in the package.
However, installing the **{dmetar}** package beforehand is much more convenient because this pre-installs all the functions and data sets on your computer.
```
\index{Version, _R_}
To install the **{dmetar}** package, the _R_ version on your computer must be 3.6 or higher. If you have (re-)installed _R_ recently, this will probably be the case. To check if your _R_ version is new enough, you can paste this line of code into the Console, and then hit Enter.
```{r, eval=F}
R.Version()$version.string
```
This will display the current _R_ version you have. If the _R_ version is below 3.6, you will have to update it. There are good [blog posts](https://www.linkedin.com/pulse/3-methods-update-r-rstudio-windows-mac-woratana-ngarmtrakulchol/) on the Internet providing guidance on how to do this.
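For example, on Windows, one commonly used option is the `updateR` function from the **{installr}** package (this is just one possible route; the exact procedure depends on your operating system):

```{r, eval=F}
# One possible way to update R on Windows (procedure varies by platform)
install.packages("installr")
installr::updateR()
```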
If you want to install **{dmetar}**, one package already needs to be installed on your computer first. This package is called **{devtools}**. So, if **{devtools}** is not on your computer yet, you can install it like we did before.
```{r, eval=F}
install.packages("devtools")
```
You can then install **{dmetar}** using this line of code:
```{r, eval=F}
devtools::install_github("MathiasHarrer/dmetar")
```
This will initiate the installation process. It is likely that the installation will take some time because several other packages have to be installed along with the **{dmetar}** package for it to function properly. During the installation process, the installation manager may ask you if you want to update existing _R_ packages on your computer. The output may look something like this:
```
## These packages have more recent versions available.
## Which would you like to update?
##
## 1: All
## 2: CRAN packages only
## 3: None
## 4: ggpubr (0.2.2 -> 0.2.3) [CRAN]
## 5: zip (2.0.3 -> 2.0.4) [CRAN]
##
## Enter one or more numbers, or an empty line to skip updates:
```
When you get this message, it is best to tell the installation manager that no packages should be updated. In our example, this means pasting `3` into the console and then hitting Enter. In the same vein, when the installation manager asks this question:
```
## There are binary versions available but the source versions are later:
##
## [...]
##
## Do you want to install from sources the package which needs compilation?
## y/n:
```
It is best to choose `n` (no). If the installation fails with this strategy (meaning that you get an `Error`), run the installation again, but update all packages this time.
When writing this book and developing the package, we made sure that everyone can install it without errors. Nevertheless, there is still a chance that installing the package does not work at the first try. If the installation problem persists, you can have a look at the "Contact Us" section in the Preface of this book.
<br></br>
## Data Preparation & Import {#data-prep-R}
---
This chapter will tell you how to import data into _R_ using R Studio. Data preparation can be tedious and exhausting at times, but it is the backbone of all later steps. Therefore, we have to pay close attention to bringing the data into the correct format before we can proceed.
Usually, data imported into _R_ is stored in **Microsoft Excel** spreadsheets first. We recommend to store your data there because this makes it very easy to do the import. There are a few "Dos and Don'ts" when preparing the data in _Excel_.
* It is very important how you name the columns of your spreadsheet. If you already named the columns of your sheet adequately in _Excel_, you can save a lot of time later because your data does not have to be transformed using _R_. “Naming” the columns of the spreadsheet simply means to write the name of the variable into the first line of the column; _R_ will automatically detect that this is the name of the column then.
* Column names should not contain any spaces. To separate two words in a column name, you can use underscores or points (e.g. "column_name").
* It does **not** matter how columns are ordered in your _Excel_ spreadsheet. They just have to be labeled correctly.
* There is also **no** need to format the columns in any way. If you type the column name in the first line of your spreadsheet, _R_ will automatically detect it as a column name.
* It is also important to know that the import may distort special characters like ä, ü, ö, á, é, ê, etc. You might want to transform them into “normal” letters before you proceed.
* Make sure that your _Excel_ file only contains one sheet.
* If you have one or several empty rows or columns which used to contain data, make sure to delete those columns/rows completely, because _R_ could think that these columns contain (missing) data and import them also.
Let us start with an example data set. Imagine that you plan to conduct a meta-analysis of suicide prevention programs. The outcome you want to focus on in your study is the severity of suicidal ideation (i.e. to what degree individuals think about, consider, or plan to end their life), assessed by questionnaires. You already completed the study search and data extraction, and now want to import your meta-analysis data in _R_.
The next task is therefore to prepare an _Excel_ sheet containing all the relevant data. Table \@ref(tab:suicidedata) presents all the data we want to import. In the first row, this table also shows how we can name our columns in the _Excel_ file based on the rules we listed above. We can see that the spreadsheet lists each study in one row. For each study, the sample size ($n$), mean and standard deviation ($SD$) is included for both the intervention and control group. This is the outcome data needed to calculate effect sizes, which is something we will cover in detail in Chapter \@ref(effects). The following three columns contain variables we want to analyze later on as part of the meta-analysis.
We have prepared an _Excel_ file for you called **"SuicidePrevention.xlsx"**, containing exactly this data. The file can be downloaded from the [Internet](https://protectlab.org/en/datasets/suicide-prevention/).
```{r suicidedata, echo=F, message=F, warning=F}
library(openxlsx)
library(kableExtra)
openxlsx::read.xlsx("data/SuicidePrevention.xlsx") -> SuicidePrevention
rbind(SuicidePrevention[1:5,], rep("...", 10)) -> SuicidePrevention
colnames(SuicidePrevention) = c("Author", "N ", "Mean ", "SD", "N", "Mean", "SD ", "Year", "Age Group", "Control Group")
kableExtra::kable(SuicidePrevention, "html", booktabs = T,
caption = 'The suicide prevention dataset.',
linesep = "") %>%
kable_styling(bootstrap_options = c("striped", "condensed", "responsive", "hover"), font_size = 12) %>%
add_header_above(c(" ", "Intervention Group" = 3, "Control Group" = 3, "Subgroups" = 3)) %>%
add_header_above(c("'author'", "'n.e'", "'mean.e'", "'sd.e'", "'n.c'",
"'mean.c'", "'sd.c'", "'pubyear'", "'age_group'", "'control'"))
```
To import our _Excel_ file in R Studio, we have to set a **working directory** first. The working directory is a folder on your computer from which _R_ can use data, and in which outputs are saved. To set a working directory, you first have to create a folder on your computer in which you want all your meta-analysis data and results to be saved. You should also save the **"SuicidePrevention.xlsx"** file we want to import in this folder.
Then start R Studio and open your newly created folder in the bottom right **Files** pane. Once you have opened your folder, the _Excel_ file you just saved there should be displayed. Then set this folder as the working directory by clicking on the little gear wheel on top of the pane, and then on **Set as working directory** in the pop-up menu. This will make the currently opened folder the working directory.
```{r wd, fig.cap='Setting the working directory; data set loaded in the R environment.', out.width='100%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/wd_col_sep.png')
```
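Alternatively, the working directory can also be set directly via code, using the `setwd` function (the path below is only a placeholder for the folder you created):

```{r, eval=F}
# Set the working directory via code (replace with the path to your folder)
setwd("~/my-meta-analysis")

# Check which folder is currently used as the working directory
getwd()
```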
We can now proceed and import the data into _R_. In the **Files** pane, simply click on the **"SuicidePrevention.xlsx"** file. Then click on **Import Dataset...**. An import assistant should now pop up, which is also loading a preview of your data. This can be time-consuming sometimes, so you can skip this step if you want to, and click straight on **Import**.
\index{Data Frame}
Your data set should then be listed with the name `SuicidePrevention` in the top right **Environment** pane. This means that your data is now loaded and can be used by _R_ code. Tabular data sets like the one we imported here are called **data frames** (`data.frame`) in _R_. Data frames are data sets with columns and rows, just like the _Excel_ spreadsheet we just imported.
\index{openxlsx Package}
```{block2, type='boxinfo'}
**{openxlsx}**
\vspace{4mm}
It is also possible to import data files directly using code. A good package we can use to do this is called **{openxlsx}** [@openxlsx]. As with all _R_ packages, you have to install it first. You can then use the `read.xlsx` function to import an _Excel_ sheet.
\vspace{2mm}
If the file is saved in your working directory, you only have to provide the function with the file's name, and assign the imported data to an object in _R_. If we want our dataset to have the name `data` in _R_, for example, we can use this code:
`library(openxlsx)` <br/>
`data <- read.xlsx("SuicidePrevention.xlsx")`
```
<br></br>
## Data Manipulation {#data-manip-R}
---
Now that we have imported our first data set using R Studio, let us do a few manipulations. **Data wrangling**, meaning the transformation of data to make it usable for further analysis, is an essential part of all data analytics. Some professions, such as data scientists, spend the majority of their time turning raw, "untidy" data into "tidy" data sets. Functions of the **{tidyverse}** provide an excellent toolbox for data wrangling. If you have not loaded the package from your library yet, you should do so now for the following examples.
```{r, eval=F}
library(tidyverse)
```
<br></br>
### Class Conversion {#class-conversion}
---
First, we should have a peek at the `SuicidePrevention` data set we imported in the last chapter. To do this, we can use the `glimpse` function provided by the **{tidyverse}**.
```{r, echo=F, message=F, warning=F}
library(tidyverse)
library(openxlsx)
SuicidePrevention = read.xlsx("data/SuicidePrevention.xlsx")
```
```{r}
glimpse(SuicidePrevention)
```
We see that this gives us details on the type of data we have stored in each column of our data set. There are different abbreviations signifying different types of data. In _R_, they are called **classes**.
* `<num>` stands for **numeric**. This is all data stored as numbers (e.g. 1.02).
* `<chr>` stands for **character**. This is all data stored as words.
* `<log>` stands for **logical**. These are variables which are binary, meaning that they signify that a condition is either `TRUE` or `FALSE`.
* `<factor>` stands for **factor**. Factors are stored as numbers, with each number signifying a different level of a variable. Possible factor levels of a variable might be 1 = "low", 2 = "medium", 3 = "high".
We can also check the class of a column using the `class` function. We can access the column in a data frame directly by adding the `$` operator to its name and then the name of the column. Let us try this out. First, we let _R_ provide us with the data contained in the column `n.e`. After that, we check the class of the column.
```{r}
SuicidePrevention$n.e
class(SuicidePrevention$n.e)
```
We see that the column `n.e` containing the sample sizes in the intervention group has the class `character`. But wait, that is the wrong class! During the import, this column was wrongly classified as a `character` variable, while it should actually have the class `numeric`. This has implications on further analysis steps. For example, if we want to calculate the mean sample size, we get this warning:
```{r, results='hold'}
mean(SuicidePrevention$n.e)
```
To make our data set usable, we often have to convert our columns to the right classes first. To do this, we can use a set of functions which all begin with "`as.`": `as.numeric`, `as.character`, `as.logical` and `as.factor`. Let us go through a few examples.
In the output of the `glimpse` function from before, we see that several columns have been given the `character` class, while they should be `numeric`. This concerns columns `n.e`, `mean.e`, `sd.e`, `mean.c` and `sd.c`. We see that the publication year `pubyear` has the class `<dbl>`. This stands for **double** and means that the column is a numeric vector. It is a historical anomaly that both `double` and `numeric` are used in _R_ to refer to numeric data types. Usually, however, this has no actual practical implications.
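A small illustration of this distinction:

```{r, eval=F}
class(2.5)   # returns "numeric"
typeof(2.5)  # returns "double"
```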
The fact that some numerical values are coded as **characters** in our data set, however, will lead to problems downstream, so we should change the class using the `as.numeric` function. We provide the function with the column we want to change and then save the output back to its original place using the **assignment operator** (`<-`). This leads to the following code.
```{r}
SuicidePrevention$n.e <- as.numeric(SuicidePrevention$n.e)
SuicidePrevention$mean.e <- as.numeric(SuicidePrevention$mean.e)
SuicidePrevention$sd.e <- as.numeric(SuicidePrevention$sd.e)
SuicidePrevention$n.c <- as.numeric(SuicidePrevention$n.c)
SuicidePrevention$mean.c <- as.numeric(SuicidePrevention$mean.c)
SuicidePrevention$sd.c <- as.numeric(SuicidePrevention$sd.c)
```
We also see in the `glimpse` output that `age_group` and `control`, the subgroups in our data, are coded as characters. In reality, however, it is more adequate to encode them as factors, with two factor levels each. We can use the `as.factor` function to change the class.
```{r}
SuicidePrevention$age_group <- as.factor(SuicidePrevention$age_group)
SuicidePrevention$control <- as.factor(SuicidePrevention$control)
```
Using the `levels` and `nlevels` function, we can also have a look at the factor labels and number of levels in a factor.
```{r}
levels(SuicidePrevention$age_group)
nlevels(SuicidePrevention$age_group)
```
We can also use the `levels` function to change the name of the factor labels. We simply have to assign new names to the original labels. To do this in _R_, we have to use the **concatenate**, or `c` function. This function can tie two or more words or numbers together and create one element. Let us try this out.
```{r}
new.factor.levels <- c("gen", "older")
new.factor.levels
```
Perfect. We can now use the newly created `new.factor.levels` object and assign it to the factor labels of our `age_group` column.
```{r}
levels(SuicidePrevention$age_group) <- new.factor.levels
```
Let us check if the renaming has worked.
```{r}
SuicidePrevention$age_group
```
It is also possible to create logicals using `as.logical`. Let us say we want to recode the column `pubyear`, so that it only displays if a study was published after 2009. To do this, we have to define a yes/no rule via code. We can do this using the "greater or equal than" operator `>=`, and then use this as the input for the `as.logical` function.
```{r}
SuicidePrevention$pubyear
as.logical(SuicidePrevention$pubyear >= 2010)
```
We can see that this encodes every element in `pubyear` as `TRUE` or `FALSE`, depending on whether the publication year was greater than or equal to 2010, or not.
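If we want to keep this recoded information, we can store it in a new column of our data set (the column name `pub.recent` is an arbitrary, illustrative choice):

```{r, eval=F}
# Save the recoded publication year information as a new column
SuicidePrevention$pub.recent <- as.logical(SuicidePrevention$pubyear >= 2010)
```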
<br></br>
### Data Slicing {#data-slicing}
---
In _R_, there are several ways to extract a subset of a data frame. We have already covered one way, the `$` operator, which can be used to extract columns. A more generic way to extract slices from a data set is to use square brackets. The generic form we have to follow when using square brackets is `data.frame[rows, columns]`. It is always possible to extract rows and columns by using the number in which they appear in the data set. For example, we can use the following code to extract the data in the second row of the data frame.
```{r}
SuicidePrevention[2,]
```
We can be even more specific and tell _R_ that we want only the information in the first column of the second row.
```{r}
SuicidePrevention[2, 1]
```
To select specific slices, we have to use the concatenate (`c`) function again. For example, if we want to extract rows 2 and 3 as well as columns 4 and 6, we can use this code.
```{r}
SuicidePrevention[c(2,3), c(4,6)]
```
It is usually only possible to select rows by their number. For columns, however, it is also possible to provide the column **name** instead of the number.
```{r}
SuicidePrevention[, c("author", "control")]
```
Another possibility is to **filter** a data set based on row values. We can use the `filter` function to do this. In the function, we need to specify our data set name, as well as a filtering logic. A relatively straightforward example is to filter all studies in which `n.e` is equal or smaller than 50.
```{r}
filter(SuicidePrevention, n.e <= 50)
```
But it is also possible to filter by names. Imagine we want to extract the studies by authors **Meijer** and **Zaytsev**. To do this, we have to define our filtering logic using the `%in%` operator and the concatenate function.
```{r}
filter(SuicidePrevention, author %in% c("Meijer et al.",
"Zaytsev et al."))
```
Conversely, we can also extract all studies **except** the one by **Meijer** and **Zaytsev** by putting an exclamation mark in front of the filtering logic.
```{r, eval=F}
filter(SuicidePrevention, !author %in% c("Meijer et al.",
"Zaytsev et al."))
```
<br></br>
### Data Transformation {#data-transform}
---
Of course it is also possible to change specific values in an _R_ data frame, or to expand them. To change data we saved internally in _R_, we have to use the **assignment operator**. Let us re-use what we previously learned about data slicing to change a specific value in our data set. Imagine that we made a mistake and that the publication year of the study by **DeVries et al.** is wrongly reported as 2019 when it should be 2018. We can change the value by slicing our data set accordingly, and then assigning the new value. Remember that the results of **DeVries et al.** are reported in the second row of the data set.
```{r, eval=F}
SuicidePrevention[2, "pubyear"] <- 2018
SuicidePrevention[2, "pubyear"]
```
```
## [1] 2018
```
It is also possible to change more than one value at once. For example, if we want to add 5 to every intervention group mean in our data set, we can do that using this code.
```{r}
SuicidePrevention$mean.e + 5
```
We can also use two or more columns to do calculations. A practically relevant example is that we might be interested in calculating the **mean difference** between the intervention and control group means for each study. Compared to other programming languages, this is spectacularly easy in _R_.
```{r}
SuicidePrevention$mean.e - SuicidePrevention$mean.c
```
As you can see, this takes the intervention group mean of each study, and then subtracts the control group mean, each time using the value of the same row. Imagine that we want to use this mean difference later on. Therefore, we want to save it as an extra object called `md`, and add it as a new column to our `SuicidePrevention` data frame. Both are easy to do using the assignment operator.
```{r}
md <- SuicidePrevention$mean.e - SuicidePrevention$mean.c
SuicidePrevention$md <- SuicidePrevention$mean.e -
SuicidePrevention$mean.c
```
\index{Pipe, _R_}
The last thing we want to show you are **pipe operators**. In _R_, pipes are written as `%>%`. Pipes essentially allow us to apply a function to an object without having to specify the object name directly in the function call. We simply connect the object and the function using the pipe operator. Let us give you an easy example. If we want to calculate the mean sample size in the control group, we can use the `mean` function and the pipe operator like this:
```{r}
SuicidePrevention$n.c %>% mean()
```
Admittedly, in this example, it is hard to see the added value of such pipes. The special strength of pipes comes from the fact that they allow us to **chain** many functions together. Imagine we want to know the square root of the mean control group sample size, but only of studies published after 2009. Pipes let us do this conveniently in one step.
```{r}
SuicidePrevention %>%
filter(pubyear > 2009) %>%
pull(n.c) %>%
mean() %>%
sqrt()
```
In the pipe, we used one function we have not covered before, the `pull` function. This function can be seen as an equivalent of the `$` operator that we can use in pipes. It simply "pulls out" the variable we specify in the function, so it can be fed forward to the next part of the pipe.
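For illustration, the following two (non-evaluated) lines extract exactly the same column; `pull` is simply the pipe-friendly counterpart of the `$` operator.
```{r, eval=F}
# Both commands return the control group sample sizes as a vector
pull(SuicidePrevention, n.c)
SuicidePrevention$n.c
```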
```{block, type='boxinfo'}
**Accessing the _R_ Documentation**
\vspace{2mm}
Many functions in _R_ require several arguments, and it is impossible to remember how all functions are used correctly. Thankfully, it is not necessary to know the usage of every function by heart. R Studio makes it easy for us to access the _R_ documentation, where every function has a detailed description page.
There are two ways to search for a function's documentation page. The first is to access the **Help** pane in the lower right corner of R Studio, and then use the search bar to find information on a specific function. A more convenient way is to simply run `?` followed by the name of the function in the console, e.g. `?mean`. This will open the documentation entry for this function automatically.
The _R_ documentation of a function usually contains at least a **Usage**, **Arguments** and **Examples** section. The **Arguments** and **Examples** sections are often particularly helpful for understanding how a function is used.
```
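For example, both of the following commands open the documentation page of the `mean` function:
```{r, eval=F}
?mean
help("mean")
```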
<br></br>
### Saving Data {#saving-data}
---
Once we have done transformations with our data and saved them internally in _R_, we have to **export** it at some point. There are two types of file formats which we advise you to use when saving _R_ data frames: _.rda_ and _.csv_.
The file ending _.rda_ stands for _R_ **Data**. It is a file type specifically for _R_, with all advantages and disadvantages. An advantage of _.rda_ files is that they can easily be re-opened in _R_, and that there is no risk that your data may be distorted during the export. They are also very versatile and can save data that does not fit into a spreadsheet format. The disadvantage is that they can only be opened in _R_; but for some projects, this is sufficient.
To save an object as an _.rda_ data file, you can use the `save` function. In the function, you have to provide (1) the name of the object, and (2) the **exact** name you want the file to have, including the file ending. Running the function will then save the file to your working directory.
```{r, eval=F}
save(SuicidePrevention, file = "suicideprevention.rda")
```
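As a quick illustration (assuming the file sits in the working directory), the saved _.rda_ file can later be restored with the `load` function, which re-creates the object under its original name.
```{r, eval=F}
# Restores the object "SuicidePrevention" in our environment
load("suicideprevention.rda")
```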
The file ending _.csv_ stands for **comma-separated values**. This format is one of the most commonly used ones for data in general. It can be opened by many programs, including _Excel_. You can use the `write.csv` function to save your data as a _.csv_. The code structure and behavior are nearly identical to those of `save`, but the supplied object **needs** to be a data frame or another tabular data object. And of course, you need to specify the file ending ".csv".
```{r, eval=F}
write.csv(SuicidePrevention, file = "suicideprevention.csv")
```
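To get the data back into _R_ at a later point, a minimal sketch using the `read.csv` function could look like this. Note that, depending on how the file was written, an additional column with row names may appear after re-import.
```{r, eval=F}
# Re-import the .csv file from the working directory
SuicidePrevention <- read.csv("suicideprevention.csv")
```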
This has only been a quick overview of data manipulation strategies in _R_. Learning _R_ from scratch can be exhausting at times, especially when we deal with something as supposedly easy as manipulating data. However, the best way to get accustomed to the way _R_ works is to practice. After some time, common _R_ commands will become second nature to you.
A good way to continue learning is to have a look at Hadley Wickham and Garrett Grolemund's book **R for Data Science** [-@wickham2016r]. Like this guide, the book can be completely read online for [free](https://r4ds.had.co.nz/transform.html). Additionally, we also collected a few exercises on the next page, which you can use for practicing what we learned here.
$$\tag*{$\blacksquare$}$$
<br></br>
## Questions & Answers
```{block, type='boxquestion'}
**Data Manipulation Exercises**
\vspace{2mm}
For these exercises, we will use a new data set called `data`. You can create this data set directly in _R_ using this code:
```{r}
data <- data.frame("Author" = c("Jones", "Goldman",
"Townsend", "Martin",
"Rose"),
"TE" = c(0.23, 0.56,
0.78, 0.23,
0.33),
"seTE" = c(0.324, 0.235,
0.394, 0.275,
0.348),
"subgroup" = c("one", "one",
"two", "two",
"three"))
```
```{block, type="boxempty"}
Here are the exercises for this data set.
1. Show the variable `Author`.
\vspace{-2mm}
2. Convert `subgroup` to a factor.
\vspace{-2mm}
3. Select all the data of the "Jones" and "Martin" study.
\vspace{-2mm}
4. Change the name of the study "Rose" to "Bloom".
\vspace{-2mm}
5. Create a new variable `TE_seTE_diff` by subtracting `seTE` from `TE`. Save the results in `data`.
\vspace{-2mm}
6. Use a pipe to (1) filter all studies in `subgroup` "one" or "two", (2) select the variable `TE_seTE_diff`, (3) take the mean of the variable, and then apply the `exp` function to it. Access the _R_ documentation to find out what the `exp` function does.
\vspace{4mm}
**Answers to these questions are listed in [Appendix A](https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/qanda.html#qanda2) at the end of this book.**
```
<br></br>
## Summary
* _R_ has become one of the most powerful and frequently used statistical programming languages in the world.
* _R_ is not a computer program with a graphical user interface and pre-defined functionality. It is a full programming language to which people all around the world can contribute freely available add-ons, so-called **packages**.
* R Studio is a computer program which allows us to use _R_ for statistical analyses in a convenient way.
* The fundamental building blocks of _R_ are functions. Many of these functions can be imported through packages which we can install from the internet.
* Functions can be used to import, manipulate, analyze and save data using _R_.
\documentclass[]{book}
\usepackage{lmodern}
\usepackage{amssymb,amsmath}
\usepackage{ifxetex,ifluatex}
\usepackage{fixltx2e} % provides \textsubscript
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\else % if luatex or xelatex
\ifxetex
\usepackage{mathspec}
\else
\usepackage{fontspec}
\fi
\defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase}
\fi
% use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
% use microtype if available
\IfFileExists{microtype.sty}{%
\usepackage{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\usepackage[margin=1in]{geometry}
\usepackage{hyperref}
\hypersetup{unicode=true,
pdftitle={A Minimal Book Example},
pdfauthor={Yihui Xie},
pdfborder={0 0 0},
breaklinks=true}
\urlstyle{same} % don't use monospace font for urls
\usepackage{natbib}
\bibliographystyle{apalike}
\usepackage{color}
\usepackage{fancyvrb}
\newcommand{\VerbBar}{|}
\newcommand{\VERB}{\Verb[commandchars=\\\{\}]}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\usepackage{framed}
\definecolor{shadecolor}{RGB}{248,248,248}
\newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{#1}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\ImportTok}[1]{#1}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}}
\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{#1}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}}
\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.81,0.36,0.00}{\textbf{#1}}}
\newcommand{\BuiltInTok}[1]{#1}
\newcommand{\ExtensionTok}[1]{#1}
\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}}
\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.77,0.63,0.00}{#1}}
\newcommand{\RegionMarkerTok}[1]{#1}
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{0.94,0.16,0.16}{#1}}
\newcommand{\ErrorTok}[1]{\textcolor[rgb]{0.64,0.00,0.00}{\textbf{#1}}}
\newcommand{\NormalTok}[1]{#1}
\usepackage{longtable,booktabs}
\usepackage{graphicx,grffile}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}
}
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{5}
% Redefines (sub)paragraphs to behave more like sections
\ifx\paragraph\undefined\else
\let\oldparagraph\paragraph
\renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}}
\fi
\ifx\subparagraph\undefined\else
\let\oldsubparagraph\subparagraph
\renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}}
\fi
%%% Use protect on footnotes to avoid problems with footnotes in titles
\let\rmarkdownfootnote\footnote%
\def\footnote{\protect\rmarkdownfootnote}
%%% Change title format to be more compact
\usepackage{titling}
% Create subtitle command for use in maketitle
\newcommand{\subtitle}[1]{
\posttitle{
\begin{center}\large#1\end{center}
}
}
\setlength{\droptitle}{-2em}
\title{A Minimal Book Example}
\pretitle{\vspace{\droptitle}\centering\huge}
\posttitle{\par}
\author{Yihui Xie}
\preauthor{\centering\large\emph}
\postauthor{\par}
\predate{\centering\large\emph}
\postdate{\par}
\date{2018-10-06}
\usepackage{booktabs}
\usepackage{amsthm}
\makeatletter
\def\thm@space@setup{%
\thm@preskip=8pt plus 2pt minus 4pt
\thm@postskip=\thm@preskip
}
\makeatother
\usepackage{amsthm}
\newtheorem{theorem}{Theorem}[chapter]
\newtheorem{lemma}{Lemma}[chapter]
\theoremstyle{definition}
\newtheorem{definition}{Definition}[chapter]
\newtheorem{corollary}{Corollary}[chapter]
\newtheorem{proposition}{Proposition}[chapter]
\theoremstyle{definition}
\newtheorem{example}{Example}[chapter]
\theoremstyle{definition}
\newtheorem{exercise}{Exercise}[chapter]
\theoremstyle{remark}
\newtheorem*{remark}{Remark}
\newtheorem*{solution}{Solution}
\begin{document}
\maketitle
{
\setcounter{tocdepth}{1}
\tableofcontents
}
\chapter{Prerequisites}\label{prerequisites}
This is a \emph{sample} book written in \textbf{Markdown}. You can use
anything that Pandoc's Markdown supports, e.g., a math equation
\(a^2 + b^2 = c^2\).
The \textbf{bookdown} package can be installed from CRAN or Github:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{install.packages}\NormalTok{(}\StringTok{"bookdown"}\NormalTok{)}
\CommentTok{# or the development version}
\CommentTok{# devtools::install_github("rstudio/bookdown")}
\end{Highlighting}
\end{Shaded}
Remember each Rmd file contains one and only one chapter, and a chapter
is defined by the first-level heading \texttt{\#}.
To compile this example to PDF, you need XeLaTeX. You are recommended to
install TinyTeX (which includes XeLaTeX):
\url{https://yihui.name/tinytex/}.
\chapter{Introduction}\label{intro}
You can label chapter and section titles using \texttt{\{\#label\}}
after them, e.g., we can reference Chapter \ref{intro}. If you do not
manually label them, there will be automatic labels anyway, e.g.,
Chapter \ref{methods}.
Figures and tables with captions will be placed in \texttt{figure} and
\texttt{table} environments, respectively.
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{par}\NormalTok{(}\DataTypeTok{mar =} \KeywordTok{c}\NormalTok{(}\DecValTok{4}\NormalTok{, }\DecValTok{4}\NormalTok{, .}\DecValTok{1}\NormalTok{, .}\DecValTok{1}\NormalTok{))}
\KeywordTok{plot}\NormalTok{(pressure, }\DataTypeTok{type =} \StringTok{'b'}\NormalTok{, }\DataTypeTok{pch =} \DecValTok{19}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{figure}
{\centering \includegraphics[width=0.8\linewidth]{bookdown-demo_files/figure-latex/nice-fig-1}
}
\caption{Here is a nice figure!}\label{fig:nice-fig}
\end{figure}
Reference a figure by its code chunk label with the \texttt{fig:}
prefix, e.g., see Figure \ref{fig:nice-fig}. Similarly, you can
reference tables generated from \texttt{knitr::kable()}, e.g., see Table
\ref{tab:nice-tab}.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{knitr}\OperatorTok{::}\KeywordTok{kable}\NormalTok{(}
\KeywordTok{head}\NormalTok{(iris, }\DecValTok{20}\NormalTok{), }\DataTypeTok{caption =} \StringTok{'Here is a nice table!'}\NormalTok{,}
\DataTypeTok{booktabs =} \OtherTok{TRUE}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{table}
\caption{\label{tab:nice-tab}Here is a nice table!}
\centering
\begin{tabular}[t]{rrrrl}
\toprule
Sepal.Length & Sepal.Width & Petal.Length & Petal.Width & Species\\
\midrule
5.1 & 3.5 & 1.4 & 0.2 & setosa\\
4.9 & 3.0 & 1.4 & 0.2 & setosa\\
4.7 & 3.2 & 1.3 & 0.2 & setosa\\
4.6 & 3.1 & 1.5 & 0.2 & setosa\\
5.0 & 3.6 & 1.4 & 0.2 & setosa\\
\addlinespace
5.4 & 3.9 & 1.7 & 0.4 & setosa\\
4.6 & 3.4 & 1.4 & 0.3 & setosa\\
5.0 & 3.4 & 1.5 & 0.2 & setosa\\
4.4 & 2.9 & 1.4 & 0.2 & setosa\\
4.9 & 3.1 & 1.5 & 0.1 & setosa\\
\addlinespace
5.4 & 3.7 & 1.5 & 0.2 & setosa\\
4.8 & 3.4 & 1.6 & 0.2 & setosa\\
4.8 & 3.0 & 1.4 & 0.1 & setosa\\
4.3 & 3.0 & 1.1 & 0.1 & setosa\\
5.8 & 4.0 & 1.2 & 0.2 & setosa\\
\addlinespace
5.7 & 4.4 & 1.5 & 0.4 & setosa\\
5.4 & 3.9 & 1.3 & 0.4 & setosa\\
5.1 & 3.5 & 1.4 & 0.3 & setosa\\
5.7 & 3.8 & 1.7 & 0.3 & setosa\\
5.1 & 3.8 & 1.5 & 0.3 & setosa\\
\bottomrule
\end{tabular}
\end{table}
You can write citations, too. For example, we are using the
\textbf{bookdown} package \citep{R-bookdown} in this sample book, which
was built on top of R Markdown and \textbf{knitr} \citep{xie2015}.
\chapter{Literature}\label{literature}
Here is a review of existing methods.
\chapter{Methods}\label{methods}
We describe our methods in this chapter.
\chapter{Applications}\label{applications}
Some \emph{significant} applications are demonstrated in this chapter.
\section{Example one}\label{example-one}
\section{Example two}\label{example-two}
\chapter{Final Words}\label{final-words}
We have finished a nice book.
\bibliography{book.bib,packages.bib}
\end{document}
# Structural Equation Modeling Meta-Analysis {#sem}
---
<img src="_figs/semtitle_leaf.jpg" />
<br></br>
<span class="firstcharacter">I</span>
n the last chapter, we showed that meta-analytic models have an inherent multilevel structure. This quality can be used, for example, to extend conventional meta-analysis to three-level models.
\index{Analysis of Variance}
\index{Subgroup Analysis}
A peculiar thing about statistical methods is that they are often put into separate “boxes”. They are treated as unrelated in research and practice, when in fact they are not. For many social science students, for example, it often comes as a surprise to hear that an **analysis of variance** (ANOVA) and a linear regression with a categorical predictor are doing essentially the same thing^[ANOVAs are based on the model $y_{ij} = \mu + \tau_i + \epsilon_{ij}$, where $\tau_i$ is the effect of the $i$th factor level/treatment, and $\epsilon_{ij}$ represents deviations due to (unexplained) random error [@montgomery, chapter 3.2]. This is nothing but a special case of a linear regression model. The primary difference is that $\tau_i$ is **effect-coded** (e.g. the treatment variable is either -1 or 1, so that the categorical treatment effects sum up to zero: $\sum_{i=1}^{a} \tau_i = 0$). In contrast, categorical predictors are usually dummy-coded (e.g. 0 and 1) in linear regression models.]. This often happens because these two methods are traditionally used in different contexts, and taught as separate entities.
\index{Structural Equation Model}
In a similar vein, it has been only fairly recently that researchers have started seeing multilevel models as a special form of a **structural equation model**, or SEM [@mehta2005people; @bauer2003estimating]. As we learned, every meta-analysis is based on a multilevel model. As a consequence, we can also treat meta-analyses as structural equation models in which the pooled effect size is treated as a latent (or unobserved) variable [@cheung2015meta, chapter 4.6]. In short: meta-analyses are multilevel models; therefore, they can be expressed as structural equation models too.
\index{Multivariate Meta-Analysis}
\index{Factor Analysis}
This does not only mean that we can conceptualize previously covered types of meta-analyses from a structural equation modeling perspective. It also allows us to use SEM to build more complex meta-analysis models. Using **meta-analytic** SEM, we can test **factor analytic** models, or perform **multivariate meta-analyses** which include more than one outcome of interest (to name just a few applications).
Meta-analytic SEM can be helpful when we want to evaluate if certain models in the literature actually hold up once we consider all available evidence. Conversely, it can also be used to check if a theory is not backed by the evidence; or, even more interestingly, if it only applies to a subgroup of individuals or entities.
Application of meta-analytic SEM techniques, of course, presupposes a basic familiarity with structural equation modeling. In the next section, we therefore briefly discuss the general idea behind structural equation modeling, as well as its meta-analytic extension.
<br></br>
## What Is Meta-Analytic Structural Equation Modeling? {#what-is-meta-sem}
---
Structural equation modeling is a statistical technique used to test hypotheses about the relationship of **manifest** (observed) and **latent** variables [@kline2015principles, chapter 1]. Latent variables are either not observed or not directly observable. Personality, for example, is a construct which can only be measured indirectly, for instance through different items in a questionnaire. In SEM, an assumed relationship between manifest and latent variables (a "structure") is modeled using the manifest, measured variables, while taking their measurement error into account.
SEM analysis is somewhat different to "conventional" statistical hypothesis tests (such as $t$-tests, for example). Usually, statistical tests involve testing against a **null hypothesis**, such as $H_0: \mu_1 = \mu_2$ (where $\mu_1$ and $\mu_2$ are the means of two groups). In such a test, the researcher "aims" to **reject** the null hypothesis, since this allows to conclude that the two groups differ. Yet in SEM, a specific structural model is proposed beforehand, and the researcher instead "aims" to **accept** this model if the goodness of fit is sufficient [@cheung2015meta, chapter 2.4.6].
<br></br>
### Model Specification
---
Typically, SEM are specified and represented mathematically through a series of **matrices**. You can think of a matrix as a simple table containing rows and columns, much like a `data.frame` object in _R_ (in fact, most data frames can be easily converted to a matrix using the `as.matrix` function). Visually, SEM can be represented as **path diagrams**. Such path diagrams are usually very intuitive, and straightforward in their interpretation. Thus, we will start by specifying a SEM **visually** first, and then move on to the matrix notation.
<br></br>
#### Path Diagrams
---
\index{Path Diagram}
Path diagrams represent our SEM graphically. There is no full consensus on how path diagrams should be drawn, yet there are a few conventions. Here are the main components of path diagrams, and what they represent.
```{r, echo=F, message=F, warning=F}
library(kableExtra)
library(dplyr)
df = data.frame(Symbol = c("$\\square$", "$\\circ$", "$\\triangle$", "$\\rightarrow$", "$\\leftrightarrow$"),
Name = c("Rectangle", "Circle", "Triangle", "Arrow", "Double Arrow"),
Description = c("Manifest/observed variables.", "Latent/unobserved variables.",
"Intercept (fixed vector of 1s).", "Prediction. The variable at the start of the arrow predicts the variable at the end of the arrow: Predictor $\\rightarrow$ Target.",
"(Co-)Variance. If a double arrow connects two variables (rectangles/circles), it signifies the covariance/ correlation between the two variables. If a double arrow forms a loop on top of one single variable, it signifies the variance of that variable."))
kable(df %>% mutate_all(linebreak), "html", booktabs = T, escape = FALSE, longtable = T) %>%
kable_styling(latex_options = c("repeat_header"),
bootstrap_options = c("condensed",
"striped")) %>%
column_spec(3, width = "13cm")
```
As an illustration, let us create a path diagram for a simple linear ("non-meta-analytic") regression model, in which we want to predict $y$ with $x$. The model formula looks like this:
\begin{equation}
y_i = \beta_0 + \beta_1x_i + e_i
(\#eq:sem1)
\end{equation}
Now, let us "deconstruct" this formula. In the model, $x_i$ and $y_i$ are the observed variables. There are no unobserved (latent) variables. The true population mean of $y$ is the regression intercept $\beta_0$, while $\mu_x$ denotes the population mean of $x$. The variance of our observed predictor $x$ is denoted with $\sigma^2_x$. Provided that $x$ is not a perfect predictor of $y$, there will be some amount of residual error variance $\sigma^2_{e_y}$ associated with $y$. There are two regression coefficients: $\beta_0$, the intercept, and $\beta_1$, the slope coefficient of $x$.
Using these components, we can build a path diagram for our linear regression model, as seen below.
```{r regression_path, message = F, out.width = '43%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/regression_path_sep.png')
```
We can also use this graphical model as a starting point to reassemble the regression model equation. From the model, we can infer that $y$ is influenced by two components: $x \times \beta_1$ and $1 \times \beta_0$. If we add these two parts together, we again arrive at the formula for $y$ from before.
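To make these components more tangible, here is a small illustrative sketch (not part of the original example) which simulates data following the regression model above, and then estimates $\beta_0$, $\beta_1$, $\sigma^2_x$ and $\sigma^2_{e_y}$ with the `lm` function. All numbers are arbitrary choices for this sketch.
```{r, eval=F}
set.seed(123)
x <- rnorm(500, mean = 10, sd = 2)      # predictor x with mean mu_x and variance sigma^2_x
y <- 2 + 0.5 * x + rnorm(500, sd = 1)   # y = beta_0 + beta_1 * x + e
m <- lm(y ~ x)
coef(m)      # estimates of beta_0 (intercept) and beta_1 (slope)
sigma(m)^2   # estimated residual error variance sigma^2_e_y
var(x)       # sample variance of x
```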
<br></br>
#### Matrix Representation
---
There are several ways to represent SEM through matrices [@joreskog2006lisrel; @muthen2012mplus; @mcardle1984some]. Here, we will focus on the **Reticular Action Model** formulation, or RAM [@mcardle1984some]. We do this because this formulation is used by the **{metaSEM}** package which we will be introducing later on. RAM uses four matrices: $\boldsymbol{F}$, $\boldsymbol{A}$, $\boldsymbol{S}$, and $\boldsymbol{M}$. Because the $\boldsymbol{M}$ matrix is not necessary to fit the meta-analytic SEM we cover, we omit it here [see @cheung2015meta for a more extensive introduction].
We will now specify the remaining $\boldsymbol{A}$, $\boldsymbol{F}$ and $\boldsymbol{S}$ matrices for our linear regression model from before. The three matrices all have the same number of rows and columns, corresponding with the variables we have in our model: $x$ and $y$. Therefore, the generic matrix structure of our regression model always looks like this:
```{r, message = F, out.width = '18%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/M1.png')
```
<br></br>
**The $\boldsymbol{A}$ Matrix: Single Arrows**
The $\boldsymbol{A}$ matrix represents the asymmetrical (single) arrows in our path model. We can fill this matrix by searching for the **column** entry of the variable in which the arrow starts ($x$), and then for the matrix **row** entry of the variable in which the arrow ends ($y$). The value of our arrow, $\beta_1$, is put where the selected column and row intersect in the matrix ($i_{y,x}$). Given that there are no other paths between variables in our model, we fill the remaining fields with 0. Thus, the $\boldsymbol{A}$ matrix for our example looks like this:
```{r, message = F, out.width = '20%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/M2.png')
```
<br></br>
**The $\boldsymbol{S}$ Matrix: Double Arrows**
The $\boldsymbol{S}$ matrix represents the (co-)variances we want to estimate for the included variables. For $x$, our predictor, we need to estimate the variance $\sigma^2_x$. For our predicted variable $y$, we want to know the prediction error variance $\sigma^2_{e_y}$. Therefore, we specify $\boldsymbol{S}$ like this:
```{r, message = F, out.width = '20%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/M4.png')
```
<br></br>
**The $\boldsymbol{F}$ Matrix: Observed Variables**
The $\boldsymbol{F}$ matrix allows us to specify the **observed** variables in our model. To specify that a variable has been observed, we simply insert 1 in the respective diagonal field of the matrix. Given that both $x$ and $y$ are observed in our model, we insert 1 into both diagonal fields:
```{r, message = F, out.width = '20%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/M3.png')
```
\index{Maximum Likelihood}
Once these matrices are set, it is possible to estimate the parameters in our SEM, and to evaluate how well the specified model fits the data. This involves some matrix algebra and parameter estimation through maximum likelihood estimation, the mathematical minutiae of which we will omit here. If you are interested in understanding the details behind this step, you can have a look at @cheung2015meta, chapter 4.3.
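As an optional illustration, the three matrices of our regression example could be written down in _R_ as follows. The character entries are purely symbolic placeholders for the parameters shown above; this is just a sketch of the structure, not code required for the later analyses.
```{r, eval=F}
dims <- c("x", "y")

# A matrix: single arrow from x (column) to y (row), carrying beta_1
A <- matrix(c(0,       0,
              "beta1", 0),
            nrow = 2, ncol = 2, byrow = TRUE,
            dimnames = list(dims, dims))

# S matrix: variance of x and residual error variance of y in the diagonal
S <- matrix(c("sigma2_x", 0,
              0,          "sigma2_e_y"),
            nrow = 2, ncol = 2, byrow = TRUE,
            dimnames = list(dims, dims))

# F matrix: both x and y are observed, so we put 1 in both diagonal fields
Fmat <- diag(2)
dimnames(Fmat) <- list(dims, dims)
```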
<br></br>
### Meta-Analysis From a SEM Perspective
---
We will now combine our knowledge about meta-analysis models and SEM to formulate meta-analysis as a structural equation model [@cheung2008model].
To begin, let us return to the formula of the random-effects model. Previously, we already described that the meta-analysis model follows a multilevel structure (see Chapter \@ref(multilevel-nature)), which looks like this:
\vspace{2mm}
**Level 1**
\begin{equation}
\hat\theta_k = \theta_k + \epsilon_k
(\#eq:sem2)
\end{equation}
\vspace{2mm}
**Level 2**
\begin{equation}
\theta_k = \mu + \zeta_k
(\#eq:sem3)
\end{equation}
On the first level, we assume that the effect size $\hat\theta_k$ reported in study $k$ is an estimator of the true effect size $\theta_k$. The observed effect size deviates from the true effect because of sampling error $\epsilon_k$, represented by the variance $\widehat{\text{Var}}(\hat\theta_k)=v_k$.
\index{Random-Effects Model}
\index{Fixed-Effect Model}
In a random-effects model, we assume that even the true effect size of each study is only drawn from a population of true effect sizes at level 2. The mean of this true effect size population, $\mu$, is what we want to estimate, since it represents the pooled effect size. To do this, we also need to estimate the variance of the true effect sizes $\widehat{\text{Var}}(\theta)=\tau^2$ (i.e. the between-study heterogeneity). The fixed-effect model is a special case of the random-effects model in which $\tau^2$ is assumed to be zero.
It is quite straightforward to represent this model as a SEM graph. We use the parameters on level 1 as latent variables to “explain” how the effect sizes we observe came into being [@cheung2015meta, chapter 4.6.2]:
```{r, message = F, out.width = '45%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/REM_SEM_sep.png')
```
In the graphical model, we see that the observed effect size $\hat\theta_k$ of some study $k$ is "influenced" by two arrows: the sampling error $\epsilon_k$ with variance $v_k$, and the true effect size $\theta_k$ with variance $\tau^2$.
<br></br>
### The Two-Stage Meta-Analytic SEM Approach
---
Above, we defined the (random-effects) meta-analysis model from a SEM perspective. Although this is interesting from a theoretical standpoint, the model above is not more or less capable than the meta-analysis techniques we covered before: it describes that effect sizes are pooled assuming a random-effects model.
To really exploit the versatility of meta-analytic SEM, a two-stepped approach is required [@tang2016testing; @cheung2015meta, chapter 7]. In **Two-Stage Structural Equation Modeling** (TSSEM), we first pool the effect sizes of each study. Usually, these effect sizes are correlations between several variables that we want to use for modeling. For each study $k$, we have a selection of correlations, represented by the vector $\boldsymbol{r_k} = (r_1, r_2, \dots, r_p)$, where $p$ is the total number of (unique) correlations. Like in a normal random-effects model, we assume that each observed correlation in study $k$ deviates from the true average correlation $\rho$ due to sampling error $\epsilon_k$ and between-study heterogeneity $\zeta_k$.
When we take into account that $\boldsymbol{r_k}$ stands for **several** correlations contained in one study, we get the following equation for the random-effects model:
\begin{align}
\boldsymbol{r_k} &= \boldsymbol{\rho} + \boldsymbol{\zeta_k} + \boldsymbol{\epsilon_k} \notag \\
\begin{bmatrix} r_1 \\ r_2 \\ \vdots \\ r_p \end{bmatrix} &=
\begin{bmatrix} \rho_1 \\ \rho_2 \\ \vdots \\ \rho_p \end{bmatrix} +
\begin{bmatrix} \zeta_1 \\ \zeta_2 \\ \vdots \\ \zeta_p \end{bmatrix} +
\begin{bmatrix} \epsilon_1 \\ \epsilon_2 \\ \vdots \\ \epsilon_p \end{bmatrix} (\#eq:sem4)
\end{align}
\index{metaSEM Package}
Using this model, we can calculate a vector of **pooled** correlations, $\boldsymbol{r}$. This first pooling step allows us to evaluate the heterogeneity of effects between studies, and whether a random-effects model or subgroup analyses should be used. Thanks to the maximum likelihood-based approach used by the **{metaSEM}** package, even studies with partially missing data can be included in this step.
\index{Weighted Least Squares (WLS)}
In the second step, we then use **weighted least squares** (see Chapter \@ref(metareg-model-fit)) to fit the structural equation model we specified. The fit function for the specified model $\rho(\hat\theta)$ is [@cheung2009two; @cheung2015meta, chapter 7.4.2]:
\begin{equation}
F_{\text{WLS}}(\hat\theta) = (\boldsymbol{r} - \rho(\hat\theta))^\top \boldsymbol{V}^{-1} (\boldsymbol{r} - \rho(\hat\theta))
(\#eq:sem5)
\end{equation}
Where $\boldsymbol{r}$ is the pooled correlation vector. The important part of this formula is $\boldsymbol{V}^{-1}$, which is an inverted matrix containing the covariances of $\boldsymbol{r}$. This matrix is used for weighting. Importantly, the formula in this second step is the same regardless of whether we assume a random-effects or fixed-effect model, because the between-study heterogeneity, if present, is already taken care of in step 1.
<br></br>
## Multivariate Meta-Analysis {#multivariate-ma}
---
\index{Multivariate Meta-Analysis}
Time to delve into our first worked meta-analytic SEM example. We will begin by using the SEM approach for a **multivariate meta-analysis**, which is something we have not covered yet. In multivariate meta-analyses, we try to estimate more than just one effect at the same time. Such types of meta-analyses are helpful in cases where we are studying a research topic for which there are several main outcomes, not just one.
Imagine that we are examining the effects of some type of treatment. For this treatment, there could be two types of outcomes which are deemed important by most experts and are thus assessed in most studies. Multivariate meta-analyses can address this by estimating the effect sizes for both outcomes **jointly** in one model. This multivariate approach also allows us to take the correlation between the two outcomes into account. This can be used to determine if studies with a high effect size on one outcome also have higher effect sizes on the other outcome. Alternatively, we might also find out that there is a negative relationship between the two outcomes or no association at all.
\index{metaSEM Package}
It is of note that multivariate meta-analysis can also be performed outside a SEM framework [@schwarzer2015meta, chapter 7; @mvmeta]. Here, however, we want to show you how to perform it from a SEM perspective. In this and the following examples, we will work with **{metaSEM}**, a magnificent package for meta-analytic SEM developed by Mike Cheung [-@metasem]. As always, we first have to install the **{metaSEM}** package and load it from your library.
```{r, message=F, warning=F}
library(metaSEM)
```
\index{dmetar Package}
In our example, we will use **{dmetar}**'s `ThirdWave` data set again (see Chapter \@ref(pre-calculated-es)). By default, this data set only contains effects on one outcome, perceived stress. Now, imagine that most studies in this meta-analysis also measured effects on **anxiety**, which is another important mental health-related outcome. We can therefore use a multivariate meta-analysis to jointly estimate effects on stress and anxiety, and how they relate to each other.
To proceed, we therefore have to create a new data frame first, in which data for both outcomes is included. First, we define a vector containing the effects on anxiety (expressed as Hedges' $g$) as reported in each study, as well as their standard error. We also need to define a vector which contains the **covariance** between stress and anxiety reported in each study. One study did not assess anxiety outcomes, so we use `NA` in the three vectors to indicate that the information is missing.
```{r, message=F, warning=F}
# Define vector with effects on anxiety (Hedges g)
Anxiety <- c(0.224,0.389,0.913,0.255,0.615,-0.021,0.201,
0.665,0.373,1.118,0.158,0.252,0.142,NA,
0.410,1.139,-0.002,1.084)
# Standard error of anxiety effects
Anxiety_SE <- c(0.193,0.194,0.314,0.165,0.270,0.233,0.159,
0.298,0.153,0.388,0.206,0.256,0.256,NA,
0.431,0.242,0.274,0.250)
# Covariance between stress and anxiety outcomes
Covariance <- c(0.023,0.028,0.065,0.008,0.018,0.032,0.026,
0.046,0.020,0.063,0.017,0.043,0.037,NA,
0.079,0.046,0.040,0.041)
```
Then, we use this data along with information from `ThirdWave` to create a new data frame called `ThirdWaveMV`. In this data set, we include the effect size **variances** `Stress_var` and `Anxiety_var`, which can be obtained by squaring the standard error.
```{r, message=F, warning=F, echo=F}
ThirdWaveMV <- data.frame(Author = ThirdWave$Author,
Stress = ThirdWave$TE,
Stress_var = ThirdWave$seTE^2,
Anxiety = Anxiety,
Anxiety_var = Anxiety_SE^2,
Covariance = Covariance)
```
```{r, message=F, warning=F, eval=F}
ThirdWaveMV <- data.frame(Author = ThirdWave$Author,
Stress = ThirdWave$TE,
Stress_var = ThirdWave$seTE^2,
Anxiety = Anxiety,
Anxiety_var = Anxiety_SE^2,
Covariance = Covariance)
format(head(ThirdWaveMV), digits = 2)
```
```
## Author Stress Stress_var Anxiety Anxiety_var Covariance
## 1 Call et al. 0.71 0.068 0.224 0.037 0.023
## 2 Cavanagh et al. 0.35 0.039 0.389 0.038 0.028
## 3 DanitzOrsillo 1.79 0.119 0.913 0.099 0.065
## 4 de Vibe et al. 0.18 0.014 0.255 0.027 0.008
## 5 Frazier et al. 0.42 0.021 0.615 0.073 0.018
## 6 Frogeli et al. 0.63 0.038 -0.021 0.054 0.032
```
As we can see, the new data set contains the effect sizes for both stress and anxiety, along with the respective sampling variances. The `Covariance` column stores the covariance between stress and anxiety as measured in each study.
A common problem in practice is that the covariance (or correlation) between two outcomes is not reported in original studies. If this is the case, we have to **estimate** the covariance, based on a reasonable assumption concerning the correlation between the outcomes.
Imagine that we do not know the covariance in each study yet. How can we estimate it? A good way is to look for previous literature which assessed the correlation between the two outcomes, optimally in the same kind of context we are dealing with right now. Let us say we found in the literature that stress and anxiety are very highly correlated in post-tests of clinical trials, with $r_{\text{S,A}} \approx$ 0.6. Based on this assumed correlation, we can approximate the co-variance of some study $k$ using the following formula [@schwarzer2015meta, chapter 7]:
\begin{equation}
\widehat{\text{Cov}}(\theta_{1},\theta_{2}) = SE_{\theta_{1}} \times SE_{\theta_{2}} \times \hat\rho_{1, 2}
(\#eq:sem6)
\end{equation}
Using our example data and assuming $r_{\text{S,A}} \approx$ 0.6, this formula can be implemented in _R_ like so:
```{r}
# We use the square root of the variance since SE = sqrt(var)
cov.est <- with(ThirdWaveMV,
sqrt(Stress_var) * sqrt(Anxiety_var) * 0.6)
```
Please note that, when we calculate covariances this way, the choice of the assumed correlation can have a profound impact on the results. Therefore, it is highly advised to (1) always report the assumed correlation coefficient, and (2) conduct sensitivity analyses, where we inspect how results change depending on the correlation we choose.
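A very simple sketch of such a sensitivity analysis is shown below: we recalculate the covariances under several assumed correlations (the values 0.4, 0.6 and 0.8 are arbitrary choices for this illustration). In a full sensitivity analysis, the multivariate model would then be refitted with each version of the covariances.
```{r, eval=F}
# Covariances under different assumed correlations between stress and anxiety
sapply(c(0.4, 0.6, 0.8), function(r) {
  with(ThirdWaveMV, sqrt(Stress_var) * sqrt(Anxiety_var) * r)
})
```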
<br></br>
### Specifying the Model
---
To specify a multivariate meta-analysis model, we do not have to follow the TSSEM procedure (see previous chapter) programmatically, nor do we have to specify any RAM matrices. For such a relatively simple model, we can use the `meta` function in **{metaSEM}** to fit a meta-analytic SEM in just one step. To use `meta`, we only have to specify three essential arguments:
* **`y`**. The columns of our data set which contain the effect size data. In a multivariate meta-analysis, we have to combine the effect size columns we want to include using `cbind`.
* **`v`**. The columns of our data set which contain the effect size variances. In a multivariate meta-analysis, we have to combine the variance columns we want to include using `cbind`. We also have to include the column containing the covariance between the effect sizes. The structure of the argument should be `cbind(variance_1, covariance, variance_2)`.
* **`data`**. The data set in which the effect sizes and variances are stored.
We save our fitted model under the name `m.mv`. Importantly, before running `meta`, please make sure that the **{meta}** package is **not** loaded. Some functions in **{meta}** and **{metaSEM}** have the same name, and this can lead to errors when running the code in _R_. It is possible to "unload" packages using the `detach` function.
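For example, if **{meta}** is currently attached, it could be unloaded like this before fitting the model:
```{r, eval=F}
# Unload the {meta} package to avoid naming conflicts with {metaSEM}
detach("package:meta", unload = TRUE)
```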
The resulting `m.mv` object can be inspected using `summary`.
```{r, message=F, warning=F, echo=F}
m.mv <- meta(y = cbind(Stress, Anxiety),
v = cbind(Stress_var, Covariance, Anxiety_var),
data = ThirdWaveMV)
```
```{r, fig.width=5, fig.height=5, eval=F}
m.mv <- meta(y = cbind(Stress, Anxiety),
v = cbind(Stress_var, Covariance, Anxiety_var),
data = ThirdWaveMV)
summary(m.mv)
```
```
## [...]
## Coefficients:
## Estimate Std.Error lbound ubound z value Pr(>|z|)
## Intercept1 0.570 0.087 0.399 0.740 6.5455 5.9e-13 ***
## Intercept2 0.407 0.083 0.244 0.570 4.9006 9.5e-09 ***
## Tau2_1_1 0.073 0.049 -0.023 0.169 1.4861 0.1372
## Tau2_2_1 0.028 0.035 -0.041 0.099 0.8040 0.4214
## Tau2_2_2 0.057 0.042 -0.025 0.140 1.3643 0.1725
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
## [...]
##
## Heterogeneity indices (based on the estimated Tau2):
## Estimate
## Intercept1: I2 (Q statistic) 0.6203
## Intercept2: I2 (Q statistic) 0.5292
##
## Number of studies (or clusters): 18
## [...]
## OpenMx status1: 0 ("0" or "1": The optimization is considered fine.
## Other values may indicate problems.)
```
<br></br>
### Evaluating the Results
---
\index{Maximum Likelihood}
Given that the SEM model is fitted using the maximum likelihood algorithm, the first thing we always do is check the `OpenMx status` right at the end of the output. Maximum likelihood estimation is an optimization procedure, in which parameters are changed iteratively until the optimal solution for the data at hand is found. However, especially with more complex models, it can happen that this optimum is not reached even after many iterations; the maximum likelihood algorithm will then stop and output the parameter values it has approximated so far. Yet, those values for our model components will very likely be incorrect and should not be trusted.
The `OpenMx status` for our model is `0`, which indicates that the maximum likelihood estimation worked fine. If the status would have been anything other than 0 or 1, it would have been necessary to rerun the model, using this code:
```{r, eval=F}
rerun(m.mv)
```
In the output, the two pooled effect sizes are shown as `Intercept1` and `Intercept2`. The effect sizes are numbered in the order in which we inserted them into our call to `meta`. We can see that the pooled effect sizes are $g_{\text{Stress}}$ = 0.57 and $g_{\text{Anxiety}}$ = 0.41. Both effect sizes are significant. Under `Heterogeneity indices`, we can also see the values of $I^2$, which are $I^2_{\text{Stress}}$ = 62% and $I^2_{\text{Anxiety}}$ = 53%, indicating substantial between-study heterogeneity in both outcomes.
The direct estimates of the between-study heterogeneity variance $\tau^2$ are also provided. We see that there are not only two estimates, but three. To understand what this means, we can extract the "random" values from the `m.mv` object.
```{r}
tau.coefs <- coef(m.mv, select = "random")
```
Then, we use the `vec2symMat` function to create a matrix of the coefficients. We give the matrix rows and columns the names of our variables: `Stress` and `Anxiety`.
```{r}
# Create matrix
tc.mat <- vec2symMat(tau.coefs)
# Label rows and columns
dimnames(tc.mat)[[1]] <- dimnames(tc.mat)[[2]] <- c("Stress",
"Anxiety")
tc.mat
```
We now understand better what the three $\tau^2$ values mean: they represent the between-study variance (heterogeneity) in the diagonal of the matrix. In the other two fields, the matrix shows the estimated covariance between stress and anxiety. Given that the covariance is just an unstandardized version of a correlation, we can transform these values into correlations using the `cov2cor` function.
```{r}
cov2cor(tc.mat)
```
We see that, quite logically, the correlations in the diagonal elements of the matrix are 1. The correlation between effects on stress and anxiety is $r_{\text{S,A}}$ = 0.45. This is an interesting finding: it shows that there is a positive association between a treatment's effect on perceived stress and its effect on anxiety. We can say that treatments which have high effects on stress seem to have higher effects on anxiety too.
\index{Wald-Type Test}
It is of note that the confidence intervals presented in the summary of `m.mv` are Wald-type intervals (see Chapter \@ref(knapp-hartung)). Such Wald-type intervals can sometimes be inaccurate, especially in small samples [@diciccio1996bootstrap]. It may thus be valuable to construct confidence intervals in another way, by using **likelihood-based** confidence intervals. We can get these CIs by re-running the `meta` function and additionally specifying `intervals.type = "LB"`.
```{r, eval = F}
m.mv <- meta(y = cbind(Stress, Anxiety),
v = cbind(Stress_var, Covariance, Anxiety_var),
data = ThirdWaveMV,
intervals.type = "LB")
```
We have already seen that the output for our `m.mv` model contains non-zero estimates of the between-study heterogeneity $\tau^2$. We can therefore conclude that the model we just fitted is a random-effects model. The `meta` function uses a random-effects model automatically. Considering the $I^2$ values in our output, we can conclude that this is indeed adequate. However, if we want to fit a fixed-effect model anyway, we can do so by re-running the analysis, and adding the parameter `RE.constraints = matrix(0, nrow=2, ncol=2)`. This creates a matrix of 0s which constrains all $\tau^2$ values to zero:
\vspace{2mm}
```{r, eval = F}
m.mv <- meta(y = cbind(Stress, Anxiety),
v = cbind(Stress_var, Covariance, Anxiety_var),
data = ThirdWaveMV,
RE.constraints = matrix(0, nrow=2, ncol=2))
```
<br></br>
### Visualizing the Results
---
To plot the multivariate meta-analysis model, we can use the `plot` function. We also make some additional specifications to change the appearance of the plot. If you want to see all styling options, you can paste `?metaSEM::plot.meta` into the console and then hit Enter.
\vspace{2mm}
```{r, fig.width=5, fig.height=5, fig.align='center', out.width="60%", eval=F}
plot(m.mv,
axis.labels = c("Perceived Stress", "Anxiety"),
randeff.ellipse.col = "#014d64",
univariate.arrows.col = "gray40",
univariate.arrows.lwd = 9,
univariate.polygon.col = "gray40",
estimate.ellipse.col = "gray40",
estimate.col = "firebrick")
```
```{r, fig.width=5, fig.height=5, fig.align='center', out.width="60%", echo=F, eval=F}
par(bg="#FFFEFA")
plot(m.mv,
axis.labels = c("Perceived Stress", "Anxiety"),
randeff.ellipse.col = "#014d64",
univariate.arrows.col = "gray40",
univariate.polygon.col = "gray40",
estimate.ellipse.col = "gray40",
estimate.col = "firebrick")
```
```{r metasem_forest_img, message = F, out.width = '70%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/forest_metasem.png')
```
Let us go through what we see. The plot has two axes: an x-axis displaying the effects on stress, and a y-axis, which displays the effects on anxiety. We also see the pooled effect and its 95% confidence interval for both outcomes, symbolized by the black diamond.
\index{Prediction Interval}
In the middle of the plot, the pooled effect of both variables is shown as a red diamond. The smaller blue ellipse represents the 95% confidence interval of our pooled effect; while the larger black ellipse depicts the 95% **prediction** interval (Chapter \@ref(het-measure-which))^[These prediction intervals (or "plausible value intervals") are based on a different formula [$\hat\mu \pm 1.96 \times \hat\tau$, @raudenbush2009pi] than the one used by **{meta}** and **{metafor}** (equation 5.7 in Chapter \@ref(het-measure-which)), resulting in a slightly narrower interval.].
Lastly, the black dots show the individual studies, where the ellipses with dashed lines represent the 95% confidence intervals.
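As a rough plausibility check of the prediction interval for the stress dimension, we can plug the rounded estimates from the model output above ($\hat\mu \approx$ 0.57, $\hat\tau^2 \approx$ 0.073) into the formula mentioned in the footnote:
```{r, eval=F}
# Approximate prediction ("plausible value") interval for the stress outcome
0.57 + c(-1, 1) * 1.96 * sqrt(0.073)
# roughly [0.04; 1.10]
```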
<br></br>
## Confirmatory Factor Analysis {#cfa}
---
\index{Factor Analysis}
Confirmatory Factor Analysis (CFA) is a popular SEM method in which one specifies how observed variables relate to assumed latent variables [@thompson2004exploratory, chapters 1.1 and 1.2]. CFA is often used to evaluate the psychometric properties of questionnaires or other types of assessments. It allows researchers to determine if assessed variables indeed measure the latent variables they are intended to measure, and how several latent variables relate to each other.
For frequently used questionnaires, there are usually many empirical studies which report the correlations between the different questionnaire items. Such data can be used for meta-analytic SEM. This allows us to evaluate which latent factor structure is the most appropriate based on all available evidence.
In this example, we want to confirm the latent factor structure of a (fictitious) questionnaire for sleep problems. The questionnaire is assumed to measure two distinct latent variables characterizing sleep problems: **insomnia** and **lassitude**. Koffel and Watson [-@koffel2009two] argue that sleep complaints can indeed be described by these two latent factors.
To practice meta-analytic CFA, we simulated results of 11 studies in which our imaginary sleep questionnaire was assessed. We named this data set `SleepProblems`. Each of these studies contains the inter-correlations between symptoms of sleep complaints as directly measured by our questionnaire. These measured indicators include sleep quality, sleep latency, sleep efficiency, daytime dysfunction, and **hypersomnia** (i.e. sleeping too much). We assume that the first three symptoms are related because they all measure insomnia as a latent variable, whereas daytime dysfunction and hypersomnia are related because they are symptoms of the lassitude factor.
The proposed structure represented as a graphical model looks like this^[Please note that the labels in the path diagram are somewhat "idiosyncratic" to make identifying the relevant components of the model easier later on.]:
```{r, message = F, out.width = '60%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/CFA_Graph-1_sep.png')
```
<br></br>
### Data Preparation
---
Let us first have a look at the `SleepProblems` data we want to use for our model. This data set has a special structure: it is a `list` object, containing (1) another `list` of matrices and (2) a numeric vector. Lists are very versatile _R_ objects and allow us to bind together different elements in one big object. Lists can be accessed like data frames using the `$` operator. The `names` function can be used to print the names of objects in the list.
```{block, type='boxdmetar'}
**The "SleepProblems" Data Set**
\vspace{2mm}
The `SleepProblems` data set is part of the **{dmetar}** package. If you have installed **{dmetar}**, and loaded it from your library, running `data(SleepProblems)` automatically saves the data set in your _R_ environment. The data set is then ready to be used.
\vspace{2mm}
If you have not installed **{dmetar}**, you can download the data set as an _.rda_ file from the [Internet](https://www.protectlab.org/meta-analysis-in-r/data/SleepProblems.rda), save it in your working directory, and then click on it in your R Studio window to import it.
```
```{r, message=F, warning=F}
data(SleepProblems)
names(SleepProblems)
```
We see that the list contains two elements, our actual `data` and `n`, the sample size of each study. The `data` object is itself a `list`, so we can also get the names of its contents using the `names` function.
```{r, eval=F}
names(SleepProblems$data)
```
```
## [1] "Coleman et al. (2003)" "Salazar et al. (2008)"
## [3] "Newman et al. (2016)" "Delacruz et al. (2009)"
## [5] "Wyatt et al. (2002)" "Pacheco et al. (2016)"
## [...]
```
It is also possible to display specific elements in `data` using the `$` operator.
```{r}
SleepProblems$data$`Coleman et al. (2003)`
```
The `data` list contains 11 elements, one for each of the 11 included studies. A closer look at the Coleman et al. (2003) study reveals that the data are stored as correlation matrices with five variables. Each row and column in the matrix corresponds with one of the sleep complaint symptoms assessed by our questionnaire.
The Coleman et al. (2003) study contains reported correlations for each symptom combination. However, it is also possible to use studies which have missing values (coded as `NA`) in some of the fields. This is because meta-analytic SEM can handle missing data--at least to some degree.
Before we proceed, let us quickly show how you can construct such a list yourself. Let us assume that we have extracted correlation matrices of two studies, which we imported as data frames into _R_. Assuming that these data frames are called `df1` and `df2`, we can use the following "recipe" to create a `list` object that is suitable for further analysis.
\vspace{2mm}
```{r, eval=F}
# Convert both data.frames to matrices
mat1 <- as.matrix(df1)
mat2 <- as.matrix(df2)
# Define the row labels
dimnames(mat1)[[1]] <- c("Variable 1", "Variable 2", "Variable 3")
dimnames(mat2)[[1]] <- c("Variable 1", "Variable 2", "Variable 3")
# Bind the correlation matrices together in a list
data <- list(mat1, mat2)
names(data) <- c("Study1", "Study2")
# Define sample size of both studies
n <- c(205, # N of study 1
830) # N of study 2
# Bind matrices and sample size together
cfa.data <- list(data, n)
```
<br></br>
### Model Specification
---
To specify our CFA model, we have to use the RAM specification and two-stage meta-analytic SEM procedure we mentioned before. The **{metaSEM}** package contains separate functions for each of the two stages, `tssem1` and `tssem2`. The first function pools our correlation matrices across all studies, and the second fits the proposed model to the data.
<br></br>
#### Stage 1
---
At the first stage, we pool our correlation matrices using the `tssem1` function. There are four important arguments we have to specify in the function.
* **`Cov`**. A `list` of correlation matrices we want to pool. Note that all correlation matrices in the list need to have an identical structure.
* **`n`**. A numeric vector containing the sample sizes of each study, in the same order as the matrices included in `Cov`.
* **`method`**. Specifies if we want to use a fixed-effect model (`"FEM"`) or random-effects model (`"REM"`).
* **`RE.type`**. When a random-effects model is used, this specifies how the random effects should be estimated. The default is `"Symm"`, which estimates all $\tau^2$ values, including the covariances between two variables. When set to `"Diag"`, only the diagonal elements of the random-effects matrix are estimated. This means that we assume that the random effects are independent. Although `"Diag"` results in a strongly simplified model, it is often preferable, because less parameters have to be estimated. This particularly makes sense when the number of variables is high, or the number of studies is low.
In our example, we assume a random-effects model, and use `RE.type = "Diag"`. We save the model as `cfa1`, and then call the `summary` function to retrieve the output.
```{r, eval=F}
cfa1 <- tssem1(SleepProblems$data,
SleepProblems$n,
method="REM",
RE.type = "Diag")
summary(cfa1)
```
```
[...]
Coefficients:
Estimate Std.Error lbound ubound z value Pr(>|z|)
Intercept1 0.444 0.057 0.331 0.557 7.733 < 0.001 ***
Intercept2 0.478 0.042 0.394 0.561 11.249 < 0.001 ***
Intercept3 0.032 0.071 -0.106 0.172 0.459 0.645
Intercept4 0.132 0.048 0.038 0.227 2.756 0.005 **
Intercept5 0.509 0.036 0.438 0.581 13.965 < 0.001 ***
Intercept6 0.120 0.040 0.040 0.201 2.954 0.003 **
Intercept7 0.192 0.060 0.073 0.311 3.170 0.001 **
Intercept8 0.221 0.039 0.143 0.298 5.586 < 0.001 ***
Intercept9 0.189 0.045 0.100 0.279 4.163 < 0.001 ***
Intercept10 0.509 0.023 0.462 0.556 21.231 < 0.001 ***
Tau2_1_1 0.032 0.015 0.002 0.061 2.153 0.031 *
Tau2_2_2 0.016 0.008 0.000 0.032 1.963 0.049 *
Tau2_3_3 0.049 0.023 0.003 0.096 2.091 0.036 *
Tau2_4_4 0.019 0.010 0.000 0.039 1.975 0.048 *
Tau2_5_5 0.010 0.006 -0.001 0.022 1.787 0.073 .
Tau2_6_6 0.012 0.007 -0.002 0.027 1.605 0.108
Tau2_7_7 0.034 0.016 0.001 0.067 2.070 0.038 *
Tau2_8_8 0.012 0.006 -0.000 0.025 1.849 0.064 .
Tau2_9_9 0.017 0.009 -0.001 0.036 1.849 0.064 .
Tau2_10_10 0.003 0.002 -0.001 0.008 1.390 0.164
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
[...]
Heterogeneity indices (based on the estimated Tau2):
Estimate
Intercept1: I2 (Q statistic) 0.9316
Intercept2: I2 (Q statistic) 0.8837
Intercept3: I2 (Q statistic) 0.9336
Intercept4: I2 (Q statistic) 0.8547
Intercept5: I2 (Q statistic) 0.8315
Intercept6: I2 (Q statistic) 0.7800
Intercept7: I2 (Q statistic) 0.9093
Intercept8: I2 (Q statistic) 0.7958
Intercept9: I2 (Q statistic) 0.8366
Intercept10: I2 (Q statistic) 0.6486
[...]
OpenMx status1: 0 ("0" or "1": The optimization is considered fine.
Other values may indicate problems.)
```
A look at the `OpenMx status` confirms that the model estimates are trustworthy. To make the results more easily digestible, we can extract the fixed effects (our estimated pooled correlations) using the `coef` function. We then make a symmetrical matrix out of the coefficients using `vec2symMat`, and add the dimension names for easier interpretation.
\vspace{2mm}
```{r, echo=F}
load("data/cfa1.rda")
```
```{r}
# Extract the fixed coefficients (correlations)
fixed.coefs <- coef(cfa1, "fixed")
# Make a symmetric matrix
fc.mat <- vec2symMat(fixed.coefs, diag = FALSE)
# Label rows and columns
dimnames(fc.mat)[[1]] <- c("Quality", "Latency",
"Efficiency", "DTDysf", "HypSomnia")
dimnames(fc.mat)[[2]] <- c("Quality", "Latency",
"Efficiency", "DTDysf", "HypSomnia")
# Print correlation matrix (3 digits)
round(fc.mat, 3)
```
\vspace{2mm}
We can now see the pooled correlation matrix for our variables. Looking back at the model output, we also see that all correlation coefficients are significant ($p<$ 0.05), except one: the correlation between sleep quality and daytime dysfunction was not significant. From the perspective of our assumed model, this makes sense, because we expect these variables to load on different factors. We also see that the $I^2$ values of the different estimates are very large (65-93%).
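If we want to take a closer look at the heterogeneity estimates behind these $I^2$ values, we can also extract the random-effects part of the stage 1 model. Here is a brief sketch of how this could be done, assuming that the `select` argument of `coef` works for `tssem1` objects in the same way it does for the fixed effects above (it is best to double-check the **{metaSEM}** documentation).
\vspace{2mm}
```{r, eval=F}
# Sketch: extract the estimated tau^2 values (random effects)
# from the stage 1 model
coef(cfa1, select = "random")
```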
<br></br>
#### Stage 2
---
After pooling the correlation matrices, it is now time to determine if our proposed factor model fits the data well. To specify our model, we have to use the RAM formulation this time, and specify the $\boldsymbol{A}$, $\boldsymbol{S}$ and $\boldsymbol{F}$ matrices. To fill the fields in each of these matrices, it is often best to construct an empty matrix first. Structure-wise, all matrices we define contain not only the observed variables but also the assumed latent variables, `f_Insomnia` and `f_Lassitude`. Here is how we can create a zero matrix as a starting point:
\vspace{2mm}
```{r, echo=F}
dims <- c("Quality", "Latency", "Efficiency",
"DTDysf", "HypSomnia", "f_Insomnia", "f_Lassitude")
```
```{r, eval=F}
# Create vector of column/row names
dims <- c("Quality", "Latency", "Efficiency",
"DTDysf", "HypSomnia", "f_Insomnia", "f_Lassitude")
# Create 7x7 matrix of zeros
mat <- matrix(rep(0, 7*7), nrow = 7, ncol = 7)
# Label the rows and columns
dimnames(mat)[[1]] <- dimnames(mat)[[2]] <- dims
mat
```
```
## Qlty Ltncy Effcncy DTDysf HypSmn f_Insmn f_Lsstd
## Quality 0 0 0 0 0 0 0
## Latency 0 0 0 0 0 0 0
## Efficiency 0 0 0 0 0 0 0
## DTDysf 0 0 0 0 0 0 0
## HypSomnia 0 0 0 0 0 0 0
## f_Insomnia 0 0 0 0 0 0 0
## f_Lassitude 0 0 0 0 0 0 0
```
<br></br>
$\boldsymbol{A}$ **Matrix**
In the $\boldsymbol{A}$ matrix, we specify the asymmetrical (i.e. single-headed) arrows in our model. Each arrow starts at the column variable and points to the row variable; it is specified in the cell where that column and row intersect. All other fields, which do not represent arrows, are filled with `0`.
We specify that an arrow has to be “estimated” by adding a character string to the $\boldsymbol{A}$ matrix. This character string begins with a starting value for the optimization procedure (usually somewhere between 0.1 and 0.3) followed by `*`. After the `*` symbol, we specify a label for the value. If two fields in the $\boldsymbol{A}$ matrix have the same label, this means that we assume that the fields have the same value.
In our example, we use a starting value of 0.3 for all estimated arrows, and label the fields according to the path diagram we presented before.
\vspace{2mm}
```{r}
A <- matrix(c(0, 0, 0, 0, 0, "0.3*Ins_Q", 0 ,
0, 0, 0, 0, 0, "0.3*Ins_L", 0 ,
0, 0, 0, 0, 0, "0.3*Ins_E", 0 ,
0, 0, 0, 0, 0, 0 , "0.3*Las_D",
0, 0, 0, 0, 0, 0 , "0.3*Las_H",
0, 0, 0, 0, 0, 0 , 0 ,
0, 0, 0, 0, 0, 0 , 0
), nrow = 7, ncol = 7, byrow=TRUE)
# Label columns and rows
dimnames(A)[[1]] <- dimnames(A)[[2]] <- dims
```
The last step is to plug the $\boldsymbol{A}$ matrix into the `as.mxMatrix` function to make it usable for the stage 2 model.
```{r}
A <- as.mxMatrix(A)
```
<br></br>
$\boldsymbol{S}$ **Matrix**
In the $\boldsymbol{S}$ matrix, we specify the variances we want to estimate. In our example, these are the variances of all observed variables, as well as the correlation between our two latent factors. First, we set the correlation of our latent factors with themselves to 1. Furthermore, we use a starting value of 0.2 for the variances in the observed variables, and 0.3 for the correlations. All of this can be specified using this code:
\vspace{2mm}
```{r}
# Make a diagonal matrix for the variances
Vars <- Diag(c("0.2*var_Q", "0.2*var_L",
"0.2*var_E", "0.2*var_D", "0.2*var_H"))
# Make the matrix for the latent variables
Cors <- matrix(c(1, "0.3*cor_InsLas",
"0.3*cor_InsLas", 1),
nrow=2, ncol=2)
# Combine
S <- bdiagMat(list(Vars, Cors))
# Label columns and rows
dimnames(S)[[1]] <- dimnames(S)[[2]] <- dims
```
And again, we transform the matrix using `as.mxMatrix`.
```{r}
S <- as.mxMatrix(S)
```
<br></br>
$\boldsymbol{F}$ **Matrix**
The $\boldsymbol{F}$ matrix, lastly, is easy to specify. In the diagonal elements of the observed variables, we fill in 1. Everywhere else, we use 0. Furthermore, we only select the rows of the matrix in which at least one element is not zero (i.e. the last two rows are dropped, since they only contain zeros).
```{r}
# Construct diagonal matrix
F <- Diag(c(1, 1, 1, 1, 1, 0, 0))
# Only select non-null rows
F <- F[1:5,]
# Specify row and column labels
dimnames(F)[[1]] <- dims[1:5]
dimnames(F)[[2]] <- dims
F <- as.mxMatrix(F)
```
<br></br>
### Model Fitting
---
Now, it is time to fit our proposed model to the pooled data. To do this, we use the `tssem2` function. We only have to provide the stage 1 model `cfa1`, the three matrices, and specify `diag.constraints=FALSE` (because we are not fitting a mediation model). We save the resulting object as `cfa2` and then access it using `summary`.
\vspace{2mm}
```{r, eval=FALSE}
cfa2 <- tssem2(cfa1,
Amatrix = A,
Smatrix = S,
Fmatrix = F,
diag.constraints = FALSE)
summary(cfa2)
```
```
## [...]
## Coefficients:
## Estimate Std.Error lbound ubound z value Pr(>|z|)
## Las_D 0.688 0.081 0.527 0.848 8.409 < 0.001 ***
## Ins_E 0.789 0.060 0.670 0.908 13.026 < 0.001 ***
## Las_H 0.741 0.088 0.568 0.914 8.384 < 0.001 ***
## Ins_L 0.658 0.053 0.553 0.763 12.275 < 0.001 ***
## Ins_Q 0.613 0.051 0.512 0.714 11.941 < 0.001 ***
## cor_InsLas 0.330 0.045 0.240 0.419 7.241 < 0.001 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## Goodness-of-fit indices:
## Value
## Sample size 3272.0000
## Chi-square of target model 5.2640
## DF of target model 4.0000
## p value of target model 0.2613
## [...]
## RMSEA 0.0098
## RMSEA lower 95% CI 0.0000
## RMSEA upper 95% CI 0.0297
## [...]
## OpenMx status1: 0 ("0" or "1": The optimization is considered fine.
## Other values indicate problems.)
```
We see that the `OpenMx status` is `0`, meaning that the optimization worked fine. In the output, we are provided with estimates for the paths between the two latent factors and the observed symptoms, such as 0.69 for Lassitude $\rightarrow$ Daytime Dysfunction (`Las_D`). We also see that, according to the model, there is a significant correlation between the two latent factors: $r_{\text{Ins,Las}}$ = 0.33.
Most importantly, however, we need to check how well the assumed model fits our data. This can be achieved by having a look at the `Goodness-of-fit indices`. We see that the goodness of fit test is **not** significant, with $\chi^2_4=$ 5.26, $p=$ 0.26. Unlike in most other statistical tests, this outcome is desirable, since it means that the null hypothesis--that our model fits the data **well**--is not rejected.
\index{Root Mean Square Error of Approximation (RMSEA)}
Furthermore, we see that the **Root Mean Square Error of Approximation** (RMSEA) value is 0.0098. As a rule of thumb, a model can be considered to fit the data well when its RMSEA value is below 0.05, with smaller values indicating a better fit [@rmsea]. Thus, this goodness of fit index also indicates that the model fits our data well.
```{block, type='boximportant'}
**Alternative Models**
\vspace{2mm}
A common problem in SEM studies is that researchers often focus only on their own proposed model, and on whether it fits the data well. If the assumed model shows a close fit to the data, many researchers directly conclude that the data prove their theory.
This is problematic because more than one model can fit the same data well. Therefore, it is necessary to also check for alternative model hypotheses and structures. If an alternative model also fits the data well, it becomes less clear if our proposed structure is really the "correct" one.
```
<br></br>
### Path Diagrams
---
\index{Path Diagram}
\index{semPlot Package}
```{r, echo=FALSE}
load("data/cfa2.rda")
rm(F)
```
After the model has been fitted, **{metaSEM}** makes it quite easy for us to visualize it graphically. However, to draw a path diagram, we first have to install and load the **{semPlot}** package [@semplot].
\vspace{2mm}
```{r,message=FALSE, warning=FALSE}
library(semPlot)
```
To plot the model, we have to convert it into a format that **{semPlot}** can use. This can be done using the `meta2semPlot` function.
\vspace{2mm}
```{r, message=F, eval=F}
cfa.plot <- meta2semPlot(cfa2)
```
We can then use the `semPaths` function in **{semPlot}** to generate the graph. This function has many parameters, which can be accessed by typing `?semPaths` into the console, and then hitting Enter. Here is our code, and the resulting plot:
\vspace{2mm}
```{r, fig.align="center", fig.width=4, fig.height=3, out.width="60%", eval=F}
# Create Plot labels (left to right, bottom to top)
labels <- c("Sleep\nQuality",
"Sleep\nLatency",
"Sleep\nEfficiency",
"Daytime\nDysfunction",
"Hyper-\nsomnia","Insomnia",
"Lassitude")
# Plot
semPaths(cfa.plot,
whatLabels = "est",
edge.color = "black",
nodeLabels = labels,
sizeMan = 10,
sizeLat = 10,
edge.label.cex = 1)
```
```{r, fig.align="center", fig.width=4, fig.height=3, out.width="60%", echo=F, eval=F}
par(bg="#FFFEFA")
# Create Plot labels (left to right, bottom to top)
labels <- c("Sleep\nQuality",
"Sleep\nLatency",
"Sleep\nEfficiency",
"Daytime\nDysfunction",
"Hyper-\nsomnia","Insomnia",
"Lassitude")
# Plot
semPaths(cfa.plot,
whatLabels = "est",
edge.color = "black",
nodeLabels = labels,
sizeMan = 10,
sizeLat = 10,
edge.label.cex = 1)
```
```{r cfa_img, message = F, out.width = '65%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/cfa.png')
```
```{block, type='boxinfo'}
**Further Reading**
\vspace{2mm}
What we covered in this chapter should at best be seen as a rudimentary introduction to meta-analytic SEM. A much more elaborated discussion of this method can be found in Mike Cheung's definitive book **Meta-Analysis: A Structural Equation Modeling Approach** [-@cheung2015meta]. This book also describes various other kinds of meta-analytic structural equation models that we have not covered, and describes how they can be implemented using _R_.
\vspace{2mm}
If you are looking for a shorter (and openly accessible) resource, you can have a look at the **{metaSEM}** package vignette. The vignette provides a brief discussion of the theory behind meta-analytic SEM and includes several illustrations with _R_. After **{metaSEM}** is loaded, the vignette can be downloaded from the Internet by running `vignette("metaSEM")` in the console.
```
$$\tag*{$\blacksquare$}$$
<br></br>
## Questions & Answers
```{block, type='boxquestion'}
**Test your knowledge!**
\vspace{4mm}
1. What is structural equation modeling, and what is it used for?
\vspace{-2mm}
2. What are the two ways through which SEM can be represented?
\vspace{-2mm}
3. Describe a random-effects meta-analysis from a SEM perspective.
\vspace{-2mm}
4. What is a multivariate meta-analysis, and when is it useful?
\vspace{-2mm}
5. When we find that our proposed meta-analytic SEM fits the data well, does this automatically mean that the model is the "correct" one?
\vspace{4mm}
**Answers to these questions are listed in [Appendix A](https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/qanda.html#qanda11) at the end of this book.**
```
<br></br>
## Summary
* Structural equation modeling (SEM) is a statistical technique which can be used to test **complex relationships** between observed (i.e. manifest) and unobserved (i.e. latent) variables.
* Meta-analysis is based on a multilevel model, and can therefore also be formulated from a SEM perspective. This can be used to "replicate" random-effects meta-analyses as structural equation models. More importantly, however, this allows us to perform meta-analyses which model more complex relationships between observed effect sizes.
* Meta-analytic SEM can be applied, for example, to perform **multivariate meta-analyses**. In multivariate meta-analyses, two or more outcomes are estimated jointly, while taking the correlation between both outcome measures into account.
* Another application of meta-analytic SEM is **confirmatory factor analysis**. To test the fit of a proposed factor model across all included studies, a two-stage procedure must be used. At the first stage, correlation matrices of individual studies are pooled. Then, this pooled correlation matrix is used to fit the assumed SEM.
| {
"repo_name": "MathiasHarrer/Doing-Meta-Analysis-in-R",
"stars": "272",
"repo_language": "R",
"file_name": "pcurve.bw.es.R",
"mime_type": "text/plain"
} |
---
title: "Doing Meta-Analysis in R"
subtitle: "A Hands-on Guide"
author:
- Mathias Harrer
- Pim Cuijpers
- Toshi A. Furukawa
- David D. Ebert
github-repo: "MathiasHarrer/Doing-Meta-Analysis-in-R"
site: bookdown::bookdown_site
output:
bookdown::gitbook:
config:
toc:
collapse: section
search: yes
fontsettings:
size: 2
split_by: section
includes:
after_body: banner.html
df_print: paged
theme: !expr bslib::bs_theme()
documentclass: book
bibliography: [book.bib, packages.bib]
biblio-style: apalike
link-citations: yes
twitter-handle: MathiasHarrer
description: "This is a guide on how to conduct Meta-Analyses in R."
favicon: "favicon.ico"
---
# Welcome! {-}
---
<a href="https://www.routledge.com/Doing-Meta-Analysis-with-R-A-Hands-On-Guide/Harrer-Cuijpers-Furukawa-Ebert/p/book/9780367610074" target="_blank"><img src="images/cover.png" width="250" align="right" alt="" class="cover" /></a> Welcome to the online version of **"Doing Meta-Analysis with R: A Hands-On Guide"**.
This book serves as an accessible introduction to how meta-analyses can be conducted in _R_. Essential steps for meta-analysis are covered, including pooling of outcome measures, forest plots, heterogeneity diagnostics, subgroup analyses, meta-regression, methods to control for publication bias, risk of bias assessments and plotting tools.
Advanced, but highly relevant topics such as network meta-analysis, multi-/three-level meta-analyses, Bayesian meta-analysis approaches, and SEM meta-analysis are also covered.
The programming and statistical background covered in the book are kept at a **non-expert level**. A **print version** of this book has been published with [Chapman & Hall/CRC Press](https://www.routledge.com/Doing-Meta-Analysis-with-R-A-Hands-On-Guide/Harrer-Cuijpers-Furukawa-Ebert/p/book/9780367610074) (Taylor & Francis).
<br></br>
## Open Source Repository {-}
---
This book has been built using [**{rmarkdown}**](https://rmarkdown.rstudio.com/docs/) and [**{bookdown}**](https://bookdown.org/). Formulas are rendered using [MathJax](http://docs.mathjax.org/en/latest/index.html). All materials and source code we used to compile the guide can be found on **GitHub**. You are free to fork, share and reuse contents. However, the repository is intended to be mainly "read-only"; PRs will generally not be considered (see section below & preface for ways to contact us).
[](https://github.com/MathiasHarrer/Doing-Meta-Analysis-in-R)
<br></br>
## How To Use The Guide {-}
---
This tutorial provides a brief introduction to the guide and how to use it for your own meta-analysis project.
<center>
<iframe width="580" height="327" style="border-radius: 5px 5px 5px 5px; box-shadow: 0 0.5rem 1rem rgba(0,0,0,.15);" src="https://www.youtube.com/embed/i1b5c-dVfkU" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen>
</iframe>
</center>
<br></br>
## Contributing {-}
---
This guide is an open source project, and we owe special thanks to our expert contributors who provided additional content in some of the sections of this guide.
* [**Luke A. McGuinness**](https://twitter.com/mcguinlu), University of Bristol: Chapter 15, Risk of Bias Plots.
Want to contribute to this guide yourself? Feel free to send **Mathias** ([email protected]) an E-mail and tell us about your proposed additions.
<br></br>
## Citing this Guide {-}
---
The suggested citation is:
```{block, type='boxempty'}
Harrer, M., Cuijpers, P., Furukawa, T.A., & Ebert, D.D. (2021). _Doing Meta-Analysis with R: A Hands-On Guide_. Boca Raton, FL and London: Chapman & Hall/CRC Press. ISBN 978-0-367-61007-4.
```
Download the reference as [BibTeX](https://www.protectlab.org/meta-analysis-in-r/data/citation.bib) or [.ris](https://www.protectlab.org/meta-analysis-in-r/data/citation.ris).
<br></br>
## Cite the Packages {-}
---
In this guide, we present and use various _R_ packages. The reason why all of us can use these packages for free is because experts all around the world have devoted enormous time and effort to their development, typically without pay. If you use some of the packages mentioned in this book for your own meta-analysis, we strongly encourage you to also cite them in your report.
In this guide, every time a new package is introduced, we also provide the reference through which it can be cited. It is also possible to run `citation("package")` to retrieve the preferred reference. Thanks!
<br></br>
```{r include=FALSE}
# automatically create a bib database for R packages
knitr::write_bib(c(
.packages(), 'bookdown', 'knitr', 'rmarkdown'
), 'packages.bib')
```
| {
"repo_name": "MathiasHarrer/Doing-Meta-Analysis-in-R",
"stars": "272",
"repo_language": "R",
"file_name": "pcurve.bw.es.R",
"mime_type": "text/plain"
} |
# Network Meta-Analysis {#netwma}
---
<img src="_figs/network.jpg" />
<br></br>
<span class="firstcharacter">W</span>
hen we perform meta-analyses of clinical trials or other types of intervention studies, we usually estimate the true effect size of **one** specific treatment. We include studies in which the same type of intervention was compared to similar control groups, for example a placebo. All else being equal, this allows us to assess if a **specific** type of treatment is effective.
Yet, in many research areas, there is not only one "definitive" type of treatment--there are several. Migraine, for example, can be treated with various kinds of medications, and non-pharmaceutical therapy options also exist. Especially in mature research fields, it is often less relevant to show that some kind of treatment is beneficial. Instead, we want to find out which treatment is the **most** effective for some specific indication.
This leads to new problems. To assess the comparative effectiveness of several treatments in a conventional meta-analysis, sufficient head-to-head comparisons between two treatments need to be available. Alas, this is often not the case. In many research fields, it is common to find that only a few trials--if any--have compared the effects of two treatments **directly**, instead of against "weaker" control groups. This often means that traditional meta-analyses cannot be used to establish solid evidence on the **relative** effectiveness of several treatments.
However, while direct comparisons between two or more treatments may not exist, **indirect** evidence is typically available. Different treatments may have been evaluated in **separate** trials, but all of these trials may have used the **same** control group. For example, it is possible that two medications were never compared directly, but that the effect of both medications compared to a pill placebo has been studied extensively.
\index{Mixed-Treatment Comparison Meta-Analysis}
**Network meta-analysis** can be used to incorporate such indirect comparisons, and thus allows us to compare the effects of several interventions simultaneously [@dias2013evidence]. Network meta-analysis is also known as **mixed-treatment comparison meta-analysis** [@van2012automating]. This is because it integrates multiple direct and indirect treatment comparisons into one model, which can be formalized as a “network” of comparisons.
\index{Consistency}
Network meta-analysis is a “hot” research topic. In the last decade, it has been increasingly picked up by applied researchers in the bio-medical field, and other disciplines. However, this method also comes with additional challenges and pitfalls, particularly with respect to heterogeneity and so-called **network inconsistency** [@salanti2014evaluating].
Therefore, it is important to first discuss the core components and assumptions of network meta-analysis models. The underpinnings of network meta-analysis can be a little abstract at times. We will therefore go through the essential details in small steps, in order to get a better understanding of this method.
<br></br>
## What Are Network Meta-Analyses? {#what-is-net-ma}
---
### Direct & Indirect Evidence {#direct-indirect-evidence}
---
First, we have to understand what we mean by a "network" of treatments. Imagine that we have extracted data from some randomized controlled trial $i$, which compared the effect of treatment A to another condition B (e.g. a wait-list control group). We can illustrate this comparison graphically:
```{r, message = F, out.width = '75%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/graph1_col_sep.png')
```
\index{Graph Theory}
This visual representation of a treatment comparison is called a **graph**. Graphs are structures used to model how different objects relate to each other, and there is an entire sub-field of mathematics, **graph theory**, which is devoted to this topic.
Our graph has two core components. The first one are two circles (so-called **nodes**), which represent the two conditions A and B in trial $i$. The second component is the line connecting these two nodes. This line is called an **edge**. The edge represents how A and B relate to each other. In our case, the interpretation of the line is quite easy. We can describe the relationship between A and B in terms of the effect size $\hat\theta_{i\text{,A,B}}$ we observe when we compare A and B. This effect size can be expressed as, for example, an SMD or odds ratio, depending on the outcome measure.
Now, imagine that we have also obtained data from another study $j$. This trial also used the control condition B. But instead of administering A, this study used another treatment C. In study $j$, treatment C was also compared to B. We can add this information to our graph:
```{r, message = F, out.width = '75%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/graph2_col_sep.png')
```
This creates our first small network. It is clearly visible that the graph now contains two effect size estimates: $\hat\theta_{i\text{,A,B}}$, comparing A to B, and $\hat\theta_{j\text{,C,B}}$, the comparison between C and B. Since both of these effect sizes were directly observed in "real" trials, we call such information **direct evidence**. Therefore, we denote these effect sizes with $\hat\theta^{\text{direct}}_{\text{B,A}}$ and $\hat\theta^{\text{direct}}_{\text{B,C}}$. Condition B comes first in this notation because we determined it to be our **reference** group. We chose B as the reference condition because both trials used it as the control group.
In the new graph, all nodes (conditions) are either **directly** or **indirectly** connected. The B condition (our control group) is directly connected to all other nodes. It takes only one “step” in the graph to get from B to the two other nodes A and C: B $\rightarrow$ A, B $\rightarrow$ C. In contrast, A and C only have one direct connection, and they both connect to B: A $\rightarrow$ B and C $\rightarrow$ B.
However, there is an indirect connection between A and C. This connection exists because B serves as the link, or **bridge**, between the two conditions: A $\rightarrow$ B $\rightarrow$ C. As a result, there is **indirect evidence** for the relationship between A and C, which can be derived from the structure of the network:
```{r, message = F, out.width = '75%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/graph3_col_sep.png')
```
Using information from the directly observed edges, we can calculate the effect of the **indirectly** observed comparison between A and C. We denote this non-observed, indirect effect size with $\hat\theta^{\text{indirect}}_{\text{A,C}}$. The effect estimate can be derived using this formula [@dias2018network, chapter 1]:
\begin{equation}
\hat\theta_{\text{A,C}}^{\text{indirect}} = \hat\theta_{\text{B,A}}^{\text{direct}} - \hat\theta_{\text{B,C}}^{\text{direct}}
(\#eq:networkes)
\end{equation}
This step is a crucial component of network meta-analysis. The equation above lets us estimate the effect size of a comparison, even if it was never directly assessed in a trial.
Network meta-analysis involves combining both direct and indirect evidence in one model. Based on this information, we can estimate the (relative) effect of each included treatment. By adding indirect evidence, we also increase the precision of an effect size estimate, even when there is direct evidence for that specific comparison. Overall, network meta-analysis comes with several benefits:
* It allows us to pool all available information from a set of related studies in one analysis. Think of how we would usually deal in conventional meta-analyses with trials comparing different treatments to, say, a placebo. We would have to pool each comparison (e.g. treatment A compared to placebo, treatment B compared to placebo, treatment A compared to treatment B, etc.) in a separate meta-analysis.
* Network meta-analysis can incorporate indirect evidence in a network, which is not possible in conventional meta-analysis. In pairwise meta-analyses, we can only pool direct evidence from comparisons which were actually included in a trial.
* If all assumptions are met, and when the results are sufficiently conclusive, network meta-analyses allow us to infer which type of treatment may be preferable for the target population under study.
All of this sounds intriguing, but there are some important limitations we have to consider. First, look at how the variance of the indirect effect size estimate is calculated:
\begin{equation}
\text{Var} \left(\hat\theta_{\text{A,C}}^{\text{indirect}} \right) = \text{Var} \left(\hat\theta_{\text{B,A}}^{\text{direct}} \right) + \text{Var} \left(\hat\theta_{\text{B,C}}^{\text{direct}} \right)
(\#eq:nw2)
\end{equation}
To calculate the variance of the indirect comparison, we **add up** the variances of the direct comparisons. This means that effect sizes estimated from indirect evidence will always have a greater variance, and thus a lower precision, than the ones based on direct evidence [@dias2018network, chapter 1]. This is nothing but logical. We can have a much higher confidence in effect sizes which were estimated from observed data, compared to results which had to be inferred mathematically.
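To make these two formulas a little more tangible, here is a small numerical sketch in _R_. Note that the effect sizes and standard errors used below are purely hypothetical and only serve as an illustration.
\vspace{2mm}
```{r}
# Hypothetical direct evidence (SMDs and their standard errors)
theta.BA <- -0.60; se.BA <- 0.15   # B vs. A
theta.BC <- -0.30; se.BC <- 0.20   # B vs. C

# Indirect estimate for the A vs. C comparison (formula above)
theta.AC <- theta.BA - theta.BC

# Variance and standard error of the indirect estimate
var.AC <- se.BA^2 + se.BC^2
se.AC <- sqrt(var.AC)

theta.AC; se.AC  # -0.3 and 0.25: less precise than either direct estimate
```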
\index{Consistency}
\index{Transitivity Assumption}
There is yet another issue. Equation \@ref(eq:networkes) from before, which allows us to estimate indirect evidence from direct comparisons, only holds if a crucial pre-requisite is met: the assumption of **transitivity**. From a statistical standpoint, this assumption translates to network **consistency** [@efthimiou2016getreal]. In the following, we explain what both of these terms mean, and why they are important.
<br></br>
### Transitivity & Consistency {#transitivity-consistency}
---
Network meta-analyses are certainly a valuable extension of standard meta-analytic methods. Their validity, however, has not remained uncontested. Most of the criticism of network meta-analysis revolves around, as you might have guessed, the use of indirect evidence [@edwards2009indirect; @ioannidis2006indirect]. This especially involves cases where direct evidence is actually available for a comparison.
The key issue is that, while participants in (randomized) trials are allocated to one of the treatment conditions (e.g., A and B) **by chance**, the trial conditions themselves were not randomly selected in our network. This is, of course, hardly surprising. It is usually no problem to randomize participants into one of several conditions of a trial. Yet, it is difficult to imagine a researcher determining treatment conditions to be used in a trial via, say, a dice roll, before rolling out her study. The composition of selected trial conditions will hardly ever follow a random pattern in a network meta-analysis.
This does not constitute a problem for network meta-analytic models _per se_ [@dias2018network, chapter 1]. Our network meta-analysis model will only be biased when the selection, or non-selection, of a specific comparison within a trial depends on the true effect of that comparison [@dias2013evidence]. This statement is quite abstract, so let us elaborate on it a little.
\index{Consistency}
\index{Transitivity Assumption}
The requirement we just mentioned is derived from the **transitivity** assumption of network meta-analyses. There is disagreement in the literature about whether this is an assumption unique to network meta-analysis, or simply an extension of the assumptions in conventional pairwise meta-analysis. The disagreement may also be partly caused by an inconsistent usage of terms in the literature [@dias2018network; @efthimiou2016getreal; @song2009methodological; @lu2009modeling].
The core tenet of the transitivity assumption is that we can combine direct evidence (e.g. from comparisons A $−$ B and C $−$ B) to create indirect evidence about a related comparison (e.g. A $−$ C), as we have done before using formula \@ref(eq:networkes) [@efthimiou2016getreal].
\index{Exchangeability Assumption}
The assumption of transitivity pertains to the concept of **exchangeability**. We already described this prerequisite in chapter \@ref(rem), where we discussed the random-effects model. The exchangeability assumption says that each true effect size $\theta_i$ of some comparison $i$ is the result of a random, **independent** draw from an "overarching" distribution of true effect sizes.
To translate this assumption to our scenario, think of network meta-analysis as a set of $K$ trials. Now, we pretend that each trial in our model contains **all possible** treatment comparisons in our network, denoted with $M$ (e.g. A $−$ B, A $−$ C, B $−$ C, and so forth). However, some of the treatment comparisons have been **"deleted"**, and are thus **"missing"** in some trials. The reason for this is that, in practice, studies can not assess all possible treatment options [@dias2013evidence].
The key assumption is that the effect of a comparison, e.g. A $-$ B, is **exchangeable** between trials--no matter if a trial actually assessed this comparison, or if it is "missing". In network meta-analyses, exchangeability is fulfilled when the effect $\hat\theta_i$ of some comparison $i$ is based on a random, independent draw from the overarching distribution of true effects, no matter if this effect size is derived through direct or indirect evidence.
The assumption of transitivity can be violated when covariates or other effect modifiers (such as the age group of the studied populations, or the treatment intensity) are not evenly distributed across trials assessing, for example, condition A versus B, and C versus B [@song2009methodological]. Transitivity as such can not be tested statistically, but the risk for violating this assumption can be attenuated by only including studies for which the population, methodology and target condition are as similar as possible [@salanti2014evaluating].
The statistical manifestation of transitivity is called **consistency**, and a lack thereof is known as **inconsistency** [@efthimiou2016getreal; @cipriani2013conceptual]. Consistency means that the relative effect of a comparison (e.g. A $-$ B) based on direct evidence does not differ from the one based on indirect evidence [@schwarzer2015meta, chapter 8]:
\begin{equation}
\theta_{\text{A,B}}^{\text{indirect}} = \theta_{\text{A,B}}^{\text{direct}}
(\#eq:nw3)
\end{equation}
\vspace{4mm}
\index{Node Splitting}
Several methods have been proposed to diagnose inconsistency in network meta-analysis models, including **net heat plots** [@krahn2013graphical] and the **node splitting** method [@dias2010checking]. We will describe these methods in greater detail in the following sections.
<br></br>
### Network Meta-Analysis Models {#netw-which-model}
---
This concludes our description of the basic theory and assumptions of network meta-analysis models. Before, we used a simple network with three nodes and edges as an illustration. In practice, however, the number of treatments included in a network meta-analysis is usually much higher. This quickly results in considerably more complex networks, for example one which looks like this:
\vspace{4mm}
```{r, message = F, out.width = '75%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/graph4_col_sep.png')
```
Yet, with an increasing number of treatments $S$ in our network, the number of (direct and indirect) pairwise comparisons $C$ we have to estimate skyrockets:
\vspace{4mm}
```{r, message=F, warning=F, fig.width=4, fig.height=3, out.width="45%", fig.align="center", echo=F}
library(ggplot2)
C <- function(S){S*((S-1)/2)}
ggplot(data = data.frame(x = 0), mapping = aes(x = x)) +
stat_function(fun = C, cex = 1) + xlim(0,20) + theme_classic() +
annotate("text", label = expression(C == S~frac((S-1),2)), x = 6, y = 150) +
xlab("Number of treatments/conditions in network (S)") +
ylab("Number of comparisons (C)") +
theme(plot.background = element_rect(fill = "#FFFEFA", color = "#fbfbfb"),
panel.background = element_rect(fill = "#FFFEFA"))
```
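To get a feeling for how quickly this number grows, we can translate the formula displayed in the plot, $C = S(S-1)/2$, into a small _R_ function and plug in a few values:
\vspace{2mm}
```{r}
# Number of possible pairwise comparisons for S treatments/conditions
C <- function(S) S * (S - 1) / 2
C(c(3, 5, 7, 10, 20))  # 3  10  21  45  190
```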
\vspace{4mm}
\index{Frequentist Statistics}
\index{Bayesian Hierarchical Model}
Therefore, we need a computational model which allows us to pool all available network data in an efficient and internally consistent manner. Several statistical approaches have been developed for network meta-analysis [@efthimiou2016getreal]. In the following chapters, we will discuss a **frequentist** as well as a **Bayesian hierarchical model**, and how they can be implemented in _R_.
```{block, type='boxinfo'}
**Which Modeling Approach Should I Use?**
\vspace{2mm}
While network meta-analysis models may differ in their statistical approach, the good thing is that all should produce the same results when the sample size is sufficient [@shim2019network]. In general, no network meta-analysis method is more or less valid than the other. You may therefore safely choose one or the other approach, depending on which one you find more intuitive, or based on the functionality of the _R_ package which implements it [@efthimiou2016getreal].
\vspace{2mm}
In most disciplines, methods based on frequentist inference are (still) much more common than Bayesian approaches. This means that some people might understand the kind of results produced by a frequentist model more easily. A disadvantage is that the implementation of frequentist network meta-analysis in _R_ (which we will cover next) does not yet support meta-regression, while this is possible using a Bayesian model.
\vspace{2mm}
In practice, a useful strategy is to choose one approach for the main analysis, and then employ the other approach in a sensitivity analysis. If the two methods come to the same conclusion, this increases our confidence that the findings are trustworthy.
```
<br></br>
## Frequentist Network Meta-Analysis {#frequentist-ma}
---
\index{netmeta Package}
\index{Frequentist Statistics}
In the following, we will describe how to perform a network meta-analysis using the **{netmeta}** package [@nemeta]. This package allows us to estimate network meta-analysis models within a **frequentist** framework. The method used by **{netmeta}** is derived from graph theoretical techniques, which were originally developed for electrical networks [@rucker2012network].
```{block, type='boxinfo'}
**The Frequentist Interpretation of Probability**
\vspace{2mm}
Frequentism is a common theoretical approach to interpret the probability of some event $E$. Frequentist approaches define the probability of $E$ in terms of how often $E$ is expected to occur if we repeat some process (e.g., an experiment) **many, many times** [@aronow2019foundations, chapter 1.1.1].
Frequentist ideas are at the core of many statistical procedures that quantitative researchers use on a daily basis, for example significance testing, calculation of confidence intervals, or $p$-values.
```
<br></br>
### The Graph Theoretical Model
---
Let us now describe how the network meta-analysis model implemented in the **{netmeta}** package can be formulated. Imagine that we have collected effect size data from several trials. Then, we go through all $K$ trials and count the total number of treatment comparisons contained in the studies. This number of pairwise comparisons is denoted with $M$.
We then calculate the effect size $\hat\theta_m$ for each comparison $m$, and collect all effect sizes in a vector $\boldsymbol{\hat\theta} = (\hat\theta_1, \hat\theta_2, \dots, \hat\theta_M)$. To run a network meta-analysis, we now need a model which describes how this vector of observed effect sizes $\boldsymbol{\hat\theta}$ was generated. In **{netmeta}**, the following model is used [@schwarzer2015meta, chapter 8]:
\begin{equation}
\boldsymbol{\hat\theta} =\boldsymbol{X} \boldsymbol{\theta}_{\text{treat}} + \boldsymbol{\epsilon}
(\#eq:nw4)
\end{equation}
We assume that the vector of observed effect sizes $\boldsymbol{\hat\theta}$ was generated by the right side of the equation--our model. The first part, $\boldsymbol{X}$, is an $m \times n$ **design matrix**, in which the columns represent the different treatments $n$, and the rows represent the treatment comparisons $m$. In the matrix, a treatment comparison is defined by a 1 and -1 in the same row, where the column positions correspond with the treatments that are being compared.
The most important part of the formula is the vector $\boldsymbol{\theta}_{\text{treat}}$. This vector contains the **true** effects of the $n$ unique treatments in our network. This vector is what our network meta-analysis model needs to estimate, since it allows us to determine which treatments in our network are the most effective ones.
The parameter $\boldsymbol{\epsilon}$ is a vector containing the sampling errors $\epsilon_m$ of all the comparisons. The sampling error of each comparison is assumed to be a random draw from a Gaussian normal distribution with a mean of zero and variance $\sigma^2_m$:
\begin{equation}
\epsilon_m \sim \mathcal{N}(0,\sigma_m^2)
(\#eq:nw4)
\end{equation}
To illustrate the model formula [see @schwarzer2015meta, page 189], imagine that our network meta-analysis consists of $K=$ 5 studies. Each study contains a unique treatment comparison (i.e. $K=M$). These comparisons are A $-$ B, A $-$ C, A $-$ D, B $-$ C, and B $-$ D. This results in a vector of (observed) comparisons $\boldsymbol{\hat\theta} = (\hat\theta_{1\text{,A,B}}, \hat\theta_{2\text{,A,C}}, \hat\theta_{3\text{,A,D}}, \hat\theta_{4\text{,B,C}}, \hat\theta_{5\text{,B,D}})^\top$. Our aim is to estimate the true effect size of all four conditions included in our network, $\boldsymbol{\theta}_{\text{treat}} = (\theta_{\text{A}}, \theta_{\text{B}}, \theta_{\text{C}}, \theta_{\text{D}})^\top$. If we plug these parameters into our model formula, we get the following equation:
\begin{align}
\boldsymbol{\hat\theta} &= \boldsymbol{X} \boldsymbol{\theta}_{\text{treat}} + \boldsymbol{\epsilon} \notag \\
\begin{bmatrix}
\hat\theta_{1\text{,A,B}} \\
\hat\theta_{2\text{,A,C}} \\
\hat\theta_{3\text{,A,D}} \\
\hat\theta_{4\text{,B,C}} \\
\hat\theta_{5\text{,B,D}} \\
\end{bmatrix}
&=
\begin{bmatrix}
1 & -1 & 0 & 0 \\
1 & 0 & -1 & 0 \\
1 & 0 & 0 & -1 \\
0 & 1 & -1 & 0 \\
0 & 1 & 0 & -1 \\
\end{bmatrix}
\begin{bmatrix}
\theta_{\text{A}} \\
\theta_{\text{B}} \\
\theta_{\text{C}} \\
\theta_{\text{D}} \\
\end{bmatrix}
+
\begin{bmatrix}
\epsilon_{1} \\
\epsilon_{2} \\
\epsilon_{3} \\
\epsilon_{4} \\
\epsilon_{5} \\
\end{bmatrix}
(\#eq:nw5)
\end{align}
It is of note that in its current form, this model formula is problematic from a mathematical standpoint. Right now, the model is **overparameterized**. There are too many parameters $\boldsymbol{\theta}_{\text{treat}}$ in our model to be estimated based on the information at hand.
This has something to do with the design matrix $\boldsymbol{X}$ not having **full rank**. In our case, a matrix does not have full rank when its columns are not all **independent**; or, to put it differently, when the number of **independent** columns is smaller than the **total** number of columns, $n$^[If our matrix had fewer rows than columns ($m < n$), the matrix would not be full rank if the number of independent **rows** is smaller than the total number of rows, $m$.]. Because we are dealing with a **network** of treatments, it is clear that the treatment combinations will not be completely independent of each other. For example, the column for treatment D (the fourth column) can be described as a **linear combination** of the first three columns^[When we multiply the first three columns (representing treatments A, B and C) with -1 and add the results, we get the values in the fourth column: $(-\boldsymbol{x}_1) + (-\boldsymbol{x}_2) + (-\boldsymbol{x}_3) = \boldsymbol{x}_4$.].
Overall, there will at best be $n-1$ independent treatment comparisons, but our model always has to estimate the true effect of $n$ treatments in $\boldsymbol{\theta}_{\text{treat}}$. Thus, the matrix does not have full rank. The fact that $\boldsymbol{X}$ does not have full rank means that it is not **invertible**; therefore, $\boldsymbol{\theta}_{\text{treat}}$ cannot be estimated directly using a (weighted) least squares approach.
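We can verify this rank deficiency ourselves by rebuilding the design matrix from the example above in _R_ and checking its rank (a small demonstration using only base _R_):
\vspace{2mm}
```{r}
# Design matrix X: five comparisons (rows) by four treatments A-D (columns)
X <- rbind(c(1, -1,  0,  0),   # A - B
           c(1,  0, -1,  0),   # A - C
           c(1,  0,  0, -1),   # A - D
           c(0,  1, -1,  0),   # B - C
           c(0,  1,  0, -1))   # B - D

# The rank is 3, which is smaller than the number of columns (4)
qr(X)$rank
```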
\index{Graph Theory}
This is where the **graph theoretical** approach implemented in the **{netmeta}** provides a solution. We will spare you the tedious mathematical details behind this approach, particularly since the **{netmeta}** package will do the heavy lifting for us anyway. Let us only mention that this approach involves constructing a so-called **Moore-Penrose pseudoinverse matrix**, which then allows for calculating the fitted values of our network model using a weighted least squares approach.
The procedure also takes care of **multi-arm** studies, which contribute more than one pairwise comparison (i.e. studies in which more than two conditions were compared). Multi-arm comparisons are **correlated** because at least one condition is compared more than once (Chapter \@ref(unit-of-analysis)). This means that the precision of multi-arm study comparisons is artificially increased--unless this is accounted for in our model.
The model also allows us to incorporate estimates of between-study heterogeneity. Like in the "conventional" random-effects model (Chapter \@ref(rem)), this is achieved by adding the estimated heterogeneity variance $\hat\tau^2$ to the variance of a comparison $m$: $s^2_m + \hat\tau^2$. In the **{netmeta}** package, the $\tau^2$ values are estimated using an adaptation of the DerSimonian-Laird estimator [@jackson2013matrix, see also Chapter \@ref(tau-estimators)].
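As a brief illustration of what this means for a single comparison (using made-up numbers, not values from our data):
\vspace{2mm}
```{r}
# Hypothetical sampling variance of a comparison and heterogeneity estimate
s2.m <- 0.04
tau2 <- 0.26

# Variance used for this comparison under the random-effects model
s2.m + tau2  # 0.3, i.e. considerably larger than under the fixed-effect model
```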
\index{Consistency}
An equivalent of $I^2$ can also be calculated, which now represents the amount of **inconsistency** in our network. Like in Higgins and Thompson's formula (see Chapter \@ref(i-squared)), this $I^2$ version is derived from $Q$. In network meta-analyses, however, $Q$ translates to the total heterogeneity in the **network** (also denoted with $Q_{\text{total}}$). Thus, the following formula is used:
\begin{equation}
I^2 = \text{max} \left(\frac{Q_{\text{total}}-\text{d.f.}} {Q_{\text{total}}}, 0 \right)
(\#eq:nw6)
\end{equation}
Where the degrees of freedom in our network are:
\begin{equation}
\text{d.f.} = \left( \sum^K_{k=1}p_k-1 \right)- (n-1)
(\#eq:nw7)
\end{equation}
with $K$ being the total number of studies, $p$ the number of conditions in some study $k$, and $n$ the total number of treatments in our network model.
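As a quick sanity check, we can reproduce the $I^2$ value that **{netmeta}** reports for the model we fit below in this chapter (where $Q_{\text{total}}=$ 1696.84 with 177 degrees of freedom):
\vspace{2mm}
```{r}
# Reproduce the network I^2 from Q_total and the degrees of freedom
Q.total <- 1696.84
df <- 177
max((Q.total - df) / Q.total, 0)  # approximately 0.896, i.e. I^2 = 89.6%
```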
<br></br>
### Frequentist Network Meta-Analysis in _R_
---
After all this input, it is time for a hands-on example. In the following, we will use **{netmeta}** to conduct our own network meta-analysis. As always, we first install the package and then load it from the library.
```{r, message=F, warning=F}
library(netmeta)
```
<br></br>
#### Data Preparation
---
In this illustration, we use the `TherapyFormats` data. This data set is modeled after a real network meta-analysis assessing the effectiveness of different delivery formats of cognitive behavioral therapy for depression [@cuijpers2019effectiveness]. All included studies are randomized controlled trials in which the effect on depressive symptoms was measured at post-test. Effect sizes of included comparisons are expressed as the standardized mean difference (SMD) between the two analyzed conditions.
\index{dmetar Package}
```{block, type='boxdmetar'}
**The "TherapyFormats" Data Set**
\vspace{2mm}
The `TherapyFormats` data set is part of the **{dmetar}** package. If you have installed **{dmetar}**, and loaded it from your library, running `data(TherapyFormats)` automatically saves the data set in your _R_ environment. The data set is then ready to be used.
\vspace{2mm}
If you have not installed **{dmetar}**, you can download the data set as an _.rda_ file from the [Internet](https://www.protectlab.org/meta-analysis-in-r/data/TherapyFormats.rda), save it in your working directory, and then click on it in your R Studio window to import it.
```
Let us have a look at the data.
```{r, message=F, warning=F}
library(dmetar)
data(TherapyFormats)
head(TherapyFormats[1:5])
```
* The second column, `TE`, contains the effect size of all comparisons, and `seTE` the respective standard error. To use **{netmeta}**, all effect sizes in our data set must be pre-calculated already. In Chapter \@ref(effects), we already covered how the most common effect sizes can be calculated, and additional tools can be found in Chapter \@ref(es-calc).
* `treat1` and `treat2` represent the two conditions that are being compared. Our data set also contains two additional columns, which are not shown here: `treat1.long` and `treat2.long`. These columns simply contain the full name of the condition.
* The `studlab` column contains unique study labels, signifying from which study the specific treatment comparison was extracted. This column is helpful to check for multi-arm studies (i.e. studies with more than one comparison). We can do this using the `table` and `as.matrix` function:
```{r, eval=F}
as.matrix(table(TherapyFormats$author))
```
```
## [...]
## Bengston, 2004 1
## Blevins, 2003 1
## Bond, 1988 1
## Bonertz, 2015 1
## Breiman, 2001 3
## [...]
```
Our `TherapyFormats` data set only contains one multi-arm study, the one by Breiman. This study, as we see, contains three comparisons, while all other studies only contain one.
When we prepare network meta-analysis data, it is essential to always (1) include a study label column in the data set, (2) give each individual study a unique name in the column, and (3) to give studies which contribute two or more comparisons **exactly** the same name.
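In larger networks, scanning the full frequency table by eye quickly becomes tedious. Here is a small sketch (using only base _R_) of how multi-arm studies could be spotted directly:
\vspace{2mm}
```{r}
# Count the number of comparisons contributed by each study
comparisons.per.study <- table(TherapyFormats$author)

# Show only studies contributing more than one comparison (multi-arm studies)
comparisons.per.study[comparisons.per.study > 1]
```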
<br></br>
#### Model Fitting
---
We can now fit our first network meta-analysis model using the `netmeta` function. The most important arguments are:
* **`TE`**. The name of the column in our dataset containing the effect sizes for each comparison.
* **`seTE`**. The name of the column which contains the standard errors of each comparison.
* **`treat1`**. The column in our data set which contains the name of the **first** treatment.
* **`treat2`**. The column in our data set which contains the name of the **second** treatment.
* **`studlab`**. The study from which a comparison was extracted. Although this argument is optional _per se_, we recommend to always specify it. It is the only way to let the function know if there are multi-arm trials in our network.
* **`data`**. The name of our data set.
* **`sm`**. The type of effect size we are using. Can be `"RD"` (risk difference), `"RR"` (risk ratio), `"OR"` (odds ratio), `"HR"` (hazard ratio), `"MD"` (mean difference), `"SMD"` (standardized mean difference), among others. Check the function documentation (`?netmeta`) for other available measures.
* **`fixed`**. Should a fixed-effect network meta-analysis be conducted? Must be `TRUE` or `FALSE`.
* **`random`**. Should a random-effects model be used? Either `TRUE` or `FALSE`.
* **`reference.group`**. This lets us specify which treatment should be used as a reference treatment (e.g. `reference.group = "grp"`) for all other treatments.
* **`tol.multiarm`**. Effect sizes of comparisons from multi-arm studies are--by design--consistent. Sometimes however, original papers may report slightly deviating results for each comparison, which may result in a violation of consistency. This argument lets us specify a **tolerance threshold** (a numeric value) for the inconsistency of effect sizes and their standard errors allowed in our model.
* **`details.chkmultiarm`**. Whether to print the estimates of multi-arm comparisons with inconsistent effect sizes (`TRUE` or `FALSE`).
* **`sep.trts`**. The character to be used as a separator in comparison labels (for example `" vs. "`).
We save the results of our first network meta-analysis under the name `m.netmeta`. As reference group, we use the "care as usual" (`"cau"`) condition. For now, let us assume that a fixed-effect model is appropriate. This gives the following code:
\vspace{2mm}
```{r, eval=F}
m.netmeta <- netmeta(TE = TE,
seTE = seTE,
treat1 = treat1,
treat2 = treat2,
studlab = author,
data = TherapyFormats,
sm = "SMD",
fixed = TRUE,
random = FALSE,
reference.group = "cau",
details.chkmultiarm = TRUE,
sep.trts = " vs ")
summary(m.netmeta)
```
```
## Original data (with adjusted standard errors for multi-arm studies):
##
## treat1 treat2 TE seTE seTE.adj narms multiarm
## [...]
## Burgan, 2012 ind tel -0.31 0.13 0.1390 2
## Belk, 1986 ind tel -0.17 0.08 0.0830 2
## Ledbetter, 1984 ind tel -0.00 0.23 0.2310 2
## Narum, 1986 ind tel 0.03 0.33 0.3380 2
## Breiman, 2001 ind wlc -0.75 0.51 0.6267 3 *
## [...]
##
## Number of treatment arms (by study):
## narms
## Ausbun, 1997 2
## Crable, 1986 2
## Thiede, 2011 2
## Bonertz, 2015 2
## Joy, 2002 2
## [...]
##
## Results (fixed effects model):
##
## treat1 treat2 SMD 95%-CI Q leverage
## Ausbun, 1997 grp ind 0.06 [ 0.00; 0.12] 0.64 0.03
## Crable, 1986 grp ind 0.06 [ 0.00; 0.12] 3.05 0.01
## Thiede, 2011 grp ind 0.06 [ 0.00; 0.12] 0.05 0.03
## Bonertz, 2015 grp ind 0.06 [ 0.00; 0.12] 0.01 0.01
## Joy, 2002 grp ind 0.06 [ 0.00; 0.12] 0.02 0.00
## [....]
##
## Number of studies: k = 182
## Number of treatments: n = 7
## Number of pairwise comparisons: m = 184
## Number of designs: d = 17
##
## Fixed effects model
##
## Treatment estimate (sm = 'SMD', comparison: other treatments vs 'cau'):
## SMD 95%-CI z p-value
## cau . . . .
## grp -0.5767 [-0.6310; -0.5224] -20.81 < 0.0001
## gsh -0.3940 [-0.4588; -0.3292] -11.92 < 0.0001
## ind -0.6403 [-0.6890; -0.5915] -25.74 < 0.0001
## tel -0.5134 [-0.6078; -0.4190] -10.65 < 0.0001
## ush -0.1294 [-0.2149; -0.0439] -2.97 0.0030
## wlc 0.2584 [ 0.2011; 0.3157] 8.84 < 0.0001
##
##
## Quantifying heterogeneity / inconsistency:
## tau^2 = 0.26; tau = 0.51; I^2 = 89.6% [88.3%; 90.7%]
##
## Tests of heterogeneity (within designs) and inconsistency (between designs):
## Q d.f. p-value
## Total 1696.84 177 < 0.0001
## Within designs 1595.02 165 < 0.0001
## Between designs 101.83 12 < 0.0001
```
There is plenty to see in this output, so let us go through it step by step. The first thing we see are the calculated effect sizes for each comparison. The asterisk signifies our multi-arm study, for which the standard error has been corrected (to account for effect size dependency). Below that, we see an overview of the number of treatment arms in each included study.
The next table shows us the fitted values for each comparison in our (fixed-effect) network meta-analysis model. The $Q$ column in this table is usually very interesting because it tells us which comparison contributes substantially to the overall inconsistency in our network. For example, we see that the $Q$ value of `Crable, 1986` is rather high, with $Q=$ 3.05.
Then, we get to the core of our network meta-analysis: the `Treatment estimate`. As specified, the effects of all treatments are displayed in comparison to the care as usual condition, which is why there is no effect shown for `cau`. Below that, we can see that the heterogeneity/inconsistency in our network model is very high, with $I^2=$ 89.6%. This indicates that selecting a fixed-effect model was probably **not** appropriate (we will get back to this point later).
\index{Consistency}
\index{Heterogeneity}
The last part of the output (`Tests of heterogeneity`) breaks down the total heterogeneity in our network. There are two components: **within-design** heterogeneity, and inconsistency **between** designs. A "design" is defined as a selection of conditions included in one trial, for example A $-$ B, or A $-$ B $-$ C. When there are true effect size differences between studies which included exactly the same conditions, we can speak of within-design heterogeneity. Variation between designs, on the other hand, reflects the inconsistency in our network. Both the within-design heterogeneity and between-design inconsistency are highly significant ($p$s < 0.001).
This is yet another sign that the random-effects model may be indicated. To further corroborate this, we can calculate the total inconsistency based on the **full design-by-treatment interaction random-effects model** [@higgins2012consistency]. To do this, we only have to plug the `m.netmeta` object into the `decomp.design` function.
\vspace{2mm}
```{r, eval=F}
decomp.design(m.netmeta)
```
```
## Q statistics to assess homogeneity / consistency
## [...]
## Design-specific decomposition of within-designs Q statistic
##
## Design Q df p-value
## cau vs grp 82.5 20 < 0.0001
## cau vs gsh 0.7 7 0.9982
## cau vs ind 100.0 29 < 0.0001
## cau vs tel 11.4 5 0.0440
## [...]
##
## Between-designs Q statistic after detaching of single designs
##
## Detached design Q df p-value
## [...]
## ind vs wlc 77.23 11 < 0.0001
## tel vs wlc 95.45 11 < 0.0001
## ush vs wlc 95.81 11 < 0.0001
## gsh vs ind vs wlc 101.78 10 < 0.0001
##
## Q statistic to assess consistency under the assumption of
## a full design-by-treatment interaction random effects model
##
## Q df p-value tau.within tau2.within
## Between designs 3.82 12 0.9865 0.5403 0.2919
```
\index{Cochran's \textit{Q}}
In the output, we are first presented with $Q$ values showing the individual contribution of each design to the within- and between-design heterogeneity/inconsistency in our model. The important part of the output is in the last section (`Q statistic to assess consistency under the assumption of a full design-by-treatment interaction random effects model`). We see that the value of $Q$ decreases considerably when assuming a full design-by-treatment random-effects model ($Q=$ 101.83 before, $Q=$ 3.82 now), and that the between-design inconsistency is not significant anymore ($p=$ 0.986).
This also suggests that a random-effects model may be indicated to (at least partly) account for the inconsistency and heterogeneity in our network model.
<br></br>
#### Further Examination of the Network Model
---
##### The Network Graph
---
\index{Network Graph}
After a network meta-analysis model has been fitted using `netmeta`, it is possible to produce a **network graph**. This can be done using the `netgraph` function. The `netgraph` function has many arguments, which you can look up by running `?netgraph` in your console. Most of those arguments, however, have very sensible default values, so there is not too much to specify.
As a first step, we feed the function with our fitted model `m.netmeta`. Since we used the shortened labels in our model, we should replace them with the long version (stored in `treat1.long` and `treat2.long`) in the plot. This can be achieved using the `labels` argument, where we have to provide the full names of all treatments. The treatment labels should be in the same order as the ones stored in `m.netmeta$trts`.
\vspace{4mm}
```{r, echo=F}
load("data/m_netmeta.rda")
```
```{r}
# Show treatment order (shortened labels)
m.netmeta$trts
```
```{r, message=F, warning=F, eval=F}
# Replace with full name (see treat1.long and treat2.long)
long.labels <- c("Care As Usual", "Group",
"Guided Self-Help",
"Individual", "Telephone",
"Unguided Self-Help",
"Waitlist")
netgraph(m.netmeta,
labels = long.labels)
```
```{r, message=F, warning=F, echo=F, fig.width=7, fig.height=7, echo=F, fig.align='center', out.width="55%"}
# Replace with full name (see treat1.long and treat2.long)
long.labels <- c("Care As Usual", "Group", "Guided \n Self-Help",
"Individual", "Telephone",
"Unguided \n Self-Help", "Waitlist")
par(bg="#FFFEFA")
# Produce the plot
netgraph(m.netmeta, labels = long.labels, col.multiarm = "lightgray", offset = 0.03, cex = 1.5)
long.labels <- c("Care As Usual", "Group", "Guided Self-Help",
"Individual", "Telephone",
"Unguided Self-Help", "Waitlist")
```
This network graph conveys several kinds of information. First, we see the overall structure of comparisons in our network. This allows us to better understand which treatments were compared to each other in the original data.
Furthermore, we can see that the edges in the plot have a different **thickness**. The degree of thickness represents how often we find a specific comparison in our network. For example, we see that guided self-help formats have been compared to wait-lists in many trials. We also see the multi-arm trial in our network, which is represented by a shaded triangle. This is the study by Breiman, which compared guided self-help, individual therapy, and a wait-list.
The `netgraph` function also allows us to plot a **3D graph**, which can be helpful to get a better grasp of complex network structures. The function requires the **{rgl}** package to be installed and loaded. To produce a 3D graph, we only have to set the `dim` argument to `"3d"`.
```{r, eval=F}
library(rgl)
netgraph(m.netmeta, dim = "3d")
```
<br></br>
##### Visualizing Direct and Indirect Evidence
---
In the next step, let us have a look at the proportion of **direct** and **indirect** evidence used to estimate each comparison. The `direct.evidence.plot` function in **{dmetar}** has been developed for this purpose.
\index{dmetar Package}
```{block, type='boxdmetar'}
**The "direct.evidence.plot" Function**
\vspace{4mm}
The `direct.evidence.plot` function is included in the **{dmetar}** package. Once **{dmetar}** is installed and loaded on your computer, the function is ready to be used. If you did **not** install **{dmetar}**, follow these instructions:
1. Access the source code of the function [online](https://raw.githubusercontent.com/MathiasHarrer/dmetar/master/R/direct.evidence.plot.R).
2. Let _R_ "learn" the function by copying and pasting the source code in its entirety into the console (bottom left pane of R Studio), and then hit "Enter".
3. Make sure that the **{ggplot2}** and **{gridExtra}** packages are installed and loaded.
```
The function provides us with a plot showing the percentage of direct and indirect evidence used for each estimated comparison. The only thing the `direct.evidence.plot` function requires as input is our fitted network meta-analysis model `m.netmeta`.
\vspace{2mm}
```{r, message=F, warning=F, fig.width=8, fig.height=5, fig.align='center', out.width="75%", eval=F}
library(dmetar)
d.evidence <- direct.evidence.plot(m.netmeta)
plot(d.evidence)
```
```{r, message=F, warning=F, fig.width=8, fig.height=5, fig.align='center', out.width="75%", echo=F}
library(dmetar)
# source("data/direct.evidence.plot.bw.R")
d.evidence <- dmetar::direct.evidence.plot(m.netmeta)
plot(d.evidence)
```
\index{Mean Path Length}
\index{Minimal Parallelism}
\vspace{2mm}
As we can see, there are several estimates in our network model which had to be inferred by indirect evidence alone. The plot also provides us with two additional metrics: the **minimal parallelism** and **mean path length** of each estimated comparison. According to König, Krahn, and Binder [-@konig2013visualizing], a mean path length > 2 means that a comparison estimate should be interpreted with particular caution.
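If you want to inspect these diagnostics as raw numbers rather than in a plot, a small sketch like the one below may help. It assumes that the `netmeasures` function in **{netmeta}** returns elements called `meanpath` and `minpar`, as described in the package documentation; please verify these names in your installed version.

```{r, eval=F}
# Mean path length and minimal parallelism per comparison
# (element names assumed from the {netmeta} documentation)
nm <- netmeasures(m.netmeta)
round(nm$meanpath, 2)
nm$meanpath[nm$meanpath > 2]  # comparisons to interpret with caution
round(nm$minpar, 2)
```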
<br></br>
##### Effect Estimate Table
---
Next, we can have a look at the estimates of our network for all possible treatment comparisons. To do this, we can use the matrix saved in `m.netmeta$TE.fixed` (if we use the fixed-effects model) or `m.netmeta$TE.random` (if we use the random-effects model). We need to perform a few pre-processing steps to make the matrix easier to read. First, we extract the data from our `m.netmeta` object, and round the numbers in the matrix to two decimal places.
```{r}
result.matrix <- m.netmeta$TE.fixed
result.matrix <- round(result.matrix, 2)
```
Given that one "triangle" in our matrix will hold redundant information, we replace the lower triangle with empty values using this code:
```{r}
result.matrix[lower.tri(result.matrix, diag = FALSE)] <- NA
```
This gives the following result:
```{r}
result.matrix
```
If we want to report these results in our research paper, a good idea might be to also include the confidence intervals for each effect size estimate. These can be obtained the same way as before using the `lower.fixed` and `upper.fixed` (or `lower.random` and `upper.random`) matrices in `m.netmeta`.
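A minimal sketch of how point estimates and confidence limits could be combined into one readable matrix is shown below (here for the fixed-effect model):

```{r, eval=F}
# Combine point estimates and 95% confidence limits into one matrix
est <- round(m.netmeta$TE.fixed, 2)
low <- round(m.netmeta$lower.fixed, 2)
upp <- round(m.netmeta$upper.fixed, 2)

result.ci <- matrix(paste0(est, " [", low, "; ", upp, "]"),
                    nrow = nrow(est),
                    dimnames = dimnames(est))
result.ci[lower.tri(result.ci, diag = TRUE)] <- NA
result.ci
```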
An even more convenient way to export all estimated effect sizes is to use the `netleague` function. This function creates a table similar to the one we created above. Yet, in the matrix produced by `netleague`, the upper triangle will display only the pooled effect sizes of the **direct comparisons** available in our network, much like the ones we would obtain if we had performed a separate conventional meta-analysis for each comparison. Because we do not have direct evidence for all comparisons, some fields in the upper triangle will remain empty. The lower triangle of the matrix produced by `netleague` contains the estimated effect sizes for **each** comparison (even the ones for which only indirect evidence was available).
The output of `netleague` can be easily exported into a .csv file. It can be used to report comprehensive results of our network meta-analysis in a single table. Another big plus of using this function is that effect size estimates **and** confidence intervals will be displayed together in each cell. Suppose that we want to produce such a treatment estimate table, and save it as a .csv file called "netleague.csv". This can be achieved using the following code:
```{r}
# Produce effect table
netleague <- netleague(m.netmeta,
bracket = "(", # use round brackets
digits=2) # round to two digits
# Save results (here: the ones of the fixed-effect model)
write.csv(netleague$fixed, "netleague.csv")
```
<br></br>
##### Treatment Ranking
---
\index{Surface Under the Cumulative Ranking (SUCRA) Score}
\index{P-Score}
The most interesting question we can answer in network meta-analysis is which treatment has the highest effects. The `netrank` function implemented in **{netmeta}** is helpful in this respect. It allows us to generate a **ranking** of treatments, indicating which treatment is more or less likely to produce the largest benefits.
The `netrank` function is, like the model used in `netmeta` itself, based on a frequentist approach. This frequentist method uses **P-scores** to rank treatments, which measure the certainty that one treatment is better than another treatment, averaged over all competing treatments. The P-score has been shown to be equivalent to the **SUCRA** score [@rucker2015ranking], which we will describe in the chapter on Bayesian network meta-analysis.
The `netrank` function requires our `m.netmeta` model as input. Additionally, we should also specify the `small.values` parameter, which defines if smaller (i.e. negative) effect sizes in a comparison indicate a beneficial (`"good"`) or harmful (`"bad"`) effect. Here, we use `small.values = "good"`, since negative effect sizes mean that a treatment was more effective in **reducing** depression.
```{r}
netrank(m.netmeta, small.values = "good")
```
We see that individual therapy (`ind`) has the highest P-score, indicating that this treatment format may be particularly helpful. Conversely, wait-lists (`wlc`) have a P-score of zero, which seems to go along with our intuition that simply letting people wait for treatment is not the best option.
Nonetheless, one should never automatically conclude that one treatment is the "best", solely because it has the highest score in the ranking [@mbuagbaw2017approaches]. A way to better visualize the **uncertainty** in our network is to produce a forest plot, in which one condition is used as the comparison group.
\index{Forest Plot}
In **{netmeta}**, this can be achieved using the `forest` function. The `forest` function in **{netmeta}** works very similarly to the one of the **{meta}** package, which we already described in Chapter \@ref(forest). The main difference is that we need to specify the reference group in the forest plot using the `reference.group` argument. We use care as usual (`"cau"`) again.
\vspace{4mm}
```{r, fig.width=6, fig.height=4, out.width="80%", fig.align='center'}
forest(m.netmeta,
reference.group = "cau",
sortvar = TE,
xlim = c(-1.3, 0.5),
smlab = paste("Therapy Formats vs. Care As Usual \n",
"(Depressive Symptoms)"),
drop.reference.group = TRUE,
label.left = "Favors Intervention",
label.right = "Favors Care As Usual",
labels = long.labels)
```
\vspace{4mm}
The forest plot shows that there are other high-performing treatment formats besides individual therapy. We also see that some of the confidence intervals are overlapping. This makes a clear-cut decision less straightforward. While individual treatments do seem to produce the best results, there are several therapy formats which also provide substantial benefits compared to care as usual.
<br></br>
#### Evaluating the Validity of the Results
---
##### The Net Heat Plot {#net-heat-plot}
---
The **{netmeta}** package has an in-built function, `netheat`, which allows us to produce a **net heat plot**. Net heat plots are very helpful to evaluate the inconsistency in our network model, and what designs contribute to it.
The `netheat` function only needs a fitted network meta-analysis object to produce the plot.
```{r, fig.width=5, fig.height=5, eval=F}
netheat(m.netmeta)
```
```{r, message = F, out.width = '60%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/heat_fixed_col_sep.png')
```
The function generates a quadratic heatmap, in which each design in a row is compared to the other designs (in the columns). Importantly, the rows and columns signify specific **designs**, not individual treatment **comparisons** in our network. Thus, the plot also features rows and columns for the design used in our multi-arm study, which compared guided self-help, individual therapy, and a wait-list. The net heat plot has two important features [@schwarzer2015meta, chapter 8]:
\index{Consistency}
* **Gray boxes**. The gray boxes signify how important a treatment comparison is for the estimation of another treatment comparison. The bigger the box, the more important the comparison. An easy way to analyze this is to go through the rows of the plot one after another and to check in each row which boxes are the largest. A common finding is that boxes are large in the diagonal of the heat map because this means that direct evidence was used. A particularly big box, for example, can be seen at the intersection of the "cau vs grp" row and the "cau vs grp" column.
* **Colored backgrounds**. The colored backgrounds signify the amount of **inconsistency** of the design in a **row** that can be attributed to the design in a **column**. Field colors can range from a deep red (which indicates strong inconsistency) to blue (which indicates that evidence from this design supports evidence in the row). The `netheat` function uses an algorithm to sort rows and columns into clusters with higher versus lower inconsistency. In our plot, several inconsistent fields are displayed in the upper-left corner. For example, in the row "ind vs wlc", we see that the entry in column "cau vs grp" is displayed in red. This means that the evidence contributed by "cau vs grp" for the estimation of "ind vs wlc" is inconsistent. On the other hand, we see that the field in the "gsh vs wlc" column has a deep blue background, which indicates that evidence of this design **supports** the evidence of the row design "ind vs wlc".
We should remind ourselves that these results are based on the fixed-effect model, since we used it to fit our network meta-analysis model. Yet, from what we have learned so far, it has become increasingly clear that using the fixed-effect model was not appropriate--there is too much heterogeneity and design inconsistency.
Therefore, let us check how the net heat plot changes when we assume a random-effects model. We can do this by setting the `random` argument in `netheat` to `TRUE`.
```{r, fig.width=5, fig.height=5, eval=F}
netheat(m.netmeta, random = TRUE)
```
```{r, message = F, out.width = '60%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/heat_random_col_sep.png')
```
We see that this results in a substantial decrease of inconsistency in our network. There are no fields with a dark red background now, which indicates that the overall consistency of our model improves considerably once a random-effects model is used.
We can therefore conclude that the random-effects model is preferable for our data. In practice, this would mean that we re-run the model using `netmeta` while setting `comb.random` to `TRUE` (and `comb.fixed` to `FALSE`), and that we only report results of analyses based on the random-effects model. We omit the full re-analysis here, since all the analyses we presented before can also be applied to random-effects network models, in exactly the same way; a sketch of the re-fit is shown below.
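The sketch assumes the same data set and column names as the original model fit shown earlier in this chapter (treat `author` as a placeholder for your study label column); only the `comb.fixed` and `comb.random` settings change.

```{r, eval=F}
# A sketch of the random-effects re-fit; column names are assumed to
# match the original call and may need to be adapted.
m.netmeta.rand <- netmeta(TE = TE,
                          seTE = seTE,
                          treat1 = treat1,
                          treat2 = treat2,
                          studlab = author,
                          data = TherapyFormats,
                          sm = "SMD",
                          comb.fixed = FALSE,
                          comb.random = TRUE,
                          reference.group = "cau")
```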
<br></br>
##### Net Splitting {#net-splitting}
---
\index{Node Splitting}
Another method to check for consistency in our network is **net splitting**. This method splits our network estimates into the contribution of direct and indirect evidence, which allows us to control for inconsistency in the estimates of individual comparisons in our network. To apply the net splitting technique, we only have to provide the `netsplit` function with our fitted model.
```{r, eval=F}
netsplit(m.netmeta)
```
```
## Separate indirect from direct evidence using back-calculation method
##
## Fixed effects model:
##
## comparison k prop nma direct indir. Diff z p-value
## grp vs cau 21 0.58 -0.5767 -0.3727 -0.8628 0.4901 8.72 < 0.0001
## gsh vs cau 8 0.22 -0.3940 -0.5684 -0.3442 -0.2243 -2.82 0.0048
## ind vs cau 30 0.71 -0.6403 -0.7037 -0.4863 -0.2174 -3.97 < 0.0001
## tel vs cau 6 0.35 -0.5134 -0.7471 -0.3867 -0.3604 -3.57 0.0004
## ush vs cau 9 0.35 -0.1294 -0.1919 -0.0953 -0.0966 -1.06 0.2903
## [...]
##
## Legend:
## [...]
## Diff - Difference between direct and indirect estimates
## z - z-value of test for disagreement (direct vs. indirect)
## p-value - p-value of test for disagreement (direct vs. indirect)
```
The most important information presented in the output is the difference between effect estimates based on direct and indirect evidence (`Diff`), and whether this difference is significant (as indicated by the `p-value` column). When a difference is $p<$ 0.05, there is a significant disagreement (inconsistency) between the direct and indirect estimate.
We see in the output that there are indeed many comparisons which show significant inconsistency between direct and indirect evidence (when using the fixed-effects model). A good way to visualize the net split results is through a forest plot.
```{r, fig.width=9, fig.height=17, fig.align='center', out.width="79%"}
netsplit(m.netmeta) %>% forest()
```
<br></br>
##### Comparison-Adjusted Funnel Plots
---
\index{Funnel Plot}
Assessing publication bias in network meta-analysis models is difficult. Most of the techniques that we covered in Chapter \@ref(pub-bias) are not directly applicable once we make the step from conventional to network meta-analysis. **Comparison-adjusted funnel plots**, however, have been proposed to evaluate the risk of publication bias in network meta-analyses, and can be used in some contexts [@salanti2014evaluating]. Such funnel plots are applicable when we have a **specific** hypothesis concerning how publication bias has affected our network model.
Publication bias may be created, for example, because studies with "novel" findings are more likely to get published--even if they have a small sample size. There is a natural incentive in science to produce "groundbreaking" results, for example to show that a new type of treatment is superior to the current state of the art.
This would mean that something similar to small-study effects (see Chapter \@ref(small-study-effects)) exists in our data. We would expect that effects of comparisons in which a new treatment was compared to an older one are **asymmetrically** distributed in the funnel plot. This is because "disappointing" results (i.e. the new treatment is not better than the old one) end up in the file drawer. With decreasing sample size, the benefit of the new treatment must be increasingly large to become significant, and thus merit publication. In theory, this would create the characteristic asymmetrical funnel plot that we also find in standard meta-analyses.
Of course, such a pattern will only appear when the effect sizes in our plot are coded in a certain way. To test our "new versus old" hypothesis, we have to make sure that each effect size used in the plot has the same interpretation: for example, that a positive effect size always indicates that the "new" treatment was superior, while a negative sign means the opposite. We can do this by defining a "ranking" of treatments from old to new, and by using this ranking to define the sign of each effect.
The `funnel` function in **{netmeta}** can be used to generate such comparison-adjusted funnel plots. Here are the most important arguments:
* **`order`**. This argument specifies the order of the hypothesized publication bias mechanism. We simply have to provide the names of all treatments in our network and sort them according to our hypothesis. For example, if we want to test if publication bias favored "new" treatments, we insert the names of all treatments, starting from the oldest treatment, and ending with the most novel type of intervention.
* **`pch`**. This lets us specify the symbol(s) to be used for the studies in the funnel plot. Setting this to `19` gives simple dots, for example.
* **`col`**. Using this argument, we can specify the colors used to distinguish different comparisons. The number of colors we specify here must be the same as the number of **unique** comparisons in our funnel plot. In practice, this can mean that many different colors are needed. A complete list of colors that _R_ can use for plotting can be found [online](http://www.stat.columbia.edu/~tzheng/files/Rcolor.pdf).
* **`linreg`**. When set to `TRUE`, Egger's test for funnel plot asymmetry (Chapter \@ref(eggers-test)) is conducted, and its $p$-value is displayed in the plot.
Arguments that are defined for the `funnel` function in **{meta}** can be used here as well.
```{r, fig.width=7, fig.height=5, eval=F}
funnel(m.netmeta,
order = c("wlc", "cau", "ind", "grp", # from old to new
"tel", "ush", "gsh"),
pch = c(1:4, 5, 6, 8, 15:19, 21:24),
col = c("blue", "red", "purple", "forestgreen", "grey",
"green", "black", "brown", "orange", "pink",
"khaki", "plum", "aquamarine", "sandybrown",
"coral", "gold4"),
linreg = TRUE)
```
```{r, fig.width=9, fig.height=7, fig.align='center', out.width="78%", echo=F}
par(bg="#FFFEFA")
funnel(m.netmeta,
order = c("wlc", "cau", # from old to new
"ind", "grp", "tel",
"ush", "gsh"),
pch = c(1:4, 5, 6, 8, 15:19, 21:24),
col = c("blue", "red", "purple", "forestgreen", "grey",
"green", "black", "brown", "orange", "pink",
"khaki", "plum", "aquamarine", "sandybrown",
"coral", "gold4"),
linreg = TRUE)
```
If our hypothesis is true, we can expect that studies with a small sample (and thus a higher standard error) are asymmetrically distributed around the zero line in the plot. This is because small studies comparing a novel treatment to an older one, yet finding that the new treatment is not better, are less likely to get published. Therefore, they are systematically missing on one side of the funnel.
The plot, however, looks quite symmetrical. This is corroborated by Egger's test, which is not significant ($p=$ 0.402). Overall, this does not indicate that there are small-study effects in our network. At least not because "innovative" treatments with superior effects are more likely to be found in the published literature.
```{block, type='boxinfo'}
**Network Meta-Analysis using {netmeta}: Concluding Remarks**
\vspace{2mm}
This has been a long chapter, and we have covered many new topics. We have shown the core ideas behind the statistical model used by **{netmeta}**, described how to fit a network meta-analysis model with this approach, how to visualize and interpret the results, and how to evaluate the validity of your findings. It cannot be stressed enough that (clinical) decision-making in network meta-analyses should not be based on one single test or metric.
Instead, we have to explore our model and its results with open eyes, check the patterns we find for their consistency, and take into account the large uncertainty that is often associated with some of the estimates.
\vspace{2mm}
In the next chapter, we will try to (re-)think network meta-analysis from a Bayesian perspective. Although the philosophy behind this approach varies considerably from the one we described here, both techniques essentially try to achieve the same thing. In practice, the analysis "pipeline" is also surprisingly similar. Time to go Bayesian!
```
<br></br>
## Bayesian Network Meta-Analysis {#bayesian-net-ma}
---
In the following, we will describe how to perform a network meta-analysis based on a Bayesian hierarchical framework. The _R_ package we will use to do this is called **{gemtc}** [@van2012automating]. But first, let us consider the idea behind Bayesian inference in general, and the type of Bayesian model we can use for network meta-analysis.
<br></br>
### Bayesian Inference {#bayesian-inference}
---
\index{Bayes' Theorem}
\index{Frequentist Statistics}
\index{Conditional Probability}
Besides the frequentist approach, **Bayesian** inference is another important strand of inference statistics. Frequentist statistics is arguably used more often in most research fields. The Bayesian approach, however, is actually older; and while being increasingly picked up by researchers in recent years [@marsman2017bayesian], it has never really been "gone" [@mcgrayne2011theory].
The foundation of Bayesian statistics is **Bayes’ Theorem**, first formulated by Reverend Thomas Bayes [1701-1761, @bellhouse2004reverend]. Bayesian statistics differs from frequentism because it also incorporates “subjective” **prior** knowledge to make inferences. Bayes' theorem allows us to estimate the probability of an event A, **given** that we already know that another event B has occurred. This results in a **conditional probability**, which can be denoted like this: $P(\text{A}|\text{B})$. The theorem is based on a formula that explains how this conditional probability can be calculated:
\begin{equation}
P(\text{A}|\text{B})=\frac{P(\text{B}|\text{A})\times
P(\text{A})}{P(\text{B})}
(\#eq:nw8)
\end{equation}
\index{Posterior Distribution}
\index{Prior Distribution}
In this formula, the two probabilities in the numerator of the fraction each have their own names. The $P(\text{B}|\text{A})$ part is known as the **likelihood**. It is the probability of event B, given that A is the case, or occurs [@etz2018introduction]. $P(\text{A})$ is the **prior** probability that $A$ occurs. $P(\text{A}|\text{B})$, lastly, is the **posterior** probability: the probability of A given B. Since $P(\text{B})$ is a fixed constant, the formula above is often simplified:
\begin{equation}
P(\text{A}|\text{B}) \propto P(\text{B}|\text{A})\times P(\text{A})
(\#eq:nw9)
\end{equation}
Where the $\propto$ symbol means that, since we discarded the denominator of the fraction, the probability on the left remains at least **proportional** to the part on the right as values change.
It is easier to understand Bayes' theorem if we think of the formula above as a process, beginning on the right side of the equation. We simply combine the prior information we have on the probability of A, with the likelihood of B given that A occurs, to produce our posterior, or adapted, probability of A: $P(\text{A}|\text{B})$. The crucial point here is that we can produce a "better" (posterior) estimate of A's probability when we take previous knowledge into account. This knowledge is the assumed (prior) probability of A.
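To make this process more tangible, here is a toy example in _R_ with concrete (entirely made-up) numbers, where A stands for "a person has a rare condition" and B for "a screening test is positive":

```{r, eval=F}
p.A <- 0.01              # prior probability P(A)
p.B.given.A <- 0.95      # likelihood P(B|A)
p.B.given.notA <- 0.10   # false positive rate P(B|not A)

# P(B), obtained via the law of total probability
p.B <- p.B.given.A * p.A + p.B.given.notA * (1 - p.A)

# Posterior probability P(A|B) using Bayes' theorem
p.B.given.A * p.A / p.B
```

Even though the test is positive, the posterior probability is only about 9%, because the low prior probability of A is taken into account.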
Bayes' Theorem is often explained in the way we just did, with A and B standing for specific events. However, we can also think of A and B as probability **distributions** of two variables. Imagine that A is a random variable following a normal distribution. This distribution can be characterized by a set of parameters, which we denote with $\boldsymbol{\theta}$. Since A is normally distributed, $\boldsymbol{\theta}$ contains two elements: the true mean $\mu$ and variance $\sigma^2$ of A. These parameters $\boldsymbol{\theta}$ are what we actually want to estimate.
Furthermore, imagine that for B, we have collected **actual data**, which we want to use to estimate $\boldsymbol{\theta}$. We store our observed data in a vector $\boldsymbol{Y}$. Our observed data also follows a normal distribution, represented by $P({Y})$. This leads to a formula that looks like this:
\begin{equation}
P(\boldsymbol{\theta} | {\boldsymbol{Y}} ) \propto P( {\boldsymbol{Y}} | \boldsymbol{\theta} )\times P( \boldsymbol{\theta})
(\#eq:bayes)
\end{equation}
The new equation contains $P(\boldsymbol{\theta})$, the assumed prior distribution of $\boldsymbol{\theta}$. This prior distribution can be defined by us _a priori_, either based on our previous knowledge, or even only an intuition concerning what $\boldsymbol{\theta}$ may look like. Together with the likelihood distribution $P({\boldsymbol{Y}}|\boldsymbol{\theta})$, the probability of our collected data given the parameters $\boldsymbol{\theta}$, we can estimate the posterior distribution $P(\boldsymbol{\theta}|{\boldsymbol{Y}})$. This posterior distribution represents our estimate of $\boldsymbol{\theta}$ if we take both the observed data and our prior knowledge into account.
\index{Credible Interval}
Importantly, the posterior is still a **distribution**, not one estimated "true" value. This means that even the results of Bayesian inference are still **probabilistic**. They are also **subjective**, in the sense that they represent our **beliefs** concerning the actual parameter values. Therefore, in Bayesian statistics, we do not calculate confidence intervals around our estimates, but **credible intervals** (CrI).
Here is a visualization of the three distributions we described before, and how they might look like in a concrete example:
```{r, message = F, out.width = '93%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/prior_col_sep.png')
```
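To illustrate how the posterior distribution emerges from prior and likelihood in practice, here is a small, self-contained sketch using a simple grid approximation. This is purely didactic and not part of the **{gemtc}** workflow; all values are made up.

```{r, eval=F}
set.seed(123)
y <- rnorm(20, mean = 0.4, sd = 1)         # some "observed" data
mu.grid <- seq(-2, 2, length.out = 1000)   # candidate values for mu

prior <- dnorm(mu.grid, mean = 0, sd = 1)  # prior: mu ~ N(0, 1)
lik <- sapply(mu.grid, function(m)         # likelihood of the data
  prod(dnorm(y, mean = m, sd = 1)))

post <- prior * lik                        # posterior is proportional to
post <- post / sum(post)                   # likelihood times prior

sum(mu.grid * post)                          # posterior mean
mu.grid[min(which(cumsum(post) >= 0.025))]   # lower limit of 95% CrI
mu.grid[min(which(cumsum(post) >= 0.975))]   # upper limit of 95% CrI
```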
\index{Markov Chain Monte Carlo}
\index{Gibbs Sampler}
Another asset of Bayesian approaches is that the parameters do not have to follow a bell curve distribution, like the ones in our visualization. Other kinds of (more complex) distributions can also be modeled. A disadvantage of Bayesian inference, however, is that generating the (joint) distribution from our collected data can be very computationally expensive. Special **Markov Chain Monte Carlo** simulation procedures, such as the **Gibbs sampling algorithm**, have been developed to generate posterior distributions. Markov Chain Monte Carlo is also used in the **{gemtc}** package to run our Bayesian network meta-analysis model [@van2012automating].
<br></br>
### The Bayesian Network Meta-Analysis Model {#bayesian-net-ma-model}
---
#### Pairwise Meta-Analysis
---
We will now formulate the Bayesian hierarchical model that **{gemtc}** uses for network meta-analysis. Let us start by defining the model for a conventional, pairwise meta-analysis first.
\index{Bayesian Hierarchical Model}
This definition is equivalent to the one provided in Chapter \@ref(rem), where we discuss the "standard" random-effects model. What we describe in the following is simply the "Bayesian way" to conceptualize meta-analysis. On the other hand, this Bayesian definition of pairwise meta-analysis is already very informative, because it is directly applicable to network meta-analyses, without any further extension [@dias2013evidence].
We refer to this model as a Bayesian **hierarchical** model [@efthimiou2016getreal, see Chapter \@ref(bayes-hierarchical-model) for a more detailed discussion]. There is nothing mysterious about the word "hierarchical" here. Indeed, we already described in Chapter \@ref(multilevel-ma) that every meta-analysis model presupposes a hierarchical, or “multi-level” structure.
Suppose that we want to conduct a conventional meta-analysis. We have included $K$ studies, and have calculated an observed effect size $\hat\theta_k$ for each one. We can then define the fixed-effect model like so:
\begin{equation}
\hat\theta_k \sim \mathcal{N}(\theta,\sigma_k^2)
(\#eq:nw11)
\end{equation}
This formula expresses the **likelihood** of our effect sizes--the $P(\boldsymbol{Y}|\boldsymbol{\theta})$ part in equation \@ref(eq:bayes)--assuming that they follow a normal distribution. We assume that each effect size is a draw from the same distribution, the mean of which is the true effect size $\theta$, and the variance of which is $\sigma^2_k$. In the fixed-effect model, we assume that the true effect size is identical across all studies, so $\theta$ stays the same for different studies $k$ and their observed effect sizes $\hat\theta_k$.
\index{Uninformative Prior}
\index{Prior Distribution}
An interesting aspect of the Bayesian model is that, while the true effect $\theta$ is unknown, we can still define a prior distribution for it. This prior distribution represents what we think $\theta$ **may** look like. For example, we could assume a prior based on a normal distribution with a mean of zero, $\theta \sim \mathcal{N}(0, \sigma^2)$, where we specify $\sigma^2$.
By default, the **{gemtc}** package uses so-called **uninformative priors**, which are prior distributions with a very large variance. This is done so that our prior "beliefs" do not have a big impact on the posterior results, and we primarily let the actually observed data "speak". We can easily extend the formula to a random-effects model:
\begin{equation}
\hat\theta_k \sim \mathcal{N}(\theta_k,\sigma_k^2)
(\#eq:nw12)
\end{equation}
This does not change much in the equation, except that now, we do not assume that each study is an estimator of the same true effect size $\theta$. Instead, we assume that there are "study-specific" true effects $\theta_k$ estimated by each observed effect size $\hat\theta_k$. Furthermore, these study-specific true effects are part of an overarching distribution of true effect sizes. This true effect size distribution is defined by its mean $\mu$ and variance $\tau^2$, our between-study heterogeneity.
\begin{equation}
\theta_k \sim \mathcal{N}(\mu,\tau^2)
(\#eq:nw13)
\end{equation}
In the Bayesian model, we also give an (uninformative) prior distribution to both $\mu$ and $\tau^2$.
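The following short sketch simulates data from exactly this random-effects structure, which may help to make the two-level formulation above more concrete (all parameter values are made up for illustration):

```{r, eval=F}
set.seed(123)
K <- 10                           # number of studies
mu <- -0.5                        # mean of the true effect distribution
tau <- 0.2                        # between-study heterogeneity (SD)
sigma.k <- runif(K, 0.1, 0.3)     # within-study standard errors

theta.k <- rnorm(K, mean = mu, sd = tau)               # true effects
theta.hat.k <- rnorm(K, mean = theta.k, sd = sigma.k)  # observed effects
theta.hat.k
```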
<br></br>
#### Extension to Network Meta-Analysis
---
Now that we have covered how a Bayesian meta-analysis model can be formulated for one pairwise comparison, we can start to extend it to network meta-analysis. The two formulas of the random-effects model from before can be re-used for this. We only have to conceptualize the model parameters a little differently. Since comparisons in network meta-analyses can consist of varying treatments, we denote an effect size found in some study $k$ with $\hat\theta_{k \text{,A,B}}$. This signifies some effect size in study $k$ in which treatment A was compared to treatment B. If we apply this new notation, we get these formulas:
\begin{align}
\hat\theta_{k \text{,A,B}} &\sim \mathcal{N}(\theta_{k \text{,A,B}},\sigma_k^2) \notag \\
\theta_{k \text{,A,B}} &\sim \mathcal{N}(\theta_{\text{A,B}},\tau^2) (\#eq:nw14)
\end{align}
We see that the general idea expressed in the equations stays the same. We now assume that the (study-specific) true effect of the A $-$ B comparison, $\theta_{k \text{,A,B}}$, is part of an overarching distribution of true effects with mean $\theta_{\text{A,B}}$. This mean true effect size $\theta_{\text{A,B}}$ is the result of subtracting $\theta_{1\text{,A}}$ from $\theta_{1\text{,B}}$, where $\theta_{1\text{,A}}$ is the effect of treatment A compared to some predefined reference treatment $1$. Similarly, $\theta_{1\text{,B}}$ is defined as the effect of treatment B compared to the same reference treatment. In the Bayesian model, these effects compared to a reference group are also given a prior distribution.
As we have already mentioned in the previous chapter on frequentist network meta-analysis, inclusion of multi-arm studies into our network model is problematic, because the effect sizes will be correlated. In Bayesian network meta-analysis, this issue can be solved by assuming that effects of a multi-arm study stem from a **multivariate** (normal) distribution.
Imagine that a multi-arm study $k$ examined a total of $n=$ 5 treatments: A, B, C, D, and E. When we choose E as the reference treatment, this leads to $n$ - 1 = 4 treatment effects. Using a Bayesian hierarchical model, we assume that these observed treatment effects are draws from a multivariate normal distribution of the following form^[In practice, it is usually assumed that the between-study heterogeneity variances $\tau^2$ in multi-arm trials are **homogeneous** (i.e. identical) across the comparisons. This allows us to define all covariances in the matrix as $\tau^2/2$.]:
\begin{align}
\begin{bmatrix}
\hat\theta_{k\text{,A,E}} \\
\hat\theta_{k\text{,B,E}} \\
\hat\theta_{k\text{,C,E}} \\
\hat\theta_{k\text{,D,E}}
\end{bmatrix}
&=
\mathcal{N}\left(
\begin{bmatrix}
\theta_{\text{A,E}} \\
\theta_{\text{B,E}} \\
\theta_{\text{C,E}} \\
\theta_{\text{D,E}}
\end{bmatrix}
,
\begin{bmatrix}
\tau^2 & \tau^2/2 & \tau^2/2 & \tau^2/2 \\
\tau^2/2 & \tau^2 & \tau^2/2 & \tau^2/2 \\
\tau^2/2 & \tau^2/2 & \tau^2 & \tau^2/2 \\
\tau^2/2 & \tau^2/2 & \tau^2/2 & \tau^2
\end{bmatrix}
\right).
(\#eq:nw15)
\end{align}
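To build some intuition for this covariance structure, here is a small sketch that simulates one set of observed effects for such a five-arm trial using the **MASS** package (all numbers are made up for illustration):

```{r, eval=F}
library(MASS)
set.seed(123)
tau2 <- 0.3                                   # assumed heterogeneity variance
theta <- c(-0.6, -0.4, -0.3, -0.1)            # true effects of A-D vs. E

Sigma <- matrix(tau2 / 2, nrow = 4, ncol = 4) # covariances: tau^2 / 2
diag(Sigma) <- tau2                           # variances: tau^2

mvrnorm(n = 1, mu = theta, Sigma = Sigma)     # one simulated set of effects
```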
<br></br>
### Bayesian Network Meta-Analysis in _R_
---
\index{gemtc Package}
Now, let us use the **{gemtc}** package to perform our first Bayesian network meta-analysis. As always, we have to first install the package, and then load it from our library.
```{r, message=F, warning=F}
library(gemtc)
```
\index{Gibbs Sampler}
\index{JAGS}
\index{rjags Package}
The **{gemtc}** package depends on **{rjags}** [@rjags], which is used for the Gibbs sampling procedure that we described before (Chapter \@ref(bayesian-inference)). However, before we install and load this package, we first have to install another program called **JAGS** (short for “Just Another Gibbs Sampler”). The software is available for both Windows and Mac, and you can download it for free from the [Internet](https://sourceforge.net/projects/mcmc-jags/files/). After this is completed, we can install and load the **{rjags}** package^[Technically speaking, **JAGS** is not only a computer program, but also a programming language for Bayesian modeling that **{gemtc}** uses in the background (you can find the manual [here](https://people.stat.sc.edu/hansont/stat740/jags_user_manual.pdf)). JAGS itself relies heavily on the **BUGS** (short for "Bayesian inference Using Gibbs Sampling") language, which has existed since the late 1980s [@lunn2012bugs, chapter 2.2.1].].
\vspace{2mm}
```{r, eval=F}
install.packages("rjags")
library(rjags)
```
<br></br>
#### Data Preparation
---
In our example, we will again use the `TherapyFormats` data set, which we already used to fit a frequentist network meta-analysis. However, it is necessary to tweak the structure of our data a little so that it can be used in **{gemtc}**.
The original `TherapyFormats` data set includes the columns `TE` and `seTE`, which contain the standardized mean difference and standard error, with each row representing one comparison. If we want to use such relative effect data in **{gemtc}**, we have to reshape our data frame so that each row represents a single **treatment arm**. Furthermore, we have to specify which treatment was used as the reference group in a comparison by filling in `NA` into the effect size column. We have saved this reshaped version of the data set under the name `TherapyFormatsGeMTC`^[We have also prepared an _R_ vignette describing how one can transform network meta-analysis data in the "wider" **{netmeta}** format to the "longer" format required for relative effect size data in **{gemtc}**. The vignette can be found online: https://www.protectlab.org/vignettes/reshape-gemtc/].
\index{dmetar Package}
```{block, type='boxinfo'}
**The "TherapyFormatsGeMTC" Data Set**
\vspace{2mm}
The `TherapyFormatsGeMTC` data set is part of the **{dmetar}** package. If you have installed **{dmetar}**, and loaded it from your library, running `data(TherapyFormatsGeMTC)` automatically saves the data set in your _R_ environment. The data set is then ready to be used. If you do not have **{dmetar}** installed, you can download the data set as an _.rda_ file from the [Internet](https://www.protectlab.org/meta-analysis-in-r/data/TherapyFormatsGeMTC.rda), save it in your working directory, and then click on it in your R Studio window to import it.
```
The `TherapyFormatsGeMTC` data set is actually a list with two elements, one of which is called `data`. This element is the data frame we need to fit the model. Let us have a look at it.
```{r, warning=F, message=F, eval=F}
library(dmetar)
data(TherapyFormatsGeMTC)
head(TherapyFormatsGeMTC$data)
```
```{r, warning=F, message=F, echo=F}
library(dmetar)
load("data/TherapyFormatsGeMTC.rda")
head(TherapyFormatsGeMTC$data)
```
The **{gemtc}** package also requires that the columns of our data frame are labeled correctly. If we are using effect sizes based on continuous outcomes (such as the mean difference or standardized mean difference), our data set has to contain these columns:
* **`study`**. This column contains a (unique) label for each study included in our network, equivalent to the `studlab` column used in **{netmeta}**.
* **`treatment`**. This column contains the label or shortened code for the treatment.
* **`diff`**. This column contains the effect size (e.g. the standardized mean difference) calculated for a comparison. Importantly, the `diff` column contains `NA` (i.e. a missing value) in the row of the reference treatment used in a comparison. The row of the treatment to which the reference treatment was compared then holds the actual effect size calculated for this comparison. Also keep in mind that the reference category is defined **study-wise**, not **comparison-wise**. This means that in multi-arm studies, we still have only one reference treatment to which all the other treatments are compared. For a three-arm study, for example, we need to include two effect sizes: one for the first treatment compared to the reference group, and a second one for the other treatment compared to the reference group.
* **`std.err`**. This column contains the standard error of the effect sizes. It is also set to `NA` in the reference group and only defined in the row of the treatment that was compared to the reference group.
Please note that other data entry formats are also possible, for example for binary outcome data. The way the data set needs to be structured for different types of effect size data is detailed in the **{gemtc}** documentation. You can access it by running `?mtc.model` in the console, and then scrolling to the "Details" section.
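To make the continuous-outcome format described above more concrete, here is a small, entirely made-up sketch of how two two-arm studies would be entered (study labels and numbers are purely illustrative):

```{r, eval=F}
data.frame(
  study     = c("Ascot", "Ascot", "Bandura", "Bandura"),
  treatment = c("cau", "ind", "cau", "gsh"),
  diff      = c(NA, -0.57, NA, -0.41),
  std.err   = c(NA, 0.18, NA, 0.20))
```

The `cau` rows serve as the study-wise reference and therefore contain `NA` entries, while the other rows hold the effect of the respective treatment compared to `cau`.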
<br></br>
#### Network Graph
---
Now that we have our data ready, we feed it to the `mtc.network` function. This generates an object of class `mtc.network`, which we can use for later modeling steps. Because we are using pre-calculated effect size data, we have to specify our data set using the `data.re` argument in `mtc.network`. For raw effect size data (e.g. mean, standard deviation and sample size), we would have used the `data.ab` argument.
The optional `treatments` argument can be used to provide **{gemtc}** with the actual names of all the treatments included in the network. This information should be prepared in a data frame with an `id` and `description` column. We have created such a data frame and saved it as `treat.codes` in `TherapyFormatsGeMTC`:
```{r}
TherapyFormatsGeMTC$treat.codes
```
We use this data frame and our effect size data in `TherapyFormatsGeMTC` to build our `mtc.network` object. We save it under the name `network`.
```{r}
network <- mtc.network(data.re = TherapyFormatsGeMTC$data,
treatments = TherapyFormatsGeMTC$treat.codes)
```
Plugging the resulting object into the `summary` function already provides us with some interesting information about our network.
```{r, eval=F}
summary(network)
```
```
## $Description
## [1] "MTC dataset: Network"
##
## $`Studies per treatment`
## ind grp gsh tel wlc cau ush
## 62 52 57 11 83 74 26
##
## $`Number of n-arm studies`
## 2-arm 3-arm
## 181 1
##
## $`Studies per treatment comparison`
## t1 t2 nr
## 1 ind tel 4
## 2 ind wlc 18
## 3 grp ind 7
## [...]
```
\index{Network Graph}
We can also use the `plot` function to generate a network plot. Like the network generated by the **{netmeta}** package, the edge thickness corresponds with the number of studies we included for that comparison.
\vspace{2mm}
```{r, fig.width=8, fig.height=8, fig.align="center", out.width="65%", message=F, warning=F, eval=F}
plot(network,
use.description = TRUE) # Use full treatment names
```
```{r, fig.width=9, fig.height=8, fig.align="center", out.width="55%", message=F, warning=F, echo=F}
par(bg="#FFFEFA")
plot(network,
use.description = TRUE) # Use full treatment names
```
\vspace{2mm}
As an alternative, we can also check if we can create a better visualization of our network using the **Fruchterman-Reingold algorithm**. This algorithm comes with some inherent randomness, meaning that we have to set a seed to make our result reproducible.
The network plots are created using the **{igraph}** package [@igraph]. When this package is installed and loaded, we can also use other arguments to change the appearance of our plot. A detailed description of the different styling options can be found in the online **{igraph}** [manual](https://igraph.org/r/doc/plot.common.html).
\vspace{2mm}
```{r, fig.width=10, fig.height=8, fig.align="center", out.width="55%", message=F, warning=F, eval=F}
library(igraph)
set.seed(12345) # set seed for reproducibility
plot(network,
use.description = TRUE, # Use full treatment names
vertex.color = "white", # node color
vertex.label.color = "gray10", # treatment label color
vertex.shape = "sphere", # shape of the node
vertex.label.family = "Helvetica", # label font
vertex.size = 20, # size of the node
vertex.label.dist = 2, # distance label-node center
vertex.label.cex = 1.5, # node label size
edge.curved = 0.2, # edge curvature
layout = layout.fruchterman.reingold)
```
```{r, fig.width=10, fig.height=8, fig.align="center", out.width="55%", message=F, warning=F, echo=F}
library(igraph)
set.seed(12345) # set seed for reproducibility
par(bg="#FFFEFA")
plot(network,
use.description = TRUE, # Use full treatment names
vertex.color = "white", # node color
vertex.label.color = "gray10", # treatment label color
vertex.shape = "sphere", # shape of the node
vertex.label.family = "Helvetica", # label font
vertex.size = 20, # size of the node
vertex.label.dist = 2, # distance label-node center
vertex.label.cex = 1.5, # node label size
edge.curved = 0.2, # edge curvature
layout = layout.fruchterman.reingold)
```
<br></br>
#### Model Compilation
---
Using our `mtc.network` object, we can now start to specify and compile our model. The great thing about the **{gemtc}** package is that it automates most parts of the Bayesian inference process, for example by choosing adequate prior distributions for all parameters in our model.
\index{Markov Chain Monte Carlo}
Thus, there are only a few arguments we have to specify when compiling our model using the `mtc.model` function. First, we have to specify the `mtc.network` object we have created before. Furthermore, we have to decide if we want to use a random- or fixed-effects model using the `linearModel` argument. Given that our previous frequentist analysis indicated substantial heterogeneity and inconsistency (see Chapter \@ref(net-heat-plot)), we will use `linearModel = "random"`. We also have to specify the number of **Markov chains** we want to use. A value between 3 and 4 is sensible here, and we take `n.chain = 4`.
There are two additional, optional arguments called `likelihood` and `link`. These two arguments vary depending on the type of effect size data we are using, and are automatically inferred by **{gemtc}** unless explicitly specified. Since we are dealing with effect sizes based on continuous outcome data (viz. SMDs), we are assuming a `"normal"` likelihood along with an `"identity"` link.
Had we been using binary outcome measures (e.g. log-odds ratios), the appropriate likelihood and link would have been `"binom"` (binomial) and `"logit"`, respectively. More details on this can be found in the documentation of `mtc.model`. However, when the data has been prepared correctly in the previous step, `mtc.model` usually selects the correct settings automatically.
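For illustration only, a call for binary outcome data might look like the sketch below. It assumes that the network object had been built from binary (e.g. event count) data, which is not the case in our example:

```{r, eval=F}
# A sketch for a binary-outcome network (not run for our data)
model.bin <- mtc.model(network,
                       likelihood = "binom",
                       link = "logit",
                       linearModel = "random",
                       n.chain = 4)
```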
```{r}
# We give our compiled model the name `model`.
model <- mtc.model(network,
likelihood = "normal",
link = "identity",
linearModel = "random",
n.chain = 4)
```
<br></br>
#### Markov Chain Monte Carlo Sampling
---
\index{Markov Chain Monte Carlo}
Now we come to the crucial part of our analysis: the Markov Chain Monte Carlo (MCMC) sampling. The MCMC simulation allows us to estimate the posterior distributions of our parameters, and thus to generate the results of our network meta-analysis. There are two important desiderata for this procedure:
* We want the first few iterations of the Markov Chain Monte Carlo simulation, which will likely produce inadequate results, to not have a large impact on the overall simulation results.
* The Markov Chain Monte Carlo process should run long enough for us to obtain accurate estimates of the model parameters (i.e. it should **converge**).
To address these points, we split the number of times the Markov Chain Monte Carlo algorithm iterates to infer the model results into **two phases**: first, we define a number of **burn-in** iterations (`n.adapt`), the results of which are discarded. For the following phase, we specify the number of actual simulation iterations (`n.iter`), which are actually used to estimate the model parameters.
Given that we typically simulate many, many iterations, we can also specify the `thin` argument, which allows us to only extract the values of every $i$th iteration. This can help to reduce the required computer memory.
The simulation can be performed using the `mtc.run` function. In our example, we will perform two separate runs with different settings to compare which one works better. We have to provide the function with our compiled `model` object, and specify the parameters we just described.
First, we conduct a simulation with only a few iterations, and then a second one in which the number of iterations is large. We save both objects as `mcmc1` and `mcmc2`, respectively. Note that, depending on the size of your network, the simulation may take some time to finish.
\vspace{2mm}
```{r, eval=F}
mcmc1 <- mtc.run(model, n.adapt = 50, n.iter = 1000, thin = 10)
mcmc2 <- mtc.run(model, n.adapt = 5000, n.iter = 1e5, thin = 10)
```
<br></br>
#### Assessing Model Convergence {#bayesian-model-convergence}
---
To see if our simulations have resulted in the convergence of the algorithm, and to check which settings are preferable, we can evaluate some of the outputs of our `mcmc1` and `mcmc2` objects. A good start is to use the `plot` function. This provides us with a kind of "time series", commonly referred to as a **trace plot**, for each treatment comparison over all iterations. In this example, we only focus on the estimate of the individual therapy (`ind`) versus wait-list control (`wlc`) comparison.
```{r, eval=F}
plot(mcmc1)
plot(mcmc2)
```
```{r, echo=F, fig.width = 10, fig.height=4, fig.align="center", out.width="50%"}
load("data/mcmc1.rda")
load("data/mcmc2.rda")
library(coda)
library(purrr)
mcmc1[["samples"]] %>%
map(function(x) {
x[,"d.ind.wlc"]
}) %>% as.mcmc.list() -> mcmc1.tp
mcmc2[["samples"]] %>%
map(function(x) {
x[,"d.ind.wlc"]
}) %>% as.mcmc.list() -> mcmc2.tp
coda:::plot.mcmc.list(as.mcmc.list(mcmc1.tp),
density = F,
col = "gray50",
main = "Trace of d.ind.wlc (mcmc1)") # remove for color plots
coda:::plot.mcmc.list(as.mcmc.list(mcmc2.tp),
density = F,
col = "gray50",
main = "Trace of d.ind.wlc (mcmc2)") # remove for color plots
```
When comparing earlier to later iterations in `mcmc1`, we see that there is a slight discontinuity in the overall trend of the time series. The estimates of the four different chains (the four lines) slightly differ in their course when moving from the first half to the second half of the plot. In the plot for `mcmc2`, on the other hand, we see much more rapid up-and-down variation, but no real long-term trend. This delivers a first indication that the settings in `mcmc2` are more adequate^[To be reliable, the Markov chains of an estimated parameter should have reached **stationarity** during the course of the simulation, meaning that all lines scatter randomly around a common, stable mean value. When this point is reached, the chains in the trace plot typically resemble a "fat hairy caterpillar" [@lunn2012bugs, chapter 4.4.1].].
We can continue with our convergence assessment by looking at the density plots of the posterior effect size estimate. We see that, while the distribution in `mcmc1` still diverges somewhat from a smooth normal distribution, the result of `mcmc2` comes closer to a classic bell curve.
\vspace{2mm}
```{r, echo=F, fig.width = 6, fig.height=5, out.width="50%"}
load("data/mcmc1.rda")
load("data/mcmc2.rda")
library(coda)
library(purrr)
mcmc1[["samples"]] %>%
map(function(x) {
x[,"d.ind.wlc"]
}) %>% as.mcmc.list() -> mcmc1.tp
mcmc2[["samples"]] %>%
map(function(x) {
x[,"d.ind.wlc"]
}) %>% as.mcmc.list() -> mcmc2.tp
coda:::plot.mcmc.list(as.mcmc.list(mcmc1.tp),
density = T,
trace = F,
col = 1,
main = "Density of d.ind.wlc (mcmc1)") # remove for color plots
coda:::plot.mcmc.list(as.mcmc.list(mcmc2.tp),
density = T,
trace = F,
col = 1,
main = "Density of d.ind.wlc (mcmc2)") # remove for color plots
```
\index{Potential Scale Reduction Factor}
Another highly helpful method to assess convergence is the **Gelman-Rubin plot**. This plot shows the so-called **Potential Scale Reduction Factor** (PSRF), which compares the variation within each chain to the variation between chains, and how both develop over time. In case of convergence, the PSRF should gradually shrink toward 1 with increasing numbers of iterations, and should at least be below 1.05 in the end.
To produce this plot, we simply have to plug in the `mtc.run` object into the `gelman.plot` function. Here is the result for both simulations (again only showing the `ind` versus `wlc` comparison).
\vspace{2mm}
```{r, eval=F}
gelman.plot(mcmc1)
gelman.plot(mcmc2)
```
```{r, echo=F, fig.width = 5, fig.height=5, out.width="50%"}
load("data/mcmc1.rda")
load("data/mcmc2.rda")
library(coda)
library(purrr)
mcmc1[["samples"]] %>%
map(function(x) {
x[,"d.ind.wlc"]
}) %>% as.mcmc.list() -> mcmc1.tp
mcmc2[["samples"]] %>%
map(function(x) {
x[,"d.ind.wlc"]
}) %>% as.mcmc.list() -> mcmc2.tp
gelman.plot(mcmc1.tp, main = "Gelman-Rubin Plot of d.ind.wlc (mcmc1)")
gelman.plot(mcmc2.tp, main = "Gelman-Rubin Plot of d.ind.wlc (mcmc2)")
```
We can also directly access the **overall** PSRF of our model, using this code:
```{r, eval=F}
gelman.diag(mcmc1)$mpsrf
```
```
## [1] 1.034131
```
```{r, eval=F}
gelman.diag(mcmc2)$mpsrf
```
```
## [1] 1.000351
```
We see that, while the overall PSRF is below the threshold in both simulations, the value in `mcmc2` is much lower and very close to 1. This indicates that the second model should be used.
<br></br>
#### Assessing Inconsistency: The Nodesplit Method
---
Like the **{netmeta}** package, the **{gemtc}** package also provides us with a way to evaluate the consistency of our network model: the _nodesplit_ method (Dias et al., 2010). The idea behind this procedure is similar to the one of the net splitting method that we described before (Chapter \@ref(net-splitting)). To perform a _nodesplit_ analysis, we use the `mtc.nodesplit` function, using the same settings as in `mcmc2`. We save the result as `nodesplit`.
Please be aware that the nodesplit model computation may take a long time, even up to several hours, depending on the complexity of your network.
\index{Node Splitting}
\index{Consistency}
```{r, echo=F}
load("data/nodesplit.rda")
```
```{r, eval=F}
nodesplit <- mtc.nodesplit(network,
linearModel = "random",
likelihood = "normal",
link = "identity",
n.adapt = 5000,
n.iter = 1e5,
thin = 10)
```
Using the `summary` function, we can print the results.
```{r, eval=F}
summary(nodesplit)
```
```
## Node-splitting analysis of inconsistency
## ========================================
##
## comparison p.value CrI
## 1 d.ind.tel 0.62785
## 2 -> direct 0.13 (-0.39, 0.64)
## 3 -> indirect -0.037 (-0.46, 0.38)
## 4 -> network 0.034 (-0.30, 0.36)
## 5 d.ind.wlc 0.87530
## 6 -> direct 1.0 (0.74, 1.3)
## 7 -> indirect 0.97 (0.71, 1.2)
## 8 -> network 0.98 (0.80, 1.2)
## 9 d.ind.grp 0.61380
## 10 -> direct 0.14 (-0.29, 0.57)
## 11 -> indirect 0.26 (0.044, 0.48)
## 12 -> network 0.24 (0.041, 0.43)
## [...]
```
The function output shows us the results for the effects of different comparisons when using only direct, only indirect, and all available evidence. Diverging estimates based on direct and indirect evidence suggest the presence of inconsistency. We can check for this by looking at the Bayesian `p.value` column. One or more comparisons with $p<$ 0.05 are problematic, since this indicates inconsistency in our network. From the output, we see that this is not the case in our (random-effects model) example.
When the nodesplitting method **does** show inconsistencies in some of the estimates, it is important to again check **all** included evidence for potential differences between designs. It may be possible, for example, that studies comparing A and B included systematically different populations than other studies which also assessed A.
Another approach is to check if the inconsistency persists when only a sensible subset of studies is included in the network. Lastly, it is also possible to assess reasons for inconsistency by running a network meta-regression, which we will cover later.
\index{Forest Plot}
It is also possible to generate a forest plot for the nodesplit model, using the `plot` function. However, a forest plot is only generated when we plug the nodesplit object into `summary` first.
```{r, eval=F}
plot(summary(nodesplit))
```
```{r, message = F, out.width = '90%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/nodesplit_forest_sep.png')
```
<br></br>
#### Generating the Network Meta-Analysis Results
---
Now that we fitted our network meta-analysis model, and have convinced ourselves that it is trustworthy, it is time to finally produce the results.
As mentioned before, the main question we may want to answer in network meta-analyses is which treatment performs the best. To answer this question, we can first run the `rank.probability` function. This function calculates the probability of a treatment being the best option, second best option, third best option, and so forth. The function needs our `mcmc2` object as input, and we additionally specify the `preferredDirection` argument. If smaller (i.e. negative) effect sizes indicate better outcomes, we set this argument to `-1`. Otherwise, we use `1`.
We save the results under the name `rank`, and then visualize them using a so-called **rankogram**.
```{r, fig.height=4, fig.width=7, out.width="65%", fig.align="center", eval=F}
rank <- rank.probability(mcmc2, preferredDirection = -1)
plot(rank, beside=TRUE)
```
```{r, fig.height=4, fig.width=7, out.width="65%", fig.align="center", echo=F}
rank <- rank.probability(mcmc2, preferredDirection = -1)
par(bg="#FFFEFA")
plot(rank, beside=TRUE)
```
In this plot, we see that individual therapy (ind) is probably the best treatment option in our network, given that its first bar (signifying the first rank) is the largest. This finding is in agreement with the results of the frequentist analysis, where we found the same pattern.
Additionally, we can also produce a forest plot of our results using the `forest` function. To do this, we first have to put our results object into the `relative.effect` function and specify `t1`, the reference treatment. We use care as usual (`"cau"`) as the reference group again. Then, we call the `forest` function on the results to generate the plot.
```{r, fig.width=7, fig.height=3, fig.align="center", out.width="80%", eval=F}
forest(relative.effect(mcmc2, t1 = "cau"),
use.description = TRUE, # Use long treatment names
xlim = c(-1.5, 0.5))
```
```{r, fig.width=7, fig.height=3, fig.align="center", out.width="80%", echo=F}
par(bg="#FFFEFA")
forest(relative.effect(mcmc2, t1 = "cau"),
use.description = TRUE, # Use long treatment names
xlim = c(-1.5, 0.5))
```
\index{SUCRA Score}
In the chapter on frequentist network meta-analysis, we already covered the P-score as a metric to evaluate which treatment in a network is likely to be the most efficacious. An equivalent to the P-score is the **Surface Under the Cumulative Ranking** (SUCRA) score, which can be calculated like this [@salanti2011graphical]:
\begin{equation}
\text{SUCRA}_j = \frac{\sum_{b=1}^{a-1}\text{cum}_{jb}}{a-1}
(\#eq:nw16)
\end{equation}
Where $j$ is some treatment, $a$ is the number of competing treatments, $b$ runs through the $b = 1, 2, \dots, a-1$ best treatments, and $\text{cum}_{jb}$ represents the **cumulative probability** of treatment $j$ being among the $b$ best treatments. To calculate the SUCRA scores in _R_, we can use the `sucra` function.
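Before turning to this function, the following minimal sketch illustrates what the formula above does, using a small **made-up** matrix of rank probabilities (the same type of information that `rank.probability` returns):

```{r, eval=F}
# Made-up rank probabilities for three treatments A, B and C
# (columns are ranks, rank 1 = best; each row sums to 1).
rank.prob <- rbind(A = c(0.70, 0.20, 0.10),
                   B = c(0.25, 0.50, 0.25),
                   C = c(0.05, 0.30, 0.65))

a <- ncol(rank.prob) # number of competing treatments

# SUCRA: average cumulative probability of being among the
# b best treatments, with b running from 1 to a-1.
apply(rank.prob, 1, function(p) sum(cumsum(p)[1:(a-1)])/(a-1))
# A: 0.8, B: 0.5, C: 0.2
```

Treatment A, which is most likely to be ranked first, receives the highest SUCRA value.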
\index{dmetar Package}
```{block, type='boxdmetar'}
**The "sucra" Function**
\vspace{4mm}
The `sucra` function is included in the **{dmetar}** package. Once **{dmetar}** is installed and loaded on your computer, the function is ready to be used. If you did **not** install **{dmetar}**, follow these instructions:
\vspace{2mm}
1. Access the source code of the function [online](https://raw.githubusercontent.com/MathiasHarrer/dmetar/master/R/sucra.R).
2. Let _R_ "learn" the function by copying and pasting the source code in its entirety into the console (bottom left pane of R Studio), and then hit "Enter".
3. Make sure that the **{ggplot2}** package is installed and loaded.
```
The `sucra` function only needs a `rank.probability` object as input, and we need to specify if lower values indicate better outcomes. This can be done using the `lower.is.better` argument. Let us see what results we get.
```{r, message=F, warning=F, fig.height=3, fig.width=5, fig.align="center", out.width="40%", eval=F}
library(dmetar)
rank.probability <- rank.probability(mcmc2)
sucra <- dmetar::sucra(rank.probability, lower.is.better = TRUE)
sucra
```
```
## SUCRA
## ind 0.9225292
## tel 0.8516583
## gsh 0.6451292
## [...]
```
```{r, message=F, warning=F, fig.height=3, fig.width=5, fig.align="center", out.width="40%", echo=F}
library(dmetar)
rank.probability <- rank.probability(mcmc2)
sucra <- dmetar::sucra(rank.probability, lower.is.better = TRUE)
```
```{r, message=F, warning=F, fig.height=4, fig.width=5, fig.align="center", out.width="50%"}
plot(sucra)
```
Looking at the SUCRA values of each treatment, we again see that individual treatment may be the best option, followed by telephone-based treatment and guided self-help.
Usually, we want to report the effect size estimate for each treatment comparison based on our model. A treatment effect table can be exported using the `relative.effect.table` function. We save the results of this function in an object called `results`, which we can then export as a .csv file.
The `relative.effect.table` function automatically creates a treatment comparison matrix containing the estimated effect, as well as the credible intervals for each comparison.
```{r, eval=F}
results <- relative.effect.table(mcmc2)
write.csv(results, file = "results.csv")
```
<br></br>
### Network Meta-Regression
---
\index{Meta-Regression}
\index{gemtc Package}
A big asset of the **{gemtc}** package is that it allows us to conduct **network meta-regression**. Much like conventional meta-regression (Chapter \@ref(metareg)), we can use this functionality to determine if specific study characteristics influence the magnitude of effect sizes found in our network. It can also be a helpful tool to check for variables that may explain inconsistency.
\index{Risk of Bias}
Imagine we want to evaluate if the risk of bias of a study has an influence on the effects in our network meta-analysis. For example, it could be that studies with a high risk of bias generally report higher effects compared to the control group or alternative treatments. By including risk of bias as a predictor to our model, we can control for such an association, and assess its impact on our results.
To run a network meta-regression in **{gemtc}**, we have to follow similar steps as before, when we fitted a Bayesian network meta-analysis model without covariates. First, we need to set up our network using `mtc.network`. This time, however, we specify an additional argument called `studies`. This argument requires a data frame in which predictor information for each study is stored. The `TherapyFormatsGeMTC` data set includes an element `study.info`, which contains the risk of bias of each study.
Let us have a quick look at the data.
```{r, eval=F}
TherapyFormatsGeMTC$study.info
```
```
## study rob
## 1 Campbell, 2000 1
## 2 Reynolds, 1989 1
## 3 Carpenter, 1994 0
## 4 Shrednik, 2000 1
## [...]
```
\index{Dummy Variable}
The data set contains two columns: `study`, the name of the study included in our network and `rob`, its risk of bias. Please note that the study labels must be completely identical to the ones used in the actual effect size data set. The `rob` variable is a dummy-coded predictor, where `0` indicates a low risk of bias, and `1` high risk of bias. Using the `study.info` data frame, we can now create a meta-regression network using `mtc.network`.
```{r, eval=F}
network.mr <- mtc.network(data.re = TherapyFormatsGeMTC$data,
studies = TherapyFormatsGeMTC$study.info,
treatments = TherapyFormatsGeMTC$treat.codes)
```
Now, we must define the **regressor** we want to include in our network meta-analysis model. This can be done by generating a list object with three elements:
* **`coefficient`**: We set this element to `"shared"` because we want to estimate one shared coefficient for the effect of (high) risk of bias across all treatments included in our network meta-analysis.
* **`variable`**: This specifies the name of the variable we want to use as the predictor (here: `"rob"`).
* **`control`**: We also have to specify the treatment which we want to use as the reference group. We use `"cau"` (care as usual) in our example.
```{r}
regressor <- list(coefficient = "shared",
variable = "rob",
control = "cau")
```
Next, we compile our model. We provide the `mtc.model` function with the network we just generated, set the type of our model to `"regression"`, and provide the function with the `regressor` object we just generated. We save the output under the name `model.mr`.
```{r, eval=F}
model.mr <- mtc.model(network.mr,
likelihood = "normal",
link = "identity",
type = "regression",
regressor = regressor)
```
After this step, we can run the model using the `mtc.run` function. We use the same specifications as used for fitting the `mcmc2` model before. The results are saved as `mcmc3`.
```{r, eval=F}
mcmc3 <- mtc.run(model.mr,
n.adapt = 5000,
n.iter = 1e5,
thin = 10)
```
```{r, echo=F}
load("data/mcmc3.rda")
```
Now, we can analyze the results using the `summary` function.
```{r, eval=F}
summary(mcmc3)
```
```
## Results on the Mean Difference scale
## [...]
##
## 1. Empirical mean and standard deviation for each variable,
## plus standard error of the mean:
##
## Mean SD Naive SE Time-series SE
## d.ind.cau 0.6992 0.07970 0.0003985 0.0004201
## d.ind.grp 0.1933 0.10009 0.0005005 0.0005321
## [...]
## B -0.3297 0.13047 0.0006523 0.0010379
##
## 2. Quantiles for each variable:
##
## 2.5% 25% 50% 75% 97.5%
## d.ind.cau 0.542044 0.64602 0.69967 0.7529 0.85571
## d.ind.grp -0.002622 0.12599 0.19353 0.2608 0.38962
## [...]
## B -0.586266 -0.41790 -0.32957 -0.2417 -0.07455
##
## [...]
## -- Regression settings:
##
## Regression on "rob", shared coefficients, "cau" as control
## Input standardized: x' = (rob - 0.4340659) / 1
## Estimates at the centering value: rob = 0.4340659
```
The results for our predictor are reported next to `B`. Because our predictor is dummy-coded, the value of `B` represents the effect of a study having a **high** risk of bias. The estimate is $b=$ -0.33, and when looking at the second table (`Quantiles for each variable`), we see that the 95% credible interval of $b$ ranges from -0.59 to -0.07. Since the credible interval does not include zero, we may conclude that risk of bias does indeed influence the results. When the risk of bias is high (`rob` = 1), we can predict higher overall effects (since negative effect sizes indicate "better" outcomes in our example).
We can explore the effect of the predictor further by generating two forest plots: one for the estimated treatment effects when the risk of bias is high, and another for when it is low. We can do this using the `relative.effect` function, where we specify the `covariate` value. A value of `covariate = 0` stands for studies with a low risk of bias, and `covariate = 1` for high risk of bias.
```{r, fig.show='hold', fig.width=7, fig.height=3, fig.align="center", out.width="80%", eval=F}
forest(relative.effect(mcmc3, t1 = "cau", covariate = 1),
use.description = TRUE, xlim = c(-1.5, 1))
title("High Risk of Bias")
forest(relative.effect(mcmc3, t1 = "cau", covariate = 0),
use.description = TRUE, xlim = c(-1.5, 1))
title("Low Risk of Bias")
```
```{r, fig.show='hold', fig.width=7, fig.height=3, fig.align="center", out.width="80%", echo=F}
par(bg="#FFFEFA")
forest(relative.effect(mcmc3, t1 = "cau", covariate = 1),
use.description = TRUE, xlim = c(-1.5, 1))
title("High Risk of Bias")
```
```{r, fig.show='hold', fig.width=7, fig.height=3, fig.align="center", out.width="80%", echo=F}
par(bg="#FFFEFA")
forest(relative.effect(mcmc3, t1 = "cau", covariate = 0),
use.description = TRUE, xlim = c(-1.5, 1))
title("Low Risk of Bias")
```
Comparing the forest plots, we can see that there is a pattern. The treatment effects based on high risk of bias studies are generally higher (i.e. more negative). This is in line with the estimate of our predictor $b$ in the fitted model.
\index{Akaike's Information Criterion}
Lastly, we can also examine if the network meta-regression model we just generated fits the data better than the "normal" network meta-analysis model from before. To do this, we can compare the **deviance information criteria** (DICs), which are an equivalent to the AIC and BIC values in frequentist statistics. We can access the DIC of both `mcmc3` and `mcmc2` using this code:
```{r}
summary(mcmc3)$DIC
summary(mcmc2)$DIC
```
We see in the output that the DIC value of our meta-regression model (261.19) is lower than the one of our previous model which did not control for risk of bias (DIC = 323.6). Lower DIC values indicate a better fit. Based on this finding, we can conclude that our network meta-regression model fits the data better than one without the covariate.
\index{WinBUGS}
```{block, type='boxinfo'}
**Further Reading**
\vspace{2mm}
This is the end of our brief introduction to network meta-analysis using _R_. We have described the general idea behind network meta-analysis, the assumptions and some of the caveats associated with it, two different statistical approaches through which network meta-analysis can be conducted, and how they are implemented in _R_.
\vspace{2mm}
We would like to stress that what we covered here should only be seen as a rough overview. Although we have covered some of the main pitfalls, it is still possible that you may get stuck once you begin with your own network meta-analysis.
\vspace{2mm}
An excellent resource to learn more about network meta-analysis and how it can be applied in practice is _Network Meta-Analysis for Decision-Making_, written by Dias and colleagues [-@dias2018network]. The book also features several hands-on examples, and shows how to run network meta-analysis models using the open source software _WinBUGS_. A shorter (and rather technical) overview of the "state-of-art" in network meta-analysis can be found in an open-access paper by Efthimiou et al. [-@efthimiou2016getreal].
```
$$\tag*{$\blacksquare$}$$
<br></br>
## Questions & Answers
```{block, type='boxquestion'}
**Test your knowledge!**
\vspace{4mm}
1. When are network meta-analyses useful? What is their advantage compared to standard meta-analyses?
\vspace{-2mm}
2. What is the difference between direct and indirect evidence in a treatment network? How can direct evidence be used to generate indirect evidence?
\vspace{-2mm}
3. What is the main idea behind the assumption of transitivity in network meta-analyses?
\vspace{-2mm}
4. What is the relationship between transitivity and consistency?
\vspace{-2mm}
5. Name two modeling approaches that can be used to conduct network meta-analyses. Is one of them better than the other?
\vspace{-2mm}
6. When we include several comparisons from one study (i.e. multi-arm studies), what problem does this cause?
\vspace{-2mm}
7. What do we have to keep in mind when interpreting the P- or SUCRA score of different treatments?
\vspace{4mm}
**Answers to these questions are listed in [Appendix A](https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/qanda.html#qanda12) at the end of this book.**
```
<br></br>
## Summary
* Network meta-analysis is a useful tool to jointly estimate the **relative effectiveness** of various treatments or interventions.
* To estimate the treatment effects, network meta-analysis **combines** both direct (i.e. observed) and indirect evidence. This, however, is based on the assumption of **transitivity**. Transitivity is fulfilled when we can combine direct evidence of two comparisons to derive valid indirect evidence about a third one.
* The **statistical manifestation** of transitivity is consistency, the opposite of which is inconsistency. Inconsistency arises when the true effect of some comparison based on direct evidence does not align with the one based on indirect evidence.
* Some methods, such as nodesplitting or net heat plots, can be used to **identify** inconsistencies in our network. When inconsistencies are found, this threatens the validity of our results as a whole. In such cases, the entire network should be checked for characteristics that may have caused systematic differences between studies/designs.
* Network meta-analysis is possible using either a **frequentist** or **Bayesian** approach. In practice, each of these methods has individual strengths, but the overall results are usually very similar.
* In network meta-analyses based on a Bayesian hierarchical model, we can also add study **covariates** that predict effect size differences. This results in a network meta-regression model.
* Indices such as the SUCRA or P-score can be used to examine which type of treatment may be **the most effective** in our network. However, it is also important to integrate uncertainty into our decision-making process. Confidence/credible intervals of different treatments often overlap, which makes it less clear if one format is truly superior to all the others.
# Forest Plots {#forest}
---
<img src="_figs/forest2.jpg" />
<br></br>
\index{Forest Plot}
<span class="firstcharacter">I</span>
n the last chapters, we learned how we can pool effect sizes in _R_, and how to assess the heterogeneity in a meta-analysis. We now come to a somewhat more pleasant part of meta-analyses, in which we visualize the results we obtained in previous steps.
The most common way to visualize meta-analyses is through **forest plots**. Such plots provide a graphical display of the observed effect, confidence interval, and usually also the weight of each study. They also display the pooled effect we have calculated in a meta-analysis. Overall, this allows others to quickly examine the precision and spread of the included studies, and how the pooled effect relates to the observed effect sizes.
\index{meta Package}
The **{meta}** package has an in-built function which makes it very easy to produce beautiful forest plots directly in _R_. The function has a broad functionality and allows one to change the appearance of the plot as desired. This forest plot function, and how we can use it in practice, will be the main focus of this chapter. Furthermore, we will also briefly discuss an alternative approach to visualize the results of a meta-analysis.
<br></br>
## What Is a Forest Plot?
---
Figure \@ref(fig:forest) shows the main components of a forest plot. On the left side, forest plots display the name of each study included in the meta-analysis. For each study, a graphical representation of the effect size is provided, usually in the center of the plot. This visualization shows the point estimate of a study on the x-axis. This point estimate is supplemented by a line, which represents the range of the confidence interval calculated for the observed effect size. Usually, the point estimate is surrounded by a square. The size of this square is determined by the weight (Chapter \@ref(fem)) of the effect size: studies with a larger weight are given a larger square, while studies with a lower weight have a smaller square.
Conventionally, a forest plot should also contain the effect size data that was used to perform the meta-analysis. This provides others with the data needed to replicate our results.
```{r forest, out.width='100%', message = F, echo = F, fig.align='center', fig.cap = "Key elements of a forest plot."}
library(OpenImageR)
knitr::include_graphics('images/forest_sep.png')
```
At the bottom of the plot, a diamond shape represents the average effect. The length of the diamond symbolizes the confidence interval of the pooled result on the x-axis. Typically, forest plots also include a vertical **reference line**, which indicates the point on the x-axis equal to no effect. As we will see in the coming examples, forest plots can be enhanced by also displaying, for example, a heterogeneity measure such as $I^2$ or $\tau^2$.
\index{Logarithm, Natural}
Effect size and confidence intervals in forest plots are usually displayed on a linear scale. Yet, when the summary measure is a **ratio** (such as odds ratios or risk ratios), it is common to use a **logarithmic** scale on the x-axis instead. This means that values around 1 are more closely together than values which are much lower or higher than 1.
This makes sense for ratios since these effect size metrics cannot be interpreted in a "linear" fashion (i.e. the "opposite" of RR = 0.50 is 2, not 1.5; see Chapter \@ref(ratios)). The reference line for such effect sizes is usually 1, which indicates no effect.
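A quick way to see why the logarithmic scale is sensible: on the log scale, a risk ratio and its reciprocal are equally far away from the line of no effect (RR = 1).

```{r, eval=F}
# RR = 0.5 and RR = 2 have the same distance from RR = 1 on the log scale
log(0.5) # approximately -0.693
log(2)   # approximately  0.693
```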
<br></br>
## Forest Plots in _R_ {#forest-R}
---
We can produce a forest plot for any type of **{meta}** meta-analysis object (e.g. results of `metagen`, `metacont`, or `metabin`) using the `forest.meta` function. We simply have to provide `forest.meta` with our **{meta}** object, and a plot will be created. Usually, these forest plots already look very good by default, but the function also has countless additional arguments to further adapt the appearance. All of these arguments are described in the function documentation (which can be accessed by running `?forest.meta`). Here is a list of the more important ones:
* **`sortvar`**. The variable in the meta-analysis data set by which studies are sorted in the forest plot. If we want to order the results by effect size, for example, we can use the code `sortvar = TE`.
* **`comb.fixed`**. Logical, indicating if the fixed-effect model estimate should be included in the plot.
* **`comb.random`**. Logical, indicating if the random-effects model estimate should be included in the plot.
* **`text.fixed`**. The **label** for the pooled effect according to the fixed-effect model. By default, `"Fixed effect model"` is printed.
* **`text.random`**. The label for the pooled effect according to the random-effects model. By default, `"Random effects model"` is printed.
* **`prediction`**. Logical, indicating if the prediction interval should be added to the plot.
* **`label.left`** and **`label.right`**. Label added to the left and right side of the forest plot. This can be used to specify that, for example, effects on this side favor the treatment (e.g. `label.left = "Favors treatment"`).
* **`smlab`**. A label displayed on top of the plot. This can be used to show which effect size metric was used.
* **`xlim`**. The limits of the x-axis, or the character `"s"` to produce symmetric forest plots. This argument is particularly relevant when your results deviate substantially from zero, or if you also want outliers to be depicted. If we want the x-axis to range from 0 to 2, for example, the code is `xlim = c(0,2)`.
* **`ref`**. The reference line in the plot. Depending on the summary measure we used, this is either 0 or 1 by default.
* **`leftcols`** and **`rightcols`**. Here, you can specify which variables should be displayed on the left and right side of the forest plot. There are a few in-built elements that the function uses by default. For example, `"studlab"` stands for the study label, `"effect"` for the observed effect size, and `"effect.ci"` for both the effect size and its confidence interval. It is also possible to add user-defined columns, as long as these were included in the `data.frame` we initially provided to the **{meta}** function. In this case, we only have to add the name of the column as a character string.
* **`leftlabs`** and **`rightlabs`**. The labels that should be used for the columns displayed to the left and right of the forest plot.
* **`print.I2`** and `print.I2.ci`. Logical, indicating if the $I^2$ value and its confidence interval should be printed. This is `TRUE` by default.
* **`print.tau2`** and **`print.tau`**. Logical, indicating if the $\tau^2$ and $\tau$ value should be printed. The value of $\tau^2$ is printed by default.
* **`col.square`**, **`col.diamond`** and **`col.predict`**. The color (e.g. `"blue"`) of the square, diamond and prediction interval, respectively.
Time to generate our first forest plot. In this example, we plot the `m.gen` object that we also used in the previous examples. We sort the studies in the forest plot by effect size, add a prediction interval, and user-defined labels to the left. The `forest.meta` function prints the $\tau^2$ value by default, which we do not want here, so we set `print.tau2` to `FALSE`.
This is how our code looks in the end:
```{r, fig.height=6, fig.width=8, eval=F}
forest.meta(m.gen,
sortvar = TE,
prediction = TRUE,
print.tau2 = FALSE,
leftlabs = c("Author", "g", "SE"))
```
```{r, fig.height=6, fig.width=8, echo=F}
par(bg="#FFFEFA")
forest(m.gen,
sortvar = TE,
prediction = TRUE,
print.tau2 = FALSE,
leftlabs = c("Author", "g", "SE"))
```
The plot that `forest.meta` provides us with already looks quite decent. We also see that a thick black line has been added to the plot, representing the prediction interval around our pooled effect.
\index{Risk of Bias}
We could enhance the plot by adding a column displaying the risk of bias of each study. The `ThirdWave` data set, which we used to generate `m.gen`, contains a column called `RiskOfBias`, in which the risk of bias assessment of each study is stored.
When we used `metagen` to calculate the meta-analysis (Chapter \@ref(pre-calculated-es)), the function automatically saved this data within `m.gen`. Therefore, we can use the `leftcols` argument to add the column to the plot. This results in the following code:
```{r, fig.height=6, fig.width=9, eval=F}
forest.meta(m.gen,
sortvar = TE,
prediction = TRUE,
print.tau2 = FALSE,
leftcols = c("studlab", "TE", "seTE", "RiskOfBias"),
leftlabs = c("Author", "g", "SE", "Risk of Bias"))
```
```{r, fig.height=6, fig.width=9, echo=F}
par(bg="#FFFEFA")
forest(m.gen,
sortvar = TE,
prediction = TRUE,
print.tau2 = FALSE,
leftcols = c("studlab", "TE", "seTE", "RiskOfBias"),
leftlabs = c("Author", "g", "SE", "Risk of Bias"))
```
We see that now, the risk of bias information of each study has been added to the forest plot.
\vspace{4mm}
<br></br>
### Layout Types
---
The `forest.meta` function has two "pre-packaged" layouts, which we can use to bring our forest plot into a specific format without having to specify numerous arguments. One of them is the `"JAMA"` layout, which gives us a forest plot according to the guidelines of the **Journal of the American Medical Association**. This layout may be used if you want to publish your meta-analysis in a medical journal.
\vspace{2mm}
```{r, eval=F}
forest.meta(m.gen, layout = "JAMA")
```
```{r, echo=F, fig.height=6, fig.width=8, out.width="75%", fig.align='center'}
par(bg="#FFFEFA")
forest.meta(m.gen, layout = "JAMA")
```
\index{Review Manager (RevMan)}
The other layout is `"RevMan5"`, which produces a forest plot similar to the ones generated by Cochrane's **Review Manager 5**.
```{r, eval=F}
forest.meta(m.gen, layout = "RevMan5")
```
```{r, echo=F, fig.height=6, fig.width=9, out.width="75%", fig.align='center'}
par(bg="#FFFEFA")
forest.meta(m.gen, layout = "RevMan5")
```
<br></br>
### Saving the Forest Plots
---
Forest plots generated by `forest.meta` can be saved as a PDF, PNG, or **scalable vector graphic** (SVG) file. In contrast to other plots generated through base _R_ or the **{ggplot2}** package, the output of `forest.meta` is not automatically re-scaled when we save it as a file. This means that forest plots are sometimes cut off on two or four sides, and we have to adjust the width and height manually so that everything is visible.
The `pdf`, `png` and `svg` functions can be used to save plots via _R_ code. We have to start with a call to one of these functions, which tells _R_ that the output of the following code should be saved to a file. Then, we add our call to the `forest.meta` function. In the last line, we have to include `dev.off()`, which will save the generated output to the file we specified above.
All three functions require us to specify the `file` argument, which should contain the name of the file. The file is then automatically saved in the working directory under that name. Additionally, we can use the `width` and `height` argument to control the size of the plot, which can be helpful when the output is cut off.
Assuming we want to save our initial forest plot under the name "forestplot", we can use the following code to generate a PDF, PNG and SVG file.
\vspace{2mm}
**PDF**
```{r, eval=F}
pdf(file = "forestplot.pdf", width = 8, height = 7)
forest.meta(m.gen,
sortvar = TE,
prediction = TRUE,
print.tau2 = FALSE,
leftlabs = c("Author", "g", "SE"))
dev.off()
```
\vspace{2mm}
**PNG**
```{r, eval=F}
png(file = "forestplot.png", width = 2800, height = 2400, res = 300)
forest.meta(m.gen,
sortvar = TE,
prediction = TRUE,
print.tau2 = FALSE,
leftlabs = c("Author", "g", "SE"))
dev.off()
```
\vspace{2mm}
**SVG**
```{r, eval=F}
svg(file = "forestplot.svg", width = 8, height = 7)
forest.meta(m.gen,
sortvar = TE,
prediction = TRUE,
print.tau2 = FALSE,
leftlabs = c("Author", "g", "SE"))
dev.off()
```
<br></br>
## Drapery Plots {#drapery}
---
\index{Drapery Plot}
\index{P-Value}
Forest plots are, by far, the most common way to visualize meta-analyses. Most published meta-analyses contain a forest plot, and many researchers understand how they are interpreted. It is advisable that you also include one in your meta-analysis report since forest plots provide a comprehensive and easily understandable summary of your findings.
However, forest plots are not the only way to illustrate our results. Meta-analyses can also be visualized, for example, through **drapery plots** [@rucker2020beyond]. A drawback of forest plots is that they can only display confidence intervals assuming a fixed significance threshold, conventionally $p<$ 0.05. It is based on these confidence intervals that researchers decide if an effect is significant or not.
There has been a controversy around the use of $p$-values in recent years [@wellek2017critical], and some have argued that hypothesis testing based on $p$-values has contributed to the "replication crisis" in many research areas [@nuzzo2014statistical].
Drapery plots are based on $p$-**value functions**. Such $p$-value functions have been proposed to prevent us from solely relying on the $p$<0.05 significance threshold when interpreting the results of an analysis [@infanger2019p].
Therefore, instead of only calculating the 95% confidence interval, $p$-value functions provide a continuous curve which shows the confidence interval for varying values of $p$. In a drapery plot, a confidence curve is plotted for each study, as well as for the average effect. The x-axis shows the effect size metric, and the y-axis the assumed $p$-value.
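To build some intuition for this idea, here is a minimal sketch based on a **made-up** effect size and standard error. It calculates the confidence interval of a single effect for a few selected significance thresholds; a $p$-value function simply does the same for a whole continuum of thresholds.

```{r, eval=F}
g <- 0.58  # made-up effect size
se <- 0.09 # made-up standard error

# Confidence intervals for different assumed significance thresholds p
p <- c(0.1, 0.05, 0.01, 0.001)
data.frame(p = p,
           lower = g - qnorm(1 - p/2) * se,
           upper = g + qnorm(1 - p/2) * se)
```

The smaller the assumed $p$-value, the wider the interval becomes, which produces the "curtain"-like shape we see in drapery plots.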
Drapery plots can be generated through the `drapery` function in **{meta}**. Like `forest.meta`, this function automatically generates the plot once we provide it with a **{meta}** meta-analysis object. There are a few additional arguments, with the most important ones being:
* **`type`**: Defines the type of value to be plotted on the y-axis. This can be `"zvalue"` (default) for the test statistic, or the $p$-value (`"pvalue"`).
* **`study.results`**: Logical, indicating if the results of each study should be included in the plot. If `FALSE`, only the summary effect is printed.
* **`labels`**: When we set this argument to `"studlab"`, the study labels will be included in the plot.
* **`legend`**: Logical, indicating if a legend should be printed.
* **`pos.legend`**. The position of the legend. Either `"bottomright"`, `"bottom"`, `"bottomleft"`, `"left"`, `"topleft"`, `"top"`, `"topright"`, `"right"`, or `"center"`.
Let us try out the `drapery` function in an example using our `m.gen` meta-analysis object.
```{r, fig.align='center', eval=F}
drapery(m.gen,
labels = "studlab",
type = "pval",
legend = FALSE)
```
```{r, fig.align='center', echo=F, fig.width=12, fig.height=8}
par(bg="#FFFEFA")
drapery(m.gen,
labels = "studlab",
type = "pval",
legend = FALSE,
bg = "#FFFEFA")
```
The resulting plot contains a $p$-value curve for each effect size, all in the shape of an upside down V. The thick line represents the average effect according to the random-effects model. The shaded area we see in the plot represents the prediction interval, which is considerably wider than the confidence interval of the pooled effect.
The "peak" of the $p$-value functions represents the exact value of the effect size in our meta-analysis. As we go down the y-axis, the $p$-value becomes smaller, and the confidence intervals wider and wider, until we reach conventional significance thresholds, indicated by the dashed horizontal lines.
Based on the plot, we see that we can be quite confident in the pooled effect size being greater than zero, given that the thick line reaches zero on the x-axis when $p$ is already very, very small (<0.01).
Rücker et al. [-@rucker2020beyond] recommend that drapery plots should mainly be used **in addition** to forest plots. Simply replacing the forest plot with a drapery plot may not be a good idea, because the latter does not contain much of the effect size information that others might need to reproduce our results.
$$\tag*{$\blacksquare$}$$
<br></br>
## Questions & Answers
```{block, type='boxquestion'}
**Test your knowledge!**
\vspace{4mm}
1. What are the key components of a forest plot?
\vspace{-2mm}
2. What are the advantages of presenting a forest plot of our meta-analysis?
\vspace{-2mm}
3. What are the limitations of forest plots, and how do drapery plots overcome this limitation?
\vspace{4mm}
**Answers to these questions are listed in [Appendix A](https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/qanda.html#qanda6) at the end of this book.**
```
<br></br>
## Summary
* It is conventional to visualize the results of meta-analyses through forest plots.
* Forest plots contain a graphical representation of each study's effect size and confidence interval, and also show the calculated overall effect. Furthermore, they contain the effect size data that was used for pooling.
* It is also possible to add other kinds of information to a forest plot, for example the quality rating that each study received.
* Forest plots can only display results assuming a fixed significance threshold, usually $p<$ 0.05. To visualize how results change for varying significance thresholds, drapery plots can be generated in addition.
# Subgroup Analyses {#subgroup}
---
<img src="_figs/fassade.jpg" />
<br></br>
<span class="firstcharacter">I</span>
n Chapter \@ref(heterogeneity), we discussed the concept of between-study heterogeneity, and why it is so important in meta-analyses. We also learned methods that allow us to identify which studies contribute to the observed heterogeneity as part of outlier and influence analyses. In these analyses, we approach our meta-analysis from a purely statistical standpoint. We "measure" considerable heterogeneity in our data, and therefore exclude studies with unfitting statistical properties (i.e. outlying and influential studies) to improve the robustness of our model.
\index{Outlier}
\index{Influential Case}
\index{Heterogeneity}
This approach can be seen as a **post hoc** procedure. Outlier and influence analyses are performed **after** seeing the data, and often **because** of the results we found. Also, they do not pay attention to anything else than the data itself. An influence analysis method may tell us that some study does not properly follow the expectations of our model, but not **why** this is the case. It might be because this study used just a slightly different research method or treatment. Yet, we are not able to know this based on the study's influence alone.
Imagine that you perform a meta-analysis investigating the effectiveness of a medical treatment. You find out that, overall, the treatment has no effect. However, there are three studies in which a considerable treatment effect was found. It may be possible to detect these studies in influence analyses, but this will not tell you why they are influential. It could be that all three studies used a treatment which varied slightly from the one used in all the other studies, and that this little detail had a profound impact on the treatment's effectiveness. This would be a groundbreaking discovery. However, it is one which cannot be made using outlier and influence analyses alone.
\index{Subgroup Analysis}
\index{Moderator Analysis}
This makes it clear that we need a different approach, one that allows us to identify **why** a specific heterogeneity pattern can be found in our data. **Subgroup analyses**, also known as **moderator analyses**, are one way to do this. They allow us to test specific hypotheses, describing why some type of study produces lower or higher effects than another.
As we learned in Chapter \@ref(analysis-plan), subgroup tests should be defined **a priori**. Before we begin with our meta-analysis, we should define different study characteristics which may influence the observed effect size, and code each study accordingly. There are countless reasons why effect sizes may differ, but we should restrict ourselves to the ones that matter in the context of our analysis.
We can, for example, examine if some type of medication yields higher effects than another one. Or we might compare studies in which the follow-up period was rather short to studies in which it was long. We can also examine if observed effects vary depending on the cultural region in which a study was conducted. As a meta-analyst, it helps to have some subject-specific expertise, because this allows to find questions that are actually relevant to other scientists or practitioners in the field.
The idea behind subgroup analyses is that meta-analysis is not only about calculating an average effect size, but that it can also be a tool to investigate variation in our evidence. In subgroup analyses, we see heterogeneity not merely as a nuisance, but as interesting variation which may or may not be explainable by a scientific hypothesis. In the best case, this can further our understanding of the world around us, or at least produce practical insights that guide future decision-making.
In this chapter, we will describe the statistical model behind subgroup analyses, and how we can conduct one directly in _R_.
<br></br>
## The Fixed-Effects (Plural) Model {#fixed-effect-plural}
---
\index{Fixed-Effects (Plural) Model}
In subgroup analyses, we hypothesize that studies in our meta-analysis do not stem from one overall population. Instead, we assume that they fall into different **subgroups** and that each subgroup has its own true overall effect. Our aim is to reject the null hypothesis that there is no difference in effect sizes between subgroups.
The calculation of a subgroup analysis consists of two parts: first, we pool the effect in each subgroup. Subsequently, the effects of the subgroups are compared using a statistical test [@borenstein2013meta].
<br></br>
### Pooling the Effect in Subgroups
---
The first part is rather straightforward, as the same criteria as the ones for a meta-analysis without subgroups (see Chapter \@ref(fem-rem)) apply. If we assume that all studies in a subgroup stem from the same population, and have one shared true effect, we can use the fixed-effect model. As we mentioned previously, it is often unrealistic that this assumption holds in practice, even when we partition our studies into smaller groups.
\index{Random-Effects Model}
The alternative, therefore, is to use a random-effects model. This assumes that studies within a subgroup are drawn from a universe of populations, the mean of which we want to estimate. The difference to a normal meta-analysis is that we conduct **several** separate random-effects meta-analyses, one for each subgroup. Logically, this results in a pooled effect $\hat\mu_g$ for each subgroup $g$.
\index{Heterogeneity}
Since each subgroup gets its own separate meta-analysis, estimates of the $\tau^2$ heterogeneity will also differ from subgroup to subgroup. In practice, however, the individual heterogeneity values $\hat\tau^2_g$ are often replaced with a version of $\tau^2$ that was pooled across subgroups.
This means that all subgroups are assumed to share a **common** estimate of the between-study heterogeneity. This is mostly done for practical reasons. When the number of studies in a subgroup is small, e.g. $k_g \leq 5$ [@borenstein2011introduction, chapter 19], it is likely that the estimate of $\tau^2$ will be imprecise. In this case, it is better to calculate a pooled version of $\tau^2$ that is used across all subgroups, than to rely on a very imprecise estimate of the between-study heterogeneity in one subgroup.
<br></br>
### Comparing the Subgroup Effects {#comparing-the-subgroup-effects}
---
In the next step, we assess if there is a **true** difference between the $G$ subgroups. The assumption is that the subgroups are different, meaning that at least one subgroup is part of a different population of studies.
An elegant way to test this is to pretend that the pooled effect of a subgroup is actually nothing more than the **observed effect size** of **one large study** [see @borenstein2011introduction, chapter 19]. If we conduct a subgroup analysis with $G=3$ subgroups, for example, we pretend that we have calculated the observed effect sizes (and standard errors) of three big studies.
Once we look at the subgroups this way, it becomes clear that the question we ask ourselves is quite similar to the one we face when assessing the heterogeneity of a normal meta-analysis. We want to know if differences in effect sizes exist only due to sampling error, or because of **true** differences in the effect sizes.
\index{Cochran's \textit{Q}}
Therefore, we use the value of $Q$ to determine if the subgroup differences are large enough to not be explainable by sampling error alone. Pretending that the subgroup effects are **observed** effect sizes, we calculate the value of $Q$. This observed $Q$ value is compared to its expected value assuming a $\chi^2$ distribution with, in our case, $G-1$ degrees of freedom (Chapter \@ref(cochran-q)).
When the observed value of $Q$ is substantially larger than the expected one, the $p$-value of the $Q$ test will become significant. This indicates that there is a difference in the true effect sizes between subgroups. This $Q$ test is an **omnibus test**. It tests the null hypothesis that all subgroup effect sizes are equal, and is significant when at least two subgroups, or combinations thereof, differ.
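To make this more concrete, here is a minimal sketch with **made-up** numbers, in which the pooled effects of $G=2$ subgroups are treated as if they were the observed effects of two large studies:

```{r, eval=F}
g.subgroup <- c(0.80, 0.40)  # pooled effect in each subgroup (made up)
se.subgroup <- c(0.25, 0.10) # standard error of each pooled effect

# Inverse-variance weights and weighted average across subgroups
w <- 1/se.subgroup^2
g.bar <- sum(w * g.subgroup)/sum(w)

# Q statistic and chi-squared test with G - 1 = 1 degree of freedom
Q <- sum(w * (g.subgroup - g.bar)^2)
pchisq(Q, df = 1, lower.tail = FALSE)
# p is approximately 0.14: no significant subgroup difference here
```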
While we normally assume that studies within the subgroups behave according to the random-effects model, the situation looks different on the pooled subgroup level. Borenstein and Higgins [-@borenstein2013meta] argue that in many fields, the subgroups we choose to analyze cannot be seen as random draws from a "universe" of possible subgroups, but represent **fixed** levels of a characteristic we want to examine. Take employment status as an example. This feature has two fixed subgroups, "employed" and "unemployed". The same applies, for example, to studies in patients with and without a specific co-morbidity.
\index{Fixed-Effects (Plural) Model}
\index{Random-Effects Model}
Borenstein and Higgins call the model for subgroup analyses the **fixed-effects (plural) model**. The word "plural" is added because we have to differentiate it from the standard fixed-effect model. The fixed-effects (plural) model can be seen like a hybrid creature, including features of both the fixed-effect model and the random-effects model. Like in the random-effects model, we assume that there is more than one true effect size, because there are subgroups in our data.
However, we do not see the subgroups as random draws from a whole universe of subgroups. Our subgroup levels are fixed, and **exhaustive**, meaning that no generalization is needed. This makes it clear why we call the process generating our subgroup data a fixed-effects "plural" model: because there are **several** true effect sizes, but the true effect sizes represent subgroup levels that are assumed to be **fixed**.
Borenstein and colleagues [-@borenstein2011introduction, chapter 19] argue that all of this may seem a little confusing to us because the word "fixed" can mean different things in statistics. In conventional meta-analyses, the term "fixed effect" is used synonymously with "common effect". In the context of subgroup analyses, however, we speak of "fixed effects" to underline that they are "not random". They are not simply random manifestations of an over-arching distribution, to which we aim to generalize, but the **real** and **only** categories into which a variable can fall.
Figure \@ref(fig:subgroups) visualizes the fixed-effects (plural) model, assuming that studies within subgroups follow the random-effects model.
```{r subgroups, message = F, out.width = '90%', echo = F, fig.align='center', fig.cap="Visualization of the fixed-effects (plural) model, assuming a random-effects model within subgroups."}
library(OpenImageR)
knitr::include_graphics('images/subgroups_sep.png')
```
```{block, type='boxinfo'}
**A Few Examples of Subgroup Variables With Fixed Levels**
\vspace{4mm}
* **Age group**: children, young adults, adults, elderly persons.
\vspace{2mm}
* **Cultural background**: western, non-western.
\vspace{2mm}
* **Control group**: alternative treatment, minimal treatment, no treatment.
\vspace{2mm}
* **Tool used to measure the outcome**: self-report, expert-rated.
\vspace{2mm}
* **Study quality**: high, low, unclear.
\vspace{2mm}
* **Species**: plants, animals.
\vspace{2mm}
* **Setting**: schools, hospitals, private households.
\vspace{4mm}
Note that the concrete selection and definition of subgroups can and should be adapted based on the aim and scope of your meta-analysis.
```
\index{Mixed-Effects Model}
\index{Meta-Regression}
Because the fixed-effects (plural) model contains both random effects (within subgroups) and fixed effects (since subgroups are assumed to be fixed), it is also known in the literature as a **mixed-effects model**. We already came across this term previously in Chapter \@ref(pooling-props), where we discussed a different type of (generalized) mixed-effects model that can be used to pool, for example, proportions.
The model we use for subgroup analyses is heavily related to other methods that are also often used in meta-analyses. In Chapter \@ref(metareg), we will show that subgroup analyses are just a special case of **meta-regression**, for which we also use a mixed-effects model.
\index{Multilevel Meta-Analysis}
Furthermore, it is also possible that subgroup levels can **not** be assumed to be fixed. Imagine that we want to assess if effect sizes differ depending on the location in which the effect was observed. Some studies assessed the effect in Israel, some in Italy, others in Mexico, and some in mainland China. One can argue that "country of origin" is not a factor with fixed levels: there are many, many countries in the world, and our study simply includes a "random" selection.
In this case, it makes sense to not model the subgroups as fixed, but also let our model estimate the variability between countries as a random effect. This leads to a **multi-level model**, which we cover in Chapter \@ref(multilevel-ma).
<br></br>
## Limitations & Pitfalls of Subgroup Analyses {#limits-subgroup}
---
\index{Power}
Intuitively, one might think that subgroup analysis is an exceptionally good tool to detect effect moderators. The aim of meta-analyses, after all, is to study all available evidence. This means that the total number of individuals analyzed in meta-analyses will usually surpass the one of a primary study by orders of magnitude.
Unfortunately, however, this does not necessarily provide us with more **statistical power** to detect subgroup differences. There are several reasons for this [@hedges2004power]:
* First, remember that in subgroup analyses, the results within subgroups are usually pooled using the random-effects model. If there is substantial between-study heterogeneity within the subgroup, this will decrease the precision (i.e. increase the standard error) of the pooled effect. Yet, when the subgroup effect estimates are very imprecise, this means that their confidence intervals will have a large overlap. Consequentially, this makes it harder to find a significant difference between subgroups--even if this difference does exist.
* In the same vein, statistical power is also often low because the effects we want to detect in subgroup analyses are much lower than in normal meta-analyses. Imagine that we want to examine if effects differed between studies assessing an outcome of interest through **self-reports** versus **expert ratings**. Even if there is a difference, it is very likely to be small. It is often possible to find a significant difference between treatment and control groups. Yet, detecting effect size differences **between studies** is usually much harder, because the differences are smaller, and more statistical power is needed.
* From the points above follows an important caveat: **absence of evidence is not evidence of absence**. If we do **not** find a difference in effect sizes between subgroups, this does not automatically mean that the subgroups produce **equivalent** outcomes. As we argued above, there are various reasons why our subgroup analysis may not have the statistical power needed to ascertain a true difference in effects. If this is the case, it would be a gross misinterpretation to say that the subgroups have the same effect--we simply do not know if differences exist or not. This becomes particularly explosive when we want to assess if one treatment is better than the other. Some stakeholders, including corporations, often have a vested interest in showing the equivalence of a treatment. But subgroup analyses are usually not an adequate way to prove this.
* We can check if statistical power is a problem in our subgroup analysis by performing a **subgroup power analysis** beforehand. In such an analysis, we can determine the minimum effect size difference that we are able to detect. In Chapter \@ref(power-subgroup) in the "Helpful Tools" section, we cover how subgroup power analyses can be performed in _R_; a rough illustration of the problem is also sketched right after this list. But note that power analyses can at best be seen as a helpful diagnostic, not as proof that the power of our analysis is high enough to show that the subgroups are equivalent. Schwarzer and colleagues [@schwarzer2015meta, chapter 4.3] mention, as a general rule of thumb, that subgroup analyses only make sense when your meta-analysis contains at least $K=$ 10 studies.
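As a rough, back-of-the-envelope illustration of the problem (a **made-up** sketch, not the full procedure covered in Chapter \@ref(power-subgroup)), suppose the true difference between two subgroup effects is 0.2, and the two pooled effects are estimated with standard errors of 0.15 and 0.12:

```{r, eval=F}
delta <- 0.20                  # assumed true difference between subgroup effects
se1 <- 0.15                    # standard error of the first pooled subgroup effect
se2 <- 0.12                    # standard error of the second pooled subgroup effect
se.diff <- sqrt(se1^2 + se2^2) # standard error of the difference

# Power of a two-sided Wald-type test with alpha = 0.05
z <- qnorm(0.975)
pnorm(delta/se.diff - z) + pnorm(-delta/se.diff - z)
# only about 18% power in this scenario
```

Even a difference of this size would most likely go undetected here, which underlines why non-significant subgroup tests must not be read as evidence of equivalence.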
Another important limitation of subgroup analyses is that they are purely observational [@borenstein2013meta]. Meta-analyses often only include randomized controlled trials (RCTs), in which participants were randomly allocated to either a treatment or control group. When properly conducted, such RCTs can provide evidence that the treatment **caused** the group difference observed in the study. This is because all relevant variables that may influence the assessed outcomes are equal in the two groups. The only difference is that one group received the treatment, while the other did not.
Subgroup analyses, even when consisting solely of randomized studies, cannot show causality. Imagine that our subgroup analysis finds that one type of treatment is more effective than the other. There are countless reasons why this finding may be spurious; for example, it could be that studies investigating treatment A used other control groups than the ones examining treatment B. This means that both treatments could be equally effective--we just see a difference because the treatment type is **confounded** with methodological factors. This example should underline that one should always appraise the results of subgroup analyses critically.
A last important pitfall involves the way the subgroups are defined. Often, it may be tempting to sort studies into subgroups based on **aggregate information**. Schwarzer and colleagues [@schwarzer2015meta, chapter 4.3] name the mean age of a study as a common example. Say you want to assess if effects differ between elderly individuals (65+ years of age) and general adult populations. Therefore, you sort studies into these two categories, depending on whether the reported mean age is above or below 65.
If we find that effects are higher in the subgroup with higher mean age, we may intuitively think that this shows that the effects are higher in older individuals. But this reasoning is deeply flawed. When the **mean** age of a primary study is above 65, it is still possible that it included a substantial proportion of individuals who were **younger** than that. **Vice versa**, it is also perfectly possible that a study included a large share of individuals **older** than 65, even when the **mean** age is **lower**.
This means that the higher effects found in the "elderly" subgroup could **solely** be driven by individuals who are actually younger than 65. Conversely, it is possible that in the "younger" subgroup, the lower effects were caused by the individuals in the studies who were older than 65.
This leads to a paradoxical situation: on the aggregate level, we find that studies with a higher mean age have higher effects. But on the individual level, the opposite is true: a person will experience **lower** effects with rising age.
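The following **made-up** numbers illustrate how such a pattern can emerge:

```{r, eval=F}
# Two studies, each with a younger (<65) and an older (65+) group.
# Within BOTH studies, the older group shows the LOWER effect.
dat <- data.frame(study = c("A", "A", "B", "B"),
                  group = c("<65", "65+", "<65", "65+"),
                  effect = c(0.5, 0.2, 0.9, 0.6),
                  prop = c(0.9, 0.1, 0.3, 0.7)) # share of participants

# Study-level (aggregate) effects: weighted mean of the two age groups
tapply(dat$effect * dat$prop, dat$study, sum)
# Study A: 0.47, Study B: 0.69
```

Study B has the higher mean age (70% of its participants are 65 or older), and it also shows the higher aggregate effect--even though, within each study, older participants benefit less. A subgroup analysis based on mean age would therefore point in exactly the wrong direction.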
\index{Ecological Bias}
The scenario we just described is caused by so-called **ecological bias** [@thompson2002should; @piantadosi1988ecological]. It arises whenever we want to use relationships on an aggregate (**macro**) level to predict associations on the individual (**micro**) level.
The best way to avoid ecological bias is to **never, ever** use aggregate information in subgroup analyses and meta-regression. The situation is different, however, if we know that **all** individuals in a study fall into one category. If, for example, we have a few studies in which **only** adolescents under the age of 18 were included, and others in which **only** adults (18+ years) could participate, the risk of ecological bias is largely eliminated. However, it is still possible that effect differences were caused by confounding variables and not by the age of the participants.
```{block, type='boxinfo'}
**Subgroup Analysis: Summary of the Dos & Don'ts**
\vspace{4mm}
1. Subgroup analyses depend on the statistical power, so it usually makes no sense to conduct one when the number of studies is small (i.e. $K$ < 10).
\vspace{1mm}
2. If you do not find a difference in effect sizes between subgroups, this does **not** automatically mean that the subgroups produce **equivalent** results.
\vspace{1mm}
3. Subgroup analyses are purely **observational**, so we should always keep in mind that effect differences may also be caused by confounding variables.
\vspace{1mm}
4. It is a bad idea to use aggregate study information in subgroup analyses, because this may introduce ecological bias.
```
<br></br>
## Subgroup Analysis in _R_ {#subgroup-R}
---
\index{meta Package}
Time to implement what we learned in _R_. Conducting a subgroup analysis using the **{meta}** package is relatively straightforward. In every meta-analysis function in **{meta}**, the `subgroup` argument can be specified^[In older versions of **{meta}** (before version 5.0-0), this argument is called `byvar`.]. This tells the function which effect size falls into which subgroup and runs a subgroup analysis. The `subgroup` argument accepts `character`, `factor`, `logical` or `numeric` variables. The only thing we have to take care of is that studies in the same subgroup have absolutely identical labels.
In this example, we use our `m.gen` meta-analysis object again. The `ThirdWave` data set, which we used to calculate the meta-analysis, contains a few columns with subgroup information. Here, we want to examine if there are effect size differences between studies with a high versus low risk of bias. The risk of bias information is stored in the `RiskOfBias` column.
Let us have a look at this column first. In our code, we use the `head` function so that only the first few rows of the data set are shown.
```{r}
# Show first entries of study name and 'RiskOfBias' column
head(ThirdWave[,c("Author", "RiskOfBias")])
```
We see that every study in our data set has a label specifying its risk of bias assessment. When we calculated the meta-analysis using `metagen`, this information was saved internally in the `m.gen` object. To conduct a subgroup analysis, we can therefore use the `update.meta` function, provide it with the `m.gen` object, and use the `subgroup` argument to specify which column in our data set contains the subgroup labels.
Previously, we also covered that subgroup analyses can be conducted with or without a common estimate of $\tau^2$ across subgroups. This can be controlled in **{meta}** by setting `tau.common` to `TRUE` or `FALSE`. For now, let us use separate estimates of the between-study heterogeneity variance in each subgroup.
In our example, we want to apply the fixed-effects (plural) model and assume that studies within subgroups are pooled using the random-effects model. Given that `m.gen` contains results for the random-effects model (because we set `comb.fixed` to `FALSE` and `comb.random` to `TRUE`), there is nothing we have to change. Because the original meta-analysis was performed using the random-effects model, `update.meta` automatically assumes that studies within subgroups should also be pooled using the random-effects model.
Therefore, the resulting code looks like this:
```{r, eval=FALSE}
update.meta(m.gen,
subgroup = RiskOfBias,
tau.common = FALSE)
```
```
## Review: Third Wave Psychotherapies
##
## Number of studies combined: k = 18
##
## SMD 95%-CI t p-value
## Random effects model (HK) 0.5771 [ 0.3782; 0.7760] 6.12 < 0.0001
## Prediction interval [-0.0572; 1.2115]
##
## Quantifying heterogeneity:
## tau^2 = 0.0820 [0.0295; 0.3533]; tau = 0.2863 [0.1717; 0.5944]
## I^2 = 62.6% [37.9%; 77.5%]; H = 1.64 [1.27; 2.11]
##
## Test of heterogeneity:
## Q d.f. p-value
## 45.50 17 0.0002
##
## Results for subgroups (random effects model (HK)):
## k SMD 95%-CI tau^2 tau Q I^2
## RiskOfBias = high 7 0.8126 [0.2835; 1.3417] 0.2423 0.4922 25.89 76.8%
## RiskOfBias = low 11 0.4300 [0.2770; 0.5830] 0.0099 0.0997 13.42 25.5%
##
## Test for subgroup differences (random effects model (HK)):
## Q d.f. p-value
## Between groups 2.84 1 0.0917
##
## Details on meta-analytical method:
## - Inverse variance method
## - Restricted maximum-likelihood estimator for tau^2
## - Q-Profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp (HK) adjustment for random effects model (df = 17)
## - Prediction interval based on t-distribution (df = 16)
```
In the output, we see a new section called `Results for subgroups`. This part of the output shows the pooled effect size separately for each subgroup. We see that there are $k=$ 7 studies with a high risk of bias, and 11 with a low risk of bias. The estimated between-study heterogeneity differs considerably, with $I^2=$ 77% in high risk of bias studies, but only 26% in studies with a low risk.
The effect sizes of the subgroups also differ. With $g=$ 0.43, the effect estimate in low risk of bias studies is smaller than in studies with a high risk of bias. This is a common finding because biased studies are more likely to overestimate the effects of a treatment.
But is the difference statistically significant? We can check this by looking at the results of the `Test for subgroup differences`. This shows us the $Q$-test, which, in our example with 2 subgroups, is based on one degree of freedom. The $p$-value of the test is 0.09, which is larger than the conventional significance threshold, but still indicates a difference on a trend level.
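It is also possible to extract this test directly from the analysis object, for example when preparing a report. The sketch below assumes that, as in current versions of **{meta}**, the between-group $Q$ statistic and its $p$-value are stored in the `Q.b.random` and `pval.Q.b.random` elements:

```{r, eval=F}
# Save the subgroup analysis in an object
m.gen.sg <- update.meta(m.gen, subgroup = RiskOfBias, tau.common = FALSE)

# Between-group Q statistic and its p-value (random-effects model)
m.gen.sg$Q.b.random
m.gen.sg$pval.Q.b.random
```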
We can also check the results if we were to assume a **common** $\tau^2$ estimate in both subgroups. We only have to set `tau.common` to `TRUE`.
```{r, eval=F}
update.meta(m.gen, subgroup = RiskOfBias, tau.common = TRUE)
```
```
## [...]
## k SMD 95%-CI tau^2 tau Q I^2
## RiskOfBias = high 7 0.7691 [0.2533; 1.2848] 0.0691 0.2630 25.89 76.8%
## RiskOfBias = low 11 0.4698 [0.3015; 0.6382] 0.0691 0.2630 13.42 25.5%
##
## Test for subgroup differences (random effects model (HK)):
## Q d.f. p-value
## Between groups 1.79 1 0.1814
## Within groups 39.31 16 0.0010
##
## Details on meta-analytical method:
## - Inverse variance method
## - Restricted maximum-likelihood estimator for tau^2
## (assuming common tau^2 in subgroups)
## [...]
```
In the output, we see that the estimated between-study heterogeneity variance is $\tau^2=$ 0.069, and identical in both subgroups. We are presented with two $Q$-tests: one **between** groups (the actual subgroup test), and another for the **within-subgroup** heterogeneity.
Like in a normal meta-analysis, the latter simply indicates that there is excess variability in the subgroups ($p=$ 0.001). The test of subgroup differences again indicates that there is not a significant difference between studies with a low versus high risk of bias ($p=$ 0.181).
We now explored the results assuming either an independent or common estimate of $\tau^2$. Since we are not aware of good reasons to assume that the heterogeneity in both subgroups is equal, and given that we have a minimum $k=$ 7 studies in each subgroup, separate estimates of $\tau^2$ may be appropriate. However, we saw that the interpretation of our results is similar for both approaches anyway, at least in our example.
```{block2, type='boxreport'}
**Reporting the Results of a Subgroup Analysis**
\vspace{2mm}
The results of subgroup analyses are usually reported in a table displaying the estimated effect and heterogeneity in each subgroup, as well as the $p$-value of the test for subgroup differences.
```
```{r, echo=F}
library(kableExtra)
m.gen.sg = update.meta(m.gen,
subgroup = RiskOfBias,
tau.common = FALSE)
dat = data.frame(g = round(m.gen.sg$TE.random.w, 2),
g.ci = paste0(round(m.gen.sg$lower.random.w,2),"-",
format(round(m.gen.sg$upper.random.w,2), nsmall = 2)),
p = c("0.009", format.pval(m.gen.sg$pval.random.w[2], eps = 0.001)),
i2 = round(m.gen.sg$I2.w, 2),
ci.i2 = paste0(format(round(m.gen.sg$lower.I2.w, 2), nsmall=2), "-", round(m.gen.sg$upper.I2.w, 2)),
p.sg = c(" ", " "))
dat$p = stringr::str_replace_all(dat$p, "\\<", "$<$")
dat = rbind(c("", "", "", "", "", 0.092), dat)
rownames(dat) = c("Risk of Bias", "- High", "- Low")
colnames(dat) = c("$g$", "95\\%CI", "$p$", "$I^2$", "95\\%CI", "$p$<sub>subgroup</sub>")
kable(dat, booktabs = T, digits = 2, escape = FALSE) %>%
kable_styling(latex_options = c("scale_down"),
bootstrap_options = c("condensed", "striped"))
# kable_styling(font_size = 7)
```
```{block2, type='boxempty'}
In the table above, the two $p$-values in the third column show if the subgroup-specific effects are significant. We can see that this is the case for both the high and low risk of bias studies. At the same time, the value under $p_{\textsf{subgroup}}$ shows that the effect _difference_ between high and low risk of bias studies is not significant.
To extract the subgroup-specific $p$-values, it is necessary to save the results of `update.meta` to an object, and then extract the `pval.random.w` element from that object using the `$` operator. A short sketch of this step is shown below the box.
Further rows can be added to the table if more than one subgroup analysis was conducted.
```
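For reference, here is a short sketch of the extraction step described in the box above, reusing the `m.gen.sg` object in which we saved the subgroup analysis (with `tau.common = FALSE`):

```{r, eval=F}
# Subgroup-specific pooled effects (random-effects model) ...
m.gen.sg$TE.random.w

# ... their confidence interval bounds ...
m.gen.sg$lower.random.w
m.gen.sg$upper.random.w

# ... and the subgroup-specific p-values
m.gen.sg$pval.random.w
```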
$$\tag*{$\blacksquare$}$$
<br></br>
## Questions & Answers
```{block, type='boxquestion'}
**Test your knowledge!**
\vspace{4mm}
1. In the best case, what can a subgroup analysis tell us that influence and outlier analyses cannot?
\vspace{-2mm}
2. Why is the model behind subgroup analyses called the fixed-effects (plural) model?
\vspace{-2mm}
3. As part of your meta-analysis, you want to examine if the effect of an educational training program differs depending on the school district in which it was delivered. Is a subgroup analysis using the fixed-effects (plural) model appropriate to answer this question?
\vspace{-2mm}
4. A friend of yours conducted a meta-analysis containing a total of nine studies. Five of these studies fall into one subgroup, four into the other. She asks you if it makes sense to perform a subgroup analysis. What would you recommend?
\vspace{-2mm}
5. You found a meta-analysis in which the authors claim that the analyzed treatment is more effective in women than men. This finding is based on a subgroup analysis in which studies were divided into groups based on the **share** of females included in the study population. Is this finding credible, and why (not)?
\vspace{4mm}
**Answers to these questions are listed in [Appendix A](https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/qanda.html#qanda7) at the end of this book.**
```
<br></br>
## Summary
* Although there are various ways to assess the heterogeneity of a meta-analysis, these approaches do not tell us **why** we find excess variability in our data. Subgroup analysis allows us to test hypotheses on why some studies have higher or lower true effect sizes than others.
* For subgroup analyses, we usually assume a **fixed-effects (plural) model**. Studies within subgroups are pooled, in most cases, using the random-effects model. Subsequently, a $Q$-test based on the overall subgroup results is used to determine if the groups differ significantly.
* The subgroup analysis model is called a "fixed-effects" model because the different categories themselves are assumed to be fixed. The subgroup levels are not seen as random draws from a universe of possible categories. They represent the only values that the subgroup variable can take.
* When calculating a subgroup analysis, we have to decide whether separate or common estimates of the between-study heterogeneity should be used to pool the results within subgroups.
* Subgroup analyses are not a panacea. They often lack the statistical power needed to detect subgroup differences. Therefore, a non-significant test for subgroup differences does **not** automatically mean that the subgroups produce equivalent results.
# (PART) Helpful Tools {-}
# Power Analysis {#power}
---
<img src="_figs/power_analysis.jpg" />
<br></br>
\index{Power Analysis}
<span class="firstcharacter">O</span>
ne of the reasons why meta-analysis can be so helpful is that it allows us to combine several **imprecise** findings into a more **precise** one. In most cases, meta-analyses produce estimates with narrower confidence intervals than any of the included studies. This is particularly useful when the true effect is small. While primary studies may not be able to ascertain the significance of a small effect, meta-analytic estimates can often provide the statistical power needed to verify that such a small effect exists.
\index{Potential Scale Reduction Factor}
\index{Cochrane}
Lack of statistical power, however, may still play an important role--even in meta-analysis. The number of included studies in many meta-analyses is small, often below $K=$ 10. The median number of studies in Cochrane systematic reviews, for example, is six [@borenstein2011introduction]. This becomes even more problematic if we factor in that meta-analyses often include subgroup analyses and meta-regression, for which even more power is required. Furthermore, many meta-analyses show high between-study heterogeneity. This also reduces the overall precision and thus the statistical power.
We already touched on the concept of statistical power in Chapter \@ref(p-curve-es), where we learned about the p-curve method. The idea behind statistical power is derived from classical hypothesis testing. It is directly related to the two types of **errors** that can occur in a hypothesis test. The first error is to accept the alternative hypothesis (e.g. $\mu_1 \neq \mu_2$) while the null hypothesis ($\mu_1 = \mu_2$) is true. This leads to a **false positive**, also known as a **Type I** or $\alpha$ error. Conversely, it is also possible that we accept the null hypothesis, while the alternative hypothesis is true. This generates a **false negative**, known as a **Type II** or $\beta$ error.
\index{Power}
The power of a test directly depends on $\beta$. It is defined as Power = $1 - \beta$. Suppose that our null hypothesis states that there is no difference between the means of two groups, while the alternative hypothesis postulates that a difference (i.e. an "effect") exists. The statistical power can be defined as the probability that the test will detect an effect (i.e. a mean difference), **if** it exists:
\begin{equation}
\text{Power} = P(\text{reject H}_0~|~\mu_1 \neq \mu_2) = 1 - \beta
(\#eq:pow1)
\end{equation}
It is common practice to assume that a type I error is more grave than a type II error. Thus, the $\alpha$ level is conventionally set to 0.05 and the $\beta$ level to 0.2. This leads to a threshold of $1-\beta$ = 1 - 0.2 = 80%, which is typically used to determine if the statistical power of a test is adequate or not. When researchers plan a new study, they usually select a sample size that guarantees a power of 80%. It is easier to obtain statistically significant results when the true effect is large. Therefore, when the power is fixed at 80%, the required sample size only depends on the size of the true effect. The smaller the assumed effect, the larger the sample size needed to ascertain 80% power.
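To get an intuition for these numbers, we can look at a single primary study first. The following aside uses the `power.t.test` function from base _R_ to show how many participants **per group** a two-arm trial would need in order to detect a small effect of SMD = 0.2 with 80% power (the input values are illustrative assumptions):

```{r, eval=F}
# Required sample size per group in a single two-sample trial,
# assuming a true effect of SMD = 0.2 (with sd = 1) and alpha = 0.05.
power.t.test(delta = 0.2, sd = 1, sig.level = 0.05, power = 0.8)

# This returns an n of roughly 394 participants per group.
```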
Researchers who conduct a primary study can plan the size of their sample **a priori**, based on the effect size they expect to find. The situation is different in meta-analysis, where we can only work with the published material. However, we have some control over the number and type of studies we include in our meta-analysis (e.g. by defining more lenient or strict inclusion criteria). This way, we can also adjust the overall power. There are several factors that can influence the statistical power in meta-analyses:
* The total **number** of included or eligible studies, and their **sample size**. How many studies do we expect, and are they rather small or large?
* The effect size we want to find. This is particularly important, as we have to make assumptions about how big an effect size has to be to still be meaningful. For example, one study calculated that for depression interventions, even effects as small as SMD = 0.24 may still be meaningful to patients [@cuijpers2014threshold]. If we want to study negative effects of an intervention (e.g. death or symptom deterioration), even very small effect sizes are extremely important and should be detected.
* The expected between-study heterogeneity. Substantial heterogeneity affects the precision of our meta-analytic estimates, and thus our potential to find significant effects.
Besides that, it is also important to think about other analyses, such as subgroup analyses, that we want to conduct. How many studies are there for each subgroup? What effects do we want to find in each group?
\index{Power Approach Paradox}
```{block, type='boximportant'}
**Post-Hoc Power Tests: "The Abuse of Power"**
\vspace{2mm}
Please note that power analyses should always be conducted **a priori**, meaning **before** you perform the meta-analysis.
\vspace{2mm}
Power analyses conducted **after** an analysis ("post-hoc") are based on a logic that is deeply flawed [@hoenig2001abuse]. First, post-hoc power analyses are **uninformative**--they tell us nothing that we do not already know. When we find that an effect is not significant based on our collected sample, the calculated post-hoc power will be, by definition, insufficient (i.e. 50% or lower). When we calculate the post-hoc power of a test, we simply "play around" with a power function that is directly linked to the $p$-value of the result.
There is nothing in the post-hoc power estimate that the $p$-value would not already tell us. Namely that, based on the effect and sample size of our test, the power is insufficient to ascertain statistical significance.
\vspace{2mm}
When we interpret the post-hoc power, this can also lead to the **power approach paradox** (PAP). This paradox arises because an analysis yielding no significant effect is thought to show **more** evidence that the null hypothesis is true when the p-value is **smaller**, since then, the power to detect a true effect would be **higher**.
```
<br></br>
## Fixed-Effect Model
---
\index{Fixed-Effect Model}
To determine the power of a meta-analysis under the fixed-effect model, we have to specify a distribution which represents the scenario in which our alternative hypothesis is correct. To do this, however, it is not sufficient to simply say that $\theta \neq 0$ (i.e. that **some** effect exists). We have to assume a **specific** true effect that we want to be able to detect with sufficient (80%) power. For example, SMD = 0.29.
We covered previously (see Chapter \@ref(metareg-continuous)) that dividing an effect size by its standard error creates a $z$ score. These $z$ scores follow a standard normal distribution, where a value of $|z| \geq$ 1.96 means that the effect is significantly different from zero ($p<$ 0.05). This is exactly what we want to achieve in our meta-analysis: no matter how large the exact effect size and standard error of our result, the value of $|z|$ should be at least 1.96, and thus statistically significant:
\begin{equation}
z = \frac{\theta}{\sigma_{\theta}}~~~\text{where}~~~|z| \geq 1.96.
(\#eq:pow2)
\end{equation}
The value of $\sigma_{\theta}$, the standard error of the pooled effect size, can be calculated using this formula:
\begin{equation}
\sigma_{\theta}=\sqrt{\frac{\left(\frac{n_1+n_2}{n_1n_2}\right)+\left(\frac{\theta^2}{2(n_1+n_2)}\right)}{K}}
(\#eq:pow3)
\end{equation}
Where $n_1$ and $n_2$ stand for the sample sizes in group 1 and group 2 of a study, $\theta$ is the assumed effect size (expressed as a standardized mean difference), and $K$ is the total number of studies in our meta-analysis. Importantly, as a simplification, this formula assumes that the sample sizes in both groups are identical across all included studies.
The formula is very similar to the one used to calculate the standard error of a standardized mean difference, with one exception: we now also divide by $K$. This means that the variance of our pooled effect is reduced by a factor of $K$, the total number of studies in our meta-analysis. Put differently, when assuming a fixed-effect model, pooling the studies leads to a $K$-fold increase in the precision of our overall effect^[Note that this statement is, of course, only correct because we are using the simplified formula in equation \@ref(eq:pow3).].
After we defined $\theta$ and calculated $\sigma_{\theta}$, we end up with a value of $z$. This $z$ score can be used to obtain the power of our meta-analysis, given a number of studies $K$ with group sizes $n_1$ and $n_2$:
\begin{align}
\text{Power} &= 1-\beta \notag \\
&= 1-\Phi(c_{\alpha}-z)+\Phi(-c_{\alpha}-z) \notag \\
&= 1-\Phi(1.96-z)+\Phi(-1.96-z). (\#eq:pow4)
\end{align}
\index{Cumulative Distribution Function (CDF)}
Where $c_{\alpha}$ is the critical value of the standard normal distribution, given a specified $\alpha$ level. The $\Phi$ symbol represents the **cumulative distribution function** (CDF) of a standard normal distribution, $\Phi(z)$. In _R_, the CDF of the standard normal distribution is implemented in the `pnorm` function.
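As a quick plausibility check of equation \@ref(eq:pow4): if the true effect is zero, $z$ becomes 0, and the "power" should simply equal the $\alpha$ level of 5%.

```{r, eval=F}
# Power formula with z = 0 (i.e. no true effect):
1 - pnorm(1.96 - 0) + pnorm(-1.96 - 0)

# This returns approximately 0.05, which is our alpha level.
```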
We can now use these formulas to calculate the power of a fixed-effect meta-analysis. Imagine that we expect $K=$ 10 studies, each with approximately 25 participants in both groups. We want to be able to detect an effect of SMD = 0.2. What power does such a meta-analysis have?
```{r}
# Define assumptions
theta <- 0.2
K <- 10
n1 <- 25
n2 <- 25
# Calculate pooled effect standard error
sigma <- sqrt(((n1+n2)/(n1*n2)+(theta^2/(2*(n1+n2))))/K)
# Calculate z
z <- theta/sigma
# Calculate the power
1 - pnorm(1.96-z) + pnorm(-1.96-z)
```
We see that, with 60.66%, such a meta-analysis would be **underpowered**, even though 10 studies were included. A more convenient way to calculate the power of a (fixed-effect) meta-analysis is to use the `power.analysis` function.
\index{dmetar Package}
```{block, type='boxdmetar'}
**The "power.analysis" Function**
\vspace{4mm}
The `power.analysis` function is included in the **{dmetar}** package. Once **{dmetar}** is installed and loaded on your computer, the function is ready to be used. If you did **not** install **{dmetar}**, follow these instructions:
\vspace{2mm}
1. Access the source code of the function [online](https://raw.githubusercontent.com/MathiasHarrer/dmetar/master/R/power.analysis.R).
2. Let _R_ "learn" the function by copying and pasting the source code in its entirety into the console (bottom left pane of R Studio), and then hit "Enter".
3. Make sure that the **{ggplot2}** package is installed and loaded.
```
The `power.analysis` function contains these arguments:
* **`d`**. The hypothesized, or plausible overall effect size, expressed as the standardized mean difference (SMD). Effect sizes must be positive numeric values.
* **`OR`**. The assumed effect of a treatment or intervention compared to control, expressed as an odds ratio (OR). If both `d` and `OR` are specified, results will only be computed for the value of `d`.
* **`k`**. The expected number of studies included in the meta-analysis.
* **`n1`**, **`n2`**. The expected average sample size in group 1 and group 2 of the included studies.
* **`p`**. The alpha level to be used. Default is $\alpha$=0.05.
* **`heterogeneity`**. The level of between-study heterogeneity. Can either be `"fixed"` for no heterogeneity (fixed-effect model), `"low"` for low heterogeneity, `"moderate"` for moderate-sized heterogeneity, or `"high"` for high levels of heterogeneity. Default is `"fixed"`.
Let us try out this function, using the same input as in the example from before.
```{r, eval=F}
library(dmetar)
power.analysis(d = 0.2,
k = 10,
n1 = 25,
n2 = 25,
p = 0.05)
```
```{r, echo=F, fig.width=4, fig.height=3, fig.align='center', out.width="55%"}
#source("data/power.analysis.bw.R")
dmetar::power.analysis(d = 0.2,
k = 10,
n1 = 25,
n2 = 25,
p = 0.05)
```
<br></br>
## Random-Effects Model
---
\index{Random-Effects Model}
For power analyses assuming a random-effects model, we have to take the between-study heterogeneity variance $\tau^2$ into account. Therefore, we need to calculate an adapted version of the standard error, $\sigma^*_{\theta}$:
\begin{equation}
\sigma^*_{\theta}=\sqrt{\frac{\left(\frac{n_1+n_2}{n_1n_2}\right)+\left(\frac{\theta^2}{2(n_1+n_2)}\right)+\tau^2}{K}}
(\#eq:pow5)
\end{equation}
The problem is that the value of $\tau^2$ is usually not known before seeing the data. Hedges and Pigott [-@hedges2001power], however, provide guidelines that may be used to model either low, moderate or large between-study heterogeneity:
\vspace{2mm}
**Low heterogeneity:**
\begin{equation}
\sigma^*_{\theta} = \sqrt{1.33\times\dfrac{\sigma^2_{\theta}}{K}}
(\#eq:pow6)
\end{equation}
\vspace{2mm}
**Moderate heterogeneity:**
\begin{equation}
\sigma^*_{\theta} = \sqrt{1.67\times\dfrac{\sigma^2_{\theta}}{K}}
(\#eq:pow7)
\end{equation}
\vspace{2mm}
**Large heterogeneity:**
\begin{equation}
\sigma^*_{\theta} = \sqrt{2\times\dfrac{\sigma^2_{\theta}}{K}}
(\#eq:pow8)
\end{equation}
\vspace{2mm}
The `power.analysis` function can also be used for random-effects meta-analyses. The amount of assumed between-study heterogeneity can be controlled using the `heterogeneity` argument. Possible values are `"low"`, `"moderate"` and `"high"`. Using the same values as in the previous example, let us now calculate the expected power when the between-study heterogeneity is moderate.
\vspace{2mm}
```{r, eval=F}
power.analysis(d = 0.2,
k = 10,
n1 = 25,
n2 = 25,
p = 0.05,
heterogeneity = "moderate")
```
```
## Random-effects model used (moderate heterogeneity assumed).
## Power: 40.76%
```
We see that the estimated power is 40.76%. This is lower than the normative 80% threshold. It is also lower than the 60.66% we obtain when assuming a fixed-effect model. This is because between-study heterogeneity decreases the precision of our pooled effect estimate, resulting in a drop in statistical power.
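As a sanity check, this value can be reproduced by hand using the moderate-heterogeneity adjustment in equation \@ref(eq:pow7), where we plug in the variance of a single study (the numerator in equation \@ref(eq:pow3)) for $\sigma^2_{\theta}$:

```{r, eval=F}
theta <- 0.2; K <- 10; n1 <- 25; n2 <- 25

# Variance of a single study
v <- (n1+n2)/(n1*n2) + theta^2/(2*(n1+n2))

# Moderate heterogeneity: inflate by factor 1.67, then divide by K
sigma.star <- sqrt(1.67*v/K)
z <- theta/sigma.star
1 - pnorm(1.96-z) + pnorm(-1.96-z)

# This returns approximately 0.408, in line with the output above.
```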
Figure \@ref(fig:power) visualizes the effect of the true effect size, number of studies, and amount of between-study heterogeneity on the power of a meta-analysis.^[If you want to quickly check the power of a meta-analysis under varying assumptions, you can also use a **power calculator tool** developed for this purpose. The tool is based on the same _R_ function that we cover in this chapter. It can be found online: https://mathiasharrer.shinyapps.io/power_calculator_meta_analysis/.]
\vspace{2mm}
```{r power,fig.width=5,fig.height=4, fig.align='center',echo=FALSE,fig.cap="Power of random-effects meta-analyses ($n$=50 in each study). Darker colors indicate higher between-study heterogeneity.", message=FALSE, warning=F, out.width="55%"}
library(ggplot2)
library(reshape)
source("data/power.analysis.random.R")
k <- seq(0, 50, length=1000)
pow.vals01<-lapply(k,function(k) power.analysis.random(d=0.10,k=k,n1=25,n2=25,p=0.05,heterogeneity = "moderate"))
pow.vals02<-lapply(k,function(k) power.analysis.random(d=0.20,k=k,n1=25,n2=25,p=0.05,heterogeneity = "moderate"))
pow.vals03<-lapply(k,function(k) power.analysis.random(d=0.30,k=k,n1=25,n2=25,p=0.05,heterogeneity = "moderate"))
pow.vals01<-as.numeric(pow.vals01)
pow.vals02<-as.numeric(pow.vals02)
pow.vals03<-as.numeric(pow.vals03)
data1<-data.frame(k,pow.vals01,pow.vals02,pow.vals03)
k <- seq(0, 50, length=1000)
pow.vals01<-lapply(k,function(k) power.analysis.random(d=0.10,k=k,n1=25,n2=25,p=0.05,heterogeneity = "low"))
pow.vals02<-lapply(k,function(k) power.analysis.random(d=0.20,k=k,n1=25,n2=25,p=0.05,heterogeneity = "low"))
pow.vals03<-lapply(k,function(k) power.analysis.random(d=0.30,k=k,n1=25,n2=25,p=0.05,heterogeneity = "low"))
pow.vals01<-as.numeric(pow.vals01)
pow.vals02<-as.numeric(pow.vals02)
pow.vals03<-as.numeric(pow.vals03)
data2<-data.frame(k,pow.vals01,pow.vals02,pow.vals03)
k <- seq(0, 50, length=1000)
pow.vals01<-lapply(k,function(k) power.analysis.random(d=0.10,k=k,n1=25,n2=25,p=0.05,heterogeneity = "high"))
pow.vals02<-lapply(k,function(k) power.analysis.random(d=0.20,k=k,n1=25,n2=25,p=0.05,heterogeneity = "high"))
pow.vals03<-lapply(k,function(k) power.analysis.random(d=0.30,k=k,n1=25,n2=25,p=0.05,heterogeneity = "high"))
pow.vals01<-as.numeric(pow.vals01)
pow.vals02<-as.numeric(pow.vals02)
pow.vals03<-as.numeric(pow.vals03)
data3<-data.frame(k,pow.vals01,pow.vals02,pow.vals03)
ggplot()+
geom_line(data = data1, aes(x = k, y = pow.vals01), color = "dodgerblue3",size=0.9) +
geom_line(data = data1, aes(x = k, y = pow.vals02), color = "firebrick3",size=0.9) +
geom_line(data = data1, aes(x = k, y = pow.vals03), color = "springgreen3",size=0.9) +
geom_line(data = data2, aes(x = k, y = pow.vals01), color = "dodgerblue1",size=0.9) +
geom_line(data = data2, aes(x = k, y = pow.vals02), color = "firebrick1",size=0.9) +
geom_line(data = data2, aes(x = k, y = pow.vals03), color = "springgreen1",size=0.9) +
geom_line(data = data3, aes(x = k, y = pow.vals01), color = "dodgerblue4",size=0.9) +
geom_line(data = data3, aes(x = k, y = pow.vals02), color = "firebrick4",size=0.9) +
geom_line(data = data3, aes(x = k, y = pow.vals03), color = "springgreen4",size=0.9) +
xlab('Number of Studies') +
ylab('Power')+
scale_x_continuous(expand = c(0, 0), limits = c(1, 50), breaks = c(1, 10, 20, 30, 40, 50)) +
scale_y_continuous(labels = scales::percent)+
theme(
axis.line= element_line(color = "black",size = 0.5,linetype = "solid"),
legend.position = "bottom",
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "#FFFEFA", size = 0),
plot.background = element_rect(fill = "#FFFEFA", size = 0),
legend.background = element_rect(linetype="solid",
colour ="black"),
legend.title = element_blank(),
legend.key.size = unit(0.75,"cm"),
legend.text=element_text(size=14))+
annotate("text", x = 6, y = 0.9, label = expression(theta==0.3),size=5, parse = T)+
annotate("text", x = 25, y = 0.6, label = expression(theta==0.2),size=5)+
annotate("text", x = 20, y = 0.13, label = expression(theta==0.1),size=5)+
geom_hline(yintercept=0.8,linetype="dotted")
```
<br></br>
## Subgroup Analyses {#power-subgroup}
---
\index{Subgroup Analysis}
When planning subgroup analyses, it can be relevant to know how large the difference between two groups must be so that we can detect it, given the number of studies at our disposal. This is where a power analysis for subgroup differences can be applied. A subgroup power analysis can be conducted in _R_ using the `power.analysis.subgroup` function, which implements an approach described by Hedges and Pigott [-@hedges2004power].
```{block, type='boxdmetar'}
**The "power.analysis.subgroup" Function**
\vspace{2mm}
The `power.analysis.subgroup` function is included in the **{dmetar}** package. Once **{dmetar}** is installed and loaded on your computer, the function is ready to be used. If you did **not** install **{dmetar}**, follow these instructions:
1. Access the source code of the function [online](https://raw.githubusercontent.com/MathiasHarrer/dmetar/master/R/power.analysis.subgroup.R).
2. Let _R_ "learn" the function by copying and pasting the source code in its entirety into the console (bottom left pane of R Studio), and then hit "Enter".
3. Make sure that the **{ggplot2}** package is installed and loaded.
```
Let us assume that we expect the first group to show an effect of SMD = 0.3 with a standard error of 0.13, while the second group has an effect of SMD = 0.66, and a standard error of 0.14. We can use these assumptions as input to our call to the function:
```{r, fig.width=4, fig.height=3, fig.align='center', out.width="55%"}
power.analysis.subgroup(TE1 = 0.30, TE2 = 0.66,
seTE1 = 0.13, seTE2 = 0.14)
```
In the output, we can see that the power of our imaginary subgroup test (47%) would not be sufficient. The output also tells us that, all else being equal, the effect size difference needs to be at least 0.54 in order to reach sufficient power.
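Where could plausible values for `seTE1` and `seTE2` come from? One option, sketched below purely for illustration, is to derive rough guesses from the formulas covered earlier in this chapter, based on the number of studies and sample sizes we expect within a subgroup. The assumed numbers (seven studies, 25 participants per arm, low heterogeneity) are hypothetical:

```{r, eval=F}
# Rough guess of the pooled standard error within one subgroup,
# assuming k = 7 studies, n1 = n2 = 25, SMD = 0.3,
# and low between-study heterogeneity (inflation factor 1.33).
theta <- 0.3; k <- 7; n1 <- 25; n2 <- 25
v <- (n1+n2)/(n1*n2) + theta^2/(2*(n1+n2))
sqrt(1.33*v/k)

# This returns a value of roughly 0.12.
```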
$$\tag*{$\blacksquare$}$$
# Preface {-}
---
<br></br>
> "The problems are solved, not by giving new information,
> but by arranging what we have known since long."
>
> -- **Ludwig Wittgenstein**, [**Philosophical Investigations**](https://plato.stanford.edu/entries/wittgenstein/#PhilInve)
<br></br>
<span class="firstcharacter">I</span>
t is a trivial observation that our world is complex. Scientific research is no exception; in most research fields, we are often faced with a seemingly insurmountable body of previous research. Evidence from different studies can be conflicting, and it can be difficult to make sense out of various sources of information.
**Evidence synthesis** methods therefore play a crucial role in many disciplines, for example the social sciences, medicine, biology, or econometrics. **Meta-analysis**, the statistical procedure used to combine results of various studies or analyses, has become an indispensable tool in many research areas. Meta-analyses can be of enormous importance, especially if they guide practical decision-making, or future research efforts. Many applied researchers therefore have some meta-analysis skills in their "statistical toolbox", while others want to learn how to perform them in their own research field. Meta-analysis has become so ubiquitous that many graduate and undergraduate students already learn how to perform meta-analyses as part of their curriculum -- sometimes with varying levels of enthusiasm.
The way meta-analyses can be performed, like statistical computing as a whole, has seen major shifts in the last decades. This has a lot to do with the rise of open source, collaborative statistical software, primarily in the form of the _R_ Statistical Programming Language and Environment. The _R_ ecosystem allows researchers and statisticians everywhere to build their own **packages**, and to make them available to everyone, for free. This has led to a spectacular rise in readily available statistical software for the _R_ language. While we are writing this, the [CRAN Task View](https://cran.r-project.org/web/views/MetaAnalysis.html) lists more than 130 packages dedicated to meta-analysis alone.
In _R_, you can do **anything** -- literally. It is a full programming language, so if you do not find a function for something you want to do, you can easily write it yourself. For meta-analyses, however, there is hardly any need to do this anymore. Just a small collection of _R_ packages already provide all the functionality you can find in current "state-of-the-art" meta-analysis programs -- for free. Even more so, there are many novel meta-analysis methods that can currently **only** be applied in _R_. In short: the _R_ environment gives researchers much more tools for their meta-analyses. In the best case, this allows us to draw more robust conclusions from our data, and thus better inform decision-making.
This raises the question why not everyone is using _R_ for meta-analyses. We think there are two main reasons: **convenience** and **anxiety** (and sometimes a mixture of both). Both reasons are very understandable. Most meta-analysts are applied researchers; not statisticians or programmers. The thought of learning an obscure and complicated-seeming programming language can act as a deterrent. The same is true for meta-analytic methods, with their special theoretical background, their myriad of analytic choices, and different statistics that need to be interpreted correctly.
With this guide, we want to show that many of these concerns are unfounded, and that learning how to do a meta-analysis in _R_ is worth the effort. We hope that the guide will help you to learn the skills needed to master your own meta-analysis project in _R_. We also hope that this guide will make it easier for you to not only learn **what** meta-analytic methods to apply when, but also **why** we apply them. Last but not least, we see this guide as an attempt to show you that meta-analysis methods and _R_ programming are not mere inconveniences, but a fascinating topic to explore.
<br></br>
## This Book Is for Mortals {-}
---
This guide was not written for meta-analysis experts or statisticians. We do not assume that you have any special background knowledge on meta-analytic methods. Only basic knowledge of fundamental mathematical and statistical concepts is needed. For example, we assume that you have heard before what things like a "mean", "standard deviation", "correlation", "regression", "$p$-value" or a "normal distribution" are. If these terms ring a bell, you should be good to go. If you are really starting from scratch, you may want to first have a look at Robert Stinerock's statistics beginner's guide [@stinerock2018statistics] for a thorough introduction including hands-on examples in _R_ -- or some other introductory statistics textbook of your choice.
Although we tried to keep it as minimal as possible, we will use mathematical formulas and statistical notation at times. But do not panic. Formulas and greek letters can seem confusing at first glance, but they are often a very good way to precisely describe the idea behind some meta-analysis methods. Having seen those formulas, and knowing what they represent, will also make it easier for you to understand more advanced texts you may want to read further down the line. And of course, we tried our best to always explain in detail what certain symbols or letters stand for, and what a specific formula **wants to tell us**. In appendix \@ref(symbollist) of this book, you can find a list of the symbols we use, and what they stand for. In later chapters, especially the Advanced Methods section, we need to become a little more technical to explain the idea behind some of the applied techniques. Nevertheless, we made sure to always include some background information on the mathematical and statistical concepts used in these sections.
No prior knowledge of _R_ (or programming in general) is required. In the guide, we try to provide a gentle introduction into basic _R_ skills you need to code your own meta-analysis. We also provide references to adequate resources to keep on learning. Furthermore, we will show you how you can set up a free computer program which allows you use _R_ conveniently on your PC or Mac.
As it says in the title, our book focuses on the "doing" part of meta-analysis. Our guide aims to be an accessible resource which meets the needs of applied researchers, students and data scientists who want to get going with their analyses using _R_. Meta-analysis, however, is a vast and multi-faceted topic, so it is natural that not everything can be covered in this guide. For this book, limitations particularly pertain to three areas:
- Although we provide a short primer on these topics, we do not cover **in detail** how to define research questions, systematically search and include studies for your meta-analysis, as well as how to assess their quality. Each of these topics merits books of their own, and luckily many helpful resources already exist. We therefore only give an overview of important considerations and pitfalls when collecting the data for your meta-analysis, and will refer you to adequate resources dealing with the nitty-gritty details.
- The second limitation of this guide pertains to its level of technicality. This book is decidedly written for "mortals". We aim to show you when, how and why to apply certain meta-analytic techniques, along with their pitfalls. We also try to provide an easily accessible, conceptual understanding of the techniques we cover, resorting to more technical details only if it benefits this mission. Quite naturally, this means that some parts of the guide will not contain a deep dive into technicalities that expert-level meta-analysts and statisticians may desire. Nevertheless, we include references to more advanced resources and publications in each chapter for the interested reader.
- Contents of a book will always to some extent reflect the background and experience of its authors. We are confident that the methods we cover here are applicable and relevant to a vast range of research areas and disciplines. Nevertheless, we wanted to disclose that the four authors of this book are primarily versed in current research in psychology, psychiatry, medicine, and intervention research. "Real-world" use cases and examples we cover in the book therefore concentrate on topics where we know our way around. The good news is that meta-analytic methods (provided some assumptions, which we will cover) are largely agnostic to the research field from which data stem from, and can be used for various types of outcome measures. Nonetheless, and despite our best intentions to make this guide as broadly applicable to as many applied research disciplines as possible, it may still be possible that some of the methods covered in this book are more relevant for some research areas than others.
<br></br>
## Topics Covered in the Book {-}
---
Among other things, this guide will cover the following topics:
* What a meta-analysis is, and why it was **invented**.
* **Advantages** and **common problems** with meta-analysis.
* How **research questions** for meta-analyses are specified, and how the **search for studies** can be conducted.
* How you can set up _R_, and a **computer program** which allows you to use _R_ in a convenient way.
* How you can **import** your meta-analysis data into _R_, and how to **manipulate** it through code.
* What **effect sizes** are, and how they are calculated.
* How to **pool effect sizes** in fixed-effect and random-effects meta-analyses.
* How to analyze the **heterogeneity** of your meta-analysis, and how to explore it using **subgroup analyses** and **meta-regression**.
* Problems with **selective outcome reporting**, and how to tackle them.
* How to perform **advanced types** of meta-analytic techniques, such as "multilevel" meta-analysis, meta-analytic structural equation modeling, network meta-analysis, or Bayesian meta-analysis.
* How to **report** your meta-analysis results, and make them **reproducible**.
<br></br>
## How to Use This Book {-}
---
### Work Flow {-}
---
This book is intended to be read in a "linear" fashion. We recommend that you start with the first chapters on meta-analysis and _R_ basics, and then keep on working yourself through the book one chapter after another. Jumping straight to the hands-on chapters may be tempting, but it is not generally recommended. Teaching students and researchers how to perform their first meta-analyses, we found that a basic familiarity with this technique, as well as the R Studio environment, is a necessary evil to avoid frustrations later on. This is particularly true if you have no previous experience with meta-analysis **and** _R_ programming. Experienced _R_ users may skip Chapter \@ref(discovering-R), which introduces _R_ and R Studio. However, it will certainly do no harm to work through the chapter anyway as a quick refresher.
While all chapters are virtually self-contained, we do sometimes make references to topics covered in previous chapters. Chapters in the Advanced Methods section in particular assume that you are familiar with theoretical concepts we have covered before.
The last section of this book contains helpful tools for your meta-analysis. This does not mean that these topics are the final things you have to consider when performing a meta-analysis. We simply put these chapters in the end because they primarily serve as reference works for your own meta-analysis projects. We link to these tools throughout the book in sections where they are thematically relevant.
<br></br>
### Companion _R_ Package {-}
---
This book comes with a companion _R_ package called **{dmetar}**. This package mainly serves two functions. First, it aims to make your life easier. Although there are fantastic _R_ packages for meta-analysis out there with a vast range of functionality, there are still a few things which are currently not easy to implement in _R_, at least for beginners.
The **{dmetar}** package aims to bridge this gap by providing a few extra functions facilitating exactly those things. Secondly, the package also contains all the data sets we are using for the hands-on examples shown in this book. In chapter \@ref(dmetar), the **{dmetar}** package is introduced in detail, and we show you how to install the package step by step. Although we will make sure that there are no substantial changes, **{dmetar}** is still under active development, so it may be helpful to have a look at the [package website](https://dmetar.protectlab.org) now and then to check if there are new or improved functionalities which you can use for your meta-analysis.
Although advised, it is not essential that you install the package. Wherever we make use of **{dmetar}** in the book, we will also provide you with the raw code for the function, or a download link to the data set we are using.
<br></br>
### Text Boxes {-}
---
Throughout the book, a set of text boxes is used.
```{block, type='boxinfo'}
**General Note**
General notes contain relevant background information, insights, anecdotes, considerations or take-home messages pertaining to the covered topic.
```
```{block, type='boximportant'}
**Important Information**
These boxes contain information on caveats, problems, drawbacks or pitfalls you have to keep in mind.
```
```{block, type='boxquestion'}
**Questions**
After each chapter, this box will contain a few questions through which you can test your knowledge. Answers to these questions can be found at the end of the book in [Appendix A](#qanda).
```
```{block, type='boxdmetar'}
**{dmetar} Note**
The **{dmetar}** note boxes appear whenever functions or data sets contained in the companion _R_ package are used. These boxes also contain URLs to the function code, or data set download links for readers who did not install the package.
```
```{block, type='boxreport'}
**How Can I Report This?**
These boxes contain recommendations on how you can report _R_ output in your thesis or research article.
```
<br></br>
## Conventions {-}
---
A few conventions are followed throughout the book.
$$~$$
**{packages}**
All _R_ packages are written in bold and are put into curly brackets. This is a common way to write package names in the _R_ community.
$$~$$
`R Code`
All _R_ code or objects we define in _R_ are written in this monospace font.
$$~$$
`## R Output`
The same monospace font is used for the output we receive after running _R_ code. However, we use two number signs (hashes) to differentiate it from _R_ input.
$$~$$
$Formula$
This serif font is reserved for formulas, statistics, and other forms of mathematical notation.
<br></br>
## What to Do When You Are Stuck {-}
---
Undeniably, the road to doing meta-analyses in _R_ can be a rocky path at times. Although we think this is sometimes exaggerated, _R_'s learning curve **is** steep. Statistics **is** hard. We did our best to make your experience of learning how to perform meta-analyses using _R_ as painless as possible. Nevertheless, this will not shield you from being frustrated sometimes. This is all but natural. We all had to start from scratch somewhere down the line. From our own experience, we can assure you that we have never met anyone who was **not** able to learn _R_, or how to do a meta-analysis. It only takes practice, and the understanding that there will be no point in time when you are "done" learning. We believe in you.
If you are looking for something a little more practical than this motivational message: here are a few things you can do once you stumble upon things that this guide cannot answer.
<br></br>
### Do Not Panic {-}
---
Taking their first steps in _R_, many people are terrified when the first red error messages start popping up. That is not necessary. **Everyone** gets error messages **all the time**. Instead of panicking or throwing your computer out the window, take a deep breath and take a closer look at the error message. Very often, it only takes a few tweaks to make the error messages disappear. Have you misspelled something in your code? Have you forgotten to close a bracket, or to put something into quotation marks?
Also, make sure that your output actually **is** an error message. _R_ distinguishes between `Error`s, `Warning`s, and plain messages. Only the first means that your code could not be executed. `Warning`s mean that your code did run, but that something **may** have gone awry. Messages mean that your code did run completely, and are usually shown when a function simply wants to bring your attention to something it has done for you under the hood. For this reason, they are also called **diagnostic messages**.
<br></br>
### Google {-}
---
A software developer friend once told the first author this joke about his profession: "A programmer is someone who can google better than Average Joe". This observation certainly also applies to _R_ programming. If you find yourself in a situation in which you cannot make sense out of an error or warning message you receive, do not hesitate to simply copy and paste it, and do a Google search. Adding "R" to your search is often helpful to improve the results. Most content on the Internet is in English; so if your error message in _R_ is in another language, run `Sys.setenv(LANGUAGE = "en")` and then rerun your code again.
There is a large _R_ community out there, and it is very likely that someone had the same problem as you before. Google is also helpful if there is something specific you want to do with your data, but do not know what _R_ commands you can use to do this. Even for experts, it is absolutely normal to use Google **dozens** of times when writing _R_ code. Do not hesitate to do the same whenever you get stuck.
<br></br>
### _StackOverflow_ & _CrossValidated_ {-}
---
When searching for _R_-related questions on Google, you will soon find out that many of the first hits will link you to a website called [StackOverflow](https://stackoverflow.com/). StackOverflow is a large community-based forum for questions related to programming in general. On StackOverflow, everyone (including you) can ask and answer questions.
In contrast to many other forums on the Internet, answers you get on StackOverflow are usually goal-oriented and helpful. If searching Google did not help you to solve your problem, addressing it there might be a good solution. However, there are a few things to keep in mind. First, when asking a question, always tag your question with `[R]` so that people know which programming language you are talking about. Also, run `sessionInfo()` in _R_ and attach the output you get to your question. This lets people know which _R_ and package versions you are using, and might be helpful to locate the problem.
Lastly, do not expect overwhelming kindness. Many StackOverflow users are experienced programmers who may be willing to point you at certain solutions; but do not expect anyone to solve your problem for you. It is also possible that someone will simply inform you that this topic has already been covered elsewhere, send you the link, and then move on. Nevertheless, using StackOverflow **is** usually the best way to get high-quality support for specific problems you are dealing with.
StackOverflow, by the way, is primarily for questions on programming. If your question also has a statistics background, you can use [CrossValidated](https://stats.stackexchange.com/) instead. CrossValidated works like StackOverflow, but is primarily used by statistics and machine learning experts.
<br></br>
### Contact Us {-}
---
If you have the feeling that your problem has something to do with this guide itself, you can also contact us. This particularly pertains to issues with the companion _R_ package for this guide, **{dmetar}**. If you have trouble installing the package, or using some of its functions, you can go to our [website](https://www.protectlab.org/meta-analysis-in-r), where you can find ways to report your issue. When certain problems come up frequently, we usually try to have a look at them and search for fixes. Known issues will also be displayed in the Corrections & Remarks section in the online version of the guide (see **Work Flow** section). Please do not be disappointed if we do not answer your question personally, or if it takes a while until we get back to you. We receive many questions related to meta-analysis and our package every day, so it is sometimes not possible to directly answer each and every one.
<br></br>
## Acknowledgments {-}
---
We would like to thank David Grubbs and Chapman & Hall/CRC Press for approaching us with the wonderful idea of turning our online guide into the printed book you are reading right now, and for their invaluable editorial support.
Many researchers and students have shared their feedback and experiences working with this guide with us since we began writing a preliminary online version of it in late 2018. This feedback has been incredibly valuable, and has helped us considerably to tailor this book further to the needs of the ones reading it. Thank you to all of you.
We owe a great debt of gratitude to all researchers involved in the development of the _R_ meta-analysis infrastructure presented in this guide; but first and foremost to Guido Schwarzer and Wolfgang Viechtbauer, maintainers of the **{meta}** and **{metafor}** package, respectively. This guide, like the whole _R_ meta-analysis community, would not exist without your effort and dedication.
Furthermore, particular thanks go to Luke McGuinness, author of the gorgeous **{robvis}** package, for writing an additional chapter on Risk of Bias visualization, which you can find on this book's companion website. Luke, we are incredibly grateful for your continuous support for this project.
Last but not least, we want to thank Lea Schuurmans, Paula Kuper and Antonia Sprenger for supporting us in the development and compilation of this book.
<br></br>
Erlangen, Amsterdam, Kyoto and Munich
<p style="text-align:right;"><strong>Mathias, Pim, Toshi & David</strong></p>
<br></br>
bookdown::gitbook:
css: [style.css, font-awesome.min.css]
config:
toc:
before: |
<li><a href="./">Doing Meta-Analysis in R</a></li>
after: |
<li><a href="https://www.protectlab.org/meta-analysis-in-r" target="blank">How to cite this guide</a></li>
<li><a href="https://dmetar.protectlab.org" target="blank">Get the <i>dmetar</i> package</a></li>
<li><a href="https://www.protectlab.org/meta-analysis-in-r" target="blank">PDF Download</a></li>
toc_depth: 3
toc_float:
collapsed: false
edit: https://github.com/MathiasHarrer/Doing-Meta-Analysis-in-R/blob/master/%s
download: no
fontsettings:
size: 1
toolbar:
search: yes
bookdown::pdf_book:
includes:
in_header: preamble.tex
latex_engine: xelatex
citation_package: natbib
keep_tex: yes
bookdown::epub_book: default
bookdown::bs4_book:
theme:
fg: "black"
bg: "#FFFEFA"
primary: "#277DB0"
secondary: "#277DB0"
success: "#277DB0"
info: "#277DB0"
warning: "#277DB0"
danger: "#277DB0"
base_font: !expr bslib::font_google("Fira Sans")
code_font: !expr bslib::font_google("Roboto Mono")
heading_font: !expr bslib::font_google("Fira Sans")
headings-font-weight: "500"
lead-font-weight: "500"
box-shadow: "none"
border: "0px"
#repo: https://github.com/MathiasHarrer/Doing-Meta-Analysis-in-R/
twitter-handle: MathiasHarrer
includes:
in_header: [style.html]
favicon: "favicon.ico"
biblio-style: "apalike"
link-citations: true
# Citing this Guide {-}
---
<br></br>
The suggested citation is:
```{block, type='boxempty'}
**Harrer, M., Cuijpers, P., Furukawa, T.A., & Ebert, D.D.** (2021). _Doing Meta-Analysis with R: A Hands-On Guide_. Boca Raton, FL and London: Chapman & Hall/CRC Press. ISBN 978-0-367-61007-4.
```
Download the reference as [BibTeX](https://www.protectlab.org/meta-analysis-in-r/data/citation.bib) or [.ris](https://www.protectlab.org/meta-analysis-in-r/data/citation.ris).
<br></br>
# (PART) Meta-Analysis in R {-}
# Effect Sizes {#effects}
---
<img src="_figs/effect_sizes.jpg" />
<br></br>
<span class="firstcharacter">I</span>
n the last chapter, we were able to familiarize ourselves with the _R_ universe and learned a few helpful tools to import and manipulate data. In this second part of the book, we can now apply and expand our _R_ knowledge while learning about core statistical techniques that are used in meta-analyses.
\index{Mean, Arithmetic}
In Chapter \@ref(what-are-mas), we defined meta-analysis as a technique which summarizes quantitative outcomes from several studies. In meta-analyses, studies instead of individuals become the fundamental units of our analysis.
This introduces new problems. In a primary study, it is usually quite easy to calculate **summary statistics** through which we can describe the data we collected. For example, it is conventional to calculate the **arithmetic mean** $\bar{x}$ and **standard deviation** $s$ of a continuous outcome in primary studies.
However, this is only possible because an essential prerequisite is usually met in primary studies: we know that the outcome variable has been **measured in the same way** across all study subjects. For meta-analyses, this assumption is typically not met. Imagine that we want to conduct a meta-analysis where our outcome of interest are the math skills of grade eight students. Even if we apply strict inclusion criteria (see Chapter \@ref(research-question)), it is likely that not every study used exactly the same test to measure math skills; some may have even only reported the proportion of students who passed or failed the test. This makes it virtually impossible to quantitatively synthesize the results directly.
To perform a meta-analysis, we have to find an **effect size** which can be summarized across all studies. Sometimes, such effect sizes can be directly extracted from the publication; more often, we have to calculate them from other data reported in the studies. The selected effect size metric can have a substantial impact on the results of a meta-analysis, and their interpretability. They should therefore fulfill a few important criteria [@lipsey2001practical; @higgins2019cochrane]. In particular, the selected effect size measure for a meta-analysis should be:
* **Comparable**. It is important that the effect size measure has the same meaning across all studies. Let us take math skills as an example again. It makes no sense to pool differences between experimental and control groups in the number of points achieved on a math test when studies used different tests. Tests may, for example, vary in their level of difficulty, or in the maximum number of points that can be achieved.
* **Computable**. We can only use an effect size metric for our meta-analysis if it is possible to derive its numerical value from the primary study. It must be possible to calculate the effect size for all of the included studies based on their data.
* **Reliable**. Even if it is possible to calculate an effect size for all included studies, we must also be able to **pool** them statistically. To use some metric in meta-analyses, it must be at least possible to calculate the **standard error** (see next chapter). It is also important that the format of the effect size is suited for the meta-analytic technique we want to apply, and does not lead to errors or biases in our estimate.
* **Interpretable**. The type of effect size we choose should be appropriate to answer our research question. For example, if we are interested in the strength of an association between two continuous variables, it is conventional to use correlations to express the size of the effect. It is relatively straightforward to interpret the magnitude of a correlation, and many researchers can understand them. In the following chapters, we will learn that it is sometimes not possible to use outcome measures which are both easy to interpret **and** ideal for our statistical computations. In such cases, it is necessary to transform effect sizes to a format with better mathematical properties before we pool them.
It is very likely that you have already stumbled upon the term "effect size" before. We also used the word here, without paying too much attention to what it precisely stands for. In the next section, we should therefore explore what we actually mean when we talk about an "effect size".
<br></br>
## What Is an Effect Size? {#what-is-es}
---
In the terminology we use in this book, an effect size is defined as a metric quantifying the relationship between two entities. It captures the **direction** and **magnitude** of this relationship. If relationships are expressed as the same effect size, it is possible to compare them.
\index{Correlation}
\index{Standardized Mean Difference}
We want to stress here that this is just **one** way to define what an effect size means. Definitions of an effect size can be wider and narrower, and the term is used differently by different people [@borenstein2011introduction, chapter 3]. Some researchers only talk of effect sizes when referring to the results of intervention studies, which are usually expressed as differences between the treatment and control group (see Chapter \@ref(s-md)). Using this conceptualization, "effect size" refers to the effect of a treatment, and how large this effect is.
In our opinion, this is quite a narrow definition. Not only treatments can have an effect on some variable; effects can also appear **naturally** without any direct human intervention. For example, it is possible that socio-demographic variables, such as the income and education of parents, may have an effect on the educational attainment of their children. Correlations describe how well we can predict the values of a variable through the values of another, and can also be seen as a form of effect size.
On the other hand, it might go too far to say that everything we can pool as part of a meta-analysis is automatically an effect size. As we will learn, there are measures of the **central tendency**, such as the sample mean, which can also be used in meta-analyses. But a sample mean alone does not quantify a relationship between two phenomena, and there is no "effect". Nevertheless, in this book, we will often use the word "effect size" as a **pars pro toto**, representing both estimates of an actual effect, as well as "one-variable" and central tendency measures. We do not do this because this is accurate, but because it is more convenient.
Others disapprove of the term "effect size" altogether. They stress that the word "effect" in "effect size" suggests that there is a **causal** relationship. However, we all know that **correlation is not causation**, and a difference between an intervention and control group must not automatically be caused by the treatment itself. In the end, it is up to you to decide which definition you prefer, but be aware that people may have different conceptualizations in mind when they talk about effect sizes.
\index{Sampling Error}
In mathematical notation, it is common to use the greek letter **theta** ($\theta$) as the symbol for a **true** effect size^[In this book, we will largely follow the notation used by Schwarzer et al. [-@schwarzer2015meta] when discussing effect sizes.]. More precisely, $\theta_k$ represents the true effect size of a study $k$. Importantly, the true effect size is **not** identical with the **observed effect size** that we find in the published results of the study. The observed effect size is only an **estimate** of the true effect size. It is common to use a **hat** (^) symbol to clarify that the entity we refer to is only an estimate. The observed effect size in study $k$, our estimate of the true effect size, can therefore be written as $\hat\theta_k$.
But why does $\hat\theta_k$ differ from $\theta_k$? It differs because of the **sampling error**, which can be symbolized as $\epsilon_k$. In every primary study, researchers can only draw a small sample from the whole population. For example, when we want to examine the benefits of regular exercise on the cardiovascular health of primary care patients, we will only be able to include a small selection of patients, not **all** primary care patients in the world. The fact that a study can only take small samples from an infinitely large population means that the observed effect will differ from the true population effect.
Put simply, $\hat\theta_k$ is, therefore, the same as $\theta_k$ plus some sampling error $\epsilon_k$^[It should be noted that there are often more reasons for our observed effect sizes to differ from the true effect size than sampling error alone; for example biases in a study's research methodology, or measurement error. In Chapter \@ref(es-correction), we will discuss this in greater detail.].
\begin{align}
\hat\theta_k = \theta_k + \epsilon_k
(\#eq:es1)
\end{align}
It is obviously desirable that the effect size estimate $\hat\theta_k$ of study $k$ is as close as possible to the true effect size, and that $\epsilon_k$ is minimal. All things being equal, we can assume that studies with smaller $\epsilon$ will deliver a more **precise** estimate of the true effect size. Meta-analysis methods take into account how precise an effect size estimate is (see Chapter \@ref(pooling-es)). When pooling the results of different studies, they give effects with a greater precision (i.e., less sampling error) a higher weight, because they are better estimators of the true effect [@hedges2014statistical].
But how can we know how big the sampling error is? Unsurprisingly, the true effect of a study $\theta_k$ is unknown, and so $\epsilon_k$ is also unknown. Often, however, we can use statistical theory to approximate the sampling error. A common way to quantify $\epsilon$ is through the **standard error** ($SE$). The standard error is defined as the standard deviation of the **sampling distribution**. A sampling distribution is the distribution of a metric we get when we draw random samples with the same sample size $n$ from our population **many, many times**.
We can make this more concrete by simulating data in _R_. We can pretend that we are drawing random samples from a larger population using the `rnorm` function. This function allows us to draw **r**andom samples from a **norm**al distribution, therefore the name. The `rnorm` function simulates a "perfect world" in which we **know** how values are distributed in the true population and lets us take samples.
The function takes three arguments: `n`, the number of observations we want to have in our sample; `mean`, the **true** mean of the population; and `sd`, the **true** standard deviation. The `rnorm` function has a random component, so to make results reproducible, we have to set a **seed** first. This can be done using the `set.seed` function, which we have to supply with a number. For our example, we chose to set a seed of `123`. Furthermore, we want to simulate that the true mean of our population is $\mu =$ 10, that the true standard deviation is $\sigma =$ 2, and that our sample consists of $n=$ 50 randomly selected observations, which we save under the name `sample`.
This is what our code looks like:
```{r}
set.seed(123)
sample <- rnorm(n = 50, mean = 10, sd = 2)
```
Now, we can calculate the mean of our sample.
```{r}
mean(sample)
```
\index{Central Limit Theorem}
We see that the mean is $\bar{x} =$ 10.07, which is already very close to the true value in our population. The sampling distribution can now be created by repeating what we did here--taking a random sample and calculating its mean--**countless times**. To simulate this process for you, we conducted the steps from before 1000 times.
The histogram in Figure \@ref(fig:samplingdist) displays the results. We can see that the means of the samples closely resemble a normal distribution with a mean of 10. If we were to draw even more samples, the distribution of the means would get even closer to a normal distribution. This idea is expressed in one of the most fundamental tenets of statistics, the **central limit theorem** [@aronow2019foundations, chapter 3.2.4].
```{r samplingdist, fig.cap='"Sampling distribution" of means (1000 samples).', warning=F, message=F, echo=F, out.width='70%', fig.align='center'}
library(magrittr)
set.seed(123)
res = list()
for (i in 1:1000){
x = rnorm(50, 10, 2)
c(mean(x), sd(x)) -> res[[i]]
}
do.call(rbind, res) %>% as.data.frame() %>%
set_colnames(c("mu", "sigma")) -> res_1000
par(bg="#FFFEFA")
hist(res_1000$mu, breaks = 25,
main = "",
xlab = "Calculated Mean")
```
The standard error is defined as the standard deviation of this sampling distribution. Therefore, we calculated the standard deviation of the 1000 simulated means to get an approximation of the standard error. The result is $SE =$ 0.267.
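If you want to recreate this little experiment yourself, a minimal sketch could look like this. It simply repeats the sampling step from before 1000 times in our simulated "perfect world", and then takes the standard deviation of all calculated means:
```{r, eval=F}
# Draw 1000 samples of size n=50 from the simulated population
# and store the mean of each sample.
set.seed(123)
sample_means <- replicate(1000, mean(rnorm(n = 50, mean = 10, sd = 2)))

# The standard deviation of these means approximates the standard error
sd(sample_means)
```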
As we mentioned before, we cannot simply calculate the standard error in real life by simulating the true sampling distribution. However, there are formulas based on statistical theory which allow us to calculate an estimate of the standard error, even when we are limited to only one observed sample--which we usually are. The formula to calculate the standard error of the **mean** is defined like this:
\begin{align}
SE = \frac{s}{\sqrt{n}}
(\#eq:es2)
\end{align}
It defines the standard error as the standard deviation of our sample $s$, divided by the square root of the sample size $n$. Using this formula, we can easily calculate the standard error of our `sample` object from before using _R_. Remember that the size of our random sample was $n =$ 50.
```{r}
sd(sample)/sqrt(50)
```
If we compare this value to the one we found in our simulation of the sampling distribution, we see that they are nearly identical. Using the formula, we could quite accurately estimate the standard error using only the sample we have at hand.
In formula 3.2, we can see that the standard error of the mean depends on the sample size of a study. When $n$ becomes larger, the standard error becomes smaller, meaning that a study's estimate of the true population mean becomes more precise.
To exemplify this relationship, we conducted another simulation. Again, we used the `rnorm` function, and assumed a true population mean of $\mu =$ 10 and that $\sigma =$ 2. But this time, we varied the sample size, from $n =$ 2 to $n =$ 500. For each simulation, we calculated both the mean, and the standard error using formula 3.2.
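A rough sketch of how such a simulation can be set up is shown below; the object and column names are only chosen for illustration.
```{r, eval=F}
# For each sample size from 2 to 500, draw one random sample
# and record its mean and standard error (using formula 3.2).
set.seed(123)
sim <- t(sapply(2:500, function(n) {
  x <- rnorm(n, mean = 10, sd = 2)
  c(n = n, mean = mean(x), se = sd(x)/sqrt(n))
}))

# Show the first few simulation results
head(sim)
```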
```{r simulse, fig.cap = 'Sample mean and standard error as a function of sample size.', message=F, echo=F, fig.height=2.5}
library(ggplot2)
library(gridExtra)
load("data/simul_SE.rda")
plotdat_mean = data.frame(x = 2:500,
y = resul$mean)
plotdat_se = data.frame(x = 2:500,
y = resul$se)
ggplot(data = plotdat_mean, aes(x = x, y = y)) +
geom_smooth(method = "gam", se = F, color = "black",
linetype = "dotted", size = 0.5) +
geom_line(color = "gray39") +
scale_color_grey() +
theme_minimal() +
theme(panel.background = element_rect(fill = "#FFFEFA",
size = 0),
plot.background = element_rect(fill = "#FFFEFA",
size = 0)) +
ylab("Mean") +
xlab("Sample Size") -> p1
ggplot(data = plotdat_se[2:499,], aes(x = x, y = y)) +
geom_smooth(method = "gam", se = F, color = "black",
linetype = "dotted", size = 0.5) +
geom_line(color = "gray39") +
scale_color_grey() +
theme_minimal() +
theme(panel.background = element_rect(fill = "#FFFEFA",
size = 0),
plot.background = element_rect(fill = "#FFFEFA",
size = 0)) +
ylab("Standard Error") +
xlab("Sample Size") -> p2
grid.arrange(p1, p2, nrow=1)
```
Figure \@ref(fig:simulse) shows the results. We can see that the means look like a **funnel**: as the sample size increases, the mean estimates become more and more precise, and converge towards 10. This increase in precision is represented by the standard error: with increasing sample size, the standard error becomes smaller and smaller.
We have now explored the quintessential elements we need to conduct a meta-analysis: an (1) observed effect size or outcome measure, and (2) its precision, expressed as the standard error. If these two types of information can be calculated from a published study, it is usually also possible to perform a meta-analytic synthesis (see Chapter \@ref(pooling-es)).
In our simulations, we used the mean of a variable as an example. It is important to understand that the properties we saw above can also be found in other outcome measures, including commonly used effect sizes. If we had calculated a mean **difference** in our sample instead of a mean, this mean difference would have exhibited a similarly shaped sampling distribution, and the standard error of the mean difference would also have decreased as the sample size increases (provided the standard deviation remains the same). The same is also true, for example, for (Fisher's $z$ transformed) correlations.
In the following sections, we will go through the most commonly used effect sizes and outcome measures in meta-analyses. One reason why these effect size metrics are used so often is because they fulfill two of the criteria we defined at the beginning of this chapter: they are **reliable** and **computable**.
In formula 3.2, we described how the standard error of a mean can be calculated, but this formula can **only** be readily applied to **means**. Different formulas to calculate the standard error are needed for other effect sizes and outcome measures. For the effect size metrics we cover here, these formulas luckily exist, and we will show you all of them. A collection of the formulas can be also found in the [Appendix](#formula). Some of these formulas are somewhat complicated, but the good news is that we hardly ever have to calculate the standard error manually. There are various functions in _R_ which do the heavy lifting for us.
In the following section, we not only want to provide a theoretical discussion of different effect size metrics. We also show you which kind of information you have to prepare in your data set so that the _R_ meta-analysis functions we are using later can easily calculate the effect sizes for us.
We grouped effect sizes based on the type of research design in which they usually appear: **single group designs** (e.g. naturalistic studies, surveys, or uncontrolled trials), and **control group designs** (e.g. experimental studies or controlled clinical trials). Please note that this is just a rough classification, not a strict rule. Many of the effect sizes we present are technically applicable to any type of research design, as long as the type of outcome data is suited.
<br></br>
## Measures & Effect Sizes in Single Group Designs {#single-group-es}
---
### Means {#means}
---
\index{Mean, Arithmetic}
The **arithmetic mean** is probably the most commonly used central tendency measure. Although means are rather infrequently used as outcome measures, they can easily be pooled in a meta-analysis. For example, one could investigate the mean height of males, expressed as centimeters or inches, by pooling several representative studies.
The arithmetic mean $\bar{x}$ is calculated by summing all individual values $x_i$ in a sample and then dividing the sum by the sample size.
\begin{equation}
\bar{x} = \frac{\sum^{n}_{i=1}x_i}{n}
(\#eq:es3)
\end{equation}
We already covered how the standard error of the mean is calculated (see Chapter \@ref(what-is-es)). We simply have to divide the sample standard deviation $s$ through the square root of the sample size.
\begin{equation}
SE_{\bar{x}} = \frac{s}{\sqrt{n}}
(\#eq:es4)
\end{equation}
As we have seen before, the mean and its standard error are easy to calculate in _R_.
```{r}
# Set seed of 123 for reproducibility
# and take a random sample (n=50).
set.seed(123)
sample <- rnorm(n = 50, mean = 20, sd = 5)
# Calculate the mean
mean(sample)
# Calculate the standard error
sd(sample)/sqrt(50)
```
To conduct a meta-analysis of means, our data set should at least contain the following columns:
* **`n`**. The number of observations (sample size) in a study.
* **`mean`**. The mean reported in the study.
* **`sd`**. The standard deviation of the variable reported in the study.
<br></br>
### Proportions {#props}
---
\index{Proportion}
A **proportion** is another type of central tendency measure. It specifies how many units of a sample fall into a certain subgroup. Proportions can take values between zero and one, which can be transformed into **percentages** by multiplying with 100. Proportions may, for example, be used as an outcome measure when we want to examine the prevalence of a disease at a given point in time. To calculate a proportion $p$, we have to divide the number of individuals $k$ falling into a specific subgroup by the total sample size $n$.
\begin{equation}
p = \frac{k}{n}
(\#eq:es5)
\end{equation}
The standard error of a proportion can be calculated this way:
\begin{equation}
SE_{p} = \sqrt{\frac{p(1-p)}{n}}
(\#eq:es6)
\end{equation}
We can calculate the proportion and its standard error in _R_ using this code:
```{r}
# We define the following values for k and n:
k <- 25
n <- 125
# Calculate the proportion
p <- k/n
p
# Calculate the standard error
sqrt((p*(1-p))/n)
```
\index{Odds}
\index{Logit-Transformation}
\index{Logarithm, Natural}
The fact that the range of proportions is restricted between 0 and 1 can be problematic [@lipsey2001practical, chapter 3]. When $p$ is close to 0 or close to 1, the standard error is artificially compressed, which leads us to overestimate the precision of the proportion estimate.
This has something to do with the sampling distribution. When values of $p$ are very low or very high, the sampling distribution will not be approximately normal like in Figure \@ref(fig:samplingdist). The distribution will be **right-skewed** or **left-skewed** because it is impossible for a random sample to have a calculated proportion outside the 0-1 range.
To avoid this, proportions are commonly **logit**-transformed before they are pooled. A logit-transformation first involves calculating the **odds** (see Chapter \@ref(or)). Odds are defined as the proportion of participants which fall into a specific category, divided by the proportion of units which do not fall into that category.
The natural logarithm function $\log_e$ is then used to transform the odds into a format where $p=$ 0.5 equals a value of 0, and where there is no range restriction. This ensures that the sampling distribution is approximately normal and that standard errors are not biased.
The calculation of logit-transformed proportions and their standard errors can be done using these formulas [@lipsey2001practical, chapter 3]^[To transform a logit-proportion back to the original scale, we can use the following formula: $p=\frac{\exp(p_{\text{logit}})}{1+\exp(p_{\text{logit}})}$, where $\exp$ is the **exponential function**, implemented via `exp` in _R_ (see Chapter \@ref(ppoolbin)).]:
\begin{equation}
p_{\text{logit}} = \log_{e} \left(\frac{p}{1-p}\right)
(\#eq:es7)
\end{equation}
\begin{equation}
SE_{p_{\text{logit}}} = \sqrt{\frac{1}{np}+\frac{1}{n(1-p)}}
(\#eq:es8)
\end{equation}
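To make this more concrete, here is a minimal example of how the logit-transformed proportion and its standard error could be calculated by hand, reusing the values of $k$ and $n$ from before:
```{r}
# Values from the example above
k <- 25
n <- 125
p <- k/n

# Calculate the logit-transformed proportion
p_logit <- log(p/(1-p))
p_logit

# Calculate the standard error of the logit-transformed proportion
se_p_logit <- sqrt(1/(n*p) + 1/(n*(1-p)))
se_p_logit
```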
Luckily, the meta-analysis function we can use in _R_ performs this logit-transformation automatically for us. We therefore only have to prepare the following columns in our data set:
* **`event`**. The number of observations which are part of a specific subgroup ($k$).
* **`n`**. The total sample size $n$.
<br></br>
### Correlations {#cors}
---
#### Pearson Product-Moment Correlation {#pearson-cors}
---
\index{Correlation}
\index{History of Meta-Analysis}
A correlation is an effect size which expresses the amount of **co-variation** between two variables. The most common form is the **Pearson product-moment correlation**^[This type of correlation was named after Karl Pearson, the famous statistician who also played a part in the history of meta-analysis (see Chapter \@ref(history)).], which can be calculated for two continuous variables. Product-moment correlations can be used as the effect size, for example, when a meta-analyst wants to examine the relationship between relationship quality and well-being.
A correlation $r_{xy}$ between a variable $x$ and a variable $y$ is defined as the **co-variance** $\text{Cov}(x,y)=\sigma^{2}_{xy}$ of $x$ and $y$, divided by the **product** of their standard deviations, $\sigma_x$ and $\sigma_y$.
\begin{equation}
r_{xy} = \frac{\sigma^{2}_{xy}}{\sigma_x \sigma_y}
(\#eq:es9)
\end{equation}
Using the sample size $n$, the standard error of $r_{xy}$ can be calculated like this:
\begin{equation}
SE_{r_{xy}} = \frac{1-r_{xy}^2}{\sqrt{n-2}}
(\#eq:es10)
\end{equation}
When calculating the product-moment correlation, we standardize the co-variation between two variables by their standard deviations. This means that it becomes less relevant whether two or more studies measured a construct on the same scale; once we calculate a correlation, it is automatically possible to compare the effects.
Correlations can take values between -1 and 1. The magnitude of a correlation is often interpreted using Cohen's [-@cohen1988statistical] conventions:
* $r \approx$ 0.10: small effect.
* $r \approx$ 0.30: moderate effect.
* $r \approx$ 0.50: large effect.
It should be noted, however, that these conventions may be at best seen as rules of thumb. It is often much better to quantify a correlation as small or large depending on the subject and previous research.
\index{Fisher's \textit{z}}
\index{History of Meta-Analysis}
Unfortunately, like proportions (Chapter \@ref(props)), correlations are restricted in their range, and this can introduce bias when we estimate the standard error for studies with a small sample size [@alexander1989statistical].
In meta-analyses, correlations are therefore usually transformed into **Fisher's** $z$^[Fisher's $z$ was named after yet another famous statistician we mentioned in Chapter \@ref(history), Ronald A. Fisher.]. Like the logit-transformation, this also entails the use of the natural logarithm function to make sure that the sampling distribution is approximately normal (see Chapter \@ref(ratios) for a more detailed explanation). The formula looks like this:
\begin{equation}
z = 0.5\log_{e}\left(\frac{1+r}{1-r}\right)
(\#eq:es11)
\end{equation}
If we know the sample size $n$, the approximate standard error of Fisher's $z$ can be obtained through this formula [@olkin1995correlations]:
\begin{equation}
SE_{z} = \frac{1}{\sqrt{n-3}}
(\#eq:es12)
\end{equation}
We can also calculate $r_{xy}$ and $z$ directly in _R_, using the `cor` and `log` function.
```{r}
# Simulate two continuous variables x and y
set.seed(12345)
x <- rnorm(20, 50, 10)
y <- rnorm(20, 10, 3)
# Calculate the correlation between x and y
r <- cor(x,y)
r
# Calculate Fisher's z
z <- 0.5*log((1+r)/(1-r))
z
```
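For completeness, this is how the approximate standard error of $z$ could be calculated for our example, in which the sample size was $n=$ 20:
```{r}
# Calculate the standard error of Fisher's z (n = 20)
n <- 20
1/sqrt(n-3)
```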
Thankfully, we do not have to perform Fisher's $z$ transformation manually when conducting a meta-analysis of correlations in _R_. The only columns we need in our data set are:
* **`cor`**. The (non-transformed) correlation coefficient of a study.
* **`n`**. The sample size of the study.
<br></br>
#### Point-Biserial Correlation {#pb-cors}
---
\index{Correlation}
\index{Correlation, Point-Biserial}
The Pearson product-moment correlation describes the relationship between two continuous variables. In cases where only one variable $y$ is continuous, while the other variable $x$ is dichotomous (i.e. only takes two values), a **point-biserial correlation** can be calculated, which expresses how well $y$ can be predicted from the group membership in $x$.
Point-biserial correlations can be calculated using this formula:
\begin{equation}
{r_{pb}}= \frac{(\bar{y_1}-\bar{y_2})\sqrt{p_1(1-p_1)}}{s_y}
(\#eq:es13)
\end{equation}
In this formula, $\bar{y_1}$ is the mean of the continuous variable when only the first group of the dichotomous variable $x$ is considered, and $\bar{y_2}$ is the mean when only the second group of $x$ is considered; $p_1$ is the proportion of cases that fall into group 1 in $x$, and $s_y$ is the standard deviation of $y$.
The point-biserial correlation can be calculated in _R_ using the `cor` function (see previous section). If one of the supplied variables only assumes two values while the other is continuous, the (approximate) point-biserial correlation is automatically calculated.
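As a brief illustration, the code below simulates a dichotomous group variable (coded 0 and 1) and a continuous outcome, and then uses `cor` to obtain the point-biserial correlation. All values here are made up for demonstration purposes:
```{r}
# Simulate a dichotomous group variable (0/1) and a continuous outcome
set.seed(123)
group <- rep(c(0, 1), each = 25)
y <- rnorm(50, mean = 10 + 2*group, sd = 3)

# Since "group" only takes two values, cor() returns
# the point-biserial correlation
cor(group, y)
```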
\index{Standardized Mean Difference}
The point-biserial correlation bears a close resemblance to the **standardized mean difference**, which we will cover later (Chapter \@ref(b-group-smd)). Both effect size metrics quantify how much values of a continuous variable differ between two groups. However, it is less common that point-biserial correlations are pooled in meta-analyses. Like the product-moment correlation, the point-biserial correlation has undesirable statistical properties for meta-analyses, such as range restriction when the group proportions are unequal [@bonett2019point].
When we are interested in group differences on a continuous outcome variable, it is therefore advised to convert point-biserial correlations to standardized mean differences for meta-analyses [@lipsey2001practical, chapter 3]. A formula to convert a point-biserial correlation to a standardized mean difference can be found in Chapter \@ref(convert-corr) in the "Helpful Tools" section of this book.
<br></br>
## Effect Sizes in Control Group Designs
---
### (Standardized) Mean Differences {#s-md}
---
#### Between-Group Mean Difference {#b-group-md}
---
The **between-group mean difference** $\text{MD}_{\text{between}}$ is defined as the raw, un-standardized difference in means between two **independent** groups. Between-group mean differences can be calculated when a study contained at least two groups, as is usually the case in controlled trials or other types of experimental studies. In meta-analyses, mean differences can only be used when **all** the studies measured the outcome of interest on **exactly** the same scale. Weight, for example, is nearly always measured in kilograms in scientific research; and in diabetology, the HbA$_{\text{1c}}$ value is commonly used to measure the blood sugar.
The mean difference is defined as the mean of group 1, $\bar{x}_1$, minus the mean of group 2, $\bar{x}_2$:
\begin{equation}
\text{MD}_{\text{between}} = \bar{x}_1 - \bar{x}_2
(\#eq:es14)
\end{equation}
The standard error can be obtained using this formula:
\begin{equation}
SE_{\text{MD}_{\text{between}}} = s_{\text{pooled}}\sqrt{\frac{1}{n_1}+\frac{1}{n_2}}
(\#eq:es15)
\end{equation}
\index{Pooled Standard Deviation}
In the formula, $n_1$ represents the sample size in group 1, $n_2$ the sample size in group 2, and $s_{\text{pooled}}$ the **pooled standard deviation** of both groups. Using the standard deviation of group 1 ($s_1$) and group 2 ($s_2$), the value of $s_{\text{pooled}}$ can be calculated this way:
\begin{align}
s_{\text{pooled}} = \sqrt{\frac{(n_1-1)s^2_1+(n_2-1)s^2_2}{(n_1-1)+(n_2-1)}}
(\#eq:es16)
\end{align}
Time to provide an example on how we can calculate a mean difference and its standard error in _R_. As a first step, we will be simulating some data. We only do this here so that we have plausible values to work with. In a real meta-analysis, the mean of `x1` and `x2` as well as their standard deviations `s1` and `s2` are something that we can hopefully extract from the published paper, same as the group sample sizes `n1` and `n2`. So you do not have to worry about this first part too much.
```{r, eval=FALSE}
# Generate two random variables with different population means
set.seed(123)
x1 <- rnorm(n = 20, mean = 10, sd = 3)
x2 <- rnorm(n = 20, mean = 15, sd = 3)
# Calculate values we need for the formulas
s1 <- sd(x1)
s2 <- sd(x2)
n1 <- 20
n2 <- 20
```
With this data at hand, we can proceed to the core part, in which we calculate the mean difference and its standard error using the formulae we showed before:
```{r, eval=FALSE}
# Calculate the mean difference
MD <- mean(x1) - mean(x2)
MD
```
```
## [1] -4.421357
```
```{r, eval=FALSE}
# Calculate s_pooled
s_pooled <- sqrt(
(((n1-1)*s1^2) + ((n2-1)*s2^2))/
((n1-1)+(n2-1))
)
# Calculate the standard error
se <- s_pooled*sqrt((1/n1)+(1/n2))
se
```
```
## [1] 0.8577262
```
It is usually not necessary to do these calculations **manually** like we did here. For a meta-analysis of mean differences, we only have to prepare the following columns in our data set:
* **`n.e`**. The number of observations in the intervention/experimental group.
* **`mean.e`**. The mean of the intervention/experimental group.
* **`sd.e`**. The standard deviation in the intervention/experimental group.
* **`n.c`**. The number of observations in the control group.
* **`mean.c`**. The mean of the control group.
* **`sd.c`**. The standard deviation in the control group.
<br></br>
#### Between-Group Standardized Mean Difference {#b-group-smd}
---
\index{Standardized Mean Difference}
\index{Cohen's \textit{d}}
The standardized between-group mean difference $\text{SMD}_{\text{between}}$ is defined as the difference in means between two independent groups, standardized by the pooled standard deviation $s_{\text{pooled}}$. In the literature, the standardized mean difference is also often called **Cohen's** $d$, named after the psychologist and statistician Jacob Cohen.
In contrast to unstandardized mean differences, $\text{SMD}_{\text{between}}$ expresses the difference between two groups in **units of standard deviations**. This can be achieved by dividing the raw mean difference of two groups, $\bar{x_1}$ and $\bar{x_2}$, through the pooled standard deviation $s_{\text{pooled}}$ of both groups:
\begin{equation}
\text{SMD}_{\text{between}} = \frac{\bar{x}_1 - \bar{x}_2}{s_{\text{pooled}}}
(\#eq:es17)
\end{equation}
Where $s_{\text{pooled}}$ is calculated using the same formula (3.16) we already covered in Chapter \@ref(b-group-md). Standardized mean differences are much more often used in meta-analyses than unstandardized mean differences. This is because $\text{SMD}_{\text{between}}$ can be compared between studies, even if those studies did not measure the outcome of interest using the same instruments.
The standardization has the effect that $\text{SMD}_{\text{between}}=$ 1 always means that the two group means are one sample standard deviation away from each other (see Figure \@ref(fig:smd)); $\text{SMD}_{\text{between}}=$ 2 then represents a difference of 2 standard deviations, and so forth^[Kristoffer Magnusson developed a great interactive tool visualizing the distribution of two groups for varying values of the standardized mean difference. The tool can be found online: https://www.rpsychologist.com/d3/cohend/].
```{r smd, fig.cap='Standardized mean difference of 1 (assuming normality, equal standard deviations and equal sample size in both groups).', out.width='85%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/smd_sep.png')
```
The standardization makes it much easier to evaluate the magnitude of the mean difference. Standardized mean differences are often interpreted using the conventions by Cohen [-@cohen1988statistical]:
* SMD $\approx$ 0.20: small effect.
* SMD $\approx$ 0.50: moderate effect.
* SMD $\approx$ 0.80: large effect.
Like the convention for Pearson product-moment correlations (Chapter \@ref(pearson-cors)), these are rules of thumb at best.
It is usually much better to interpret standardized mean differences based on their "real-life" implications. An effect size may be small according to the criteria by Cohen, but it can still be extremely important. For many serious diseases, for example, even a very small statistical effect can still have a huge impact on the population level, and potentially save millions of lives. One study showed that, for depression treatment, even effects as small as $\text{SMD}_{\text{between}}=$ 0.24 can have a clinically important impact on the lives of patients [@cuijpers2014threshold].
The standard error of $\text{SMD}_{\text{between}}$ can be calculated using this formula [@borenstein2011introduction]:
\begin{equation}
SE_{\text{SMD}_{\text{between}}} = \sqrt{\frac{n_1+n_2}{n_1n_2} + \frac{\text{SMD}^2_{\text{between}}}{2(n_1+n_2)}}
(\#eq:es18)
\end{equation}
Where $n_1$ and $n_2$ are the sample sizes of group 1 and group 2, and with $\text{SMD}_{\text{between}}$ being the calculated between-group standardized mean difference.
\index{esc Package}
There are several functions in _R_ which allow us to calculate $\text{SMD}_{\text{between}}$/Cohen's $d$ in one step. Here, we use the `esc_mean_sd` function, which is part of the **{esc}** package [@esc]. We have not used this package before, so it is necessary to install it first (see Chapter \@ref(packages)).
```{r, message=F, eval=F}
# Load esc package
library(esc)
# Define the data we need to calculate SMD/d
# This is just some example data that we made up
grp1m <- 50 # mean of group 1
grp2m <- 60 # mean of group 2
grp1sd <- 10 # sd of group 1
grp2sd <- 10 # sd of group 2
grp1n <- 100 # n of group1
grp2n <- 100 # n of group2
# Calculate effect size
esc_mean_sd(grp1m = grp1m, grp2m = grp2m,
grp1sd = grp1sd, grp2sd = grp2sd,
grp1n = grp1n, grp2n = grp2n)
```
```
## Effect Size Calculation for Meta Analysis
##
## Conversion: mean and sd to effect size d
## Effect Size: -1.0000
## Standard Error: 0.1500
## [...]
```
In the output, there are two things to mention. First, we see that the calculated standardized mean difference is exactly 1. This makes sense because the difference between the two means we defined is equal to the (pooled) standard deviation.
Secondly, we see that the effect size is **negative**. This is because the mean of group 2 is larger than the mean of group 1. While this is mathematically correct, we sometimes have to change the sign of calculated effect sizes so that others can interpret them more easily.
Imagine that the data in this example came from a study measuring the mean number of cigarettes people were smoking per week after receiving an intervention (group 1) or no intervention (group 2). In this context, the results of the study were **positive** because the mean number of smoked cigarettes was lower in the intervention group. Therefore, it makes sense to report the effect size as 1.0 instead of -1.0, so that other people can intuitively understand that the intervention had a positive effect.
The sign of effect sizes becomes particularly important when some studies used measures for which **higher** values mean better outcomes, while others used a measure for which **lower** values indicate better outcomes. In this case, it is essential that all effect sizes are consistently coded in the same direction (we have to ensure that, for example, higher effect sizes mean better outcomes in the intervention group in all studies in our meta-analysis).
\index{Hedges' \textit{g}}
Often, a small-sample correction is applied to standardized mean differences, which leads to an effect size called **Hedges'** $g$. We will cover this correction in Chapter \@ref(hedges-g).
To conduct a meta-analysis of standardized mean differences, our data set should at least contain the following columns:
* **`n.e`**. The number of observations in the intervention/experimental group.
* **`mean.e`**. The mean of the intervention/experimental group.
* **`sd.e`**. The standard deviation in the intervention/experimental group.
* **`n.c`**. The number of observations in the control group.
* **`mean.c`**. The mean of the control group.
* **`sd.c`**. The standard deviation in the control group.
<br></br>
```{block2, type='boxinfo'}
**Standardizing by External Estimates of the Standard Deviation**
\vspace{2mm}
When calculating SMDs, we use $s_{\text{pooled}}$ because it serves as a proxy of the true standard deviation in our population. Especially when a study is small, however, the standard deviation calculated based on its sample may be a poor estimator of the population standard deviation.
In this case, a possible solution is to use **external** estimates of $s_{\text{pooled}}$ to standardize the mean difference [@higgins2019cochrane]. Such external estimates may be extracted from larger cross-sectional studies which used the same instrument as the study in a similar population.
```
<br></br>
#### Within-Group (Standardized) Mean Difference {#w-group-smd}
---
\index{Standardized Mean Difference}
**Within-group** unstandardized or standardized mean differences can be calculated when a difference **within** one group is examined. This is usually the case when the same group of people is measured at two different time points (e.g. before an intervention and after an intervention).
In contrast to between-group mean differences, $\text{(S)MD}_{\text{within}}$ is calculated using data that is **not independent**. For example, it is likely that the value of person $i$ at measurement point $t_1$ has influenced the value of the same person at measurement point $t_2$. Due to the fact that within-group mean differences are usually based on data measured at different time points, they are also known as the **(standardized) mean gain**.
The within-group mean difference $\text{MD}_{\text{within}}$ is calculated the same way as $\text{MD}_{\text{between}}$ (see Chapter \@ref(b-group-md)), except that we now compare the values of the same group at two different time points, $t_1$ and $t_2$.
\begin{equation}
\text{MD}_{\text{within}} = \bar{x}_{\text{t}_2} - \bar{x}_{\text{t}_1}
(\#eq:es19)
\end{equation}
Things become more complicated when we want to calculate a standardized version of the within-group mean difference. There is no full consensus on how $\text{SMD}_{\text{within}}$ should be computed. In a [blog post](http://jakewestfall.org/blog/index.php/2016/03/25/five-different-cohens-d-statistics-for-within-subject-designs/), Jake Westfall points out that there are at least five distinct ways to calculate it.
An intuitive option is to standardize the mean difference $\text{MD}_{\text{within}}$ using the pooled standard deviation of both assessment points, $s_{\text{t}_1}$ and $s_{\text{t}_2}$. Since the number of observations is typically identical at both assessment points in within-group designs, this means that the sum of the two squared standard deviations simply has to be divided by 2 to obtain $s^2_{\text{pooled}}$. Otherwise, formula (3.16) in Chapter \@ref(b-group-md) can be used to calculate $s_{\text{pooled}}$. This leads to the following formula:
\begin{equation}
\text{SMD}_{\text{within}} = \frac{\bar{x}_{\text{t}_2} - \bar{x}_{\text{t}_1}}{s_{\text{pooled}}}
(\#eq:es20)
\end{equation}
An even better solution, proposed (among others) by Becker [-@becker1988synthesizing], is to divide $\text{MD}_{\text{within}}$ by the standard deviation of the pre-test scores ($s_{\text{t}_1}$). The reason for this is that $s_{\text{t}_1}$ is less likely to be influenced by intervention effects^[There is also another, "statistical" reasoning behind using $s_{\text{t}_1}$ instead of $\sqrt{(s_{\text{t}_1}^2 + s_{\text{t}_2}^2/2)}$ in the denominator. In within-group designs, $s_{\text{t}_1}$ and $s_{\text{t}_2}$ are not independent. This means that $s_{\text{t}_1}^2 + s_{\text{t}_2}^2$ does not follow a $\chi^2$ distribution, which is required for formula (3.23) below to be applicable when calculating the standard error of $\text{SMD}_{\text{within}}$. A "correct" formula of the approximate sampling distribution of $\text{SMD}_{\text{within}}$ when using $\sqrt{(s_{\text{t}_1}^2 + s_{\text{t}_2}^2/2)}$ in the denominator has recently been described [@cousineau2020approximating]; it also assumes that $r_{\text{t}_1\text{t}_2}$ is known. @cousineau2021ci discuss ways to calculate confidence intervals based on this newly "discovered" distribution. Here, however, we restrict ourselves to the formulas for $\text{SMD}_{\text{within}}$ and its standard error when using only $s_{\text{t}_1}$ in the denominator. We do this because these formulas are relatively easy to apply in practice, commonly found in the literature, and because standardizing by the pre-test standard deviation $s_{\text{t}_1}$ is typically a reasonable approach.]:
\begin{equation}
\text{SMD}_{\text{within}} = \frac{\bar{x}_{\text{t}_2} - \bar{x}_{\text{t}_1}}{s_{\text{t}_1}}
(\#eq:es201)
\end{equation}
The standard errors of $\text{MD}_{\text{within}}$ and $\text{SMD}_{\text{within}}$ can be calculated using these formulas [@borenstein2011introduction, chapter 4; @becker1988synthesizing]:
\begin{equation}
SE_{\text{MD}_{\text{within}}}=\sqrt{\dfrac{s^2_{\text{t}_1}+s^2_{\text{t}_2}-(2r_{\text{t}_1\text{t}_2}s_{\text{t}_1}s_{\text{t}_2})}{n}}
(\#eq:es21)
\end{equation}
\begin{equation}
SE_{\text{SMD}_{\text{within}}} = \sqrt{\frac{2(1-r_{\text{t}_1\text{t}_2})}{n}+\frac{\text{SMD}^2_{\text{within}}}{2n}}
(\#eq:es22)
\end{equation}
The fact that we need to know the correlation $r_{\text{t}_1\text{t}_2}$ between the assessment points to calculate the standard error of within-group (standardized) mean differences is often problematic in practice. The pre-post correlation of a variable is hardly ever reported in published research, which forces us to assume a value of $r_{\text{t}_1\text{t}_2}$ based on previous research.
However, if we do not get the correlation exactly right, this can lead to errors in our results. In general, it is best to avoid calculating within-group effect sizes for a meta-analysis [@cuijpers2017pre]. Especially when we have data from both an experimental **and** a control group, it is much better to calculate the **between-group** (standardized) mean difference at $t_2$ to measure the effect of a treatment, instead of relying on pre-post comparisons. Within-group mean differences may be calculated, however, when our meta-analysis focuses solely on studies which did not include a control group.
The within-group standardized mean difference (also known as the within-group Cohen's $d$) can be calculated like this in _R_:
```{r, message=F}
# Define example data needed for effect size calculation
x1 <- 20 # mean at t1
x2 <- 30 # mean at t2
sd1 <- 13 # sd at t1
n <- 80 # sample size
r <- 0.5 # correlation between t1 and t2
# Calculate the raw mean difference
md_within <- x2 - x1
# Calculate the smd:
# Here, we use the standard deviation at t1
# to standardize the mean difference
smd_within <- md_within/sd1
smd_within
# Calculate standard error
se_within <- sqrt(((2*(1-r))/n) +
(smd_within^2/(2*n)))
se_within
```
Meta-analyses of within-group (standardized) mean differences can only be performed in _R_ using **pre-calculated effect sizes** (see Chapter \@ref(es-formats-different)). The following columns are required in our data set:
* **`TE`**: The calculated within-group effect size.
* **`seTE`**: The standard error of the within-group effect size.
<br></br>
```{block2, type='boximportant'}
**The Limits of Standardization**
\vspace{2mm}
Standardized mean differences are, without a doubt, one of the **most frequently used** effect sizes metrics in meta-analyses. As we mentioned in Chapter \@ref(b-group-smd), standardization allows us, at least in theory, to compare the strength of an effect observed in different studies; even if these studies did not use the same instruments to measure it.
Standardization, however, is not a **"Get Out of Jail Free card"**. The size of a particular study's $\text{SMD}$ depends heavily on the **variability** of its sample [see also @viechtbauer2007approximate]. Imagine that we conduct two otherwise identical studies using the same instrument to measure our outcome of interest, but in two populations with drastically different variances. In this case, the $\text{SMD}$ value of both studies would **differ greatly**, even if the "raw" mean difference in both studies was **identical**.
In this case, it is somewhat difficult to argue that the "causal" strength of the effect in one study was much larger or smaller than in the other. As Jacob Cohen [-@cohen1994earth] put it in a famous paper: "[t]he effect of A on B for me can hardly depend on whether I'm in a group that varies greatly [...] or another that does not vary at all" (p. 1001). This problem, by the way, applies to all commonly used
"standardized" effect size metrics in meta-analysis, for example correlations.
In addition, we have also seen that the **unit** by which to standardize is often **less clearly defined** than one may think. Various options exist both for between- and within-group $\text{SMD}$s, and it is often hard to disentangle which approach was chosen in a particular study. It is necessary to always be **as consistent as possible** across studies in terms of how we calculate standardized effect sizes for our meta-analysis. Even so, one should keep in mind that the **commensurability of effect sizes** can be limited, even if standardization was applied.
Of course, the best solution would be if outcomes were **measured on the same scale in all studies**, so that **raw mean differences** could be used. In many research fields, however, we are far away from such a level of methodological harmony. Thus, unfortunately, standardized effect sizes are often our **second best option**.
```
<br></br>
### Risk & Odds Ratios {#ratios}
---
#### Risk Ratio {#rr}
---
\index{Risk Ratio}
As it says in the name, a **risk ratio** (also known as the **relative risk**) is a ratio of two **risks**. Risks are essentially **proportions** (see Chapter \@ref(props)). They can be calculated when we are dealing with binary, or **dichotomous**, outcome data.
We use the term "risk" instead of "proportion" because this type of outcome data is frequently found in medical research, where one examines the **risk** of developing a disease or dying. Such occurrences are known as **events**. Imagine we are conducting a controlled clinical trial comprising a treatment group and a control group. We are interested in how many patients experienced some event $E$ during the study period.
The results we get from such a study can be categorized in a $2 \times 2$ table [@schwarzer2015meta, chapter 3.1]:
```{r twobytwo, echo=F, message = F}
library(kableExtra)
df = data.frame(
`x` = c("Treatment", "Control", " "),
event = c("$a$", "$c$", "$n_E$"),
`no event` = c("$b$", "$d$", "$n_{\\neg E}$"),
`sample size` = c("$n_{\\text{treat}}$", "$n_{\\text{control}}$", " "))
colnames(df) = c(" ", "Event", "No Event", " ")
kable(df, "html", booktabs = TRUE, escape = FALSE, align="lccl",
cap = "Results of controlled studies using binary outcome data.",
full_width = F) %>%
kable_styling(latex_options = c("hold_position", "condensed"),
bootstrap_options = c("condensed"),
full_width = F) %>%
column_spec(1, border_right = TRUE) %>%
column_spec(4, border_left = TRUE) %>%
row_spec(2, hline_after = TRUE)
```
Based on this data, we can calculate the risk of experiencing event $E$ during the study period for both the treatment group and control group. We simply divide the number of people experiencing $E$ in one group by the total sample size of that group.
The risk in the treatment group, ${p_{E}}_{\text{treat}}$, is therefore calculated like this:
\begin{equation}
{p_{E}}_{\text{treat}} = \frac{a}{a+b} = \frac{a}{n_{\text{treat}}}
(\#eq:es23)
\end{equation}
And the risk in the control group, ${p_{E}}_{\text{control}}$, like this:
\begin{equation}
{p_{E}}_{\text{control}} = \frac{c}{c+d} = \frac{c}{n_{\text{control}}}
(\#eq:es24)
\end{equation}
The risk ratio is then defined as the risk in the treatment/intervention group divided by the risk in the control group:
\begin{equation}
\text{RR} = \frac{{p_{E}}_{\text{treat}}}{{p_{E}}_{\text{control}}}
(\#eq:es25)
\end{equation}
\index{Log-Risk Ratio}
Because both ${p_{E}}_{\text{treat}}$ and ${p_{E}}_{\text{control}}$ can only have values between 0 and 1, the RR has a few interesting properties. First of all, a risk ratio can never be negative. Secondly, if there is no difference between the treatment group and the control group, RR has a value of 1 (instead of 0, like SMDs). If an RR is larger than 1, this means that the treatment group increases the risk of event $E$; if RR is smaller than 1, the intervention reduces the risk.
A peculiarity of the RR is that same-sized effects are not **equidistant**. For example, RR $=$ 0.5 means that the risks are halved in the intervention group. However, the direct opposite of this effect, the risk being doubled due to the intervention, is not expressed by RR $=$ 1.5, but by RR $=$ 2. This means that risk ratios do not follow a normal distribution, which can be problematic in meta-analyses.
To avoid this issue, risk ratios are often transformed into the **log-risk ratio** before pooling. This ensures asymptotic normality, that effect sizes can assume any value, and that values are centered around 0 (meaning no effect). The transformation is performed by taking the natural logarithm of RR:
\begin{equation}
\log \text{RR} = \log_{e}(\text{RR})
(\#eq:es26)
\end{equation}
The standard error of the log-risk ratio can then be calculated using this formula:
\begin{equation}
SE_{\log \text{RR}} = \sqrt{\frac{1}{a}+\frac{1}{c} - \frac{1}{a+b} - \frac{1}{c+d}}
(\#eq:es27)
\end{equation}
We can calculate the (log-)risk ratio in _R_ like this:
```{r}
# Define data
a <- 46 # events in the treatment group
c <- 77 # events in the control group
n_treat <- 248 # sample size treatment group
n_contr <- 251 # sample size control group
# Calculate the risks
p_treat <- a/n_treat
p_contr <- c/n_contr
# Calculate the risk ratio
rr <- p_treat/p_contr
rr
# Calculate the log-risk ratio and its standard error
log_rr <- log(rr)
log_rr
se_log_rr <- sqrt((1/a) + (1/c) - (1/n_treat) - (1/n_contr))
se_log_rr
```
\index{Zero Cell Problem}
\index{Continuity Correction}
\index{Mantel-Haenszel Method}
The calculation of risk ratios becomes difficult when there are **zero cells**. It is possible in practice that $a$ or $c$ (or both) are zero, meaning that no event was recorded in the treatment or control group. If you have a look at the formula used to calculate RRs, it is easy to see why this is problematic. If $a$ (events in the treatment group) is zero, ${p_{E}}_{\text{treat}}$ is also zero, and the RR will be zero. The case of $c$ being zero is even more problematic: it means that ${p_{E}}_{\text{control}}$ is zero, and we all know that we **cannot divide by zero**.
This issue is often dealt with using a **continuity correction**. The most common continuity correction method is to add an increment of 0.5 in all cells that are zero [@gart1967bias]. When the sample sizes of the control group and treatment group are very uneven, we can also use the **treatment arm continuity correction** [@j2004add].
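As a rough sketch, this is how such a correction could be applied by hand. The $2 \times 2$ table below is entirely made up, and we follow the common variant of adding the increment to all four cells of a study that contains a zero cell:
```{r, eval=F}
# Hypothetical 2x2 table with a zero cell (values made up)
a <- 0;  b <- 30   # treatment group: events, non-events
c <- 9;  d <- 21   # control group: events, non-events

# Add a continuity correction of 0.5 to all cells
a <- a + 0.5; b <- b + 0.5
c <- c + 0.5; d <- d + 0.5

# Calculate the log-risk ratio and its standard error
log_rr <- log((a/(a+b))/(c/(c+d)))
se_log_rr <- sqrt((1/a) + (1/c) - (1/(a+b)) - (1/(c+d)))
log_rr; se_log_rr
```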
However, there is evidence that such corrections can lead to biased results [@efthimiou2018practical]. The (fixed-effect) **Mantel-Haenszel** method, a meta-analytic pooling technique we will discover in Chapter \@ref(mantel-haenszel), can handle zero cells **without** correction, unless they exist in **every study** in our meta-analysis. It may therefore be advisable to avoid continuity corrections unless the latter scenario applies.
A special form of the **zero cell** problem are **double-zero studies**. These are studies in which both $a$ and $c$ are zero. Intuitively, one might think that the results of such studies simply mean that the risk in the intervention and control group are similar, and that RR = 1.
Unfortunately, it is not that easy. It is very much possible that there is a true effect between the two groups, but that the sample size was too small to detect this difference. This is particularly likely when the probability that $E$ occurs is very low.
Imagine that a crazy scientist conducts a randomized controlled trial in which he assesses the effect of **Fulguridone**, a medication that allegedly reduces the risk of getting struck by lightning. He allocates 100 people evenly to either a medication group or a control group, and observes them for three years. The results of the trial are disappointing, because no one was struck by lightning, neither in the treatment group, nor in the control group. However, we know how unlikely it is, **in general**, to get struck by lightning. Observing only 100 people is simply not enough to detect differences in such a rare event, even if we accept the somewhat bizarre idea that the treatment works. For this reason, double-zero studies are often discarded completely when pooling the effects.
This leads us to one last caveat pertaining to risk ratios: they give us no information on how common an event is **in general**. If a meta-analysis reports a risk ratio of 0.5, for example, we know that an intervention reduced the risk by half. But we do not know if it reduced the risk from 40% to 20%, or from 0.004% to 0.002%. Whether a risk ratio is practically relevant depends on the context. If a risk ratio of 0.5 represents a risk reduction of 0.002%, this may not have a large impact on a population level, but it may still be important if the event of interest is, for example, a severe and debilitating disease.
When we conduct a meta-analysis in _R_, it is usually not necessary to calculate the log-risk ratio of a study by hand. We also do not have to worry about zero cells when importing the data. The following columns should be included in our data set:
* **`event.e`**. The number of events in the treatment or experimental group.
* **`n.e`**. The sample size of the treatment or experimental group.
* **`event.c`**. The number of events in the control group.
* **`n.c`**. The sample size of the control group.
<br></br>
#### Odds Ratio {#or}
---
\index{Odds Ratio}
\index{Odds}
Like the risk ratio (Chapter \@ref(rr)), **odds ratios** can also be calculated when we have binary outcome data of two groups. In the previous chapter on proportions (Chapter \@ref(props)), we already defined the odds as the number of cases which fall into a specific category, divided by the number of units which do not fall into that category.
Using the notation in Table \@ref(tab:twobytwo), the formula for the odds in the treatment and control group looks like this:
\begin{equation}
\text{Odds}_{\text{treat}} = \frac{a}{b}
(\#eq:es28)
\end{equation}
\begin{equation}
\text{Odds}_{\text{control}} = \frac{c}{d}
(\#eq:es29)
\end{equation}
It can be difficult to correctly interpret what odds actually mean. They describe the ratio of events to non-events, not the **probability** of the event. Imagine that we studied three individuals. Two experienced the event of interest, while one person did not. Based on this data, the probability (or risk) of the event would be $p = 2/3 \approx 66\%$. However, the odds of the event would be Odds = $\frac{2}{1}$ = 2, meaning that there are two events for one non-event.
The odds ratio (OR) is then defined as the odds in the treatment group, divided by the odds in the control group:
\begin{equation}
\text{OR} = \frac{a/b}{c/d}
(\#eq:es30)
\end{equation}
\index{Log-Odds Ratio}
Like the risk ratio (see Chapter \@ref(rr)), the odds ratio as such has undesirable statistical properties for meta-analyses. It is therefore also common to transform the odds ratio to the **log-odds ratio** using the natural logarithm:
\begin{equation}
\log \text{OR} = \log_{e}(\text{OR})
(\#eq:es31)
\end{equation}
The standard error of the log-odds ratio can be calculated using this formula (we use the notation in Table \@ref(tab:twobytwo)):
\begin{equation}
SE_{\log \text{OR}} = \sqrt{\frac{1}{a}+\frac{1}{b}+\frac{1}{c}+\frac{1}{d}}
(\#eq:es32)
\end{equation}
The `esc_2x2` function in the **{esc}** package provides an easy way to calculate the (log) odds ratio in _R_.
```{r, message=F, warning=F}
library(esc)
# Define data
grp1yes <- 45 # events in the treatment group
grp1no <- 98 # non-events in the treatment group
grp2yes <- 67 # events in the control group
grp2no <- 76 # non-events in the control group
# Calculate OR by setting es.type to "or"
esc_2x2(grp1yes = grp1yes, grp1no = grp1no,
grp2yes = grp2yes, grp2no = grp2no,
es.type = "or")
# Calculate logOR by setting es.type to "logit"
esc_2x2(grp1yes = grp1yes, grp1no = grp1no,
grp2yes = grp2yes, grp2no = grp2no,
es.type = "logit")
```
\index{Zero Cell Problem}
\index{Risk Ratio}
The same problems pertaining to risk ratios, **zero cells** and **double-zero studies** (see Chapter \@ref(rr)), are also relevant when calculating odds ratios. However, the odds ratio has one additional disadvantage compared to RRs: many people find it harder to understand, and ORs are often erroneously interpreted as RRs.
It is, therefore, often preferable to either only use risk ratios in a meta-analysis, or to convert odds ratios to risk ratios when reporting the results [@higgins2019cochrane, chapter 6.4.1.2]. The conversion can be performed using this formula [@zhang1998whats]:
\begin{equation}
\text{RR} = \frac{\text{OR}}{\left(1-\dfrac{c}{n_{\text{control}}}\right)+ \left(\dfrac{c}{n_{\text{control}}}\times \text{OR} \right)}
(\#eq:es33)
\end{equation}
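As a minimal illustration of this conversion, we can reuse the 2x2 counts from the **{esc}** example above and apply the formula by hand:
```{r}
# Calculate the odds ratio and the risk in the control group
# from the 2x2 counts defined above
OR <- (grp1yes/grp1no)/(grp2yes/grp2no)
p_c <- grp2yes/(grp2yes + grp2no)
# Convert the odds ratio to a risk ratio
RR <- OR/((1 - p_c) + p_c*OR)
RR
```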
To conduct a meta-analysis of odds ratios in _R_, the following columns should be included in our data set:
* **`event.e`**. The number of events in the treatment or experimental group.
* **`n.e`**. The sample size of the treatment or experimental group.
* **`event.c`**. The number of events in the control group.
* **`n.c`**. The sample size of the control group.
<br></br>
### Incidence Rate Ratios {#irr}
---
The effect sizes for binary outcome data we examined previously, risk ratios and odds ratios, are ways to compare the number of events in two groups. However, they do not directly encode the **time** during which these events occurred. When calculating a risk or odds ratio, we tacitly assume that the observation periods in both groups are comparable. Furthermore, risk and odds ratios do not provide us with any information on **how long** it took until the events occurred.
In some cases, this is just fine, because the time frame is not overly relevant for our research question. It is also possible that our binary data is cross-sectional and has no time dimension at all^[For example, we can also use risk or odds ratios to express differences in the proportion of smokers between females and males, based on cross-sectional survey data. In this context, RRs and ORs are often referred to as the **prevalence ratio** (PR) and **prevalence odds ratio** (POR), respectively [@tamhane2016prevalence].]. In these cases, the risk or odds ratio is usually an appropriate effect size metric.
But now, imagine a study in which we examine the mortality of individuals in two groups over 10 years. It might be possible that the number of events over these 10 years (e.g. death) is roughly similar in both groups. However, once we have a closer look at **when** the deaths occurred, we see that more events in one group occurred in the first years, while in the other group, somewhat more events occurred toward the end of our 10-year observation period. The calculated odds or risk ratio for our data would be approximately 1, indicating no group difference. But this misses something important: that participants in one group survived **somewhat longer**, even if they died eventually.
\index{Incidence Rate Ratio}
\index{Person-Time}
To incorporate time into our effect size estimate, we can calculate **incidence rate ratios**, which are sometimes simply called **rate ratios**. Incidence rate ratios consist of two **incidence rates**. To calculate these incidence rates, we have to first understand the concept of **person-time**.
The person-time expresses the total time in which participants in a study were at risk of having an event. To calculate the person-time, we sum up the time at risk (expressed as days, weeks, or years) of all study subjects. However, the time at risk differs from person to person.
To exemplify this, imagine we are conducting a study with 6 participants. The study lasts for exactly 10 years. After each year, we interview the participants to examine if they experienced the event of interest. Whenever we observe that the event has occurred, the study ends for the affected participant, and we do not examine her or him any further. The results of our study are visualized in Figure \@ref(fig:incidence).
```{r incidence, fig.cap = 'Example of time-to-event data.', message=F, echo=F, fig.height=2.5}
library(ggplot2)
df = data.frame(name = c("Rebecca", "Marvin", "Nicole", "Victoria", "Marie", "Lea"),
value = c(2, 5, 6, 10, 9, 10))
ggplot(data = df, aes(x = name, y = value)) +
geom_bar(stat = "identity", width = 0.4) +
geom_hline(yintercept = 1:9, color = "white") +
geom_hline(yintercept = 10, color = "black") +
coord_flip() +
theme_void() +
theme(axis.text.x = element_text(),
axis.title.x = element_text(),
axis.text.y = element_text()) +
scale_y_continuous(breaks = 0:10) +
ylab("Year") +
theme(panel.background = element_rect(fill = "#FFFEFA",
size = 0),
plot.background = element_rect(fill = "#FFFEFA",
size = 0))
```
\index{Survival Analysis}
\index{Odds Ratio}
\index{Censoring}
\index{Person-Time}
We see that only two of our participants, Victoria and Lea, remained in the study until the end. This is because they did not experience the event during the entire 10-year observation period. Therefore, both were **at risk** for 10 years.
All other participants experienced the event during the study period. When Rebecca was examined at year 2, for example, we found out that she experienced the event during the last year. However, we only know **that** the event occurred during year 2, not when exactly.
Research data like this is called **interval censored** data, and is very frequently found in clinical trials which conduct a so-called **survival analysis**. Data being censored means that we only partially know how long Rebecca was at risk before she finally experienced the event. We know that she had the event after year 1 and before the end of year 2, but nothing more. Lacking other information, we may therefore assume that the event occurred somewhere in the middle, and settle on a time at risk of 1.5 years.
If we apply the same scheme for all our censored data, we can calculate the **person-years** at risk in our study:
$$10 + 1.5+5.5+4.5+8.5+10 = 40$$
So the estimated total person-years at risk in our study is 40. Knowing that a year has 52 weeks, we can also calculate the **person-weeks** of our study: $40 \times 52 = 2080$.
Now that we know the person-years in our experiment, which we will denote as $T$, we can also calculate the incidence rate within one year. We know that four participants experienced the event during the study period, so the number of events is $E=4$. We can then calculate the incidence rate IR using this formula:
\begin{equation}
\text{IR} = \frac{E}{T}
(\#eq:es33b)
\end{equation}
In our example, this gives an incidence rate of $4/40 = 0.1$. This means that if we were to follow 1000 people for one year, we would expect 100 of them to experience the event during that time.
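A minimal sketch of this calculation in _R_, using the estimated times at risk from the example above:
```{r}
# Estimated time at risk (in years) for each participant,
# using the midpoint rule for the interval-censored cases
time_at_risk <- c(Rebecca = 1.5, Marvin = 4.5, Nicole = 5.5,
                  Victoria = 10, Marie = 8.5, Lea = 10)
# Total person-years and person-weeks at risk
T_years <- sum(time_at_risk)
T_years
T_years * 52
# Incidence rate: four events divided by the person-years
E <- 4
E/T_years
```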
To calculate the incidence rate ratio IRR, we have to divide the incidence rate of one group by the incidence rate of another group:
\begin{equation}
\text{IRR} = \frac{ E_{\text{treat}}/T_{\text{treat}} }{E_{\text{control}}/T_{\text{control}}}
(\#eq:es34)
\end{equation}
In this formula, $E_{\text{treat}}$ and $T_{\text{treat}}$ are the number of events and person-time in the treatment group, and $E_{\text{control}}$ and $T_{\text{control}}$ the number of events and person-time in the control group. Of course, the two groups may also represent other dichotomous variables of interest, for example women and men, or smokers and non-smokers, and so forth.
The IRR shares many properties with the risk and odds ratio, such as being centered at 1 and always being non-negative. Like ORs and RRs, incidence rate ratios are also often log-transformed for meta-analyses, creating the log-incidence rate ratio:
\index{Log-Incidence Rate Ratio}
\begin{equation}
\log \text{IRR} = \log_{e}(\text{IRR})
(\#eq:es35)
\end{equation}
The standard error of the log-incidence rate ratio can be calculated using this formula [@rothman2008modern, chapter 14]:
\begin{equation}
SE_{\log \text{IRR}} = \sqrt{\frac{1}{E_{\text{treat}}}+\frac{1}{E_{\text{control}}}}
(\#eq:es36)
\end{equation}
We can calculate the (log-)incidence rate ratio and its standard error in _R_ like this:
```{r}
# Define Data
e_treat <- 28 # Number of events in the treatment group
e_contr <- 28 # Number of events in the control group
t_treat <- 3025 # Person-time in the treatment group
t_contr <- 2380 # Person-time in the control group
# Calculate IRR
irr <- (e_treat/t_treat)/(e_contr/t_contr)
irr
# Calculate log-IRR
log_irr <- log(irr)
# Calculate standard error
se_log_irr <- sqrt((1/e_treat)+(1/e_contr))
```
In this example, we simulated a case in which the number of events $E_{\text{treat}}$ and $E_{\text{control}}$ is exactly equal, but where the treatment group has a longer person-time at risk. This time difference is accounted for when we calculate IRRs. Therefore, the result we get is not 1, but IRR $\approx$ 0.79, indicating that the incidence rate is smaller in the treatment group.
Incidence rate ratios are commonly used in epidemiology and prevention research. They can be used when participants are followed for a longer period of time, and when there are regular assessments in between. In practice, however, there is one caveat we should consider when calculating IRRs as part of a meta-analysis: it is important that the incidence data reported in the included articles is fine-grained enough. Sometimes, papers only report the total number of events during the entire study period and not the number of events recorded at each assessment point in between. It is also possible that no interim assessments were made to begin with.
In our example above (see Figure \@ref(fig:incidence)), we simply took the **midpoint** between the last "event-free" assessment point and the assessment point in which the event was recorded to estimate the time at risk of a participant. It is important to keep in mind that this is only a **best guess** of when the event happened exactly. Even when taking the midpoint, our estimates can still be off by about half a year in our example.
Our estimate of the person-time will be best if the time between assessment points is as small as possible. Whether the assessment intervals in a study are too coarse depends on the context of the meta-analysis, but it is always advisable to conduct sensitivity analyses [@panageas2007you].
This means recalculating the IRR of each study based on different estimates of the person-time:
* using the **midpoint** of the interval,
* using the **last "event-free" assessment point**, and
* using the assessment point in which the event was **detected**.
If the results of all three of these meta-analyses point in the same direction, we can be more confident in our findings. We should also make sure that the assessment periods do not differ too much between studies (e.g. one study examining events daily, and another only once a year). When there are doubts about the applicability of IRRs for a meta-analysis, we can always calculate risk or odds ratios instead (or in addition). However, when we do this, we should make sure that the assessment point was similar in each study (e.g. after one year).
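To make this more concrete, here is a minimal sketch of such a sensitivity check, using the single-group example from Figure \@ref(fig:incidence). In a real meta-analysis, we would recalculate the person-time of both groups in each included study in this way, and then pool the resulting IRRs again.
```{r}
# Last "event-free" assessment and detection point (in years)
# for the four participants who experienced the event
last_free <- c(1, 4, 5, 8)
detected <- c(2, 5, 6, 9)
censored <- c(10, 10)  # Victoria and Lea
# Person-years under the three assumptions
T_mid <- sum((last_free + detected)/2) + sum(censored)
T_lower <- sum(last_free) + sum(censored)
T_upper <- sum(detected) + sum(censored)
# Resulting incidence rates (four events)
c(midpoint = 4/T_mid, last.event.free = 4/T_lower, detected = 4/T_upper)
```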
To calculate a meta-analysis based on incidence rate ratios in _R_, the following columns need to be prepared in our data set:
* **`event.e`**: The total number of events in the treatment or experimental group.
* **`time.e`**: The person-time in the treatment or experimental group. The person-time has to be expressed in the same units (person-days, person-weeks, or person-years) in all studies.
* **`event.c`**: The total number of events in the control group.
* **`time.c`**: The person-time in the control group. The person-time has to be expressed in the same units (person-days, person-weeks, or person-years) in all studies.
<br></br>
```{block2, type='boximportant'}
**Hazard Ratios & Limitations of Incidence Rate Ratios**
\vspace{2mm}
Incidence rates and IRRs are an intuitive way to summarize event data, and the time frame during which these events occur. However, they are not without flaws. To calculate an incidence rate, we assume that the **underlying risk** in a population is **constant** over time (e.g. between year 1 and year 2 of a study). For IRRs it is assumed that, while the underlying risk may differ between the treatment and control group (e.g. because the treatment reduces the risk of experiencing an event), the risk **within** each group is constant.
It is not hard to see that this assumption is quite simplistic. There are many scenarios in which it is very **unrealistic** to presuppose that the event risk does not change over time [@kraemer2009events, e.g. death in metastatic cancer patients]. Based on a simulation study, Bender and Beckmann [-@bender2019limitations] concluded that using IRRs is only adequate when the average **observation periods** of both groups do **not differ** substantially, and when the **baseline risk** of the studied event is rather **low** (<25%).
An alternative, and often preferable measure to express group differences based on time-to-event data are **hazard ratios** (HR). A hazard ratio is a ratio of two (proportional) **hazard functions**. Such hazard functions describe the (varying) instantaneous risk (i.e., the "hazard") of experiencing an event at some timepoint $t$, provided that a person has not yet experienced this event until $t$.
Hazard ratios are typically estimated based on individual participant data using **Cox regression** models. If we can extract the log-hazard ratio $\log_{\text{e}}(\text{HR})$ and its corresponding standard error from all studies, it is possible to perform a meta-analysis using **inverse-variance pooling** (see Chapter \@ref(fem)). This works in the same way as one would pool, say, log-risk or odds ratios. In _R_, log-hazard ratios can be pooled using the `metagen` function, with the `sm` argument set to `"HR"` (see Chapter \@ref(pre-calculated-es)).
Pooling HRs can be cumbersome in practice because not all studies **report** the (log-)hazard ratio and its standard error. @parmar1998extracting describe various methods to derive the log-hazard ratio and its variance from **log-rank test** results and **survival curves**, among other things. Some of these methods are somewhat laborious, but the same arguably also applies to the way IRRs are derived from reported data.
```
<br></br>
## Effect Size Correction {#es-correction}
---
In Chapter \@ref(what-is-es), we covered that the effect size $\hat\theta_k$ we calculate for a study $k$ is an estimate of the study's true effect size $\theta_k$, and that $\hat\theta_k$ deviates from $\theta_k$ due to the sampling error $\epsilon_k$. Unfortunately, in many cases, this is an oversimplification. In the equation we discussed before, the only thing that separates the estimated effect size from the true effect is the sampling error. Following the formula, as the sampling error decreases, the effect size estimate "naturally" converges with the true effect size in the population.
This is not the case, however, when our effect size estimate is additionally burdened by systematic error, or **bias**. Such biases can have different reasons. Some are caused by the mathematical properties of an effect size metric itself, while other biases are created by the way a study was conducted.
We can deal with biases arising from the way a study was conducted by evaluating its risk of bias (see Chapter \@ref(data-extraction) for an introduction to risk of bias assessment tools and Chapter \@ref(risk-of-bias-plots) for ways to visualize the risk of bias). This judgment can then also be used to determine if the risk of bias is associated with differences in the pooled effects, for example in subgroup analyses (Chapter \@ref(subgroup)).
To deal with biases arising from the statistical properties of an effect size metric, we can use specific **effect size correction** methods to adjust our data before we begin with the meta-analysis.
In this chapter, we will cover three commonly used effect size correction procedures, and how we can implement them in _R_.
<br></br>
### Small Sample Bias {#hedges-g}
---
\index{Hedges' \textit{g}}
\index{Standardized Mean Difference}
In Chapter \@ref(s-md), we covered standardized mean differences (SMDs), an effect size we can calculate when we have continuous outcome data of two groups. The standardized mean difference, however, has been found to have an **upward bias** when the sample size of a study is small, especially when $n \leq$ 20 [@hedges1981distribution]. This small sample bias means that SMDs systematically overestimate the true effect size when the total sample size of a study is small--which is unfortunately often the case in practice.
It is therefore sensible to correct the standardized mean differences of all included studies for small-sample bias, which produces an effect size called Hedges' $g$. Hedges' $g$ was named after Larry Hedges, the inventor of this correction. The formula to convert uncorrected SMDs/Cohen's $d$ to Hedges' $g$ looks like this:
\begin{equation}
g = \text{SMD} \times (1-\frac{3}{4n-9})
(\#eq:es37)
\end{equation}
\index{esc Package}
In this formula, $n$ represents the total sample size of the study. We can easily convert uncorrected SMDs/Cohen's $d$ to Hedges' $g$ using the `hedges_g` function in the **{esc}** package.
```{r}
# Load esc package
library(esc)
# Define uncorrected SMD and sample size n
SMD <- 0.5
n <- 30
# Convert to Hedges g
g <- hedges_g(SMD, n)
g
```
As we can see in the output, Hedges' $g$ is smaller than the uncorrected SMD. Hedges' $g$ can never be larger than the uncorrected SMD, and the difference between the two metrics is larger when the sample size is smaller (see Figure \@ref(fig:dtog)).
```{r dtog, fig.height=2, fig.width=4, fig.cap='Corrected and uncorrected SMD of 0.2 for varying sample sizes.', echo=F, fig.align='center'}
library(esc)
library(ggplot2)
data = data.frame(es = c(rep(0.2, 97), hedges_g(0.2, 4:100)),
val = rep(4:100, 2),
esm = rep(c("d", "g"), each = 97))
cols = c("")
ggplot(data, aes(x = val, y = es, group = esm, color=esm, linetype = esm)) +
geom_line(size = 1) +
scale_linetype_manual(values=c(3,1),
name = " ", labels = c(bquote("Uncorrected"~italic(SMD)),
bquote("Hedges'"~italic(g)))) +
theme_classic() +
scale_color_manual(values = c("black", "gray40"),
name = " ", labels = c(bquote("Uncorrected"~italic(SMD)),
bquote("Hedges'"~italic(g)))) +
ylab(" ") +
xlab("Sample Size") +
theme(panel.background = element_rect(fill = "#FFFEFA",
size = 0),
plot.background = element_rect(fill = "#FFFEFA",
size = 0),
legend.background = element_rect(fill = "#FFFEFA",
size = 0))
```
It is important to note that the terms SMD and Hedges' $g$ are sometimes used interchangeably in research reports. When a study reports results as the SMD, it is, therefore, relevant to check if the authors indeed refer to the uncorrected standardized mean difference, or if the small-sample bias correction has been applied (meaning that Hedges' $g$ was used).
<br></br>
### Unreliability {#unrealiable}
---
\index{Unreliability Correction}
It is also possible that effect size estimates are biased due to **measurement error**. Most questionnaires or tests do not measure an outcome of interest perfectly. The less prone an instrument is to produce measurement errors, the more **reliable** it is. The reliability of an instrument measuring some variable $x$ can be expressed through a reliability coefficient $r_{xx}$, which can take values between 0 and 1. Reliability is often defined as the **test-retest-reliability**, and can be calculated by taking two or more measurements of the same person under similar circumstances within a short period of time, and then calculating the correlation between the values^[An accessible and more detailed discussion of various methods to estimate the reliability of an instrument can be found in Hunter and Schmidt [-@hunter2004methods], chapter 3.].
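As a brief illustration, a test-retest reliability coefficient can be obtained by correlating two measurements of the same individuals. The code below is only a simulated sketch:
```{r}
set.seed(123)
# Simulate two measurements of the same construct in 50 people,
# taken a short time apart
true_score <- rnorm(50)
t1 <- true_score + rnorm(50, sd = 0.5)
t2 <- true_score + rnorm(50, sd = 0.5)
# Test-retest reliability, estimated as the correlation
# between the two measurements
cor(t1, t2)
```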
\index{Attenuation}
When we examine the relationship of two continuous variables, a lack of reliability in one or both of the instruments used to assess these variables can lead to a phenomenon called **attenuation**. This problem was described as early as 1904 by the famous psychologist Charles Spearman [-@spearman1904reprinted]. When we calculate a correlation, for example, and one or both variables are measured with error, this causes us to **underestimate** the true correlation. The correlation is **diluted**. But there is good news: if we have an estimate of the (un)reliability of a measurement, it is possible to correct for this attenuation in order to get a better estimate of the true effect size.
John Hunter and Frank Schmidt, two important contributors to the field of meta-analysis, have developed and promoted a method through which a correction for attenuation can be conducted as part of meta-analyses [@hunter2004methods, chapters 3 and 7]. This correction is one of several other procedures, which together are sometimes called "Hunter and Schmidt techniques" or the "Hunter and Schmidt method" [@hough1994comparison].
Hunter and Schmidt's correction for attenuation can be applied to (product-moment) correlations and standardized mean differences. First, let us assume that we want to correct for the unreliability in the measurement of variable $x$ when we calculate the product-moment correlations $r_{xy}$ of studies as part of our meta-analysis. If we know the reliability in the measurement of $x$, denoted by $r_{xx}$, we can calculate a **corrected version** of the correlation, ${r_{xy}}_{c}$:
\begin{equation}
{r_{xy}}_{c} = \frac{r_{xy}}{\sqrt{r_{xx}}}
(\#eq:es38)
\end{equation}
When outcome $x$ was observed in two groups, and our goal is to calculate the standardized mean difference between those groups, the correction can be conducted in a similar way to obtain $\text{SMD}_c$:
\begin{equation}
\text{SMD}_c = \frac{\text{SMD}}{\sqrt{r_{xx}}}
(\#eq:es39)
\end{equation}
When we calculate a product-moment correlation using two continuous variables $x$ and $y$, it is also possible to correct for the unreliability of both $x$ and $y$, provided we also know $y$'s reliability coefficient $r_{yy}$:
\begin{equation}
{r_{xy}}_{c} = \frac{r_{xy}}{\sqrt{r_{xx}}\sqrt{r_{yy}}}
(\#eq:es40)
\end{equation}
Lastly, we also have to correct the standard error. The standard error is corrected in the same way as the effect size itself. If we want to correct one variable $x$, we can use this formula:
\begin{equation}
SE_c = \frac{SE}{\sqrt{r_{xx}}}
(\#eq:es41)
\end{equation}
If we want to correct a product-moment correlation for both $x$ and $y$, we can use this formula:
\begin{equation}
SE_c = \frac{SE}{\sqrt{r_{xx}}\sqrt{r_{yy}}}
(\#eq:es42)
\end{equation}
\index{Hedges' \textit{g}}
\index{Fisher's \textit{z}}
After the correlation or SMD has been corrected, it is possible to apply other common transformations, such as converting ${r_{xy}}_c$ to Fisher's $z$ (Chapter \@ref(cors)) or $\text{SMD}_c$ to Hedges' $g$ (Chapter \@ref(hedges-g)).
Let us try out the correction procedure in an example using _R_.
```{r}
# Define uncorrected correlation and SMD with their standard error
r_xy <- 0.34
se_r_xy <- 0.09
smd <- 0.65
se_smd <- 0.18
# Define reliabilities of x and y
r_xx <- 0.8
r_yy <- 0.7
# Correct SMD for unreliability in x
smd_c <- smd/sqrt(r_xx)
smd_c
se_c <- se_smd/sqrt(r_xx)
se_c
# Correct correlation for unreliability in x and y
r_xy_c <- r_xy/(sqrt(r_xx)*sqrt(r_yy))
r_xy_c
se_c <- se_r_xy/(sqrt(r_xx)*sqrt(r_yy))
se_c
```
Take a close look at the results in this example. We see that due to the correction, the correlation and SMD are larger than the initial uncorrected values. However, we also see that the standard errors increase. This result is intended; we correct the standard error so that it also incorporates the measurement error we assume for our data.
It is common in some fields, for example in organizational psychology, to apply attenuation corrections. However, in other disciplines, including the biomedical field, this procedure is rarely used. In meta-analyses, we can only perform a correction for unreliability if the reliability coefficient $r_{xx}$ (and $r_{yy}$) is reported in each study.
Very often, this is not the case. In this scenario, we may assume a value for the reliability of the instrument based on previous research. However, given that the correction has a large impact on the value of the effect size, taking an inappropriate estimate of $r_{xx}$ can distort the results considerably. Also, it is not possible to only correct **some** effect sizes in our meta-analysis, while leaving others uncorrected. Due to these reasons, the applicability of the reliability correction is unfortunately often limited in practice.
<br></br>
### Range Restriction {#range}
---
\index{Range Restriction Correction}
Another effect size adjustment proposed by Hunter and Schmidt [-@hunter2004methods, chapters 3 and 7] deals with the problem of range restriction. Range restriction is a phenomenon which occurs when the variation in some variable $x$ is smaller in a study than in the actual population of interest. This often happens when a study recruited a very selective sample of individuals which may not represent the population as a whole.
For example, consider the case where a study reports the correlation between the age of a participant and her or his cognitive functioning. Intuitively, one may assume that there is indeed an association between these variables. However, if the study only included participants which were 65 to 69 years old, it is very unlikely that a (high) correlation will be found between the two variables. This is because age in the study sample is highly range restricted. There is no real variation in age, which means that this variable can not be a good predictor of cognitive abilities.
Like unreliability of our measurement instruments (see previous chapter), this leads to an artificial attenuation in the effects we calculate for a study: even when there is in fact an important association, we are not able to detect it.
It is possible to correct for range restriction in SMDs or correlations $r_{xy}$. However, this requires that we know (or estimate) the unrestricted standard deviation $s_{\text{unrestricted}}$ of our population of interest. The population of interest is determined by the research question of our meta-analysis.
For example, if we want to examine the relationship between age and cognitive functioning in **older age**, we might want to search for an estimate of the standard deviation in large representative samples of individuals who are older than 65 (this is commonly how "older person" is defined in research). Of course, this is still a range restriction, but it restricts age to a range that **matters**, because it reflects the study population we are dealing with in our meta-analysis.
To correct for range restriction, we have to calculate $U$, the ratio between the unrestricted population standard deviation $s_{\text{unrestricted}}$, and the standard deviation of the restricted variable in our study, $s_{\text{restricted}}$.
\begin{equation}
U = \frac{s_{\text{unrestricted}}}{s_{\text{restricted}}}
(\#eq:es43)
\end{equation}
The value of $s_{\text{unrestricted}}$ can be obtained, for example, from previous representative studies which assessed the variable of interest. We can then use $U$ to correct the value of a correlation $r_{xy}$ using this formula:
\begin{equation}
{r_{xy}}_c = \frac{U\times r_{xy}}{\sqrt{(U^2-1)r_{xy}^2+1}}
(\#eq:es44)
\end{equation}
This lets us obtain the corrected correlation ${r_{xy}}_c$. The same formula can also be used to calculate a corrected version of the SMD:
\begin{equation}
\text{SMD}_c = \frac{U\times \text{SMD}}{\sqrt{(U^2-1)\text{SMD}^2+1}}
(\#eq:es45)
\end{equation}
The standard errors of $r_{xy}$ and SMD, respectively, must also be corrected using these formulas:
\begin{equation}
SE_{{r_{xy}}_c} = \frac{{r_{xy}}_c}{r_{xy}}SE_{r_{xy}}
(\#eq:es46)
\end{equation}
\begin{equation}
SE_{{\text{SMD}}_c} = \frac{{\text{SMD}}_c}{\text{SMD}}SE_{\text{SMD}}
(\#eq:es47)
\end{equation}
\index{Hedges' \textit{g}}
\index{Fisher's \textit{z}}
After the correlation or SMD has been corrected, it is possible to apply other common transformations, such as converting ${r_{xy}}_c$ to Fisher's $z$ (Chapter \@ref(pearson-cors)) or $\text{SMD}_c$ to Hedges' $g$ (see Chapter \@ref(hedges-g)). Let us now try out the correction using _R_.
```{r}
# Define correlation to correct
r_xy <- 0.34
se_r_xy <- 0.09
# Define restricted and unrestricted SD
sd_restricted <- 11
sd_unrestricted <- 18
# Calculate U
U <- sd_unrestricted/sd_restricted
# Correct the correlation
r_xy_c <- (U*r_xy)/sqrt((U^2-1)*r_xy^2+1)
r_xy_c
# Correct the standard error
se_r_xy_c <- (r_xy_c/r_xy)*se_r_xy
se_r_xy_c
```
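The same approach can be used for standardized mean differences. Here is a minimal sketch, reusing the value of `U` from above together with a hypothetical SMD and standard error:
```{r}
# Define SMD to correct (hypothetical values)
smd <- 0.65
se_smd <- 0.18
# Correct the SMD for range restriction
smd_c <- (U*smd)/sqrt((U^2-1)*smd^2+1)
smd_c
# Correct the standard error
se_smd_c <- (smd_c/smd)*se_smd
se_smd_c
```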
Like other Hunter and Schmidt adjustments, corrections of range restriction are more commonly found in some research areas than in others. When we decide to apply a correction for range restriction, it is important that the correction is performed for **all** effect sizes in our meta-analysis. It is technically possible to correct for range restriction in every meta-analysis, but often, this is not necessary.
In practice, it is hardly ever the case that each study perfectly represents the scope of our meta-analysis. In fact, the purpose of meta-analysis is to go **beyond** the results of individual studies. A correction of range restriction may therefore only be necessary when the range of several studies is heavily restricted.
\index{Psychometric Meta-Analysis}
```{block, type='boxinfo'}
**Further Reading**
\vspace{2mm}
In this guide, we only cover corrections for unreliability and range restriction, because these problems are most commonly found in practice. However, Hunter and Schmidt have proposed various other kinds of artifact corrections. Along with a few additional methods, these techniques are sometimes called **psychometric meta-analysis**.
If you want to learn more about the Hunter and Schmidt methods, you can have a look at their book **Methods of Meta-Analysis** [@hunter2004methods], which provides an accessible and comprehensive overview. A shorter introduction can also be found in Borenstein et al. [-@borenstein2011introduction], chapter 38.
Many of the techniques of Hunter and Schmidt are also implemented in an _R_ package called [**{psychmeta}**](https://psychmeta.com/) [@psychmeta].
```
<br></br>
## Common Problems
---
In this chapter, we want to devote a little more time to problems that we are often faced with in practice when calculating effect sizes. First, we will discuss what we can do when effect size data is reported in different formats. After that, we examine the unit-of-analysis problem, which has implications for the meta-analytic pooling in later steps.
<br></br>
### Different Effect Size Data Formats {#es-formats-different}
---
\index{Analysis of Variance}
When we described effect size metrics in the last chapters, we also mentioned the type of variables we need as columns in our data set. These variables are needed so that _R_ functions can calculate the effect sizes, and perform a meta-analysis for us. To calculate a meta-analysis of between-group standardized mean differences, for example, we have to prepare the mean, standard deviation, and sample size of both groups.
If we can extract this information from all studies, everything is fine. In practice, however, one may soon find that not all studies report their results in a suitable format. Some studies, for example, may not report the raw data of two groups, but only a calculated standardized mean difference, and its confidence interval. Others may only report the results of a $t$-test or **analysis of variance** (ANOVA) examining the difference between two groups.
If this is the case, it often becomes impossible to use raw effect size data for our meta-analysis. Instead, we have to **pre-calculate** the effect size of each study before we can pool them. In Chapter \@ref(what-is-es), we already found out that the minimum information we need to do a meta-analysis is the effect size and standard error of a study. Therefore, as long as we can transform the results into an estimate of the effect size and its standard error, a study can be included. In Chapter \@ref(es-calc) in the "Helpful Tools" section, we present several effect size converters which can help you to derive an effect size from other types of reported data.
However, it is still possible that there are studies for which effect sizes cannot be calculated, even with these tools. As mentioned in Chapter \@ref(study-selection), one remaining possibility under such circumstances is to contact the authors of the respective publication several times, and ask them if they can provide the data you need to calculate the effect size. If this also fails, the study has to be excluded.
In Chapter \@ref(pre-calculated-es), we will learn about a special function in _R_ called `metagen`. This function allows us to perform a meta-analysis of effect size data that had to be pre-calculated. To use the function, we have to prepare the following columns in our data set:
* **`TE`**. The calculated effect size of each study.
* **`seTE`**. The standard error of each effect size.
<br></br>
### The Unit-of-Analysis Problem {#unit-of-analysis}
---
\index{Unit-of-Analysis Problem}
It is not uncommon that a study contributes more than one effect size to our meta-analysis. In particular, it may be that (1) a study included more than two groups, or that (2) a study measured an outcome using two or more instruments. Both cases cause problems. If studies contribute more than one effect size in a meta-analysis, we violate one of its core assumptions: that each effect size in a meta-analysis is **independent** [@higgins2019cochrane, chapters 6.2 and 23; @borenstein2011introduction, chapter 25]. If this assumption is not met, we are dealing with a **unit-of-analysis** problem.
\index{Double-Counting}
Let us begin with the first case, where a study has more than two groups; for example one group examining treatment A, another one in which treatment B is administered, and a control group C. We can calculate **two** effect sizes for this study. Depending on the outcome data, these can be risk, odds or incidence rate ratios, or standardized mean differences. We have one effect size $\hat\theta_{\text{A-C}}$ comparing treatment A to control, and another effect size $\hat\theta_{\text{B-C}}$, which expresses the effect of treatment B compared to control. When both $\hat\theta_{\text{A-C}}$ and $\hat\theta_{\text{B-C}}$ are included in the same meta-analysis, these effect sizes are not independent, because the information in C is included twice. This issue is also known as **double-counting**.
Due to the double-counting of C, the two effect sizes are **correlated**. If the sample size is equal in all groups, we know that this correlation is $r =$ 0.5 [@borenstein2011introduction, chapter 25]. This is because A and B are independent groups, and therefore uncorrelated ($r=$ 0), while the control group data used in both effect sizes are identical, which corresponds to a perfect correlation of 1; the midpoint of these two values is 0.5. Double-counting of a group also leads us to overestimate the precision of the affected effect sizes (i.e. their standard errors become too small). This inflates the weight we give these effects in our meta-analysis, and ultimately distorts our results. There are three ways to deal with this issue:
1. **Split the sample size of the shared group**. This means splitting the sample size of group C (e.g. $n =$ 200) evenly between the comparison with A and the comparison with B when we calculate the effect sizes. If we are dealing with binary outcome data, the number of events is also split evenly. In our example, we would calculate the two effect sizes as before, but now we pretend that C only consisted of 100 individuals in both calculations. This approach solves the problem that the precision of the effect sizes is artificially inflated due to double-counting. However, it is still sub-optimal, because the effect sizes will remain correlated [@higgins2019cochrane, chapter 23.3.4].
2. **Remove groups**. A brute force approach is to simply remove one comparison, e.g. $\hat\theta_{\text{B-C}}$, entirely from the meta-analysis. This solves the unit-of-analysis problem, but causes new issues. If we simply discard one effect size, we lose potentially relevant information.
3. **Combine groups**. This approach involves combining the results of two groups so that only one comparison remains. In our example, this would mean to combine the data of A and B and then compare the pooled results with C. This is relatively easy for binary outcome data, where we only have to sum up the number of participants and the number of events in both groups. If we have continuous outcome data, i.e. means and standard deviations, things are a little more complicated. In Chapter \@ref(pool-groups) in the "Helpful Tools" section, you can find an _R_ function which allows us to combine such data. By combining groups, we avoid both double-counting and correlated effect sizes. This is why this approach is also recommended by Cochrane [@higgins2019cochrane, chapter 23.3.4]. Nevertheless, the method also has its drawbacks. It is possible that two groups are so different that we lump something together which can not actually be compared. Imagine that the treatments in group A and B were completely different, with A being a state-of-the-art intervention, and B being an outdated approach with a limited evidence base. If we combine these two treatments and find no effect, it is nearly impossible to disentangle if this is true for both types of interventions, or if the ineffectiveness of B simply diluted the effects of A. Approaches (1) and (2) may therefore be used when two groups are too dissimilar.
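To illustrate the last approach for binary outcome data, here is a minimal sketch with invented numbers; for continuous outcome data, the pooling function presented in Chapter \@ref(pool-groups) can be used instead.
```{r}
# Invented example: events and sample sizes in arms A, B and control C
event_A <- 14; n_A <- 60
event_B <- 19; n_B <- 65
event_C <- 30; n_C <- 62
# Combine arms A and B into one treatment group
event_treat <- event_A + event_B
n_treat <- n_A + n_B
# Calculate a single risk ratio comparing the combined group with C
rr <- (event_treat/n_treat)/(event_C/n_C)
rr
```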
\index{Multilevel Meta-Analysis}
The unit-of-analysis problem also arises when a study measured an outcome using multiple instruments. This is commonly the case when there is no clear "gold standard" determining how a variable of interest should be measured. If we calculate an effect size for each of these measurements and include them into our meta-analysis, this also results in double-counting. Furthermore, the effect sizes will be correlated, because the same sample was used to measure the effects. There are three approaches to deal with this situation:
- First, we can simply select one instrument per study. It is important that this selection is done in a systematic and reproducible way. At best, our analysis plan (Chapter \@ref(analysis-plan)) should already define a hierarchy of instruments for our meta-analysis. This hierarchy can be based on previous evidence on the reliability of certain instruments, or based on which type of measurement reflects the content of our research question best. The hierarchy then clearly determines which instrument we select when more than one is available.
- Alternatively, we can also use the calculated effect sizes and aggregate them, so that each study only provides one (aggregated) effect size. This is somewhat of a "brute force" approach. It requires us to specify how strongly effect sizes are correlated within studies, but this value is typically not known. In Chapter \@ref(aggregate-es), we present a function which allows to aggregate pre-calculated effect sizes into one combined estimate for each study.
- The third approach is to include data from all available instruments, and use meta-analytic models which can account for the fact that studies in our meta-analysis contribute more than one effect size. This can be achieved by **"three-level" meta-analysis** models, which we will examine in Chapter \@ref(multilevel-ma).
$$\tag*{$\blacksquare$}$$
<br></br>
## Questions & Answers
```{block, type='boxquestion'}
**Test your knowledge!**
\vspace{4mm}
1. Is there a clear definition of the term "effect size"? What do people refer to when they speak of effect sizes?
\vspace{-2mm}
2. Name a primary reason why observed effect sizes deviate from the true effect size of the population. How can it be quantified?
\vspace{-2mm}
3. Why are large studies better estimators of the true effect than small ones?
\vspace{-2mm}
4. What criteria does an effect size metric have to fulfill to be usable for meta-analyses?
\vspace{-2mm}
5. What does a standardized mean difference of 1 represent?
\vspace{-2mm}
6. What kind of transformation is necessary to pool effect sizes based on ratios (e.g. an odds ratio) using the inverse-variance method?
\vspace{-2mm}
7. Name three types of effect size corrections.
\vspace{-2mm}
8. When does the unit-of-analysis problem occur? How can it be avoided?
\vspace{4mm}
**Answers to these questions are listed in [Appendix A](https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/qanda.html#qanda3) at the end of this book.**
```
<br></br>
## Summary
* Effect sizes are the building blocks of meta-analyses. To perform a meta-analysis, we need at least an estimate of the effect size and its standard error.
* The standard error of an effect size represents how **precise** the study's estimate of the effect is. Meta-analysis gives effect sizes with a greater precision a higher weight because they are better estimators of the true effect.
* There are various effect sizes we can use in meta-analyses. Common ones are "one-variable" relationship measures (such as means and proportions), correlations, (standardized) mean differences, as well as risk, odds, and incidence rate ratios.
* Effect sizes can also be biased, for example by measurement error and range restriction. There are formulas to correct for some biases, including the small sample bias of standardized mean differences, attenuation due to unreliability, as well as range restriction problems.
* Other common problems are that studies report the data needed to calculate effect sizes in different formats, as well as the unit-of-analysis problem, which arises when studies contribute more than one effect size.
# Pooling Effect Sizes {#pooling-es}
---
<img src="_figs/pooling_es.jpg" />
<br></br>
<span class="firstcharacter">A</span> long and winding road already lies behind us. Fortunately, we have now reached the core part of every meta-analysis: the pooling of effect sizes. We hope that you were able to resist the temptation of starting directly with this chapter. We have already discussed various topics in this book, including the definition of research questions, guidelines for searching, selecting, and extracting study data, as well as how to prepare our effect sizes.
Thorough preparation is a key ingredient of a good meta-analysis, and will be immensely helpful in the steps that are about to follow. We can assure you that the time you spent working through the previous chapters was well invested.
\index{meta Package}
There are many packages which allow us to pool effect sizes in _R_. Here, we will focus on functions of the **{meta}** package, which we already installed in Chapter \@ref(packages). This package is very user-friendly and provides us with nearly all important meta-analysis results using just a few lines of code. In the previous chapter, we covered that effect sizes come in different "flavors", depending on the outcome of interest. The **{meta}** package contains specialized meta-analysis functions for each of these effect size metrics. All of the functions also follow nearly the same structure.
Thus, once we have a basic understanding of how **{meta}** works, coding meta-analyses becomes straightforward, no matter which effect size we are focusing on. In this chapter, we will cover the general structure of the **{meta}** package. And of course, we will also explore the meta-analysis functions of the package in greater detail using hands-on examples.
The **{meta}** package allows us to tweak many details about the way effect sizes are pooled. As we previously mentioned, meta-analysis comes with many "researcher degrees of freedom". There are a myriad of choices concerning the statistical techniques and approaches we can apply, and if one method is better than the other often depends on the context.
\index{Fixed-Effect Model}
\index{Random-Effects Model}
Before we begin with our analyses in _R_, we therefore have to get a basic understanding of the statistical assumptions of meta-analyses, and the maths behind it. Importantly, we will also discuss the "idea" behind meta-analyses. In statistics, this "idea" translates to a **model**, and we will have a look at what the meta-analytic model looks like.
As we will see, the nature of the meta-analysis requires us to make a fundamental decision right away: we have to assume either a **fixed-effect model** or a **random-effects model**. Knowledge of the concept behind meta-analytic pooling is needed to make an informed decision which of these two models, along with other analytic specifications, is more appropriate in which context.
<br></br>
## The Fixed-Effect and Random-Effects Model {#fem-rem}
---
Before we specify the meta-analytic model, we should first clarify what a statistical model actually is. Statistics is full of "models", and it is likely that you have heard the term in this context before. There are "linear models", "generalized linear models", "mixture models", "gaussian additive models", "structural equation models", and so on.
The ubiquity of models in statistics indicates how important this concept is. In one way or the other, models build the basis of virtually all parts of our statistical toolbox. There is a model behind $t$-tests, ANOVAs, and regression. Every hypothesis test has its corresponding statistical model.
When defining a statistical model, we start with the information that is already given to us. This is, quite literally, our **data**^["Data" is derived from the Latin word **datum**, meaning "a thing that is given".]. In meta-analyses, the data are effect sizes that were observed in the included studies. Our model is used to describe the process through which these observed data were generated.
The data are seen as the product of a **black box**, and our model aims to illuminate what is going on inside that black box.
```{r model, out.width='50%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/model_concept_sep.png')
```
Typically, a statistical model is like a special type of **theory**. Models try to explain the mechanisms that generated our observed data, especially when those mechanisms themselves cannot be directly observed. They are an **imitation of life**, using a mathematical formula to describe processes in the world around us in an idealized way.
This explanatory character of models is deeply ingrained in modern statistics, and meta-analysis is no exception. The conceptualization of models as a vehicle for explanation is the hallmark of a statistical "culture" to which, as Breiman [-@breiman2001statistical] famously estimated, 98% of all statisticians adhere.
By specifying a statistical model, we try to find an approximate representation of the "reality" behind our data. We want a mathematical formula that explains how we can find the **true** effect size underlying all of our studies, based on their observed results. As we learned in Chapter \@ref(what-are-mas), one of the ultimate goals of meta-analysis is to find one numerical value that characterizes our studies **as a whole**, even though the observed effect sizes vary from study to study. A meta-analysis model must therefore explain the reasons why and how much observed study results differ, even though there is only one overall effect.
There are two models which try to answer exactly this question, the **fixed-effect model** and the **random-effects model**. Although both are based on different assumptions, there is still a strong link between them, as we will soon see.
<br></br>
### The Fixed-Effect Model {#fem}
---
\index{Fixed-Effect Model}
\index{Sampling Error}
The fixed-effect model assumes that all effect sizes stem from a single, homogeneous population. It states that all studies share the **same** true effect size. This true effect is the overall effect size we want to calculate in our meta-analysis, denoted with $\theta$.
According to the fixed-effect model, the only reason why a study $k$'s observed effect size $\hat\theta_k$ deviates from $\theta$ is because of its sampling error $\epsilon_k$. The fixed-effect model tells us that the process generating studies' different effect sizes, the content of the "black box", is simple: all studies are estimators of the same true effect size. Yet, because every study can only draw somewhat bigger or smaller samples of the infinitely large study population, results are burdened by sampling error. This sampling error causes the observed effect to deviate from the overall, true effect.
We can describe the relationship like this [@borenstein2011introduction, chapter 11]:
\begin{equation}
\hat\theta_k = \theta + \epsilon_k
(\#eq:pes1)
\end{equation}
To the alert reader, this formula may seem oddly similar to the one in Chapter \@ref(what-is-es). You are not mistaken. In the previous formula, we defined that an observed effect size $\hat\theta_k$ of some study $k$ is an estimator of that study's true effect size $\theta_k$, burdened by the study's sampling error $\epsilon_k$.
There is only a tiny, but insightful difference between the previous formula, and the one of the fixed-effect model. In the formula of the fixed-effect model, the true effect size is not symbolized by $\theta_k$, but by $\theta$; the subscript $k$ is dropped.
Previously, we only made statements about the true effect size of **one** individual study $k$. The fixed-effect model goes one step further. It tells us that if we find the true effect size of study $k$, this effect size is not only true for $k$ specifically, but for **all** studies in our meta-analysis. A **study's** true effect size $\theta_k$, and the **overall**, pooled effect size $\theta$, are **identical**.
```{block2, type='boxinfo'}
The **idea behind the fixed-effect model** is that observed effect sizes may vary from study to study, but this is only because of the sampling error. In reality, their true effect sizes are **all the same**: they are fixed. For this reason, the fixed-effect model is sometimes also referred to as the **"equal effects"** or **"common effect"** model.^[The term "equal effects model" is used by the **{metafor}** package, while **{meta}** uses the term "common effect model".]
```
The formula of the fixed-effect model tells us that there is only one reason why an observed effect size $\hat\theta_k$ deviates from the true overall effect: the sampling error $\epsilon_k$. In Chapter \@ref(what-is-es), we already discussed that there is a link between the sampling error and the sample size of a study. All things being equal, as the sample size becomes larger, the sampling error becomes smaller. We also learned that the sampling error can be represented numerically by the **standard error**, which also grows smaller when the sample size increases.
Although we do not know the true overall effect size of our studies, we can exploit this relationship to arrive at the best possible estimate of the true overall effect, $\hat\theta$. We know that a smaller standard error corresponds with a smaller sampling error; therefore, studies with a small standard error should be better estimators of the true overall effect than studies with a large standard error.
We can illustrate this with a simulation. Using the `rnorm` function we already used before, we simulated a selection of studies in which the true overall effect is $\theta = 0$. We took several samples but varied the sample size so that the standard error differs between the "observed" effects. The results of the simulation can be found in Figure \@ref(fig:funnel1).
```{r funnel1, fig.height=5, fig.width=5, fig.cap='Relationship between effect size and standard error.', echo=F, fig.align='center', message=FALSE, out.width="50%"}
library(plotrix)
library(data.table)
library(ggplot2)
set.seed(1234)
res = list()
for (i in 5:54){
vec = list()
for (x in 1:50){
dat = rnorm(i, 0, sd = 10)
vec[[x]] = data.frame(value = mean(dat),
SE = std.error(dat))
}
vec = do.call(rbind, vec)
res[[i-4]] = vec
}
res = rbindlist(res)
ggplot(data = res, aes(x = value, y = log(SE))) +
geom_point(alpha = 0.5, size = 0.8) +
scale_y_reverse() +
geom_vline(xintercept = 0, color = "gray", size = 2, alpha = 0.5) +
theme_classic() +
xlab("Effect Size") +
ylab("log-Standard Error") +
theme(panel.background = element_rect(fill = "#FFFEFA",
size = 0),
plot.background = element_rect(fill = "#FFFEFA",
size = 0))
```
The results of the simulation show an interesting pattern. We see that effect sizes with a small sampling error are tightly packed around the true effect size $\theta = 0$. As the standard error on the y-axis^[We log-transformed the standard error before plotting so that the pattern can be more easily seen.] increases, the **dispersion** of effect sizes becomes larger and larger, and the observed effects deviate more and more from the true effect.
This behavior can be predicted by the formula of the fixed-effect model. We know that studies with a smaller standard error have a smaller sampling error, and their estimate of the overall effect size is therefore more likely to be closer to the truth.
\index{Weight}
We have seen that, while all observed effect sizes are estimators of the true effect, some are better than others. When we pool the effects in our meta-analysis, we should therefore give effect sizes with a higher **precision** (i.e. a smaller standard error) a greater **weight**. If we want to calculate the pooled effect size under the fixed-effect model, we therefore simply use a **weighted average** of all studies.
To calculate the weight $w_k$ for each study $k$, we can use the standard error, which we square to obtain the **variance** $s^2_k$ of each effect size. Since a **lower** variance indicates higher precision, the **inverse** of the variance is used to determine the weight of each study.
\begin{equation}
w_k = \frac{1}{s^2_k}
(\#eq:pes2)
\end{equation}
Once we know the weights, we can calculate the weighted average, our estimate of the true pooled effect $\hat\theta$. We only have to multiply each study's effect size $\hat\theta_k$ with its corresponding weight $w_k$, sum the results across all studies $K$ in our meta-analysis, and then divide by the sum of all the individual weights.
\begin{equation}
\hat\theta = \frac{\sum^{K}_{k=1} \hat\theta_kw_k}{\sum^{K}_{k=1} w_k}
(\#eq:pes3)
\end{equation}
\index{Inverse-Variance Weighting}
\index{Mantel-Haenszel Method}
\index{Peto Method}
This method is the most common approach to calculate average effects in meta-analyses. Because we use the inverse of the variance, it is often called **inverse-variance weighting** or simply **inverse-variance meta-analysis**.
For binary effect size data, there are alternative methods to calculate the weighted average, including the **Mantel-Haenszel**, **Peto**, or the sample size weighting method by Bakbergenuly [-@bakbergenuly2020methods]. We will discuss these methods in Chapter \@ref(pooling-or-rr).
The **{meta}** package makes it very easy to perform a fixed-effect meta-analysis. Before we use it, however, let us try out the inverse-variance pooling "manually" in _R_. In our example, we will use the `SuicidePrevention` data set, which we already imported in Chapter \@ref(data-prep-R).
\index{dmetar Package}
```{block2, type='boxdmetar'}
**The "SuicidePrevention" Data Set**
The `SuicidePrevention` data set is also included directly in the **{dmetar}** package. If you have installed **{dmetar}**, and loaded it from your library, running `data(SuicidePrevention)` automatically saves the data set in your _R_ environment. The data set is then ready to be used. If you do not have **{dmetar}** installed, you can download the data set as an _.rda_ file from the [Internet](https://www.protectlab.org/meta-analysis-in-r/data/suicideprevention.rda), save it in your working directory, and then click on it in your R Studio window to import it.
```
\index{esc Package}
\index{Standardized Mean Difference}
\index{Hedges' \textit{g}}
The `SuicidePrevention` data set contains raw effect size data, meaning that we have to calculate the effect sizes first. In this example, we calculate the small-sample adjusted standardized mean difference (Hedges' $g$). To do this, we use the `esc_mean_sd` function in the **{esc}** package (Chapter \@ref(b-group-smd)).
The function has an additional argument, `es.type`, through which we can specify that the small-sample correction should be performed (by setting `es.type = "g"`; Chapter \@ref(hedges-g)).
Since the release of _R_ version 4.2.1, we additionally have to plug our call to `esc_mean_sd` into the `pmap_dfr` function so that a standardized mean difference is calculated for each row in our data set:
```{r, message=F, eval=F}
# Load dmetar, esc and tidyverse (for pipe)
library(dmetar)
library(esc)
library(tidyverse)
# Load data set from dmetar
data(SuicidePrevention)
# Calculate Hedges' g and the Standard Error
# - We save the study names in "study".
# - We use the pmap_dfr function to calculate the effect size
# for each row.
SP_calc <- pmap_dfr(SuicidePrevention,
function(mean.e, sd.e, n.e, mean.c,
sd.c, n.c, author, ...){
esc_mean_sd(grp1m = mean.e,
grp1sd = sd.e,
grp1n = n.e,
grp2m = mean.c,
grp2sd = sd.c,
grp2n = n.c,
study = author,
es.type = "g") %>%
as.data.frame()})
# Let us catch a glimpse of the data
# The data set contains Hedges' g ("es") and standard error ("se")
glimpse(SP_calc)
```
```
## Rows: 9
## Columns: 9
## $ study <chr> "Berry et al.", "DeVries et …
## $ es <dbl> -0.14279447, -0.60770928, -0…
## $ weight <dbl> 46.09784, 34.77314, 14.97625…
## $ sample.size <dbl> 185, 146, 60, 129, 100, 220,…
## $ se <dbl> 0.1472854, 0.1695813, 0.2584…
## $ var <dbl> 0.02169299, 0.02875783, 0.06…
## $ ci.lo <dbl> -0.4314686, -0.9400826, -0.6…
## $ ci.hi <dbl> 0.145879624, -0.275335960, 0…
## $ measure <chr> "g", "g", "g", "g", "g", "g"…
```
Next, we use these results to apply the formula of the fixed-effect model:
```{r, message=F, eval=F}
# Calculate the inverse variance-weights for each study
SP_calc$w <- 1/SP_calc$se^2
# Then, we use the weights to calculate the pooled effect
pooled_effect <- sum(SP_calc$w*SP_calc$es)/sum(SP_calc$w)
pooled_effect
```
```
## [1] -0.2311121
```
The results of our calculations reveal that the pooled effect size, assuming a fixed-effect model, is $g \approx$ -0.23.
<br></br>
### The Random-Effects Model {#rem}
---
\index{Random-Effects Model}
As we have seen, the fixed-effect model is one way to conceptualize the genesis of our meta-analysis data, and how effects can be pooled. However, the important question is: does this approach adequately reflect reality?
The fixed-effect model assumes that all our studies are part of a homogeneous population and that the only cause for differences in observed effects is the sampling error of studies. If we were to calculate the effect size of each study without sampling error, all true effect sizes would be absolutely the same.
\index{Heterogeneity}
Subjecting this notion to a quick reality check, we see that the assumptions of the fixed-effect model might be too simplistic in many real-world applications. It is simply unrealistic that studies in a meta-analysis are always completely homogeneous. Studies will very often differ, even if only in subtle ways. The outcome of interest may have been measured in different ways. Maybe the type of treatment was not exactly the same or the intensity and length of the treatment. The target population of the studies may not have been exactly identical, or maybe there were differences in the control groups that were used.
It is likely that the studies in your meta-analysis will not only vary on one of these aspects but several ones at the same time. If this is true, we can anticipate considerable between-study **heterogeneity** in the true effects.
All of this casts the validity of the fixed-effect model into doubt. If some studies used different types of a treatment, for example, it seems perfectly normal that one format is more effective than the other. It would be far-fetched to assume that these differences are only noise, produced by the studies' sampling error.
Quite the opposite, there may be countless reasons why **real** differences exist in the **true** effect sizes of studies. The random-effects model addresses this concern. It provides us with a model that often reflects the reality behind our data much better.
\index{Sampling Error}
In the random-effects model, we want to account for the fact that effect sizes show more variance than when drawn from a single homogeneous population [@hedges1998fixed]. Therefore, we assume that effects of individual studies do not only deviate due to sampling error alone but that there is **another** source of variance.
This additional variance component is introduced by the fact that studies do not stem from one single population. Instead, each study is seen as an independent draw from a “universe” of populations.
```{block2, type='boxinfo'}
The random-effects model assumes that there is not only one true effect size but a **distribution** of true effect sizes. The goal of the random-effects model is therefore not to estimate the one true effect size of all studies, but the **mean** of the **distribution** of true effects.
```
Let us see how the random-effects model can be expressed in a formula. Similar to the fixed-effect model, the random-effects model starts by assuming that an observed effect size $\hat\theta_k$ is an estimator of the study's true effect size $\theta_k$, burdened by sampling error $\epsilon_k$:
\begin{equation}
\hat\theta_k = \theta_k + \epsilon_k
(\#eq:pes4)
\end{equation}
The fact that we use $\theta_k$ instead of $\theta$ already points to an important difference. The random-effects model only assumes that $\theta_k$ is the true effect size of **one** single study $k$. It stipulates that there is a second source of error, denoted by $\zeta_k$. This second source of error is introduced by the fact that even the true effect size $\theta_k$ of study $k$ is only part of an over-arching distribution of true effect sizes with mean $\mu$.
\begin{equation}
\theta_k = \mu + \zeta_k
(\#eq:pes5)
\end{equation}
The random-effects model tells us that there is a hierarchy of two processes happening inside our black box [@thompson2001multilevel]: the observed effect sizes of a study deviate from their true value because of the sampling error. But even the true effect sizes are only a draw from a universe of true effects, whose mean $\mu$ we want to estimate as the pooled effect of our meta-analysis.
By plugging the second formula into the first one (i.e. replacing $\theta_k$ with its definition in the second formula), we can express the random-effects model in one line [@borenstein2011introduction, chapter 12]:
\begin{equation}
\hat\theta_k = \mu + \zeta_k + \epsilon_k
(\#eq:pes6)
\end{equation}
This formula makes it clear that our observed effect size deviates from the pooled effect $\mu$ because of two error terms, $\zeta_k$ and $\epsilon_k$. This relationship is visualized in Figure \@ref(fig:random).
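To make these two error terms more tangible, here is a small simulation sketch in _R_. All values ($\mu$, $\tau$, and the standard errors) are purely made up for illustration; the code simply generates data the way the random-effects model assumes it arises.

```{r, eval=F}
set.seed(123)
k <- 10                       # number of studies
mu <- 0.3                     # mean of the distribution of true effects
tau <- 0.2                    # standard deviation of the true effects
se_k <- runif(k, 0.1, 0.4)    # standard errors of the individual studies

# True effects: theta_k = mu + zeta_k
theta_k <- rnorm(k, mean = mu, sd = tau)

# Observed effects: theta_hat_k = theta_k + epsilon_k
theta_hat_k <- rnorm(k, mean = theta_k, sd = se_k)
```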
\index{Exchangeability Assumption}
A crucial assumption of the random-effects model is that the size of $\zeta_k$ is **independent** of $k$. Put differently, we assume that there is nothing which indicates **a priori** that $\zeta_k$ in one study is higher than in another. We presuppose that the size of $\zeta_k$ is a product of chance, and chance alone.
This is known as the **exchangeability** assumption of the random-effects model [@higgins2009re; @lunn2012bugs, chapter 10.1]. All true effect sizes are assumed to be exchangeable in so far as we have nothing that could tell us how big $\zeta_k$ will be in some study $k$ before seeing the data.
\index{Heterogeneity}
```{block2, type='boxinfo'}
**Which Model Should I Use?**
\vspace{2mm}
In practice, it is very uncommon to find a selection of studies that is perfectly homogeneous. This is true even when we follow best practices, and try to make the scope of our analysis as precise as possible through our PICO (Chapter \@ref(research-question)).
\vspace{4mm}
In many fields, including medicine and the social sciences, it is therefore conventional to **always** use a random-effects model, since some degree of between-study heterogeneity can virtually always be anticipated. A fixed-effect model may only be used when we could not detect any between-study heterogeneity (we will discuss how this is done in Chapter \@ref(heterogeneity)) **and** when we have very good reasons to assume that the true effect is fixed. This may be the case when, for example, only exact replications of a study are considered, or when we meta-analyze subsets of one big study. Needless to say, this is seldom the case, and applications of the fixed-effect model "in the wild" are rather rare.
\vspace{4mm}
Even though it is conventional to use the random-effects model a priori, this approach is not undisputed. The random-effects model pays more attention to small studies when calculating the overall effect of a meta-analysis [@schwarzer2015meta, chapter 2.3]. Yet, small studies in particular are often fraught with biases (see Chapter \@ref(small-study-effects)). This is why some have argued that the fixed-effect model is (sometimes) preferable [@poole1999random; @furukawa2003low]. Stanley, Doucouliagos, and Ioannidis [-@stanley2022beyond] make a similar point and argue that, in some disciplines, a so-called "unrestricted weighted least squares" (UWLS) model should be used instead of the random-effects model.
```
```{r random, fig.cap='Illustration of parameters of the random-effects model.', out.width='60%', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/rem_sep2.png')
```
<br></br>
#### Estimators of the Between-Study Heterogeneity {#tau-estimators}
---
\index{Weight}
The challenge associated with the random-effects model is that we have to take the error $\zeta_k$ into account. To do this, we have to estimate the **variance** of the distribution of true effect sizes. This variance is known as $\tau^2$, or **tau-squared**. Once we know the value of $\tau^2$, we can include the between-study heterogeneity when determining the inverse-variance weight of each effect size.
In the random-effects model, we therefore calculate an adjusted **random-effects weight** $w^*_k$ for each observation. The formula looks like this:
\begin{equation}
w^*_k = \frac{1}{s^2_k+\tau^2}
(\#eq:pes7)
\end{equation}
\index{Inverse-Variance Weighting}
Using the adjusted random-effects weights, we then calculate the pooled effect size using the inverse variance method, just like we did using the fixed-effect model:
\begin{equation}
\hat\theta = \frac{\sum^{K}_{k=1} \hat\theta_kw^*_k}{\sum^{K}_{k=1} w^*_k}
(\#eq:pes8)
\end{equation}
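Before we turn to the estimators of $\tau^2$, here is a minimal sketch of random-effects pooling in _R_, reusing the `SP_calc` data from the fixed-effect example above. The value of $\tau^2$ used here is purely hypothetical; in practice, it has to be estimated from the data first.

```{r, eval=F}
# Purely hypothetical between-study heterogeneity variance
tau2 <- 0.01

# Random-effects weights and pooled effect
SP_calc$w_star <- 1/(SP_calc$se^2 + tau2)
sum(SP_calc$w_star*SP_calc$es)/sum(SP_calc$w_star)
```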
There are several methods to estimate $\tau^2$, most of which are too complicated to do by hand. Luckily, however, these estimators are implemented in the functions of the **{meta}** package, which does the calculations automatically for us. Here is a list of the most common estimators, and the code by which they are referenced in **{meta}**:
\index{DerSimonian-Laird Estimator}
\index{Restricted Maximum Likelihood Estimator}
\index{Maximum Likelihood}
\index{Sidik-Jonkman Estimator}
* The **DerSimonian-Laird** (`"DL"`) estimator [@dersimonian1986meta].
* The **Restricted Maximum Likelihood** (`"REML"`) or **Maximum Likelihood** (`"ML"`) procedures [@viechtbauer2005bias].
* The **Paule-Mandel** (`"PM"`) procedure [@paule1982consensus].
* The **Empirical Bayes** (`"EB"`) procedure [@sidik2019note], which is practically identical to the Paule-Mandel method.
* The **Sidik-Jonkman** (`"SJ"`) estimator [@sidik2005simple].
It is an ongoing research question which of these estimators performs best for different kinds of data. Whether one approach is better than another often depends on parameters such as the number of studies $k$, the number of participants $n$ in each study, how much $n$ varies from study to study, and how big $\tau^2$ is. Several studies have analyzed the bias of $\tau^2$ estimators under these varying scenarios [@veroniki2016methods; @viechtbauer2005bias; @sidik2007comparison; @langan2019comparison].
\index{Review Manager (RevMan)}
\index{Comprehensive Meta-Analysis (CMA)}
Arguably, the most frequently used estimator is the one by DerSimonian and Laird. The estimator is implemented in software that has commonly been used by meta-analysts in the past, such as **RevMan** (a program developed by Cochrane) or **Comprehensive Meta-Analysis**. It also used to be the default estimator in **{meta}**. Due to this historic legacy, one often finds research papers in which "using a random-effects model" is treated as synonymous with employing the DerSimonian-Laird estimator.
However, it has been found that this estimator can be biased, particularly when the number of studies is small and heterogeneity is high [@hartung1999alternative; @hartung2001refined; @hartung2001tests; @follmann1999valid; @makambi2004effect]. This is quite problematic because it is very common to find meta-analyses with few studies and high heterogeneity.
\index{Paule-Mandel Estimator}
\index{metafor Package}
\index{Sidik-Jonkman Estimator}
\index{Restricted Maximum Likelihood Estimator}
In an overview paper, Veroniki and colleagues [-@veroniki2016methods] reviewed evidence on the robustness of various $\tau^2$ estimators. They recommended the Paule-Mandel method for both binary and continuous effect size data, and the restricted maximum likelihood estimator for continuous outcomes. The restricted maximum-likelihood estimator is also the default method used by the **{metafor}** package.
A more recent simulation study by Langan and colleagues [-@langan2019comparison] came to a similar result but found that the Paule-Mandel estimator may be suboptimal when the sample size of studies varies drastically. Another study by Bakbergenuly and colleagues [-@bakbergenuly2020methods] found that the Paule-Mandel estimator is well suited especially when the number of studies is small. The Sidik-Jonkman estimator, also known as the **model error variance method**, is only well suited when $\tau^2$ is very large [@sidik2007comparison].
```{block2, type='boxinfo'}
**Which Estimator Should I Use?**
\vspace{2mm}
There are no iron-clad rules determining when exactly which estimator should be used. In many cases, there will only be minor differences in the results produced by various estimators, meaning that you should not worry about this issue **too** much.
\vspace{4mm}
When in doubt, you can always rerun your analyses using different $\tau^2$ estimators, and see if this changes the interpretation of your results. Here are a few tentative guidelines that you may follow in your own meta-analysis:
1. For effect sizes based on continuous outcome data, the restricted maximum likelihood estimator may be used as a first start.
2. For binary effect size data, the Paule-Mandel estimator is a good first choice, provided there is no extreme variation in the sample sizes.
3. When you have very good reason to believe that the heterogeneity of effects in your sample is very large, and if avoiding false positives has a very high priority, you may use the Sidik-Jonkman estimator.
4. If you want others to be able to replicate your results as precisely as possible outside _R_, the DerSimonian-Laird estimator is the method of choice.
\vspace{2mm}
```
Overall, estimators of $\tau^2$ fall into two categories. Some, like the DerSimonian-Laird and Sidik-Jonkman estimator, are based on **closed-form expressions**, meaning that they can be directly calculated using a formula.
The (restricted) maximum likelihood, Paule-Mandel and empirical Bayes estimators find the optimal value of $\tau^2$ through an **iterative algorithm**. The latter estimators may therefore sometimes take a little longer to calculate the results. In most real-world cases, however, these time differences are minuscule at best.
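To illustrate what such a closed-form expression looks like, here is a sketch of the DerSimonian-Laird estimator, once again reusing the `SP_calc` data from before. This is only meant to show the mechanics; in practice, **{meta}** performs these calculations for us.

```{r, eval=F}
# Fixed-effect weights and pooled effect
w <- 1/SP_calc$se^2
theta_fixed <- sum(w*SP_calc$es)/sum(w)

# Cochran's Q and the scaling factor C
Q <- sum(w*(SP_calc$es - theta_fixed)^2)
C <- sum(w) - sum(w^2)/sum(w)

# DerSimonian-Laird estimate of tau^2 (truncated at zero)
max(0, (Q - (nrow(SP_calc) - 1))/C)
```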
<br></br>
#### Knapp-Hartung Adjustments {#knapp-hartung}
---
\index{Knapp-Hartung Adjustment}
\index{Wald-Type Test}
\index{t-Distribution}
In addition to our selection of the $\tau^2$ estimator, we also have to decide if we want to apply so-called Knapp-Hartung adjustments^[This approach is also known as "Hartung-Knapp adjustments" or the "Hartung-Knapp-Sidik-Jonkman" (HKSJ) method.] [@knapp2003improved; @sidik2002simple]. These adjustments affect the way the standard error (and thus the confidence intervals) of our pooled effect size $\hat\theta$ is calculated.
The Knapp-Hartung adjustment tries to control for the uncertainty in our estimate of the between-study heterogeneity. While significance tests of the pooled effect usually assume a normal distribution (so-called **Wald-type** tests), the Knapp-Hartung method is based on a $t$-distribution. Knapp-Hartung adjustments can only be used in random-effects models, and usually cause the confidence intervals of the pooled effect to become slightly larger.
\index{Heterogeneity}
```{block, type='boxreport'}
**Reporting the Type of Model Used In Your Meta-Analysis**
\vspace{2mm}
It is highly advised to specify the type of model you used in the methods section of your meta-analysis report. Here is an example:
> _"As we anticipated considerable between-study heterogeneity, a random-effects model was used to pool effect sizes. The restricted maximum likelihood estimator (Viechtbauer, 2005) was used to calculate the heterogeneity variance $\tau^2$. We used Knapp-Hartung adjustments (Knapp & Hartung, 2003) to calculate the confidence interval around the pooled effect."_
```
Applying a Knapp-Hartung adjustment is usually sensible. Several studies [@inthout2014hartung; @langan2019comparison] showed that these adjustments can reduce the chance of false positives, especially when the number of studies is small.
The use of the Knapp-Hartung adjustment, however, is not uncontroversial. Wiksten and colleagues [-@wiksten2016hartung], for example, argued that the method can cause anti-conservative results in (rare) cases when the effects are very homogeneous.
<br></br>
## Effect Size Pooling in _R_ {#pooling-es-r}
---
\index{meta Package}
Time to put what we learned into practice. In the rest of this chapter, we will explore how we can run meta-analyses of different effect sizes directly in _R_. The **{meta}** package we will use to do this has a special structure. It contains several meta-analysis functions which are each focused on one type of effect size data. There is a set of parameters which can be specified in the same way across all of these functions; for example, whether we want to apply a fixed- or random-effects model, or which $\tau^2$ estimator should be used. Apart from that, there are **function-specific** arguments which allow us to tweak details of our meta-analysis that are only relevant for a specific type of data.
Figure \@ref(fig:metaflow) provides an overview of **{meta}**'s structure. To determine which function to use, we first have to clarify what kind of effect size data we want to synthesize. The most fundamental distinction is the one between **raw** and **pre-calculated** effect size data. We speak of "raw" data when we have all the necessary information needed to calculate the desired effect size stored in our data frame but have not yet calculated the actual effect size. The `SuicidePrevention` data set we used earlier contains raw data: the mean, standard deviation and sample size of two groups, which is needed to calculate the standardized mean difference.
We call effect size data "pre-calculated", on the other hand, when they already contain the final effect size of each study, as well as the standard error. If we want to use a corrected version of an effect metric (such as Hedges' $g$, Chapter \@ref(hedges-g)), it is necessary that this correction has already been applied to pre-calculated effect size data before we start the pooling.
```{r metaflow, out.width='100%', fig.cap='Conceptual overview of meta-analysis functions.', message = F, echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/meta_flow_sep.png')
```
If possible, it is preferable to use raw data in our meta-analysis. This makes it easier for others to understand how we calculated the effect sizes, and replicate the results. Yet, using raw data is often not possible in practice, because studies often report their results in a different way (Chapter \@ref(es-formats-different)).
This leaves us no other choice than to pre-calculate the desired effect size for each study right away so that all have the same format. Chapter \@ref(es-calc) in the “Helpful Tools” part of this book presents a few formulas which can help you to convert a reported effect size into the desired metric.
The function of choice for pre-calculated effect sizes is `metagen`. Its name stands for **gen**eric inverse variance meta-analysis. If we use `metagen` with binary data (e.g. proportions, risk ratios, odds ratios), it is important, as we covered in Chapter \@ref(ratios), that the effect sizes are log-transformed before the function is used.
When we can resort to raw effect size data, **{meta}** provides us with a specialized function for each effect size type. We can use the `metamean`, `metacont` and `metacor` function for means, (standardized) mean differences and correlations, respectively. We can pool (incidence) rates, proportions and incidence rate ratios using the `metarate`, `metaprop` and `metainc` functions. The `metabin` function can be employed when we are dealing with risk or odds ratios.
All meta-analysis functions in **{meta}** follow the same structure. We have to provide the functions with the (raw or pre-calculated) effect size data, as well as further arguments, which control the specifics of the analysis. There are six core arguments which can be specified in each function:
* **`studlab`**. This argument associates each effect size with a **study label**. If we have the name or authors of our studies stored in our data set, we simply have to specify the name of the respective column (e.g. `studlab = author`).
* **`sm`**. This argument controls the **summary measure**, the effect size metric we want to use in our meta-analysis. This option is particularly important for functions using raw effect size data. The **{meta}** package uses codes for different effect size formats, for example `"SMD"` or `"OR"`. The available summary measures are not the same in each function, and we will discuss the most common options in each case in the following sections.
* **`fixed`**. We need to provide this argument with a logical (`TRUE` or `FALSE`), indicating if a fixed-effect model meta-analysis should be calculated^[In older versions of **{meta}** (before version 5.0-0), this argument is called `comb.fixed`.].
* **`random`**. In a similar fashion, this argument controls if a random-effects model should be used. If both `fixed` and `random` are set to `TRUE`, both models are calculated and displayed^[In older versions of **{meta}** (before version 5.0-0), this argument is called `comb.random`.].
* **`method.tau`**. This argument defines the $\tau^2$ estimator. All functions use the codes for different estimators that we already presented in the previous chapter (e.g. for the DerSimonian-Laird method: `method.tau = "DL"`).
* **`hakn`**. This is yet another logical argument, and controls if the Knapp-Hartung adjustments should be applied when using the random-effects model.
* **`data`**. In this argument, we provide **{meta}** with the name of our meta-analysis data set.
* **`title`** (**not mandatory**). This argument takes a character string with the name of the analysis. While it is not essential to provide input for this argument, it can help us to identify the analysis later on.
There are also a few additional arguments which we will get to know in later chapters. In this guide, we will not be able to discuss **all** arguments of the **{meta}** functions: there are more than 100.
Thankfully, most of these arguments are rarely needed or have sensible defaults. When in doubt, you can always run the name of the function, preceded by a question mark (e.g. `?metagen`) in the _R_ console; this will open the function documentation.
\index{Function Argument}
\index{Position Matching}
\index{Documentation, _R_}
```{block, type='boxinfo'}
**Default Arguments & Position Matching**
\vspace{2mm}
For _R_ beginners, it is often helpful to learn about **default arguments** and **position-based matching** in functions.
\vspace{2mm}
Default arguments are specified by the person who wrote the function. They set a function argument to a predefined value, which is automatically used unless we explicitly provide a different value. In **{meta}** many, but not all arguments have default values.
\vspace{2mm}
Default values are displayed in the "usage" section of the function documentation. If a function has defined a default value for an argument, it is not necessary to include it in our function call, unless we are not satisfied with the default behavior.
\vspace{2mm}
Arguments **without** default values always need to be specified in our function call. The **{meta}** package has a convenience function called `gs` which we can use to check the default value used for a specific argument. For example, try running `gs("method.tau")`. If there is no default value, `gs` will return `NULL`.
\vspace{2mm}
Another interesting detail about _R_ functions is position matching. Usually, we have to write down the name of an argument and its value in a function call. Through position matching, however, we can leave out the name of the argument, and only have to type in the argument value. We can do this if we specify the argument in the same **position** in which it appears in the documentation.
Take the `sqrt` function. A written out call of this function would be `sqrt(x = 4)`. However, because we know that `x`, the number, is the first argument, we can simply type in `sqrt(4)` with the same result.
```
<br></br>
### Pre-Calculated Effect Size Data {#pre-calculated-es}
---
Let us begin our tour of meta-analysis functions with `metagen`. As we learned, this function can be used for pre-calculated effect size data. In our first example, we will use the function to perform a meta-analysis of the `ThirdWave` data set.
\index{Hedges' \textit{g}}
\index{Standardized Mean Difference}
This data set contains studies examining the effect of so-called "third wave" psychotherapies on perceived stress in college students. For each study, the standardized mean difference between a treatment and control group at post-test was calculated, and a small sample correction was applied. The effect size measure used in this meta-analysis, therefore, is Hedges' $g$. Let us have a look at the data.
\index{dmetar Package}
```{block, type='boxdmetar'}
**The "ThirdWave" Data Set**
\vspace{2mm}
The `ThirdWave` data set is included directly in the **{dmetar}** package. If you have installed **{dmetar}**, and loaded it from your library, running `data(ThirdWave)` automatically saves the data set in your _R_ environment. The data set is then ready to be used. If you do not have **{dmetar}** installed, you can download the data set as an _.rda_ file from the [Internet](https://www.protectlab.org/meta-analysis-in-r/data/thirdwave.rda), save it in your working directory, and then click on it in your R Studio window to import it.
```
```{r, message=F}
library(tidyverse) # needed for 'glimpse'
library(dmetar)
library(meta)
data(ThirdWave)
glimpse(ThirdWave)
```
We see that the data set has eight columns, the most important of which are `Author`, `TE` and `seTE`. The `TE` column contains the $g$ value of each study, and `seTE` is the standard error of $g$. The other columns represent variables describing the subgroup categories that each study falls into. These variables are not relevant for now.
We can now start to think about the type of meta-analysis we want to perform. Looking at the subgroup columns, we see that studies vary at least with respect to their risk of bias, control group, intervention duration, intervention type, and mode of delivery.
This makes it quite clear that some between-study heterogeneity can be expected, and that it makes no sense to assume that all studies have a fixed true effect. We may therefore use the random-effects model for pooling. Given its robust performance in continuous outcome data, we choose the restricted maximum likelihood (`"REML"`) estimator in this example. We will also use the Knapp-Hartung adjustments to reduce the risk of a false positive result.
Now that we have these fundamental questions settled, the specification of our call to `metagen` becomes fairly straightforward. There are two function-specific arguments which we always have to specify when using the function:
* **`TE`**. The name of the column in our data set which contains the calculated effect sizes.
* **`seTE`**. The name of the column in which the standard error of the effect size is stored.
The rest are generic **{meta}** arguments that we already covered in the last chapter. Since the analysis deals with standardized mean differences, we also specify `sm = "SMD"`. However, in this example, this has no actual effect on the results, since effect sizes are already calculated for each study. It will only tell the function to label effect sizes as SMDs in the output.
This gives us all the information we need to set up our first call to `metagen`. We will store the results of the function in an object called `m.gen`.
```{r}
m.gen <- metagen(TE = TE,
seTE = seTE,
studlab = Author,
data = ThirdWave,
sm = "SMD",
fixed = FALSE,
random = TRUE,
method.tau = "REML",
hakn = TRUE,
title = "Third Wave Psychotherapies")
```
Our `m.gen` object now contains all the meta-analysis results. An easy way to get an overview is to use the `summary` function^[In older versions of **{meta}** (before version 5.0-0), this overview can be printed without using `summary`. Simply call your newly created meta-analysis object directly in the _R_ console.].
```{r, eval=F}
summary(m.gen)
```
```
## Review: Third Wave Psychotherapies
## SMD 95%-CI %W(random)
## Call et al. 0.7091 [ 0.1979; 1.2203] 5.0
## Cavanagh et al. 0.3549 [-0.0300; 0.7397] 6.3
## DanitzOrsillo 1.7912 [ 1.1139; 2.4685] 3.8
## de Vibe et al. 0.1825 [-0.0484; 0.4133] 7.9
## Frazier et al. 0.4219 [ 0.1380; 0.7057] 7.3
## Frogeli et al. 0.6300 [ 0.2458; 1.0142] 6.3
## Gallego et al. 0.7249 [ 0.2846; 1.1652] 5.7
## Hazlett-Steve… 0.5287 [ 0.1162; 0.9412] 6.0
## Hintz et al. 0.2840 [-0.0453; 0.6133] 6.9
## Kang et al. 1.2751 [ 0.6142; 1.9360] 3.9
## Kuhlmann et al. 0.1036 [-0.2781; 0.4853] 6.3
## Lever Taylor… 0.3884 [-0.0639; 0.8407] 5.6
## Phang et al. 0.5407 [ 0.0619; 1.0196] 5.3
## Rasanen et al. 0.4262 [-0.0794; 0.9317] 5.1
## Ratanasiripong 0.5154 [-0.1731; 1.2039] 3.7
## Shapiro et al. 1.4797 [ 0.8618; 2.0977] 4.2
## Song & Lindquist 0.6126 [ 0.1683; 1.0569] 5.7
## Warnecke et al. 0.6000 [ 0.1120; 1.0880] 5.2
##
## Number of studies combined: k = 18
##
## SMD 95%-CI t p-value
## Random effects model 0.5771 [0.3782; 0.7760] 6.12 < 0.0001
##
## Quantifying heterogeneity:
## tau^2 = 0.0820 [0.0295; 0.3533]; tau = 0.2863 [0.1717; 0.5944];
## I^2 = 62.6% [37.9%; 77.5%]; H = 1.64 [1.27; 2.11]
##
## Test of heterogeneity:
## Q d.f. p-value
## 45.50 17 0.0002
##
## Details on meta-analytical method:
## - Inverse variance method
## - Restricted maximum-likelihood estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
```
Here we go, the results of our first meta-analysis using _R_. There is a lot to unpack, so let us go through the output step by step.
\index{Weight}
* The first part of the output contains the individual studies, along with their effect sizes and confidence intervals. Since the effects were pre-calculated, there is not much new to be seen here. The `%W(random)` column contains the weight (in percent) that the random-effects model attributed to each study. We can see that, with 7.9%, the greatest weight in our meta-analysis has been given to the study by de Vibe. The smallest weight has been given to the study by Ratanasiripong. Looking at the confidence interval of this study, we can see why this is the case: the interval is very wide, meaning that the standard error is high, and that the study's effect size estimate is therefore not very precise.
* Furthermore, the output tells us the total number of studies in our meta-analysis. We see that $K=$ 18 studies were combined.
* The next section provides us with the core result: the pooled effect size. We see that the estimate is $g \approx$ 0.58 and that the 95% confidence interval ranges from $g \approx$ 0.38 to 0.78. We are also presented with the results of a test determining if the effect size is significant. This is the case ($p<$ 0.001). Importantly, we also see the associated test statistic, which is denoted with `t`. This is because we applied the Knapp-Hartung adjustment, which is based on a $t$-distribution.
* Underneath, we see results concerning the between-study heterogeneity. We will learn more about some of the results displayed here in later chapters, so let us only focus on $\tau^2$. Next to `tau^2`, we see an estimate of the variance in true effects: $\tau^2$ = 0.08. We see that the confidence interval of `tau^2` does not include zero (0.03--0.35), meaning that $\tau^2$ is significantly greater than zero. All of this indicates that between-study heterogeneity exists in our data and that the random-effects model was a good choice.
* The last section provides us with details about the meta-analysis. We see that effects were pooled using the inverse variance method, that the restricted maximum-likelihood estimator was used, and that the Knapp-Hartung adjustment was applied.
We can also access information stored in `m.gen` directly. Plenty of objects are stored by default in the meta-analysis results produced by **{meta}**, and a look into the "value" section of the documentation reveals what they mean. We can use the `$` operator to print specific results of our analyses. The pooled effect, for example, is stored as `TE.random`.
```{r}
m.gen$TE.random
```
\index{Fixed-Effect Model}
Even when we specify `fixed = FALSE`, **{meta}**'s functions always also calculate results for the fixed-effect model internally. Thus, we can also access the pooled effect assuming a fixed-effect model.
```{r}
m.gen$TE.fixed
```
We see that this estimate deviates considerably from the random-effects model result.
When we want to adapt some details of our analyses, the `update.meta` function can be helpful. This function needs the **{meta}** object as input, and the argument we want to change. Let us say that we want to check if results differ substantially if we use the Paule-Mandel instead of the restricted maximum likelihood estimator. We can do that using this code:
```{r}
m.gen_update <- update.meta(m.gen,
method.tau = "PM")
# Get pooled effect
m.gen_update$TE.random
# Get tau^2 estimate
m.gen_update$tau2
```
We see that while the pooled effect does not differ much, the Paule-Mandel estimator gives us a somewhat larger approximation of $\tau^2$.
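In a similar fashion, we can check how much the Knapp-Hartung adjustment (Chapter \@ref(knapp-hartung)) affects the confidence interval of the pooled effect. The sketch below assumes that your version of **{meta}** still accepts the `hakn` argument.

```{r, eval=F}
# Rerun the analysis without the Knapp-Hartung adjustment
m.gen_wald <- update.meta(m.gen, hakn = FALSE)

# Confidence interval with Knapp-Hartung adjustment (t-distribution)
c(m.gen$lower.random, m.gen$upper.random)

# Confidence interval based on a Wald-type test (normal distribution)
c(m.gen_wald$lower.random, m.gen_wald$upper.random)
```

Typically, the Wald-type interval will turn out somewhat narrower.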
Lastly, it is always helpful to save the results for later. Objects generated by **{meta}** can easily be saved as _.rda_ (_R_ data) files, using the `save` function.
```{r, eval=F}
save(m.gen, file = "path/to/my/meta-analysis.rda") # example path
```
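In a later session, the saved object can be restored with the `load` function, which recreates it in our environment under its original name (`m.gen`).

```{r, eval=F}
load("path/to/my/meta-analysis.rda") # example path
```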
<br></br>
### (Standardized) Mean Differences {#pooling-smd}
---
\index{Standardized Mean Difference}
\index{Glass' Delta}
Raw effect size data in the form of means and standard deviations of two groups can be pooled using `metacont`. This function can be used for both standardized and unstandardized between-group mean differences. These can be obtained by either specifying `sm = "SMD"` or `sm = "MD"`. Otherwise, there are seven function-specific arguments we have to provide:
* **`n.e`**. The number of observations in the treatment/experimental group.
* **`mean.e`**. The mean in the treatment/experimental group.
* **`sd.e`**. The standard deviation in the treatment/experimental group.
* **`n.c`**. The number of observations in the control group.
* **`mean.c`**. The mean in the control group.
* **`sd.c`**. The standard deviation in the control group.
* **`method.smd`**. This is only relevant when `sm = "SMD"`. The `metacont` function allows us to calculate three different types of standardized mean differences. When we set `method.smd = "Cohen"`, the uncorrected standardized mean difference (Cohen's $d$) is used as the effect size metric. The two other options are `"Hedges"` (default and recommended), which calculates Hedges' $g$, and `"Glass"`, which will calculate Glass' $\Delta$ (_delta_). Glass' $\Delta$ uses the control group standard deviation instead of the pooled standard deviation to standardize the mean difference. This effect size is sometimes used in primary studies when there is more than one treatment group, but usually not the preferred metric for meta-analyses.
\index{Knapp-Hartung Adjustment}
For our example analysis, we will recycle the `SuicidePrevention` data set we already worked with in Chapters \@ref(data-prep-R) and \@ref(fem). Not all studies in our sample are absolutely identical, so using a random-effects model is warranted. We will also use Knapp-Hartung adjustments again, as well as the restricted maximum likelihood estimator for $\tau^2$. We tell `metacont` to correct for small-sample bias, producing Hedges' $g$ as the effect size metric. Results are saved in an object that we name `m.cont`.
Overall, our code looks like this:
```{r}
# Make sure meta and dmetar are already loaded
library(meta)
library(dmetar)
# Load dataset from dmetar (or download and open manually)
data(SuicidePrevention)
# Use metcont to pool results.
m.cont <- metacont(n.e = n.e,
mean.e = mean.e,
sd.e = sd.e,
n.c = n.c,
mean.c = mean.c,
sd.c = sd.c,
studlab = author,
data = SuicidePrevention,
sm = "SMD",
method.smd = "Hedges",
fixed = FALSE,
random = TRUE,
method.tau = "REML",
hakn = TRUE,
title = "Suicide Prevention")
```
Let us see what the results are:
```{r}
summary(m.cont)
```
Looking at the output and comparing it to the one we received in Chapter \@ref(pre-calculated-es), we already see one of **{meta}**'s greatest assets. Although `metagen` and `metacont` are different functions requiring different data types, the structure of the output looks nearly identical. This makes interpreting the results quite easy. We see that the pooled effect according to the random-effects model is $g=$ -0.23, with the 95% confidence interval ranging from -0.37 to -0.09. The effect is significant ($p=$ 0.006).
We see that the effect sizes have a negative sign. In the context of our meta-analysis, this represents a favorable outcome, because it means that suicidal ideation was lower in the treatment groups compared to the control groups. To make this clearer to others, we may also consistently reverse the sign of the effect sizes (e.g. write $g=$ 0.23 instead), so that positive effect sizes always represent "positive" results.
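If we decide to do this, one possible approach (shown here only as a sketch) is to swap the roles of the two groups when calling `metacont`. This flips the sign of every effect size while leaving the weights untouched, so the pooled effect should simply change its sign.

```{r, eval=F}
# Reverse the sign by treating the control group as "group 1"
m.cont_rev <- metacont(n.e = n.c, mean.e = mean.c, sd.e = sd.c,
                       n.c = n.e, mean.c = mean.e, sd.c = sd.e,
                       studlab = author,
                       data = SuicidePrevention,
                       sm = "SMD", method.smd = "Hedges",
                       fixed = FALSE, random = TRUE,
                       method.tau = "REML", hakn = TRUE)

# Pooled effect, now with a positive sign
m.cont_rev$TE.random
```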
The restricted maximum likelihood method estimated a between-study heterogeneity variance of $\tau^2$ = 0.004. Looking at `tau^2`, we see that the confidence interval includes zero, meaning that the variance of true effect sizes is not significantly greater than zero.
In the details section, we are informed that Hedges' $g$ was used as the effect size metric--just as we requested.
<br></br>
### Binary Outcomes
---
#### Risk & Odds Ratios {#pooling-or-rr}
---
\index{Risk Ratio}
\index{Odds Ratio}
\index{Inverse-Variance Weighting}
\index{Sparse Data}
The `metabin` function can be used to pool effect sizes based on binary data, particularly risk and odds ratios. Before we start using the function, we first have to discuss a few particularities of meta-analyses based on these effect sizes.
It is possible to pool binary effect sizes using the generic inverse variance method we covered in Chapter \@ref(fem) and \@ref(tau-estimators). We need to calculate the log-transformed odds or risk ratio, as well as the standard error of each effect, and can then use the inverse of the effect size variance to determine the pooling weights.
However, this approach is suboptimal for binary outcome data [@higgins2019cochrane, chapter 10.4.1]. When we are dealing with **sparse** data, meaning that the number of events or the total sample size of a study is small, the calculated standard error may not be a good estimator of the precision of the binary effect size.
<br></br>
##### The Mantel-Haenszel Method {#mantel-haenszel}
---
\index{Mantel-Haenszel Method}
The **Mantel-Haenszel** method [@mantel1959statistical; @robins1986general] is therefore commonly used as an alternative to calculate the weights of studies with binary outcome data. It is also the default approach used in `metabin`. This method uses the number of events and non-events in the treatment and control group to determine a study's weight. There are different formulas depending on if we want to calculate the risk or odds ratio.
\vspace{8mm}
**Risk Ratio:**
\begin{equation}
w_k = \frac{(a_k+b_k) c_k}{n_k}
(\#eq:pes9)
\end{equation}
**Odds Ratio:**
\begin{equation}
w_k = \frac{b_kc_k}{n_k}
(\#eq:pes10)
\end{equation}
In the formulas, we use the same notation as in Chapter \@ref(rr), with $a_k$ being the number of events in the treatment group, $c_k$ the number of events in the control group, $b_k$ the number of non-events in the treatment group, $d_k$ the number of non-events in the control group, and $n_k$ being the total sample size.
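As a small illustration, the following toy sketch calculates both types of weights for a single, entirely hypothetical $2\times2$ table.

```{r, eval=F}
# Hypothetical 2x2 table of one study
a_k <- 10; b_k <- 90          # events and non-events in the treatment group
c_k <- 20; d_k <- 80          # events and non-events in the control group
n_k <- a_k + b_k + c_k + d_k  # total sample size

(a_k + b_k)*c_k/n_k           # Mantel-Haenszel weight for the risk ratio
b_k*c_k/n_k                   # Mantel-Haenszel weight for the odds ratio
```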
\index{Peto Method}
\index{Peto Odds Ratio}
<br></br>
##### The Peto Method
---
A second approach is the **Peto** method [@yusuf1985beta]. In its essence, this approach is based on the inverse variance principle we already know. However, it uses a special kind of effect size, the **Peto odds ratio**, which we will denote with $\hat\psi_k$.
To calculate $\hat\psi_k$, we need to know $O_k$, the observed events in the treatment group, and calculate $E_k$, the **expected** number of cases in the treatment group. The difference $O_k-E_k$ is then divided by the variance $V_k$ of the difference between $O_k$ and $E_k$, resulting in a log-transformed version of $\hat\psi_k$. Using the same cell notation as before, the formulas to calculate $E_k$, $O_k$ and $V_k$ are the following:
\begin{equation}
O_k = a_k
(\#eq:pes11)
\end{equation}
\begin{equation}
E_k = \frac{(a_k+b_k)(a_k+c_k)}{a_k+b_k+c_k+d_k}
(\#eq:pes12)
\end{equation}
\vspace{4mm}
\begin{equation}
V_k = \frac{(a_k+b_k)(c_k+d_k)(a_k+c_k)(b_k+d_k)}{{(a_k+b_k+c_k+d_k)}^2(a_k+b_k+c_k+d_k-1)}
(\#eq:pes13)
\end{equation}
\vspace{4mm}
\begin{equation}
\log\hat\psi_k = \frac{O_k-E_k}{V_k}
(\#eq:pes14)
\end{equation}
The inverse of the variance of $\log\hat\psi_k$ is then used as the weight when pooling the effect sizes^[The variance of the log Peto odds ratio $\log\hat\psi_k$ is defined as $\text{SE}_{\log\hat\psi_k}^2 = \widehat{\text{Var}}(\log\hat\psi_k)=V_k^{-1}$, so that the pooling weight using the Peto method is defined as $w^{\text{(Peto)}}_k=\widehat{\text{Var}}(\log\hat\psi_k)^{-1}$.].
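The following toy sketch applies these formulas to the same hypothetical $2\times2$ table we used in the Mantel-Haenszel example above.

```{r, eval=F}
# Hypothetical 2x2 table of one study
a_k <- 10; b_k <- 90; c_k <- 20; d_k <- 80
n_k <- a_k + b_k + c_k + d_k

O_k <- a_k
E_k <- (a_k + b_k)*(a_k + c_k)/n_k
V_k <- (a_k + b_k)*(c_k + d_k)*(a_k + c_k)*(b_k + d_k)/(n_k^2*(n_k - 1))

(O_k - E_k)/V_k       # log Peto odds ratio
exp((O_k - E_k)/V_k)  # Peto odds ratio
V_k                   # pooling weight, since Var(log psi) = 1/V_k
```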
<br></br>
##### The Bakbergenuly-Sample Size Method
---
Recently, Bakbergenuly and colleagues [-@bakbergenuly2020methods] proposed another method in which the weight of effects is only determined by a study's sample size, and showed that this approach may be preferable to the one by Mantel and Haenszel. We will call this the **sample size method**. The formula for this approach is fairly easy. We only need to know the sample size $n_{\text{treat}_k}$ and $n_{\text{control}_k}$ in the treatment and control group, respectively.
\begin{equation}
w_k = \frac{n_{\text{treat}_k}n_{\text{control}_k}}{n_{\text{treat}_k} + n_{\text{control}_k} }
(\#eq:pes15)
\end{equation}
When we implement this pooling method in `metabin`, the weights and overall effect using the fixed- and random-effects model will be identical. Only the $p$-value and confidence interval of the pooled effect will differ.
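Here is a toy sketch of this weight for one hypothetical study; the group sizes are made up for illustration only.

```{r, eval=F}
n_treat <- 100; n_control <- 120
n_treat*n_control/(n_treat + n_control)  # sample size weight
```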
\index{Continuity Correction}
\index{Zero Cell Problem}
```{block2, type='boxinfo'}
**Which Pooling Method Should I Use?**
\vspace{4mm}
In Chapter \@ref(rr), we already talked extensively about the problem of **zero-cells** and **continuity correction**. While both the Peto and sample size method can be used without modification when there are zero cells, it is common to add 0.5 to zero cells when using the Mantel-Haenszel method. This is also the default behavior in `metabin`.
Using continuity corrections, however, has been discouraged [@efthimiou2018practical], as they can lead to biased results. The Mantel-Haenszel method only **really** requires a continuity correction when one specific cell is zero in **all** included studies, which is rarely the case. Usually, it is therefore advisable to use the **exact** Mantel-Haenszel method without continuity corrections by setting `MH.exact = TRUE` in `metabin`.
\vspace{4mm}
The Peto method also has its limitations. First of all, it can only be used for odds ratios. Simulation studies also showed that the approach only works well when (1) the number of observations in the treatment and control group is similar, (2) the observed event is rare (<1%), and (3) the treatment effect is not overly large [@bradburn2007much; @j2004add].
The Bakbergenuly-sample size method, lastly, is a fairly new approach, meaning that it is not as well studied as the other two methods.
\vspace{2mm}
All in all, it may be advisable in most cases to follow Cochrane's general assessment [@higgins2019cochrane, chapter 10.4], and use the Mantel-Haenszel method (without continuity correction). The Peto method may be used when the odds ratio is the desired effect size metric, and when the event of interest is expected to be rare.
```
<br></br>
##### Pooling Binary Effect Sizes in _R_ {#ppoolbin}
---
There are eight important function-specific arguments in `metabin`:
* **`event.e`**. The number of events in the treatment/experimental group.
* **`n.e`**. The number of observations in the treatment/experimental group.
* **`event.c`**. The number of events in the control group.
* **`n.c`**. The number of observations in the control group.
* **`method`**. The pooling method to be used. This can either be `"Inverse"` (generic inverse-variance pooling), `"MH"` (Mantel-Haenszel; default and recommended), `"Peto"` (Peto method), or `"SSW"` (Bakbergenuly-sample size method; only when `sm = "OR"`).
* **`sm`**. The summary measure (i.e. effect size metric) to be calculated. We can use `"RR"` for the risk ratio and `"OR"` for the odds ratio.
* **`incr`**. The increment to be added for continuity correction of zero cells. If we specify `incr = 0.5`, an increment of 0.5 is added. If we set `incr = "TACC"`, the treatment arm continuity correction method is used (see Chapter \@ref(rr)). As mentioned before, it is usually recommended to leave out this argument and not apply continuity corrections.
* **`MH.exact`**. If `method = "MH"`, we can set this argument to `TRUE`, indicating that we do not want that a continuity correction is used for the Mantel-Haenszel method.
For our hands-on example, we will use the `DepressionMortality` data set. This data set is based on a meta-analysis by Cuijpers and Smit [-@cuijpers2002excess], which examined the effect of suffering from depression on all-cause mortality. The data set contains the number of individuals with and without depression, and how many individuals in both groups had died after several years.
\index{dmetar Package}
```{block, type='boxdmetar'}
**The "DepressionMortality" Data Set**
\vspace{2mm}
The `DepressionMortality` data set is included directly in the **{dmetar}** package. If you have installed **{dmetar}**, and loaded it from your library, running `data(DepressionMortality)` automatically saves the data set in your _R_ environment. The data set is then ready to be used. If you do not have **{dmetar}** installed, you can download the data set as an _.rda_ file from the [Internet](https://www.protectlab.org/meta-analysis-in-r/data/depressionmortality.rda), save it in your working directory, and then click on it in your R Studio window to import it.
```
Let us have a look at the data set first:
```{r, message = F}
library(dmetar)
library(tidyverse)
library(meta)
data(DepressionMortality)
glimpse(DepressionMortality)
```
\index{Paule-Mandel Estimator}
\index{Mantel-Haenszel Method}
In this example, we will calculate the risk ratio as the effect size metric, as was done by Cuijpers and Smit. We will use a random-effects pooling model, and, since we are dealing with binary outcome data, we will use the Paule-Mandel estimator for $\tau^2$.
Looking at the data, we see that the sample sizes vary considerably from study to study, a scenario in which the Paule-Mandel method may be slightly biased (see Chapter \@ref(tau-estimators)). Keeping this in mind, we can also try out another $\tau^2$ estimator as a sensitivity analysis to check if the results vary by a lot.
The data set contains no zero cells, so we do not have to worry about continuity correction, and can use the exact Mantel-Haenszel method right away. We save the meta-analysis results in an object called `m.bin`.
```{r, eval=F}
m.bin <- metabin(event.e = event.e,
n.e = n.e,
event.c = event.c,
n.c = n.c,
studlab = author,
data = DepressionMortality,
sm = "RR",
method = "MH",
MH.exact = TRUE,
fixed = FALSE,
random = TRUE,
method.tau = "PM",
hakn = TRUE,
title = "Depression and Mortality")
summary(m.bin)
```
```{r, echo=F, message=F, warning=F}
m.bin <- metabin(event.e = event.e,
n.e = n.e,
event.c = event.c,
n.c = n.c,
studlab = author,
data = DepressionMortality,
sm = "RR",
method = "MH",
MH.exact = TRUE,
fixed = FALSE,
random = TRUE,
method.tau = "PM",
hakn = TRUE,
title = "Depression and Mortality")
```
```
## Review: Depression and Mortality
## RR 95%-CI %W(random)
## Aaroma et al., 1994 2.09 [1.41; 3.12] 6.0
## Black et al., 1998 1.75 [1.31; 2.33] 6.6
## Bruce et al., 1989 2.51 [1.07; 5.88] 3.7
## Bruce et al., 1994 1.16 [0.85; 1.57] 6.5
## Enzell et al., 1984 1.82 [1.28; 2.60] 6.3
## Fredman et al., 1989 0.39 [0.05; 2.78] 1.2
## Murphy et al., 1987 1.76 [1.26; 2.46] 6.4
## Penninx et al., 1999 1.46 [0.93; 2.29] 5.8
## Pulska et al., 1998 1.94 [1.34; 2.81] 6.2
## Roberts et al., 1990 2.30 [1.92; 2.75] 7.0
## Saz et al., 1999 2.18 [1.55; 3.07] 6.3
## Sharma et al., 1998 2.05 [1.07; 3.91] 4.7
## Takeida et al., 1997 6.97 [4.13; 11.79] 5.3
## Takeida et al., 1999 5.81 [3.88; 8.70] 6.0
## Thomas et al., 1992 1.33 [0.77; 2.27] 5.3
## Thomas et al., 1992 1.77 [1.10; 2.83] 5.6
## Weissman et al., 1986 1.25 [0.66; 2.33] 4.8
## Zheng et al., 1997 1.98 [1.40; 2.80] 6.3
##
## Number of studies combined: k = 18
##
## RR 95%-CI t p-value
## Random effects model 2.0217 [1.5786; 2.5892] 6.00 < 0.0001
##
## Quantifying heterogeneity:
## tau^2 = 0.1865 [0.0739; 0.5568]; tau = 0.4319 [0.2718; 0.7462];
## I^2 = 77.2% [64.3%; 85.4%]; H = 2.09 [1.67; 2.62]
##
## Test of heterogeneity:
## Q d.f. p-value
## 74.49 17 < 0.0001
##
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Paule-Mandel estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
```
We see that the pooled effect size is RR $=$ 2.02. The pooled effect is significant ($p<$ 0.001), and indicates that suffering from depression doubles the mortality risk. We see that our estimate of the between-study heterogeneity variance is $\tau^2 \approx$ 0.19.
The confidence interval of $\tau^2$ does not include zero, indicating substantial heterogeneity between studies. Lastly, a look into the details section of the output reveals that the `metabin` function used the Mantel-Haenszel method for pooling, as intended.
As announced above, let us have a look if the method used to estimate $\tau^2$ has an impact on the results. Using the `update.meta` function, we re-run the analysis, but use the restricted maximum likelihood estimator this time.
```{r}
m.bin_update <- update.meta(m.bin,
method.tau = "REML")
```
\index{Exponentiation}
Now, let us have a look at the pooled effect again by inspecting `TE.random`. We have to remember here that meta-analyses of binary outcomes are actually performed by using a log-transformed version of the effect size. When presenting the results, `metabin` just reconverts the effect size metrics to their original form for our convenience. This step is not performed if we inspect elements in our meta-analysis object.
To retransform log-transformed effect sizes, we have to **exponentiate** the value. Exponentiation can be seen as the "antagonist" of log-transforming data, and can be performed in _R_ using the `exp` function^[Generally speaking, exponentiation is an operation including two variables $b$ and $x$. When $x$ is exponentiated, we raise $b$ (the base) to the power of $x$ (the exponent): $b^x$. Most commonly, $b$ is defined as Euler's number ($e \approx$ 2.718). Using $e^x$ creates an **exponential function** $\exp(x)$, which can be used to reconvert some log-transformed value $x$ back to its original scale. This operation is also known as **"taking the anti-log"**.]. Let us put this into practice.
```{r}
exp(m.bin_update$TE.random)
```
We see that the pooled effect using the restricted maximum likelihood estimator is virtually identical. Now, let us see the estimate of $\tau^2$:
```{r}
m.bin_update$tau2
```
This value deviates somewhat, but not to a degree that should make us worry about the validity of our initial results.
Our call to `metabin` would have looked exactly the same if we had decided to pool odds ratios. The only thing we need to change is the `sm` argument, which has to be set to `"OR"`. Instead of writing down the entire function call one more time, we can use the `update.meta` function again to calculate the pooled OR.
```{r, eval=F}
m.bin_or <- update.meta(m.bin, sm = "OR")
m.bin_or
```
```
## Review: Depression and Mortality
##
## [...]
##
## Number of studies combined: k = 18
##
## OR 95%-CI t p-value
## Random effects model 2.2901 [1.7512; 2.9949] 6.52 < 0.0001
##
## Quantifying heterogeneity:
## tau^2 = 0.2032 [0.0744; 0.6314]; tau = 0.4508 [0.2728; 0.7946];
## I^2 = 72.9% [56.7%; 83.0%]; H = 1.92 [1.52; 2.43]
##
## Test of heterogeneity:
## Q d.f. p-value
## 62.73 17 < 0.0001
##
## Details on meta-analytical method:
## - Mantel-Haenszel method
## - Paule-Mandel estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
```
In the output, we see that the pooled effect using odds ratios is OR = 2.29.
<br></br>
##### Pooling Pre-Calculated Binary Effect Sizes {#m-gen-bin}
---
It is sometimes not possible to extract the raw effect size data needed to calculate risk or odds ratios in each study. For example, a primary study may report an odds ratio, but not the data on which this effect size is based. If the authors do not provide us with the original data, this may require us to perform a meta-analysis based on pre-calculated effect size data. As we learned, the function we can use to do this is `metagen`.
When dealing with binary outcome data, we should be really careful if there is no other option than using pre-calculated effect size data. The `metagen` function uses the inverse-variance method to pool effect sizes, and better options such as the Mantel-Haenszel approach cannot be used. However, it is still a viable alternative if everything else fails.
Using the `DepressionMortality` data set, let us simulate that we are dealing with a pre-calculated effect size meta-analysis. We can extract the `TE` and `seTE` object in `m.bin` to get the effect size and standard error of each study. We save this information in our `DepressionMortality` data set.
```{r}
DepressionMortality$TE <- m.bin$TE
DepressionMortality$seTE <- m.bin$seTE
```
\index{Logarithm, Natural}
Now, imagine that there is one effect for which we know the lower and upper bound of the confidence interval, but not the standard error. To simulate such a scenario, we will (1) define the standard error of study 7 (Murphy et al., 1987) as missing (i.e. set its value to `NA`), (2) define two new empty columns, `lower` and `upper`, in our data set, and (3) fill `lower` and `upper` with the log-transformed "reported" confidence interval in study 7.
```{r}
# Set seTE of study 7 to NA
DepressionMortality$seTE[7] <- NA
# Create empty columns 'lower' and 'upper'
DepressionMortality[,"lower"] <- NA
DepressionMortality[,"upper"] <- NA
# Fill in values for 'lower' and 'upper' in study 7
# As always, binary effect sizes need to be log-transformed
DepressionMortality$lower[7] <- log(1.26)
DepressionMortality$upper[7] <- log(2.46)
```
Now let us have a look at the data we just created.
```{r, eval=F}
DepressionMortality[,c("author", "TE", "seTE", "lower", "upper")]
```
```
## author TE seTE lower upper
## 1 Aaroma et al., 1994 0.7418 0.20217 NA NA
## 2 Black et al., 1998 0.5603 0.14659 NA NA
## 3 Bruce et al., 1989 0.9235 0.43266 NA NA
## 4 Bruce et al., 1994 0.1488 0.15526 NA NA
## 5 Enzell et al., 1984 0.6035 0.17986 NA NA
## 6 Fredman et al., 1989 -0.9236 0.99403 NA NA
## 7 Murphy et al., 1987 0.5675 NA 0.2311 0.9001
## 8 Penninx et al., 1999 0.3816 0.22842 NA NA
## [...]
```
It is not uncommon to find data sets like this one in practice. It may be possible to calculate the log-risk ratio for most studies, but for a few others, the only available information is the (log-transformed) risk ratio and its confidence interval.
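As an aside, a study's standard error on the log scale can also be approximated by hand from a reported 95% confidence interval, since the interval spans roughly $2 \times 1.96$ standard errors. A small sketch for study 7, just as an illustration of where such a value comes from:

```{r, eval=F}
# Approximate log-RR standard error from the reported 95% CI of study 7
(log(2.46) - log(1.26)) / (2 * qnorm(0.975))
```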
Fortunately, `metagen` allows us to pool even such data. We only have to provide the name of the columns containing the lower and upper bound of the confidence interval to the `lower` and `upper` argument. The `metagen` function will then use this information to weight the effects when the standard error is not available. Our function call looks like this:
```{r, eval=F}
m.gen_bin <- metagen(TE = TE,
seTE = seTE,
lower = lower,
upper = upper,
studlab = author,
data = DepressionMortality,
sm = "RR",
method.tau = "PM",
fixed = FALSE,
random = TRUE,
title = "Depression Mortality (Pre-calculated)")
summary(m.gen_bin)
```
```
## Review: Depression Mortality (Pre-calculated)
##
## [...]
##
## Number of studies combined: k = 18
##
## RR 95%-CI z p-value
## Random effects model 2.0218 [1.6066; 2.5442] 6.00 < 0.0001
##
## Quantifying heterogeneity:
## tau^2 = 0.1865 [0.0739; 0.5568]; tau = 0.4319 [0.2718; 0.7462];
## I^2 = 77.2% [64.3%; 85.4%]; H = 2.09 [1.67; 2.62]
##
## [...]
```
In the output, we see that all $K=$ 18 studies could be combined in the meta-analysis, meaning that `metagen` used the information in `lower` and `upper` provided for study 7. The output also shows that the results using the inverse variance method are nearly identical to the ones of the Mantel-Haenszel method from before.
<br></br>
#### Incidence Rate Ratios {#pooling-irr}
---
\index{Incidence Rate Ratio}
\index{Person-Time}
Effect sizes based on incidence rates (i.e. incidence rate ratios, Chapter \@ref(irr)) can be pooled using the `metainc` function. The arguments of this function are very similar to `metabin`:
* **`event.e`**: The number of events in the treatment/experimental group.
* **`time.e`**: The person-time at risk in the treatment/experimental group.
* **`event.c`**: The number of events in the control group.
* **`time.c`**: The person-time at risk in the control group.
* **`method`**: Like `metabin`, the default pooling method is the one by Mantel and Haenszel (`"MH"`). Alternatively, we can also use generic inverse variance pooling (`"Inverse"`).
* **`sm`**: The summary measure. We can choose between the incidence rate ratio (`"IRR"`) and the incidence rate difference (`"IRD"`).
* **`incr`**: The increment we want to add for the continuity correction of zero cells.
\index{Inverse-Variance Weighting}
In contrast to `metabin`, `metainc` does not use a continuity correction by default. Specifying `MH.exact` as `TRUE` is therefore not required. A continuity correction is only performed when we choose the generic inverse variance pooling method (`method = "Inverse"`).
In our hands-on example, we will use the `EatingDisorderPrevention` data set. This data is based on a meta-analysis which examined the effects of college-based preventive interventions on the incidence of eating disorders [@harrer2020prevention]. The person-time at risk is expressed as person-years in this data set.
```{block, type='boxdmetar'}
**The "EatingDisorderPrevention" Data Set**
\vspace{2mm}
The `EatingDisorderPrevention` data set is included in the **{dmetar}** package. If you have installed **{dmetar}**, and loaded it from your library, running `data(EatingDisorderPrevention)` automatically saves the data set in your _R_ environment. The data set is then ready to be used. If you do not have **{dmetar}** installed, you can download the data set as an _.rda_ file from the [Internet](https://www.protectlab.org/meta-analysis-in-r/data/eatingdisorderprevention.rda), save it in your working directory, and then click on it in your R Studio window to import it.
```
As always, let us first have a glimpse at the data:
```{r, message=F}
library(dmetar)
library(tidyverse)
library(meta)
data(EatingDisorderPrevention)
glimpse(EatingDisorderPrevention)
```
\index{Mantel-Haenszel Method}
\index{Paule-Mandel Estimator}
We use `metainc` to pool the effect size data, with the incidence rate ratio as the effect size metric. The Mantel-Haenszel method is used for pooling, and the Paule-Mandel estimator to calculate the between-study heterogeneity variance.
```{r}
m.inc <- metainc(event.e = event.e,
time.e = time.e,
event.c = event.c,
time.c = time.c,
studlab = Author,
data = EatingDisorderPrevention,
sm = "IRR",
method = "MH",
fixed = FALSE,
random = TRUE,
method.tau = "PM",
hakn = TRUE,
title = "Eating Disorder Prevention")
summary(m.inc)
```
We see that the pooled effect is IRR = 0.62. This effect is significant ($p=$ 0.04), albeit being somewhat closer to the conventional significance threshold than in the previous examples. Based on the pooled effect, we can say that the preventive interventions reduced the incidence of eating disorders within one year by 38%. Lastly, we see that the estimate of the heterogeneity variance $\tau^2$ is zero.
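The 38% figure mentioned above can be derived directly from the pooled effect stored in `m.inc`. A quick sketch; note that `TE.random` is stored on the log scale:

```{r, eval=F}
# Pooled incidence rate ratio and implied reduction in incidence
exp(m.inc$TE.random)               # pooled IRR
(1 - exp(m.inc$TE.random)) * 100   # reduction in percent
```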
<br></br>
### Correlations {#pooling-cor}
---
\index{Correlation}
\index{Fisher's \textit{z}}
Correlations can be pooled using the `metacor` function, which uses the generic inverse variance pooling method. In Chapter \@ref(pearson-cors), we covered that correlations should be Fisher's $z$-transformed before pooling. By default, `metacor` does this transformation automatically for us. It is therefore sufficient to provide the function with the original, untransformed correlations reported in the studies. The `metacor` function has only two relevant function-specific arguments:
* **`cor`**. The (untransformed) correlation coefficient.
* **`n`**. The number of observations in the study.
To illustrate `metacor`'s functionality, we will use the `HealthWellbeing` data set. This data set is based on a large meta-analysis examining the association between health and well-being [@ngamaba2017strongly].
\index{dmetar Package}
```{block, type='boxdmetar'}
**The "HealthWellbeing" Data Set**
\vspace{2mm}
The `HealthWellbeing` data set is included in the **{dmetar}** package. If you have installed **{dmetar}**, and loaded it from your library, running `data(HealthWellbeing)` automatically saves the data set in your _R_ environment. The data set is then ready to be used.
\vspace{2mm}
If you do not have **{dmetar}** installed, you can download the data set as an _.rda_ file from the [Internet](https://www.protectlab.org/meta-analysis-in-r/data/healthwellbeing.rda), save it in your working directory, and then click on it in your R Studio window to import it.
```
Let us have a look at the data:
```{r, message=F}
library(dmetar)
library(tidyverse)
library(meta)
data(HealthWellbeing)
glimpse(HealthWellbeing)
```
\index{Restricted Maximum Likelihood Estimator}
We expect considerable between-study heterogeneity in this meta-analysis, so a random-effects model is employed. The restricted maximum likelihood estimator is used for $\tau^2$.
```{r, eval=F}
m.cor <- metacor(cor = cor,
n = n,
studlab = author,
data = HealthWellbeing,
fixed = FALSE,
random = TRUE,
method.tau = "REML",
hakn = TRUE,
title = "Health and Wellbeing")
summary(m.cor)
```
```
## Review: Health and Wellbeing
## COR 95%-CI %W(random)
## An, 2008 0.6200 [0.4964; 0.7189] 2.8
## Angner, 2013 0.3720 [0.2823; 0.4552] 3.4
## Barger, 2009 0.2900 [0.2870; 0.2930] 3.8
## Doherty, 2013 0.3330 [0.2908; 0.3739] 3.7
## Dubrovina, 2012 0.7300 [0.7255; 0.7344] 3.8
## Fisher, 2010 0.4050 [0.2373; 0.5493] 2.8
## [...]
##
## Number of studies combined: k = 29
## Number of observations: o = 853794
##
## COR 95%-CI t p-value
## Random effects model 0.3632 [0.3092; 0.4148] 12.81 < 0.0001
##
## Quantifying heterogeneity:
## tau^2 = 0.0241 [0.0141; 0.0436]; tau = 0.1554 [0.1186; 0.2088];
## I^2 = 99.8% [99.8%; 99.8%]; H = 24.14 [23.29; 25.03]
##
## Test of heterogeneity:
## Q d.f. p-value
## 16320.87 28 0
##
## Details on meta-analytical method:
## - Inverse variance method
## - Restricted maximum-likelihood estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Fisher's z transformation of correlations
```
We see that the pooled association between health and well-being is $r=$ 0.36, and that this effect is significant ($p<$ 0.001). Using Cohen's convention, this can be considered a moderate-sized correlation.
In the output, `metacor` already reconverted the Fisher's $z$-transformed correlations to the original form. A look at the last line of the details section, however, tells us that $z$-values have indeed been used to pool the effects. Lastly, we see that the heterogeneity variance estimated for this meta-analysis is significantly larger than zero.
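If needed, this back-transformation can be replicated by hand. The pooled effect is stored on the Fisher's $z$ scale in `m.cor$TE.random` and can be converted back to a correlation with the inverse transformation $r = \tanh(z)$. A minimal sketch:

```{r, eval=F}
# Pooled effect on the Fisher's z scale
m.cor$TE.random

# Back-transform to the correlation metric
tanh(m.cor$TE.random)
```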
<br></br>
### Means {#pooling-mean}
---
\index{Mean, Arithmetic}
A meta-analysis of means can be conducted using the `metamean` function. This function uses the generic inverse variance method to pool the data. When using `metamean`, we have to determine first if we want to perform a meta-analysis of raw or log-transformed means.
In contrast to odds and risk ratios, a log-transformation of means is usually not necessary. However, it is advisable to use the transformation when dealing with means of a non-negative quantity (e.g. height), and when some means are close to zero. This is controlled via the `sm` argument. If we set `sm = "MRAW"`, the raw means are pooled. The log-transformation is performed when `sm = "MLN"`. The function-specific arguments are:
* **`n`**: The number of observations.
* **`mean`**: The mean.
* **`sd`**: The standard deviation of the mean.
* **`sm`**: The type of summary measure to be used for pooling (see above).
For our hands-on example, we will use the `BdiScores` data set. This data set contains the mean score of the Beck Depression Inventory II [@beck1996beck], measured in samples of depression patients participating in psychotherapy and antidepressant trials [@furukawa2020translating].
\index{dmetar Package}
```{block, type='boxinfo'}
**The "BdiScores" Data Set**
\vspace{2mm}
The `BdiScores` data set is included directly in the **{dmetar}** package. If you have installed **{dmetar}**, and loaded it from your library, running `data(BdiScores)` automatically saves the data set in your _R_ environment. The data set is then ready to be used. If you do not have **{dmetar}** installed, you can download the data set as an _.rda_ file from the [Internet](https://www.protectlab.org/meta-analysis-in-r/data/bdiscores.rda), save it in your working directory, and then click on it in your R Studio window to import it.
```
```{r, message=F}
library(dmetar)
library(tidyverse)
library(meta)
data(BdiScores)
# We only need the first four columns
glimpse(BdiScores[,1:4])
```
Our goal is to calculate the overall mean depression score based on this collection of studies. We will use a random-effects model and the restricted maximum-likelihood estimator to pool the raw means in our data set. We save the results in an object called `m.mean`.
```{r}
m.mean <- metamean(n = n,
mean = mean,
sd = sd,
studlab = author,
data = BdiScores,
sm = "MRAW",
fixed = FALSE,
random = TRUE,
method.tau = "REML",
hakn = TRUE,
title = "BDI-II Scores")
summary(m.mean)
```
The pooled mean assuming a random-effects model is $m$ = 31.12. We also see that the between-study heterogeneity variance $\tau^2$ in this meta-analysis is significantly greater than zero.
<br></br>
### Proportions {#pooling-props}
---
\index{Proportion}
\index{Logit-Transformation}
The `metaprop` function can be used to pool proportions. In Chapter \@ref(props), we already discussed that it is best to logit-transform proportions before the meta-analysis is performed. The `metaprop` function does this automatically for us if we specify `sm = "PLOGIT"`. If the raw proportions should be pooled, we can use `sm = "PRAW"`, but remember that this is discouraged.
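To make the logit transformation more tangible, here is a small sketch using base _R_ functions, with an illustrative proportion of 10%:

```{r, eval=F}
p <- 0.10
# Logit transformation: the log-odds of the proportion
qlogis(p)            # identical to log(p / (1 - p))
# Inverse logit: back-transformation to a proportion
plogis(qlogis(p))
```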
\index{Mixed-Effects Model}
The default method through which `metaprop` pools proportions is somewhat special. If we use logit-transformed values, the function does not use the inverse-variance method for pooling, but builds a **generalized linear mixed-effects model** (GLMM). Essentially, the function fits a logistic regression model to our data, which includes random-effects to account for the fact that true effect sizes vary between studies.
You may have heard of the term "mixed-effects model" before. Such models are commonly used in primary studies across many research fields. In Chapters \@ref(subgroup) and \@ref(metareg), we will delve into this topic a little deeper by discussing subgroup analysis and meta-regression, which are special applications of mixed-effects models. For now, however, it is sufficient to understand the general idea of what a mixed-effects model is.
Mixed-effects models are regression models which contain both "fixed" and "random" components. The fixed elements are the $\beta$ weights. A very simple regression model contains two $\beta$ terms; the intercept ${\beta_0}$, as well as a regression term ${\beta_1}x$. These are used in combination to predict observed data $y$ through some other quantity $x$. This prediction will hardly ever be perfect, leaving some random error $\epsilon_i$. Together, this gives the following formula:
\begin{equation}
{y}_i = {\beta_0} + {\beta_1}x_i + \epsilon_i
(\#eq:pes16)
\end{equation}
The crucial point is that the value of the $\beta$ weights in this equation remains the same for each observation $i$. The value of $x$ may vary from observation to observation, but $\beta_0$ and $\beta_1$ never do, since they are fixed.
This regression equation can be turned into a **mixed**-effects model when random effects are added. We denote this random-effect term with $u_i$. As indicated by the subscript $i$, the random effect term can have different values for each observation. The $u_i$ term is centered around zero and can increase or decrease the estimate produced by the fixed effects:
\begin{equation}
{y}_i = {\beta_0} + {\beta_1}x_i + u_i + \epsilon_i
(\#eq:pes17)
\end{equation}
Meta-analysis can be seen as a special type of this model in which there is no $\beta_1x_i$ term. The model only contains an intercept $\beta_0$, which corresponds with the overall effect size $\mu$ in the random-effects model. The $u_i$ and $\epsilon_i$ parts correspond with the $\zeta_k$ and $\epsilon_k$ error terms in meta-analyses. This makes it clear that meta-analysis is equivalent to a mixed-effects regression model. This mixed-effects model, however, only contains an intercept, as well as a random effect connected to that intercept. Using a binomial logit-link^[For study $k$, the logit transformation is defined as $\theta_k^{\text{LO}}=\log_e\left(\frac{p_k}{1-p_k}\right)$, leading to $\theta_k^{\text{LO}} \sim \theta+u_k$, with $u_k \sim \mathcal{N}(0, \tau^2)$. In meta-analytic GLMMs for proportions, the number of events in a study ($a_k$) is assumed to follow a binomial distribution: $a_k \sim \text{B}\left(n_k, \frac{\exp(\theta_k^{\text{LO}})}{1+ \exp(\theta_k^{\text{LO}})}\right)$. For a more detailed description, see @schwarzer2019seriously, A.2.2.], we can therefore apply a (generalized) logistic mixed-effect model to estimate the pooled effect.
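To make this more concrete, here is a conceptual sketch of how such a random-intercept logistic regression could be fitted directly with the **{lme4}** package. The column names `event`, `n` and `author` are the ones used in the `OpioidMisuse` example below; the sketch is only meant to illustrate the model structure, and results will not be numerically identical to `metaprop` (which, for instance, adds the Knapp-Hartung adjustment).

```{r, eval=F}
library(lme4)

# Random-intercept logistic regression: one row per study,
# 'event' successes out of 'n' observations
fit <- glmer(cbind(event, n - event) ~ 1 + (1 | author),
             family = binomial("logit"),
             data = OpioidMisuse)

# Back-transform the fixed intercept (a log-odds) to a proportion
plogis(fixef(fit)["(Intercept)"])
```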
GLMMs can be applied not only to proportions but also to other outcome measures based on binary and count data, such as odds ratios or incidence rate ratios [@stijnen2010random]. While GLMMs are not universally recommended for meta-analyses of binary outcome data [@bakbergenuly2018meta], their use has been advocated for proportions [@schwarzer2019seriously].
\index{Maximum Likelihood}
Using GLMMs as part of `metaprop` has three implications: (1) the output will display no meta-analytic weights for each effect, (2) the $\tau^2$ estimator can only be set to `"ML"` (since maximum-likelihood is used to estimate the GLMM), and (3) there will be no confidence intervals for our estimate of $\tau^2$. If this information is required, you may switch to performing an inverse-variance meta-analysis. There are five function-specific arguments for `metaprop`:
* **`event`**. The number of events.
* **`n`**. The number of observations.
* **`method`**. The pooling method. Can be either a GLMM (`method = "GLMM"`), or inverse-variance pooling (`method = "Inverse"`).
* **`incr`**. The increment to be added for continuity correction in zero cells. This is only relevant when inverse-variance pooling is used.
* **`sm`**. The summary measure to be used. It is advised to use logit-transformed proportions by setting `sm = "PLOGIT"` (default).
For our illustration of the `metaprop` function, we will use the `OpioidMisuse` data set. This data is derived from a meta-analysis which examined the 12-month prevalence of prescription opioid misuse among adolescents and young adults in the United States [@jordan2017past].
\index{dmetar Package}
```{block, type='boxdmetar'}
**The "OpioidMisuse" Data Set**
\vspace{2mm}
The `OpioidMisuse` data set is included directly in the **{dmetar}** package. If you have installed **{dmetar}**, and loaded it from your library, running `data(OpioidMisuse)` automatically saves the data set in your _R_ environment. The data set is then ready to be used. If you do not have **{dmetar}** installed, you can download the data set as an _.rda_ file from the [Internet](https://www.protectlab.org/meta-analysis-in-r/data/opioidmisuse.rda), save it in your working directory, and then click on it in your R Studio window to import it.
```
Let us load the data set and have a look at it:
```{r, message=F}
library(dmetar)
library(meta)
library(tidyverse)
data(OpioidMisuse)
glimpse(OpioidMisuse)
```
We pool the prevalence data using a GLMM and logit-transformed proportions.
```{r, eval=F}
m.prop <- metaprop(event = event,
n = n,
studlab = author,
data = OpioidMisuse,
method = "GLMM",
sm = "PLOGIT",
fixed = FALSE,
random = TRUE,
hakn = TRUE,
title = "Opioid Misuse")
summary(m.prop)
```
```
## Review: Opioid Misuse
## proportion 95%-CI
## Becker, 2008 0.1002 [0.0962; 0.1042]
## Boyd, 2009 0.0998 [0.0811; 0.1211]
## Boyd, 2007 0.1162 [0.0978; 0.1368]
## Cerda, 2014 0.0710 [0.0654; 0.0770]
## Fiellin, 2013 0.1176 [0.1150; 0.1204]
## [...]
##
##
## Number of studies combined: k = 15
## Number of observations: o = 434385
## Number of events: e = 41364
##
## proportion 95%-CI
## Random effects model 0.0944 [0.0836; 0.1066]
##
## Quantifying heterogeneity:
## tau^2 = 0.0558; tau = 0.2362; I^2 = 98.3% [97.9%; 98.7%]; H = 7.74 [6.92; 8.66]
##
## Test of heterogeneity:
## Q d.f. p-value Test
## 838.21 14 < 0.0001 Wald-type
## 826.87 14 < 0.0001 Likelihood-Ratio
##
## Details on meta-analytical method:
## - Random intercept logistic regression model
## - Maximum-likelihood estimator for tau^2
## - Hartung-Knapp adjustment for random effects model
## - Logit transformation
## - Clopper-Pearson confidence interval for individual studies
```
In the output, we see that the pooled 12-month prevalence of prescription opioid misuse in the selected studies is 9.4%, with the confidence interval ranging from 8.36 to 10.66%.
As described before, the output does not display the individual weight of each effect. In the same vein, we get an estimate of the between-study heterogeneity ($\tau^2 =$ 0.056), but no confidence interval around it.
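If a confidence interval for $\tau^2$ is required, one possible workaround (as mentioned above) is to switch to inverse-variance pooling. A sketch using `update.meta`; note that this changes the estimation approach, so the results will differ somewhat from the GLMM-based ones:

```{r, eval=F}
# Re-run the meta-analysis using inverse-variance pooling
m.prop_iv <- update.meta(m.prop, method = "Inverse", method.tau = "PM")

# The summary now also reports a confidence interval for tau^2
summary(m.prop_iv)
```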
$$\tag*{$\blacksquare$}$$
<br></br>
## Questions & Answers
```{block, type='boxquestion'}
**Test your knowledge!**
\vspace{4mm}
1. What is the difference between a fixed-effect model and a random-effects model?
\vspace{-2mm}
2. Can you think of a case in which the results of the fixed- and random-effects model are identical?
\vspace{-2mm}
3. What is $\tau^2$? How can it be estimated?
\vspace{-2mm}
4. On which distribution is the Knapp-Hartung adjustment based? What effect does it have?
\vspace{-2mm}
5. What does "inverse-variance" pooling mean? When is this method **not** the best solution?
\vspace{-2mm}
6. You want to meta-analyze binary outcome data. The number of observations in the study arms is roughly similar, the observed event is very rare, and you do not expect the treatment effect to be large. Which pooling method would you use?
\vspace{-2mm}
7. For which outcome measures can GLMMs be used?
\vspace{4mm}
**Answers to these questions are listed in [Appendix A](https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/qanda.html#qanda4) at the end of this book.**
```
<br></br>
## Summary
* In statistics, a model can be seen as a simplified "theory", describing the process through which observed data were generated. There are two alternative models in meta-analysis: the fixed-effect model, and the random-effects model.
* While the fixed-effect model assumes that there is one true effect size, the random-effects model states that the true effect sizes also vary within meta-analyses. The goal of the random-effects model is therefore to find the mean of the true effect size distribution underlying our data.
* The variance of true effect sizes $\tau^2$, also known as between-study heterogeneity variance, has to be estimated in random-effects meta-analyses. There are several methods for this, and which one works best depends on the context.
* The most common way to calculate a pooled effect size is through the inverse-variance method. However, for binary outcome data, other approaches such as the Mantel-Haenszel method may be preferable.
* In the **{meta}** package, there is a function to perform meta-analyses of pre-calculated effect size data, as well as a suite of functions that can be used for different types of "raw" outcome data.
# About the Authors {-}
---
<br></br>
<img src="images/harrer.png" width="130" align="right" alt="" class="cover" /> [**Mathias Harrer**](https://www.mharrer.dev) is a researcher at the Technical University of Munich and the Friedrich-Alexander-University Erlangen-Nuremberg. Mathias' research focuses on statistical and technological methods in psychotherapy research, methods for clinical research synthesis, and on the development of statistical software.
[](https://twitter.com/MathiasHarrer)
[](https://github.com/MathiasHarrer)
---
<img src="images/cuijpers.jpg" width="130" align="right" alt="" class="cover" /> [**Pim Cuijpers**](https://www.pimcuijpers.com/blog/) is professor of Clinical Psychology at the VU University Amsterdam. He is specialized in conducting randomized controlled trials and meta-analyses, with a focus on the prevention and treatment of common mental disorders. Pim has published more than 800 articles in international peer-reviewed scientific journals; many of which are meta-analyses of clinical trials.
[](https://twitter.com/pimcuijpers)
---
<img src="images/furukawa.jpg" width="130" align="right" alt="" class="cover" /> [**Toshi A. Furukawa**](http://ebmh.med.kyoto-u.ac.jp/professor.html) is professor of Health Promotion and Human Behavior at the Kyoto University School of Public Health. His seminal research focuses both on theoretical aspects of research synthesis and meta-analysis, as well as their application in evidence-based medicine.
[](https://twitter.com/Toshi_FRKW)
---
<img src="images/ebert.jpg" width="130" align="right" alt="" class="cover" /> [**David D. Ebert**](https://www.protectlab.org/en/author/prof.-dr.-david-daniel-ebert/) is professor of Psychology and Behavioral Health Technology at the Technical University of Munich. David's research focuses internet-based intervention, clinical epidemiology, as well as applied research synthesis in this field.
[](https://twitter.com/DDEbert)
---
<br></br>
# Publication Bias {#pub-bias}
---
<img src="_figs/pub_bias.jpg" />
<br></br>
<span class="firstcharacter">L</span>
ooking back at the last chapters, we see that we already covered a vast range of meta-analytic techniques. Not only did we learn how to pool effect sizes, we also know now how to assess the robustness of our findings, inspect patterns of heterogeneity, and test hypotheses on why effects differ.
All of these approaches can help us to draw valid conclusions from our meta-analysis. This, however, rests on a tacit assumption concerning the nature of our data, which we have not challenged yet. When conducting a meta-analysis, we take it as a given that the data we collected is **comprehensive**, or at least **representative** of the research field under examination.
Back in Chapter \@ref(study-search), we mentioned that meta-analyses usually try to include **all** available evidence, in order to derive a single effect size that adequately describes the research field. From a statistical perspective, we may be able to tolerate that a few studies are missing in our analysis--but only if these studies were "left out" by chance.
Unfortunately, meta-analyses are often unable to include all existing evidence. To make things worse, there are also good reasons to assume that some studies are not missing completely "at random" from our collected data. Our world is imperfect, and so are the incentives and "rules" that govern scientific practice. This means that there are systemic biases that can determine if a study ends up in our meta-analysis or not.
A good example of this problem can be found in a not-so-recent anecdote from pharmacotherapy research. Even back in the 1990s, it was considered established knowledge that antidepressive medications (such as **selective serotonin re-uptake inhibitors**, or SSRIs) are effective in treating patients suffering from depression. Most of this evidence was provided by meta-analyses of published pharmacotherapy trials, in which an antidepressant is compared to a pill placebo. The question regarding the effects of antidepressive medication is an important one, considering that the antidepressant drug market is worth billions of dollars, and growing steadily.
This may help to understand the turmoil caused by an article called **The Emperor's New Drugs**, written by Irving Kirsch and colleagues [-@kirsch2002emperor], which argued that things may not look so bright after all.
Drawing on the "Freedom of Information Act", Kirsch and colleagues obtained previously unpublished antidepressant trial data which pharmaceutical companies had provided to the US Food and Drug Administration. They found that when this unpublished data was also considered, the benefits of antidepressants compared to placebos were at best minimal, and clinically negligible. Kirsch and colleagues argued that this was because companies only published studies with favorable findings, while studies with "disappointing" evidence were withheld [@kirschemperorbook].
\index{Publication Bias}
A contentious debate ensued, and Kirsch's claims have remained controversial until today. We have chosen this example not to pick sides, but to illustrate the potential threat that missing studies can pose to the validity of meta-analytic inferences. In the meta-analysis literature, such problems are usually summarized under the term **publication bias**.
\index{File Drawer Problem}
The problem of publication bias underlines that every finding in meta-analyses can only be as good as the data it is based on. Meta-analytic techniques can only work with the data at hand. Therefore, if the collected data is distorted, even the best statistical model will only reproduce inherent biases. Maybe you recall that we already covered this fundamental caveat at the very beginning of this book, where we discussed the "File Drawer" problem (see Chapter \@ref(pitfalls)). Indeed, the terms "file drawer problem" and "publication bias" are often used synonymously.
The consequences of publication bias and related issues on the results of meta-analyses can be enormous. It can cause us to overestimate the effects of treatments, overlook negative side-effects, or reinforce the belief in theories that are actually invalid.
In this chapter, we will therefore discuss the various shapes and forms through which publication bias can distort our findings. We will also have a look at a few approaches that we as meta-analysts can use to examine the risk of publication bias in our data; and how publication bias can be mitigated in the first place.
<br></br>
## What Is Publication Bias? {#types-of-pub-biases}
---
Publication bias exists when the probability of a study getting published is affected by its results [@duval2005publication, chapters 2 and 5]. There is widespread evidence that a study is more likely to find its way into the public if its findings are statistically significant, or confirm the initial hypothesis [@schmucker2014extent; @scherer2018full; @chan2014increasing; @dechartres2018association].
When searching for eligible studies, we are usually constrained to evidence that has been made public in some form or the other, for example through peer-reviewed articles, preprints, books, or other kinds of accessible reports. In the presence of publication bias, this not only means that some studies are missing in our data set--it also means that the missing studies are likely the ones with unfavorable findings.
Meta-analytic techniques allow us to find an unbiased estimate of the average effect size in the population. But if our sample itself is distorted, even an effect estimate that is "true" from a statistical standpoint will not be representative of the reality. It is like trying to estimate the size of an iceberg, but only measuring its tip: our finding will inevitably be wrong, even if we are able to measure the height above the water surface with perfect accuracy.
\index{Reporting Bias}
\index{Citation Bias}
\index{Time-Lag Bias}
\index{Multiple Publication Bias}
\index{Language Bias}
\index{Outcome Reporting Bias}
Publication bias is actually just one of many **non-reporting biases**. There are several other factors that can also distort the evidence that we obtain in our meta-analysis [@page2020investigating], including:
* **Citation bias**: Even when published, studies with negative or inconclusive findings are less likely to be cited by related literature. This makes it harder to detect them through reference searches, for example.
* **Time-lag bias**: Studies with positive results are often published earlier than those with unfavorable findings. This means that the results of recently conducted studies are often already available when they are positive, while those with non-significant findings are not.
* **Multiple publication bias**: Results of "successful" studies are more likely to be reported in several journal articles, which makes it easier to find at least one of them. The practice of reporting study findings across several articles is also known as "salami slicing".
* **Language bias**: In most disciplines, the primary language in which evidence is published is English. Publications in other languages are less likely to be detected, especially when the researchers themselves cannot understand the contents without translation. If studies in English systematically differ from the ones published in other languages, this may also introduce bias.
* **Outcome reporting bias**: Many studies, and clinical trials in particular, measure more than one outcome of interest. Some researchers exploit this, and only report those outcomes for which positive results were attained, while the ones that did not confirm the hypothesis are dropped. This can also lead to bias: technically speaking, the study has been published, but its (unfavorable) result will still be missing in our meta-analysis because it is not reported.
\index{Questionable Research Practice (QRP)}
Non-reporting biases can be seen as systemic factors which make it harder for us to find existing evidence. However, even if we were able to include all relevant findings, our results may still be flawed. Bias may also exist due to **questionable research practices** (QRPs) that researchers have applied when analyzing and reporting their findings [@simonsohn2020specification].
\index{P-Hacking}
We already mentioned the concept of "researcher degrees of freedom" previously (Chapter \@ref(pitfalls)). QRPs can be defined as practices in which researchers abuse these degrees of freedom to "bend" results into the desired direction. Unfortunately, there is no clear consensus on what constitutes a QRP. There are, however, a few commonly suggested examples.
One of the most prominent QRPs is **p-hacking**, in which analyses are tweaked until the conventional significance threshold of $p<$ 0.05 is reached. This can include the way outliers are removed, how subgroups are analyzed, or how missing data are handled.
\index{HARKing}
Another QRP is **HARKing** [@kerr1998harking], which stands for **hypothesizing after the results are known**. One way of HARKing is to pretend that a finding in exploratory analyses has been an a priori hypothesis of the study all along. A researcher, for example, may run various tests on a data set, and then "invent" hypotheses around all the tests that were significant. This is a seriously flawed approach, which inflates the false discovery rate of a study, and thus increases the risk of spurious findings (to name just a few problems). Another type of HARKing is to drop all hypotheses that were not supported by the data, which can ultimately lead to outcome reporting bias.
<br></br>
## Addressing Publication Bias in Meta-Analyses {#addressing-pubbias}
---
It is quite clear that publication bias, other reporting biases and QRPs can have a strong and deleterious effect on the validity of our meta-analysis. They constitute major challenges since it is usually practically impossible to know the exact magnitude of the bias--or if it exists at all.
\index{Study Search}
\index{Open Science Framework (OSF)}
In meta-analyses, we can apply techniques which can, to some extent, reduce the risk of distortions due to publication and reporting bias, as well as QRPs. Some of these approaches pertain to the study search, while others are statistical methods.
* **Study search**. In Chapter \@ref(study-search), we discussed the process of searching for eligible studies. If publication bias exists, this step is of great import, because it means that a search of the published literature may yield data that is not fully representative of all the evidence. We can counteract this by also searching for **grey literature**, which includes dissertations, preprints, government reports, or conference proceedings. Fortunately, pre-registration is also becoming more common in many disciplines. This makes it possible to search study registries such as the ICTRP or **OSF Registries** (see Table \@ref(tab:bibdatabases) in Chapter \@ref(study-search)) for studies with unpublished data, and ask the authors if they can provide us with data that has not been made public (yet)^[Mahmood and colleagues [-@mahood2014searching] provide a detailed account of how a comprehensive grey literature search can be conducted, and what challenges this may entail. The article can be openly accessed online.]. Grey literature search can be tedious and frustrating, but it is worth the effort. One large study has found that the inclusion of grey and unpublished literature can help to avoid an overestimation of the true effects [@mcauley2000does].
* **Statistical methods**. It is also possible to examine the presence of publication bias through statistical procedures. None of these methods can identify publication bias directly, but they can examine certain properties of our data that may be indicative of it. Some methods can also be used to quantify the true overall effect when correcting for publication bias.
\index{Small-Study Effect}
In this chapter, we will showcase common **statistical** methods to evaluate and control for publication bias. We begin with methods focusing on **small-study effects** [@sterne2000publication; @schwarzer2015meta, chapter 5; @duval2005publication, chapter 5]. A common thread among these approaches is that they find indicators of publication bias by looking at the relationship between the precision and observed effect size of studies.
<br></br>
### Small-Study Effect Methods {#small-study-effects}
---
There are various small-study effect methods to assess and correct for publication bias in meta-analyses. Many of these techniques have been in use for many years. As the name suggests, these approaches are particularly concerned with **small studies**. From a statistical standpoint, this translates to studies with a high standard error. Small-study effect methods assume that small studies are more likely to fall prey to publication bias.
This assumption is based on three core ideas [see @borenstein2011introduction, chapter 30]:
\index{File Drawer Problem}
1. Because they involve a large commitment of resources and time, large studies are likely to get published, no matter whether the results are significant or not.
2. Moderately sized studies are at greater risk of not being published. However, even when the statistical power is only moderate, this is still often sufficient to produce significant results. This means that only some studies will not get published because they delivered "undesirable" (i.e. non-significant) results.
3. Small studies are at the greatest risk of generating non-significant findings, and thus of remaining in the "file drawer". In small studies, only very large effects become significant. This means that only small studies with very high effect sizes will be published.
We see that the purported mechanism behind these assumptions is quite simple. Essentially, it says that publication bias exists because only significant effects are published. Since the probability of obtaining significant results rises with larger sample size, it follows that publication bias will disproportionately affect small studies.
<br></br>
#### The Funnel Plot {#funnel-plot}
---
\index{Funnel Plot}
Earlier in this guide (Chapter \@ref(what-is-es)), we learned that a study's sample size and standard error are closely related. Larger standard errors of an effect size result in wider confidence intervals and increase the chance that the effect is not statistically significant. Therefore, it is sensible to assume that small-study effects will largely affect studies with larger standard errors.
Suppose that our collected data is burdened by publication bias. If this is the case, we can assume that the studies with large standard errors have higher effect sizes than the ones with a low standard error. This is because the smaller studies with lower effects were not significant, and thus never considered for publication. Consequently, we never included them in our meta-analysis.
It is conventional to inspect small-study effects through **funnel plots**. A funnel plot is a scatter plot of the studies' observed effect sizes on the x-axis against a measure of their standard error on the y-axis. Usually, the y-axis in funnel plots is inverted (meaning that "higher" values on the y-axis represent **lower** standard errors).
When there is no publication bias, the data points in such a plot should form a roughly symmetrical, upside-down funnel. This is why they are called funnel plots. Studies in the top part of the plot (those with low standard errors), should lie closely together, and not far away from the pooled effect size. In the lower part of the plot, with increasing standard errors, the funnel "opens up", and effect sizes are expected to scatter more heavily to the left and right of the pooled effect.
It becomes easier to see why studies should form a funnel when we think back to what we learned about the behavior of effect sizes in Chapter \@ref(what-is-es), and when discussing the fixed-effect model in Chapter \@ref(fem) (Figure \@ref(fig:funnel1)). The standard error is indicative of a study's **precision**: with decreasing standard error, we expect the observed effect size to become an increasingly good estimator of the true effect size. When the standard error is high, the effect size has a low precision and is therefore much more likely to be far off from the actual effect in the population.
We will now make this more concrete by generating a funnel plot ourselves. In the **{meta}** package, the `funnel.meta` function can be used to print a funnel plot for a meta-analysis object. Here, we produce a funnel plot for our `m.gen` meta-analysis object. We specify two further arguments, `xlim` and `studlab`. The first controls the limits of the x-axis in the plot, while the latter tells the function to include study labels. A call to the `title` function after running `funnel` adds a title to the plot.
Our code looks like this:
\vspace{2mm}
```{r, message=F, fig.width=8, fig.height=6, out.width="85%", collapse = TRUE, results='hold', fig.align='center', eval = F}
# Load 'meta' package
library(meta)
# Produce funnel plot
funnel.meta(m.gen,
xlim = c(-0.5, 2),
studlab = TRUE)
# Add title
title("Funnel Plot (Third Wave Psychotherapies)")
```
```{r, message=F, fig.width=8, fig.height=6, out.width="85%", collapse = TRUE, results='hold', fig.align='center', echo=F}
# Load 'meta' package
library(meta)
par(bg="#FFFEFA")
# Produce funnel plot
funnel.meta(m.gen,
xlim = c(-0.5, 2),
studlab = TRUE)
# Add title
title("Funnel Plot (Third Wave Psychotherapies)")
```
As discussed, the resulting funnel plot shows the effect size of each study (expressed as the standardized mean difference) on the x-axis, and the standard error (from large to small) on the y-axis. To facilitate the interpretation, the plot also includes the idealized funnel-shape that we expect our studies to follow. The vertical line in the middle of the funnel shows the average effect size. Because we used a random-effects model when generating `m.gen`, the funnel plot also uses the random-effects estimate.
In the absence of small-study effects, our studies should roughly follow the shape delineated by the funnel displayed in the plot. Is this the case in our example? Well, not really. While we see that studies with lower standard errors lie more concentrated around the estimated true effect, the pattern overall looks asymmetrical. This is because there are three small studies with very high effect sizes in the bottom-right corner of the plot (the ones by Shapiro, Kang, and Danitz-Orsillo).
These studies, however, have no equivalent in the bottom-left corner in the plot. There are no small studies with very low or negative effect sizes to "balance out" the ones with very high effects. Another worrisome detail is that the study with the greatest precision in our sample, the one by de Vibe, does not seem to follow the funnel pattern well either. Its effect size is considerably smaller than expected.
Overall, the data set shows an asymmetrical pattern in the funnel plot that **might** be indicative of publication bias. It could be that the three small studies are the ones that were lucky to find effects high enough to become significant, while there is an underbelly of unpublished studies with similar standard errors, but smaller and thus non-significant effects which did not make the cut.
A good way to inspect how asymmetry patterns relate to statistical significance is to generate **contour-enhanced funnel plots** [@peters2008contour]. Such plots can help to distinguish publication bias from other forms of asymmetry. Contour-enhanced funnel plots include colors which signify the significance level of each study in the plot. In the `funnel.meta` function, contours can be added by providing the desired significance thresholds to the `contour` argument. Usually, these are `0.9`, `0.95` and `0.99`, which equals $p$ < 0.1, 0.05 and 0.01, respectively. Using the `col.contour` argument, we can also specify the color that the contours should have. Lastly, the `legend` function can be used afterwards to add a legend to the plot, specifying what the different colors mean. We can position the legend on the plot using the `x` and `y` arguments, provide labels in `legend`, and add fill colors using the `fill` argument.
This results in the following code:
```{r, fig.width=8, fig.height=6, out.width="82%", collapse=TRUE, fig.align='center', eval=F}
# Define fill colors for contour
col.contour = c("gray75", "gray85", "gray95")
# Generate funnel plot (we do not include study labels here)
funnel.meta(m.gen, xlim = c(-0.5, 2),
contour = c(0.9, 0.95, 0.99),
col.contour = col.contour)
# Add a legend
legend(x = 1.6, y = 0.01,
legend = c("p < 0.1", "p < 0.05", "p < 0.01"),
fill = col.contour)
# Add a title
title("Contour-Enhanced Funnel Plot (Third Wave Psychotherapies)")
```
```{r, fig.width=8, fig.height=6, out.width="75%", collapse=TRUE, fig.align='center', echo=F}
# Define fill colors for contour
col.contour = c("gray75", "gray85", "gray95")
par(bg="#FFFEFA")
# Generate funnel plot (we do not include study labels here)
funnel.meta(m.gen, xlim = c(-0.5, 2),
contour = c(0.9, 0.95, 0.99),
col.contour = col.contour)
# Add a legend
legend(x = 1.6, y = 0.01,
legend = c("p < 0.1", "p < 0.05", "p < 0.01"),
fill = col.contour)
# Add a title
title("Contour-Enhanced Funnel Plot (Third Wave Psychotherapies)")
```
We see that the funnel plot now contains three shaded regions. We are particularly interested in the $p<$ 0.05 and $p<$ 0.01 regions, because effect sizes falling into this area are traditionally considered significant.
Adding the contour regions is illuminating: it shows that the three small studies all have significant effects, despite having a large standard error. There is only one study with a similar standard error that is not significant. If we were to "impute" the missing studies in the lower left corner of the plot to increase the symmetry, these studies would lie in the non-significance region of the plot; or they would actually have a significant negative effect.
The pattern looks a little different for the larger studies. We see that there are several studies for which $p>$ 0.05, and the distribution of effects is less lopsided. What could be problematic though is that, while not strictly significant, all but one study are very close to the significance threshold (i.e. they lie in the 0.1 $> p >$ 0.05 region). It is possible that these studies simply calculated the effect size differently in the original paper, which led to a significant result. Or maybe, finding effects that are significant on a trend level was already convincing enough to get the study published.
In sum, inspection of the contour-enhanced funnel plot corroborates our initial hunch that there is asymmetry in the funnel plot and that this may be caused by publication bias. It is crucial, however, not to jump to conclusions, and interpret the funnel plot cautiously. We have to keep in mind that publication bias is just one of many possible reasons for funnel plot asymmetry.
\index{Fidelity, Treatment}
```{block, type='boxinfo'}
**Alternative Explanations for Funnel Plot Asymmetry**
\vspace{2mm}
Although publication bias can lead to asymmetrical funnel plots, there are also other, rather "benign", causes that may produce similar patterns [@page2020investigating]:
\vspace{2mm}
* Asymmetry can also be caused by between-study heterogeneity. Funnel plots assume that the dispersion of effect sizes is caused by the studies' sampling error, but do not control for the fact that the studies may be estimators of different true effects.
\vspace{2mm}
* It is possible that study procedures were different in small studies, and that this resulted in higher effects. In clinical studies, for example, it is easier to make sure that every participant receives the treatment as intended when the sample size is small. This may not be the case in large studies, resulting in a lower **treatment fidelity**, and thus lower effects. It can make sense to inspect the characteristics of the included studies in order to evaluate if such an alternative explanation is plausible.
\vspace{2mm}
* It is a common finding that low-quality studies tend to show larger effect sizes, because there is a higher risk of bias. Large studies require more investment, so it is likely that their methodology will also be more rigorous. This can also lead to funnel plot asymmetry, even when there is no publication bias.
\vspace{2mm}
* Lastly, it is perfectly possible that funnel plot asymmetry simply occurs by chance.
```
\index{Egger's Test}
We see that visual inspection of the (contour-enhanced) funnel plot can already provide us with a few "red flags" that indicate that our results may be affected by publication bias.
However, interpreting the funnel plot just by looking at it clearly also has its limitations. There is no explicit rule for when our results are "too asymmetric", meaning that inferences from funnel plots are always somewhat subjective. Therefore, it is helpful to assess the presence of funnel plot asymmetry in a quantitative way. This is usually achieved through **Egger's regression test**, which we will discuss next.
<br></br>
#### Egger's Regression Test {#eggers-test}
---
Egger's regression test [@egger1997bias] is a commonly used quantitative method that tests for asymmetry in the funnel plot. Like visual inspection of the funnel plot, it can only identify small-study effects and not tell us directly if publication bias exists. The test is based on a simple linear regression model, the formula of which looks like this:
\begin{equation}
\frac{\hat\theta_k}{SE_{\hat\theta_k}} = \beta_0 + \beta_1 \frac{1}{SE_{\hat\theta_k}}
(\#eq:pub1)
\end{equation}
The responses $y$ in this formula are the observed effect sizes $\hat\theta_k$ in our meta-analysis, divided by their standard error. The resulting values are equivalent to $z$-scores. These scores tell us directly if an effect size is significant; when $z \geq$ 1.96 or $\leq$ -1.96, we know that the effect is significant ($p<$ 0.05). This response is regressed on the inverse of the studies' standard error, which is equivalent to their precision.
When using Egger's test, however, we are not interested in the size and significance of the regression weight $\beta_1$, but in the **intercept** $\beta_0$. To evaluate the funnel asymmetry, we inspect the size of $\hat\beta_0$, and if it differs significantly from zero. When this is the case, Egger's test indicates funnel plot asymmetry.
Let us take a moment to understand why the size of the regression intercept tells us something about asymmetry in the funnel plot. In every linear regression model, the intercept represents the value of $y$ when all other predictors are zero. The predictor in our model is the precision of a study, so the intercept shows the expected $z$-score when the precision is zero (i.e. when the standard error of a study is infinitely large).
When there is no publication bias, the expected $z$-score should be scattered around zero. This is because studies with extremely large standard errors have extremely large confidence intervals, making it nearly impossible to reach a value of $|z| \geq$ 1.96. However, when the funnel plot is asymmetric, for example due to publication bias, we expect that small studies with very high effect sizes will be considerably over-represented in our data, leading to a surprisingly high number of low-precision studies with $z$ values greater than or equal to 1.96. Due to this distortion, the predicted value of $y$ for zero precision will be much larger than zero, resulting in a significant intercept.
The plots below illustrate the effects of funnel plot asymmetry on the regression slope and intercept underlying Egger's test.
```{r eggers_alt, echo=F, out.width="50%", message=F, warning=F, fig.width=6, fig.height=5, eval=F}
library(ggplot2)
load("data/m.egdat.rda")
load("data/m.egdat.bias.rda")
funnel(m.egdat, xlab = "Effect Size")
title("Funnel Plot (No Asymmetry)")
m.egdat$data %>%
mutate(y = .TE/.seTE, x = 1/.seTE) %>%
ggplot(aes(y = y, x = x)) +
xlim(c(0, 110)) +
#ylim(c(0, 110)) +
geom_point(fill = "grey", pch=21) +
geom_smooth(method = "lm", se = F, fullrange = T, color = "black") +
theme_minimal() +
ylab("Scaled Effect Size (z)") +
xlab("Inverse Standard Error (Precision)") +
annotate("text", x = 3, y = 33, label = bquote(hat(beta)[0]~"="~0.21), hjust = "left",
cex = 6) +
annotate(geom = "curve", x = 0, y = 0.21, xend = 5, yend = 30,
curvature = .3, arrow = arrow(length = unit(2, "mm")), linetype = "dashed") +
ggtitle("Regression Line (No Asymmetry)") +
theme(plot.title = element_text(color="black", size=14, face="bold",
hjust = 0.5),
plot.margin = margin(1.08, 1, 1.08, 1, "cm"),
plot.background = element_rect(fill = "#FFFEFA", color = "#fbfbfb"),
panel.background = element_rect(fill = "#FFFEFA")) # 'nearly-white' used to keep knitr from cropping
meta::funnel.meta(m.egdat.bias, xlab = "Effect Size")
title("Funnel Plot (Asymmetry)")
m.egdat.bias$data %>%
mutate(y = .TE/.seTE, x = 1/.seTE) %>%
ggplot(aes(y = y, x = x)) +
xlim(c(0, 9)) +
ylim(c(0,6)) +
geom_point(fill = "grey", pch=21) +
geom_smooth(method = "lm", se = F, fullrange = T, color = "black") +
theme_minimal() +
ylab("Scaled Effect Size (z)") +
xlab("Inverse Standard Error (Precision)") +
annotate("text", x = 0.8, y = 0.5, label = bquote(hat(beta)[0]~"="~2.85), hjust = "left",
cex = 6) +
annotate(geom = "curve", x = 0, y = 2.85, xend = 0.7, yend = 0.7,
curvature = .3, arrow = arrow(length = unit(2, "mm")), linetype = "dashed") +
ggtitle("Regression Line (Asymmetry)") +
theme(plot.title = element_text(color="black", size=14, face="bold",
hjust = 0.5),
plot.margin = margin(1.08, 1, 1.08, 1, "cm"),
plot.background = element_rect(fill = "#feffff", color = "#fbfbfb"),
panel.background = element_rect(fill = "#feffff")) # 'nearly-white' used to keep knitr from cropping
```
```{r eggers, echo=F, out.width="50%", message=F, warning=F, fig.width=6, fig.height=5}
library(OpenImageR)
knitr::include_graphics('images/eggers-1_sep.png')
knitr::include_graphics('images/eggers-2_sep.png')
knitr::include_graphics('images/eggers-3_sep.png')
knitr::include_graphics('images/eggers-4_sep.png')
```
\index{tidyverse Package}
Let us see what results we get when we fit such a regression model to the data in `m.gen`. Using _R_, we can extract the original data in `m.gen` to calculate the response `y` and our predictor `x`. In the code below, we do this using a pipe (Chapter \@ref(data-transform)) and the `mutate` function, which is part of the **{tidyverse}**. After that, we use the **l**inear **m**odel function `lm` to regress the $z$ scores `y` on the precision `x`. In the last part of the pipe, we request a `summary` of the results.
```{r, eval = F}
# Load required package
library(tidyverse)
m.gen$data %>%
mutate(y = TE/seTE, x = 1/seTE) %>%
lm(y ~ x, data = .) %>%
summary()
```
```
## [...]
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 4.1111 0.8790 4.677 0.000252 ***
## x -0.3407 0.1837 -1.855 0.082140 .
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## [...]
```
In the results, we see that the intercept of our regression model is $\hat\beta_0$ = 4.11. This is significantly larger than zero ($t$ = 4.677, $p<$ 0.001), and indicates that the data in the funnel plot is indeed asymmetrical. Overall, this corroborates our initial findings that there are small-study effects. Yet, to reiterate, it is uncertain if this pattern has been caused by publication bias.
A more convenient way to perform Egger's test of the intercept is to use the `metabias` function in **{meta}**. This function only needs the meta-analysis object as input, and we have to set the `method.bias` argument to `"linreg"`. If we apply the function to `m.gen`, we get the same results as before.
```{r}
metabias(m.gen, method.bias = "linreg")
```
```{block2, type='boxreport'}
**Reporting the Results of Egger's Test**
\vspace{2mm}
For Egger's tests, it is usually sufficient to report the value of the intercept, its 95% confidence interval, as well as the $t$ and $p$-value. In the **{dmetar}** package, we included a convenience function called `eggers.test`. This function is a wrapper for `metabias`, and provides the results of Egger's test in a format suitable for reporting. In case you do not have **{dmetar}** installed, you can find the function's source code [online](https://raw.githubusercontent.com/MathiasHarrer/dmetar/master/R/eggers.test.R). Here is an example:
`eggers.test(m.gen)`
$~$ | `Intercept` | `ConfidenceInterval` | `t` | `p`
-------------- | ----------- | -------------------- | ---------- | ---
`Egger's test` | `4.111` | `2.347-5.875` | `4.677` | `0.00025`
```
\index{Standardized Mean Difference}
The effect size metric used in `m.gen` is the small sample bias-corrected SMD (Hedges' $g$). It has been argued that running Egger's test on SMDs can lead to an inflation of false positive results [@pustejovsky2019testing]. This is because a study's standardized mean difference and standard error are not independent.
We can easily see this by looking at the formula used to calculate the standard error of between-group SMDs (equation 3.18, Chapter \@ref(b-group-smd)). This formula includes the SMD itself, which means that a study's standard error changes for smaller or larger values of the observed effect (i.e. there is an artifactual correlation between the SMD and its standard error).
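To make this dependence tangible, here is a small numerical sketch. It uses the common standard error formula for between-group SMDs and made-up, equal group sizes; only the observed SMD is varied:

```{r, eval=F}
# Sketch: the SE of a between-group SMD depends on the SMD itself
# (made-up group sizes; only the observed SMD varies)
se_smd <- function(smd, n1, n2) {
  sqrt((n1 + n2)/(n1 * n2) + smd^2/(2 * (n1 + n2)))
}
se_smd(smd = c(0.2, 0.5, 0.8), n1 = 30, n2 = 30)
```

Larger observed effects translate into (slightly) larger standard errors, even though the sample sizes stay the same.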
Pustejovsky and Rodgers [-@pustejovsky2019testing] propose to use a modified version of the standard error when testing for the funnel plot asymmetry of standardized mean differences. Only the first part of the standard error formula is used, which means that the observed effect size drops out of the equation. Thus, the formula looks like this:
\begin{equation}
SE^*_{\text{SMD}_{\text{between}}}= \sqrt{\frac{n_1+n_2}{n_1n_2}}
(\#eq:pub2)
\end{equation}
Where $SE^*_{\text{SMD}_{\text{between}}}$ is the modified version of the standard error. It might be a good idea to check if Egger's test gives the same results when using this improvement. In the following code, we add the sample size per group of each study to our initial data set, calculate the adapted standard error, and then use it to re-run the analyses.
```{r}
# Add experimental (n1) and control group (n2) sample size
n1 <- c(62, 72, 44, 135, 103, 71, 69, 68, 95,
43, 79, 61, 62, 60, 43, 42, 64, 63)
n2 <- c(51, 78, 41, 115, 100, 79, 62, 72, 80,
44, 72, 67, 59, 54, 41, 51, 66, 55)
# Calculate modified SE
ThirdWave$seTE_c <- sqrt((n1+n2)/(n1*n2))
# Re-run 'metagen' with modified SE to get meta-analysis object
m.gen.c <- metagen(TE = TE, seTE = seTE_c,
studlab = Author, data = ThirdWave, sm = "SMD",
fixed = FALSE, random = TRUE,
method.tau = "REML", hakn = TRUE,
title = "Third Wave Psychotherapies")
# Egger's test
metabias(m.gen.c, method.bias = "linreg")
```
We see that, although the exact values differ, the interpretation of the results remains the same. This points to the robustness of our previous finding.
```{block2, type='boxinfo'}
**Using the Pustejovsky-Rodgers Approach Directly in `metabias`**
In the latest versions of **{meta}**, the `metabias` function also contains an option to conduct Eggers' test with the corrected standard error formula proposed by Pustejovsky and Rodgers. The option can be used by setting `method.bias` to `"Pustejovsky"`.
Yet, this is only possible if the **{meta}** meta-analysis object already contains the sample sizes of the experimental and control groups in elements `n.e` and `n.c`, respectively. When using `metagen` objects (like in our example above), this is not typically the case, so these values need to be added manually. Let us use our `m.gen` meta-analysis object again as an example:
`m.gen$n.e = n1; m.gen$n.c = n2`
`metabias(m.gen, method.bias = "Pustejovsky")`
Please note that `metabias`, under these settings, uses equation \@ref(eq:pub5) to perform Egger's test, which is equivalent to equation \@ref(eq:pub1) shown before. The main difference is that `metabias` uses the corrected standard error as the predictor in the model, and the inverse variance of included effect sizes as weights.
In our example, however, we used the corrected standard error on both sides of equation \@ref(eq:pub1). This means that results of our approach as shown above, and results obtained via setting `method.bias` to `"Pustejovsky"`, will not be completely identical.
```
<br></br>
#### Peters' Regression Test {#peters-test}
---
\index{Peter's Test}
The dependence of effect size and standard error not only applies to standardized mean differences. This mathematical association also exists in effect sizes based on binary outcome data, such as (log) odds ratios (Chapter \@ref(or)), risk ratios (Chapter \@ref(rr)) or proportions (Chapter \@ref(props)).
To avoid an inflated risk of false positives when using binary effect size data, we can use another type of regression test, proposed by Peters and colleagues [@peters2006comparison]. To obtain the results of Peters' test, the log-transformed effect size is regressed on the inverse of the sample size:
\begin{equation}
\log\psi_k = \beta_0 + \beta_1\frac{1}{n_k}
(\#eq:pub3)
\end{equation}
In this formula, $\log\psi_k$ can stand for any log-transformed effect size based on binary outcome data (e.g. the odds ratio), and $n_k$ is the total sample size of study $k$.
\index{Weighted Least Squares (WLS)}
Importantly, when fitting the regression model, each study $k$ is given a different weight $w_k$, depending on its sample size and event counts. This results in a **weighted** linear regression, which is similar (but not identical) to a meta-regression model (see Chapter \@ref(metareg-model-fit)). The formula for the weights $w_k$ looks like this:
\begin{equation}
w_k = \frac{1}{\left(\dfrac{1}{a_k+c_k}+\dfrac{1}{b_k+d_k}\right)}
(\#eq:pub4)
\end{equation}
Where $a_k$ is the number of events in the treatment group, $c_k$ is the number of events in the control group; $b_k$ and $d_k$ are the number of non-events in the treatment and control group, respectively (see Chapter \@ref(rr)). In contrast to Eggers' regression test, Peters' test uses $\beta_1$ instead of the intercept to test for funnel plot asymmetry. When the statistical test reveals that $\beta_1 \neq 0$, we can assume that asymmetry exists in our data.
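Although we will use a convenience function in a moment, the weighted regression behind Peters' test can, in principle, be replicated by hand. The following is only a rough sketch, assuming a hypothetical data frame `dat.bin` that contains the event counts (`event.e`, `event.c`) and sample sizes (`n.e`, `n.c`) of both groups:

```{r, eval=F}
# Rough sketch of Peters' test "by hand" (hypothetical data frame 'dat.bin')
library(tidyverse)

dat.bin %>%
  mutate(a_k = event.e, b_k = n.e - event.e,    # treatment: events, non-events
         c_k = event.c, d_k = n.c - event.c,    # control: events, non-events
         logOR = log((a_k*d_k)/(b_k*c_k)),      # log odds ratio of each study
         inv_n = 1/(n.e + n.c),                 # predictor: inverse sample size
         w_k = 1/(1/(a_k + c_k) + 1/(b_k + d_k))) %>%   # Peters' weights
  lm(logOR ~ inv_n, weights = w_k, data = .) %>%
  summary()   # the 'inv_n' coefficient (beta_1) tests for asymmetry
```

Note that this is only meant to illustrate the model and weights defined above; it is not exactly what `metabias` does internally.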
When we have calculated a meta-analysis based on binary outcome data using the `metabin` (Chapter \@ref(pooling-or-rr)) or `metaprop` (Chapter \@ref(pooling-props)) function, the `metabias` function can be used to conduct Peters' test. We only have to provide a fitting meta-analysis object and use `"peters"` as the argument in `method.bias`. Let us check for funnel plot asymmetry in the `m.bin` object we created in Chapter \@ref(pooling-or-rr).
As you might remember, we used the risk ratio as the summary measure for this meta-analysis.
```{r}
metabias(m.bin, method.bias = "peters")
```
We see that the structure of the output looks identical to the one of Eggers' test. The output tells us that the results are the ones of a regression test `based on sample size`, meaning that Peters' method has been used. The test is not significant ($t$ = -0.08, $p$ = 0.94), indicating no funnel plot asymmetry.
```{block, type='boximportant'}
**Statistical Power of Funnel Plot Asymmetry Tests**
\vspace{2mm}
It is advisable to only test for funnel plot asymmetry when our meta-analysis includes a sufficient number of studies. When the number of studies is low, the statistical power of Eggers' or Peters' test may not be high enough to detect real asymmetry. It is generally recommended to only perform a test when $K \geq 10$ [@sterne2011recommendations].
By default, `metabias` will throw an error when the number of studies in our meta-analysis is smaller than that. However, it is possible (although not advised) to prevent this by setting the `k.min` argument in the function to a lower number.
```
<br></br>
#### Duval & Tweedie Trim and Fill Method {#duval-and-tweedie}
---
\index{Trim and Fill Method}
We have now learned several ways to examine (and test for) small-study effects in our meta-analysis. While it is good to know that publication bias may exist in our data, what we are primarily interested in is the **magnitude** of that bias. We want to know if publication bias has only distorted our estimate slightly, or if it is massive enough to change the interpretation of our findings.
In short, we need a method which allows us to calculate a **bias-corrected** estimate of the true effect size. Yet, we already learned that publication bias cannot be measured directly. We can only use small-study effects as a proxy that may **point** to publication bias.
We can therefore only adjust for small-study effects to attain a corrected effect estimate, not for publication bias **per se**. When effect size asymmetry was indeed caused by publication bias, correcting for this imbalance will yield an estimate that better represents the true effect when **all** evidence is considered.
One of the most common methods to adjust for funnel plot asymmetry is the **Duval & Tweedie trim and fill method** [@duval2000trim]. The idea behind this method is simple: it imputes "missing" effects until the funnel plot is symmetric. The pooled effect size of the resulting "extended" data set then represents the estimate when correcting for small-study effects. This is achieved through a simple algorithm, which involves the "trimming" and "filling" of effects [@schwarzer2015meta, chapter 5.3.1]:
* **Trimming**. First, the method identifies all the outlying studies in the funnel plot. In our example from before, these would be all small studies scattered around the right side of the plot. Once identified, these studies are **trimmed**: they are removed from the analysis, and the pooled effect is recalculated without them. This step is usually performed using a fixed-effect model.
* **Filling**. For the next step, the recalculated pooled effect is now assumed to be the center of all effect sizes. For each trimmed study, one additional effect size is added, mirroring its results on the other side of the funnel. For example, if the recalculated mean effect is 0.5 and a trimmed study has an effect of 0.8, the mirrored study will be given an effect of 0.2. After this is done for all trimmed studies, the funnel plot will look roughly symmetric. Based on all data, including the trimmed and imputed effect sizes, the average effect is then recalculated again (typically using a random-effects model). The result is then used as the estimate of the corrected pooled effect size.
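The "mirroring" in the filling step simply reflects each trimmed effect around the recalculated center. A tiny numerical sketch with made-up values:

```{r, eval=F}
# Made-up values: reflect trimmed effects around the recalculated center
center <- 0.5                 # pooled effect after trimming
trimmed <- c(0.8, 1.1, 1.4)   # effects of the trimmed studies
2*center - trimmed            # imputed ("filled") effects: 0.2, -0.1, -0.4
```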
An important caveat pertaining to the trim-and-fill method is that it does not produce reliable results when the between-study heterogeneity is large [@peters2007performance; @terrin2003adjusting; @simonsohn2014p]. When studies do not share one true effect, it is possible that even large studies deviate substantially from the average effect. This means that such studies are also trimmed and filled, even though it is unlikely that they are affected by publication bias. It is easy to see that this can lead to invalid results.
We can apply the trim and fill algorithm to our data using the `trimfill` function in **{meta}**. The function has very sensible defaults, so it is sufficient to simply provide it with our meta-analysis object. In our example, we use our `m.gen` object again. However, before we start, let us first check the amount of $I^2$ heterogeneity we observed in this meta-analysis.
```{r}
m.gen$I2
```
We see that, with $I^2$ = 63%, the heterogeneity in our analysis is substantial. In light of the trim and fill method's limitations in heterogeneous data sets, this could prove problematic.
We will therefore conduct two trim and fill analyses: one with all studies, and a sensitivity analysis in which we exclude the two outliers identified in Chapter \@ref(outliers) (i.e. studies 3 and 16). We save the results to `tf` and `tf.no.out`.
```{r}
# Using all studies
tf <- trimfill(m.gen)
# Analyze with outliers removed
tf.no.out <- trimfill(update(m.gen,
subset = -c(3, 16)))
```
First, let us have a look at the first analysis, which includes all studies.
```{r, eval=F}
summary(tf)
```
```
## Review: Third Wave Psychotherapies
## SMD 95%-CI %W(random)
## [...]
## Filled: Warnecke et al. 0.0520 [-0.4360; 0.5401] 3.8
## Filled: Song & Lindquist 0.0395 [-0.4048; 0.4837] 4.0
## Filled: Frogeli et al. 0.0220 [-0.3621; 0.4062] 4.2
## Filled: Call et al. -0.0571 [-0.5683; 0.4541] 3.8
## Filled: Gallego et al. -0.0729 [-0.5132; 0.3675] 4.0
## Filled: Kang et al. -0.6230 [-1.2839; 0.0379] 3.3
## Filled: Shapiro et al. -0.8277 [-1.4456; -0.2098] 3.4
## Filled: DanitzOrsillo -1.1391 [-1.8164; -0.4618] 3.3
##
## Number of studies combined: k = 26 (with 8 added studies)
##
## SMD 95%-CI t p-value
## Random effects model 0.3428 [0.1015; 0.5841] 2.93 0.0072
##
## Quantifying heterogeneity:
## tau^2 = 0.2557 [0.1456; 0.6642]; tau = 0.5056 [0.3816; 0.8150];
## I^2 = 76.2% [65.4%; 83.7%]; H = 2.05 [1.70; 2.47]
##
## [...]
##
## Details on meta-analytical method:
## - Inverse variance method
## - Restricted maximum-likelihood estimator for tau^2
## - Q-profile method for confidence interval of tau^2 and tau
## - Hartung-Knapp adjustment for random effects model
## - Trim-and-fill method to adjust for funnel plot asymmetry
```
We see that the trim and fill procedure added a total of eight studies. Trimmed and filled studies include our detected outliers, but also a few other smaller studies with relatively high effects. We see that the imputed effect sizes are all very low, and some are even highly negative. The output also provides us with the estimate of the corrected effect, which is $g=$ 0.34. This is still significant, but much lower than the effect of $g=$ 0.58 we initially calculated for `m.gen`.
Now, let us compare this to the results of the analysis in which outliers were removed.
```{r, eval=F}
summary(tf.no.out)
```
```
## Review: Third Wave Psychotherapies
## [...]
##
## Number of studies combined: k = 22 (with 6 added studies)
##
## SMD 95%-CI t p-value
## Random effects model 0.3391 [0.1904; 0.4878] 4.74 0.0001
##
## Quantifying heterogeneity:
## tau^2 = 0.0421 [0.0116; 0.2181]; tau = 0.2053 [0.1079; 0.4671];
## I^2 = 50.5% [19.1%; 69.7%]; H = 1.42 [1.11; 1.82]
## [...]
```
With $g=$ 0.34, the results are nearly identical. Overall, the trim and fill method indicates that the pooled effect of $g=$ 0.58 in our meta-analysis is overestimated due to small-study effects. In reality, the effect may be considerably smaller. It is likely that this overestimation has been caused by publication bias, but this is not certain. Other explanations are possible too, and this could mean that the trim and fill estimate is invalid.
Lastly, it is also possible to create a funnel plot including the imputed studies. We only have to apply the `funnel.meta` function to the output of `trimfill`. In the following code, we create contour-enhanced funnel plots for both trim and fill analyses (with and without outliers). Using the `par` function, we can print both plots side by side.
\vspace{4mm}
```{r, fig.width=12, fig.height=5, eval=F}
# Define fill colors for contour
contour <- c(0.9, 0.95, 0.99)
col.contour <- c("gray75", "gray85", "gray95")
ld <- c("p < 0.1", "p < 0.05", "p < 0.01")
# Use 'par' to create two plots in one row (row, columns)
par(mfrow=c(1,2))
# Contour-enhanced funnel plot (full data)
funnel.meta(tf,
xlim = c(-1.5, 2), contour = contour,
col.contour = col.contour)
legend(x = 1.1, y = 0.01,
legend = ld, fill = col.contour)
title("Funnel Plot (Trim & Fill Method)")
# Contour-enhanced funnel plot (outliers removed)
funnel.meta(tf.no.out,
xlim = c(-1.5, 2), contour = contour,
col.contour = col.contour)
legend(x = 1.1, y = 0.01,
legend = ld, fill = col.contour)
title("Funnel Plot (Trim & Fill Method) - Outliers Removed")
```
```{r, fig.width=12, fig.height=5, echo=F}
# Define fill colors for contour
contour <- c(0.9, 0.95, 0.99)
col.contour <- c("gray75", "gray85", "gray95")
ld <- c("p < 0.1", "p < 0.05", "p < 0.01")
# Use 'par' to create two plots in one row (row, columns)
par(mfrow=c(1,2),
bg="#FFFEFA")
# Contour-enhanced funnel plot (full data)
funnel.meta(tf,
xlim = c(-1.5, 2), contour = contour,
col.contour = col.contour)
legend(x = 1.1, y = 0.01,
legend = ld, fill = col.contour)
title("Funnel Plot (Trim & Fill Method)")
# Contour-enhanced funnel plot (outliers removed)
funnel.meta(tf.no.out,
xlim = c(-1.5, 2), contour = contour,
col.contour = col.contour)
legend(x = 1.1, y = 0.01,
legend = ld, fill = col.contour)
title("Funnel Plot (Trim & Fill Method) - Outliers Removed")
```
In these funnel plots, the imputed studies are represented by circles that have no fill color.
<br></br>
#### PET-PEESE {#pet-peese}
---
\index{PET-PEESE}
\index{Standardized Mean Difference}
Duval & Tweedie's trim and fill method is relatively old, and arguably one of the most common methods to adjust for small-study effects. However, as we mentioned, it is an approach that is far from perfect, and not the only way to estimate a bias-corrected version of our pooled effect. In recent years, a method called **PET-PEESE** [@stanley2014meta; @stanley2008meta] has become increasingly popular; particularly in research fields where SMDs are frequently used as the outcome measure (for example psychology or educational research). Like all previous techniques, PET-PEESE is aimed at small-study effects, which are seen as a potential indicator of publication bias.
PET-PEESE is actually a combination of two methods: the **precision-effect test** (PET) and the **precision-effect estimate with standard error** (PEESE). Let us begin with the former. The PET method is based on a simple regression model, in which we regress a study's effect size on its standard error:
\begin{equation}
\theta_k = \beta_0 + \beta_1SE_{\theta_k}
(\#eq:pub5)
\end{equation}
Like in Peters' test, we use a weighted regression. The study weight $w_k$ is calculated as the inverse of the variance--just like in a normal (fixed-effect) meta-analysis:
\begin{equation}
w_k = \frac{1}{s_k^2}
(\#eq:pub6)
\end{equation}
It is of note that the regression model used by the PET method is equivalent to the one of Eggers' test. The main difference is that in the PET formula, the $\beta_1$ coefficient quantifies funnel asymmetry, while in Eggers' test, this is indicated by the intercept.
\index{Limit}
When using the PET method, however, we are not interested in the funnel asymmetry measured by $\beta_1$, but in the intercept $\beta_0$. This is because, in the formula above, the intercept represents the so-called **limit effect**. This limit effect is the expected effect size of a study with a **standard error of zero**. This is the equivalent of an observed effect size measured without sampling error. All things being equal, we know that an effect size measured without sampling error $\epsilon_k$ will represent the true overall effect itself.
The idea behind the PET method is to **control** for the effect of small studies by including the standard error as a predictor. In theory, this should lead to an intercept $\beta_0$ which represents the true effect in our meta-analysis after correction for all small-study effects:
\begin{equation}
\hat\theta_{\text{PET}} = \hat\beta_{0_{\mathrm{PET}}}
(\#eq:pub7)
\end{equation}
The formula for the PEESE method is very similar. The only difference is that we use the **squared** standard error as the predictor (i.e. the effect size variance $s_k^2$):
\begin{equation}
\theta_k = \beta_0 + \beta_1SE_{\theta_k}^2
(\#eq:pub8)
\end{equation}
While the formula for the study weights $w_k$ remains the same. The idea behind squaring the standard error is that small studies are particularly prone to reporting **highly** over-estimated effects. This problem, it is assumed, is far less pronounced for studies with high statistical power.
While the PET method works best when the true effect captured by $\beta_0$ is **zero**, PEESE shows a better performance when the true effect is **not** zero. Stanley and Doucouliagos [-@stanley2014meta] therefore proposed to combine both methods, in order to balance out their individual strengths. The resulting approach is the PET-PEESE method. PET-PEESE uses the intercept $\beta_0$ of either PET or PEESE as the estimate of the corrected true effect.
Whether PET or PEESE is used depends on the size of the intercept calculated by the PET method. When $\beta_{0_{\text{PET}}}$ is significantly larger than zero in a one-sided test with $\alpha$ = 0.05, we use the intercept of PEESE as the true effect size estimate. If PET's intercept is not significantly larger than zero, we remain with the PET estimate.
In most implementations of regression models in _R_, it is conventional to test the significance of coefficients using a two-sided test (i.e. we test if a $\beta$ weight significantly differs from zero, no matter the direction). To assume a one-sided test with $\alpha$ = 0.05, we already regard the intercept as significant when $p$ < 0.1, and when the estimate of $\beta_0$ is larger than zero^[The latter condition ($\hat\beta_0$ > 0) only applies if positive effect sizes represent favorable outcomes (e.g. positive effect sizes mean that the intervention was effective). When negative effect sizes (e.g. SMD = -0.5) represent favorable outcomes, our one-side test should be in the other direction. This means that PEESE is used when the $p$-value of PET's intercept is smaller than 0.1, and when the intercept estimate is **smaller** than zero.].
The rule to obtain the true effect size as estimated by PET-PEESE, therefore, looks like this:
\begin{equation}
\hat\theta_{\text{PET-PEESE}}=\begin{cases}
\mathrm{P}(\beta_{0_{\text{PET}}} = 0) <0.1~\mathrm{and}~\hat\beta_{0_{\text{PET}}} > 0: & \hat\beta_{0_{\text{PEESE}}}\\
\text{else}: & \hat\beta_{0_{\text{PET}}}.
\end{cases}
(\#eq:pub9)
\end{equation}
It is somewhat difficult to wrap one's head around this if-else logic, but a hands-on example may help to clarify things. Using our `m.gen` meta-analysis object, let us see what PET-PEESE's estimate of the true effect size is.
There is currently no straightforward implementation of PET-PEESE in **{meta}**, so we write our own code using the linear model function `lm`. Before we can fit the PET and PEESE model, however, we first have to prepare all the variables we need in our data frame. We will call this data frame `dat.petpeese`. The most important variable, of course, is the standardized mean difference. No matter if we initially ran our meta-analysis using `metacont` or `metagen`, the calculated SMDs of each study will always be stored under `TE` in our meta-analysis object.
```{r}
# Build data set, starting with the effect size
dat.petpeese <- data.frame(TE = m.gen$TE)
```
\index{Standardized Mean Difference}
Next, we need the standard error of the effect size. For PET-PEESE, it is also advisable to use the modified standard error proposed by Pustejovsky and Rodgers [-@pustejovsky2019testing, see Chapter \@ref(eggers-test)]^[James Pustejovsky has also recommended this approach in a [blog post](https://www.jepusto.com/pet-peese-performance/) and termed this alternative method "SPET-SPEESE".].
Therefore, we use the adapted formula to calculate the corrected standard error `seTE_c`, so that it is not correlated with the effect size itself. We also save this variable to `dat.petpeese`. Furthermore, we add a variable `seTE_c2`, containing the **squared** standard error, since we need this as the predictor for PEESE.
```{r}
# Experimental (n1) and control group (n2) sample size
n1 <- c(62, 72, 44, 135, 103, 71, 69, 68, 95,
43, 79, 61, 62, 60, 43, 42, 64, 63)
n2 <- c(51, 78, 41, 115, 100, 79, 62, 72, 80,
44, 72, 67, 59, 54, 41, 51, 66, 55)
# Calculate modified SE
dat.petpeese$seTE_c <- sqrt((n1+n2)/(n1*n2))
# Add squared modified SE (= variance)
dat.petpeese$seTE_c2 <- dat.petpeese$seTE_c^2
```
Lastly, we need to calculate the inverse-variance weights `w_k` for each study. Here, we also use the squared modified standard error to get an estimate of the variance.
```{r}
dat.petpeese$w_k <- 1/dat.petpeese$seTE_c^2
```
Now, `dat.petpeese` contains all the variables we need to fit a weighted linear regression model for PET and PEESE. In the following code, we fit both models, and then directly print the estimated coefficients using the `summary` function. These are the results we get:
```{r}
# PET
pet <- lm(TE ~ seTE_c, weights = w_k, data = dat.petpeese)
summary(pet)$coefficients
# PEESE
peese <- lm(TE ~ seTE_c2, weights = w_k, data = dat.petpeese)
summary(peese)$coefficients
```
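As a side note, the if-else rule from above can also be written out directly in _R_. Here is a minimal sketch using the `pet` and `peese` models we just fitted (assuming that positive effect sizes represent favorable outcomes):

```{r, eval=F}
# Minimal sketch of the PET-PEESE decision rule
b0.pet <- summary(pet)$coefficients["(Intercept)", ]

# One-sided test with alpha = 0.05: two-sided p < 0.1 AND intercept > 0
if (b0.pet["Pr(>|t|)"] < 0.1 & b0.pet["Estimate"] > 0) {
  theta.petpeese <- coef(peese)["(Intercept)"]   # use the PEESE estimate
} else {
  theta.petpeese <- coef(pet)["(Intercept)"]     # otherwise, stay with PET
}
theta.petpeese
```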
To determine if PET or PEESE should be used, we first need to have a look at the results of the PET method. We see that the limit estimate is $g$ = -1.35. This effect is significant ($p$ < 0.10), but considerably smaller than zero, indicating that the PET estimate should be used.
However, with $g$ = -1.35, PET's estimate of the bias-corrected effect is not very credible. It indicates that in reality, the intervention type under study has a highly negative effect on the outcome of interest; that it is actually very harmful. That seems very unlikely. It may be possible that a "bona fide" intervention has no effect, but it is extremely uncommon to find interventions that are downright dangerous.
In fact, what we see in our results is a common limitation of PET-PEESE: it sometimes heavily **overcorrects** for biases in our data [@carter2019correcting]. This seems to be the case in our example: although all observed effect sizes have a positive sign, the corrected effect size is heavily negative. If we look at the second part of the output, we see that the same is also true for PEESE, even though its estimate is slightly less negative ($g=$ -0.44).
When this happens, it is best not to interpret the intercept as a point estimate of the true effect size. We can simply say that PET-PEESE indicates, when correcting for small-sample effects, that the intervention type under study has **no effect**. This basically means that we set $\hat\theta_{\mathrm{PET-PEESE}}$ to zero, instead of interpreting the negative effect size that was actually estimated.
```{block, type='boximportant'}
**Limitations of PET-PEESE**
\vspace{2mm}
PET-PEESE can not only systematically over-correct the pooled effect size--it also sometimes **overestimates** the true effect, even when there is no publication bias at all. Overall, the PET-PEESE method has been found to perform badly when the number of included studies is small (i.e. $K$ < 20), and the between-study heterogeneity is very high, i.e. $I^2$ > 80% [@stanley2017limitations].
Unfortunately, it is common to find meta-analyses with a small number of studies and high heterogeneity. This restricts the applicability of PET-PEESE, and we do not recommend its use as the **only** method to adjust for small-study effects. Yet, it is good to know that this method exists and how it can be applied since it has become increasingly common in some research fields.
```
```{block2, type='boxinfo'}
**Using `rma.uni` Instead of `lm` for PET-PEESE**
In our hands-on example, we used the `lm` function together with study weights to implement PET-PEESE. This approach, while used frequently, is not completely uncontroversial.
There is a minor, but crucial difference between weighted regression models implemented via `lm`, and meta-regression models employed by, for example, `rma.uni`. While `lm` uses a **multiplicative error model**, meta-analysis functions typically employ an **additive error model**. We will not delve into the technical minutiae of this difference here; more information can be found in an excellent [vignette](https://www.metafor-project.org/doku.php/tips:rma_vs_lm_lme_lmer) written by Wolfgang Viechtbauer on this topic.
The main takeaway is that `lm` models, by assuming a proportionality constant for the sampling error variances, are not perfectly suited for meta-analysis data. This means that implementing PET-PEESE via, say, `rma.uni` instead of `lm` is indicated, at least as a sensitivity analysis. Practically, this would mean running `rma.uni` with $SE_{\theta_k}^{(2)}$ added as a moderator variable in `mods`; e.g. for PET:
`rma.uni(TE, seTE^2, mods = ~seTE, data = dat, method = "FE")`.
```
<br></br>
#### Rücker's Limit Meta-Analysis Method {#rucker-ma}
---
\index{Limit}
Another way to calculate an estimate of the adjusted effect size is to perform a **limit meta-analysis** as proposed by Rücker and colleagues [-@rucker2011treatment]. This method is more sophisticated than PET-PEESE and involves more complex computations. Here, we therefore focus on understanding the general idea behind this method and let _R_ do the heavy lifting after that.
The idea behind Rücker's method is to build a meta-analysis model which explicitly accounts for bias due to small-study effects. As a reminder, the formula of a (random-effects) meta-analysis can be defined like this:
\begin{equation}
\hat\theta_k = \mu + \epsilon_k+\zeta_k
(\#eq:pub10)
\end{equation}
Where $\hat\theta_k$ is the observed effect size of study $k$, $\mu$ is the true overall effect size, $\epsilon_k$ is the sampling error, and $\zeta_k$ quantifies the deviation due to between-study heterogeneity.
In a limit meta-analysis, we extend this model. We account for the fact that the effect sizes and standard errors of studies are not independent when there are small-study effects. This is assumed because we know that publication bias particularly affects small studies, and that published small studies will therefore tend to report larger effects than big studies. In Rücker's method, this bias is added to our model by introducing a new term $\theta_{\text{Bias}}$, which is assumed to interact with $\epsilon_k$ and $\zeta_k$: the resulting bias component grows as a study's sampling error $\epsilon_k$ increases. The adapted formula looks like this:
\begin{equation}
\hat\theta_k = \mu_* + \theta_{\text{Bias}}(\epsilon_k+\zeta_k)
(\#eq:pub11)
\end{equation}
It is important to note that in this formula, $\mu_*$ does not represent the overall true effect size anymore, but a global mean that has no direct equivalent in a "standard" random-effects meta-analysis (unless $\theta_{\text{Bias}}=$ 0).
The next step is similar to the idea behind PET-PEESE (see previous chapter). Using the formula above, we suppose that studies' effect size estimates become increasingly precise, meaning that their individual sampling error $\epsilon_k$ approaches zero. This means that $\epsilon_k$ ultimately drops out of the equation:
\begin{equation}
\mathrm{E}(\hat\theta_k) \rightarrow \mu_{*} + \theta_{\text{Bias}}\zeta_k ~ ~ ~ ~ \text{as} ~ ~ ~ ~ \epsilon_k \rightarrow 0.
(\#eq:pub12)
\end{equation}
In this formula, $\mathrm{E}(\hat\theta_k)$ stands for the **expected value** of $\hat\theta_k$ as $\epsilon_k$ approaches zero. The formula we just created is the one of a "limit meta-analysis". It provides us with an adjusted estimate of the effect when removing the distorting influence of studies with a large standard error. Since $\zeta_k$ is usually expressed by the between-study heterogeneity variance $\tau^2$ (or its square root, the standard deviation $\tau$), we can use it to replace $\zeta_k$ in the equation, which leaves us with this formula:
\begin{equation}
\hat\theta_{*} = \mu_* + \theta_{\mathrm{Bias}}\tau
(\#eq:pub13)
\end{equation}
Where $\hat\theta_*$ stands for the estimate of the **pooled** effect size after adjusting for small-study effects. Rücker's method uses maximum likelihood
to estimate the parameters in this formula, including the "shrunken" estimate of the true effect size $\hat\theta_*$. Furthermore, it is also possible to obtain a shrunken effect size estimate $\hat\theta_{{*}_k}$ for each individual study $k$, using this formula:
\begin{equation}
\hat\theta_{{*}_k} = \mu_* + \sqrt{\dfrac{\tau^2}{SE^2_k + \tau^2}}(\hat\theta_k - \mu_*)
\end{equation}
in which $SE^2_k$ stands for the squared standard error (i.e. the observed variance) of $k$, and with $\hat\theta_k$ being the originally observed effect size^[This formula can be derived from a less simplified version of equation 9.11. A technical description of how to do this can be found in Rücker and colleagues [-@rucker2011treatment], equations 2.4 to 2.6.].
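Purely for illustration, here is what this shrinkage formula does numerically, using made-up values for all quantities:

```{r, eval=F}
# Made-up values, only to illustrate the shrinkage formula above
mu.star <- -0.03   # adjusted overall effect
tau2    <-  0.08   # between-study heterogeneity variance
theta.k <-  0.70   # observed effect of study k
se.k    <-  0.26   # standard error of study k

# Shrunken estimate: pulled towards mu.star, the more so the larger
# the study's sampling error is relative to tau2
mu.star + sqrt(tau2/(se.k^2 + tau2)) * (theta.k - mu.star)
```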
\index{metasens Package}
An advantage of Rücker's limit meta-analysis method, compared to PET-PEESE, is that the heterogeneity variance $\tau^2$ is explicitly included in the model. Another more practical asset is that this method can be directly applied in _R_, using the `limitmeta` function. This function is included in the **{metasens}** package [@metasens].
Since **{metasens}** and **{meta}** have been developed by the same group of researchers, they usually work together quite seamlessly. To conduct a limit meta-analysis of our `m.gen` meta-analysis, for example, we only need to provide it as the first argument in our call to `limitmeta`.
```{r, echo=F, message=F}
library(metasens)
```
```{r, message=F, eval=F}
# Install 'metasens', then load from library
library(metasens)
# Run limit meta-analysis
limitmeta(m.gen)
```
```
## Results for individual studies
## (left: original data; right: shrunken estimates)
##
## SMD 95%-CI SMD 95%-CI
## Call et al. 0.70 [ 0.19; 1.22] -0.05 [-0.56; 0.45]
## Cavanagh et al. 0.35 [-0.03; 0.73] -0.09 [-0.48; 0.28]
## DanitzOrsillo 1.79 [ 1.11; 2.46] 0.34 [-0.33; 1.01]
## de Vibe et al. 0.18 [-0.04; 0.41] 0.00 [-0.22; 0.23]
## Frazier et al. 0.42 [ 0.13; 0.70] 0.13 [-0.14; 0.42]
## Frogeli et al. 0.63 [ 0.24; 1.01] 0.13 [-0.25; 0.51]
## Gallego et al. 0.72 [ 0.28; 1.16] 0.09 [-0.34; 0.53]
## Hazlett-Stevens & Oren 0.52 [ 0.11; 0.94] -0.00 [-0.41; 0.40]
## Hintz et al. 0.28 [-0.04; 0.61] -0.05 [-0.38; 0.26]
## Kang et al. 1.27 [ 0.61; 1.93] 0.04 [-0.61; 0.70]
## Kuhlmann et al. 0.10 [-0.27; 0.48] -0.29 [-0.67; 0.08]
## Lever Taylor et al. 0.38 [-0.06; 0.84] -0.18 [-0.64; 0.26]
## Phang et al. 0.54 [ 0.06; 1.01] -0.11 [-0.59; 0.36]
## Rasanen et al. 0.42 [-0.07; 0.93] -0.25 [-0.75; 0.25]
## Ratanasiripong 0.51 [-0.17; 1.20] -0.48 [-1.17; 0.19]
## Shapiro et al. 1.47 [ 0.86; 2.09] 0.26 [-0.34; 0.88]
## Song & Lindquist 0.61 [ 0.16; 1.05] 0.00 [-0.44; 0.44]
## Warnecke et al. 0.60 [ 0.11; 1.08] -0.09 [-0.57; 0.39]
##
## Result of limit meta-analysis:
##
## Random effects model SMD 95%-CI z pval
## Adjusted estimate -0.0345 [-0.3630; 0.2940] -0.21 0.8367
## Unadjusted estimate 0.5771 [ 0.3782; 0.7760] -0.21 < 0.0001
## [...]
```
The output first shows us the original (left) and shrunken estimates (right) of each study. We see that the adjusted effect sizes are considerably smaller than the observed ones--some are even negative now. In the second part of the output, we see the adjusted pooled effect estimate. It is $g=$ -0.03, indicating that the overall effect is approximately zero when correcting for small-study effects.
If the small-study effects are indeed caused by publication bias, this result would be discouraging. It would mean that our initial finding has been completely spurious and that selective publication has concealed the fact that the treatment is actually ineffective. Yet again, it is hard to prove that publication bias has been the only driving force behind the small-study effects in our data.
\index{Funnel Plot}
It is also possible to create funnel plots for the limit meta-analysis: we simply have to provide the results of `limitmeta` to the `funnel.limitmeta` function. This creates a funnel plot which looks exactly like the one produced by `funnel.meta`. The only difference is that a **gray curve** is added to the plot. This curve indicates the adjusted average effect size when the standard error on the y-axis is zero, but also symbolizes the increasing bias due to small-study effects as the standard error increases.
When generating a funnel plot for `limitmeta` objects, it is also possible to include the shrunken study-level effect size estimates, by setting the `shrunken` argument to `TRUE`. Here is the code to produce these plots:
```{r, fig.width=6, fig.height=5, out.width="50%", collapse=TRUE, results='hold', eval = F}
# Create limitmeta object
lmeta <- limitmeta(m.gen)
# Funnel with curve
funnel.limitmeta(lmeta, xlim = c(-0.5, 2))
# Funnel with curve and shrunken study estimates
funnel.limitmeta(lmeta, xlim = c(-0.5, 2), shrunken = TRUE)
```
```{r, fig.height=5, fig.width = 13, collapse=TRUE, results='hold', echo=F}
# Create limitmeta object
lmeta <- limitmeta(m.gen)
par(mfrow = c(1,2),
bg="#FFFEFA")
# Funnel with curve
funnel.limitmeta(lmeta, xlim = c(-0.5, 2))
# Funnel with curve and shrunken study estimates
funnel.limitmeta(lmeta, xlim = c(-0.5, 2), shrunken = TRUE)
```
Note that `limitmeta` can not only be applied to meta-analyses which use the standardized mean difference--any kind of **{meta}** meta-analysis object can be used. To exemplify this, let us check the adjusted effect size of `m.bin`, which used the risk ratio as the summary measure.
```{r, eval=F}
limitmeta(m.bin)
```
```
## Result of limit meta-analysis:
##
## Random effects model RR 95%-CI z pval
## Adjusted estimate 2.2604 [1.8066; 2.8282] 7.13 < 0.0001
## Unadjusted estimate 2.0217 [1.5786; 2.5892] 7.13 < 0.0001
```
We see that in this analysis, the original and adjusted estimates are very similar. This is not very surprising, given that Peters' test (Chapter \@ref(peters-test)) already indicated that small-study effects seem to play a minor role in this meta-analysis.
<br></br>
### P-Curve {#p-curve}
---
\index{P-Curve}
Previously, we covered various approaches that assess the risk of publication bias by looking at small-study effects. Although their implementation differs, all of these methods are based on the idea that selective reporting causes a study's effect size to depend on its sample size. We assume that studies with a higher standard error (and thus a lower precision) have higher average effect sizes than large studies. This is because only small studies with a very high effect size are published, while others remain in the file drawer.
While this "theory" certainly sounds intuitive, one may also argue that it somewhat misses the point. Small-study methods assume that publication bias is driven by **effect sizes**. A more realistic stance, however, would be to say that it operates through $p$-**values**. In practice, research findings are only considered worth publishing when the results are $p<$ 0.05.
As we mentioned before, research is conducted by **humans**, and thus influenced by money and prestige--just like many other parts of our lives. The infamous saying "significant $p$, or no PhD" captures this issue very well. Researchers are often under enormous external pressure to "produce" $p$-values smaller than 0.05. They know that this significance threshold can determine if their work is going to get published, and if it is perceived as "successful". These incentives may explain why negative and non-significant findings are increasingly disappearing from the published literature [@fanelli2012negative].
One could say that small-study methods capture the mechanism behind publication bias **indirectly**. It is true that selective reporting can lead to smaller studies having higher effects. Yet, this is only correct because very high effects increase the chance of obtaining a test statistic for which $p<$ 0.05. For small-study effect methods, there is hardly a difference between a study in which $p=$ 0.049, and a study with a $p$-value of 0.051. In practice, however, this tiny distinction can mean the world to researchers.
\index{P-Value}
\index{P-Hacking}
\index{Questionable Research Practice (QRP)}
\index{Small-Study Effect}
In the following, we will introduce a method called **p-curve**, which focuses on $p$-values as the main driver of publication bias [@simonsohn2014p; @simonsohn2014es; @simonsohn2015better]. The special thing about this method is that it is restricted to **significant** effect sizes, and how their $p$-values are distributed. It allows us to assess if there is a true effect behind our meta-analysis data, and can estimate how large it is. Importantly, it also explicitly controls for questionable research practices such as $p$-hacking, which small-study effect methods do not.
\index{Trim and Fill Method}
P-curve is a relatively novel method. It was developed in response to the "replication crisis" that affected the social sciences in recent years [@ioannidis2005most; @open2015estimating; @mcnutt2014reproducibility]. This crisis was triggered by the observation that many seemingly well-established research findings are in fact spurious--they can not be systematically replicated. This has sparked renewed interest in methods to detect publication bias, since this may be a logical explanation for failed replications. Meta-analyses, by not adequately controlling for selective reporting, may have simply reproduced biases that already exist in the published literature.
P-curve was also developed in response to deficiencies of standard publication bias methods, in particular the Duval & Tweedie trim-fill-method. Simonsohn and colleagues [-@simonsohn2014es] found that the trim-and-fill approach usually only leads to a **small** downward correction, and often misses the fact that there is no true effect behind the analyzed data at all.
P-curve is, as it says in the name, based on a curve of $p$-values. A $p$-curve is like a histogram, showing the number of studies in a meta-analysis for which $p<$ 0.05, $p<$ 0.04, $p<$ 0.03, and so forth. The p-curve method is based on the idea that the shape of this histogram of $p$-values depends on the sample sizes of studies, and--more importantly--on the **true** effect size behind our data.
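As a quick, hands-on impression of what such a histogram looks like for real data, we can plot the significant $p$-values of our `m.gen` meta-analysis. This is only a crude sketch; the actual p-curve implementation we use later in this chapter is more involved:

```{r, eval=F}
# Crude p-curve sketch for m.gen: convert effects to z-scores, derive
# two-sided p-values, and plot a histogram of the significant ones
z <- m.gen$TE / m.gen$seTE
p <- 2 * pnorm(abs(z), lower.tail = FALSE)
hist(p[p < 0.05], breaks = seq(0, 0.05, by = 0.01),
     main = "Crude p-curve of m.gen", xlab = "p-value")
```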
To illustrate this, we simulated the results of nine meta-analyses. To make patterns clearly visible, each of these imaginary meta-analyses contains the huge number of $K=$ 10$^{\text{5}}$ studies. In each of the nine simulations, we assumed different sample sizes for each individual study (ranging from $n=$ 20 to $n=$ 100), and a different true effect size (ranging from $\theta=$ 0 to 0.5). We assumed that all studies in a meta-analysis share one true effect size, meaning effects follow the fixed-effect model. Then, we took the $p$-value of all significant effect sizes in our simulations and created a histogram. The results can be seen in the plot below.
\vspace{2mm}
```{r pcurve, fig.width=12, fig.height=9, message=F, echo=F, out.width="80%", fig.align="center", fig.cap="P-curves for varying study sample size and true effect."}
library(dplyr)
library(rlang)
# Define helpers
pdist = function(x, lower.tail) pnorm(x, lower.tail=lower.tail)
plotter = function(x, var){
x %>% filter(!!enquo(var) < 0.05) %>% pull(!!enquo(var)) %>%
hist(breaks = 20, plot = F) -> h
h$density = h$counts/sum(h$counts)*100
plot(h, freq = F, main = NULL, ylab = "Percentage",
xlab = bquote(italic("p")~"value"), col = "gray")
d = strsplit(deparse(substitute(var)), "_") %>% unlist() %>% nth(2)
n = strsplit(deparse(substitute(var)), "_") %>% unlist() %>% nth(3)
d = as.numeric(d)/10
title(bquote(theta~"="~.(d)~~~"n = "~.(n)))
}
# # Simulate
# dat = list()
# for (i in 1:1e5){
#
# dat.inner = list()
# for (n in c(20, 50, 100)){
#
# z = mean(rnorm(n, 0, 1))*sqrt(n)
# dat.inner[[paste0("s_0_", n)]] = z
#
# z = mean(rnorm(n, .2, 1))*sqrt(n)
# dat.inner[[paste0("s_2_", n)]] = z
#
# z = mean(rnorm(n, .5, 1))*sqrt(n)
# dat.inner[[paste0("s_5_", n)]] = z
# }
#
# dat[[i]] = unlist(dat.inner)
# }
#
# do.call(rbind, dat) %>%
# apply(., 2, function(x) 2*pmin(pdist(x, TRUE), pdist(x, FALSE))) %>%
# data.frame() -> simdat.p
load("data/simdat.p.rda")
par(mfrow = c(3,3), cex.main = 2, bg="#FFFEFA")
simdat.p %>% plotter(s_0_20)
simdat.p %>% plotter(s_0_50)
simdat.p %>% plotter(s_0_100)
simdat.p %>% plotter(s_2_20)
simdat.p %>% plotter(s_2_50)
simdat.p %>% plotter(s_2_100)
simdat.p %>% plotter(s_5_20)
simdat.p %>% plotter(s_5_50)
simdat.p %>% plotter(s_5_100)
```
\vspace{2mm}
The first row displays the distribution of significant $p$-values when there is no true effect. We see that the pattern is identical in all simulations, no matter how large the sample size of the individual studies. The $p$-values in all three examples seem to be evenly distributed: a barely significant value of $p=$ 0.04 seems to be just as likely as $p=$ 0.01. Such a flat $p$-curve emerges when there is no underlying effect in our data, i.e. when the **null hypothesis** of $\theta = 0$ is true.
When this is the case, $p$-values are assumed to follow a **uniform** distribution: every $p$-value is just as likely as the other. When the null hypothesis ($\theta = 0$) is true, it is still possible to find significant effect sizes just by chance. This results in a **false positive**, or $\alpha$ error. But this is unlikely, and we know exactly **how unlikely**. Since they are uniformly distributed when the effect size is zero, 5% of all $p$-values can be expected to be smaller than 0.05. This is exactly the significance threshold of $\alpha=$ 0.05 that we commonly use in hypothesis testing to reject the null hypothesis.
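This behavior is easy to verify with a short simulation. Here is a sketch with arbitrary settings, running 10,000 two-sample $t$-tests in which the null hypothesis is true:

```{r, eval=F}
# Simulation sketch: p-values are uniform when the null hypothesis is true
set.seed(123)
p <- replicate(1e4, t.test(rnorm(30), rnorm(30))$p.value)
mean(p < 0.05)                   # roughly 5% false positives
hist(p[p < 0.05], breaks = 20)   # and a flat p-curve among them
```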
The $p$-curve looks completely different in the second and third row. In these examples, the null hypothesis is false, and a true effect exists in our data. This leads to a **right-skewed** distribution of $p$-values. When our data capture a true effect, highly significant (e.g. $p=$ 0.01) effect sizes are more likely than effects that are barely significant (e.g. $p=$ 0.049). This right-skew becomes more and more pronounced as the true effect size and study sample size increase.
Yet, we see that a right-skewed $p$-curve even emerges when the studies in our meta-analysis are drastically under-powered (i.e. containing only $n=$ 20 participants while aiming to detect a small effect of $\theta=$ 0.2). This makes it clear that $p$-curves are very sensitive to changes in the true underlying effect size. When a true effect size exists, we will often be able to detect it just by looking at the distribution of $p$-values that are significant.
Now, imagine how the $p$-curve would look like when researchers $p$-hacked their results. Usually, analysts start to use $p$-hacking when a result is not significant but **close** to that. Details of the analysis are then tweaked until a $p$-value smaller than 0.05 is reached. Since that is already enough to get the results published, no further $p$-hacking is conducted after that. It takes no imagination to see that widespread $p$-hacking would lead to a **left-skewed** $p$-curve: $p$-values slightly below 0.05 are over-represented, and highly significant results under-represented.
In sum, we see that a $p$-curve can be used as a diagnostic tool to assess the presence of publication bias and $p$-hacking. Next, we will discuss p-curve **analysis**, which is a collection of statistical tests based on an empirical $p$-curve. Importantly, none of these tests focuses on publication bias per se. The method instead tries to find out if our data contains **evidential value**. This is arguably what we are most interested in a meta-analysis: we want to make sure that the effect we estimated is not spurious; an artifact caused by selective reporting. P-curve addresses exactly this concern. It allows us to check if our findings are driven by an effect that exists in reality, or if they are--to put it dramatically--"a tale of sound and fury, signifying nothing".
<br></br>
#### Testing for Evidential Value
---
\index{Power}
To evaluate the presence of evidential value, p-curve uses two types of tests: a **test for right-skewness**, and a **test for 33% power** (the latter can be seen as a test for flatness of the $p$-curve). We begin with the test for right-skewness. As we learned, the right-skewness of the $p$-curve is a function of studies' sample sizes and their true underlying effect. Therefore, a test which allows us to confirm that the $p$-curve of our meta-analysis is significantly right-skewed is very helpful. When we find a significant right-skew in our distribution of significant $p$-values, this would indicate that our results are indeed driven by a true effect.
<br></br>
##### Test for Right-Skewness
---
\index{Binomial Test}
To test for right-skewness, the p-curve method first uses a **binomial test**. These tests can be used for data that follows a binomial distribution. A binomial distribution can be assumed for data that can be divided into two categories (e.g. success/failure, head/tail, yes/no), where $p$ indicates the probability of one of the outcomes, and $q = 1-p$ is the probability of the other outcome.
To use a binomial test, we have to split our $p$-curve into two sections. We do this by counting the number of $p$-values that are <0.025, and then the number of significant $p$-values that are >0.025. Since values in our $p$-curve can range from 0 to 0.05, we essentially use the middle of the **x**-axis as our cut-off. When the $p$-curve is indeed right-skewed, we would expect that the number of $p$-values in the two groups differ. This is because the probability $p$ of obtaining a result that is smaller than 0.025 is considerably higher than the probability $q$ of getting values that are higher than 0.025.
Imagine that our $p$-curve contains eight values, seven of which are below 0.025. We can use the `binom.test` function in _R_ to test how likely it is to find such data under the null hypothesis that small and high $p$-values are equally likely^[Under the null hypothesis, we assume that the true probability $p$ of obtaining results that are <0.025 is $\pi=$ 0.5. Our goal is to calculate the probability of getting $k=$ 7 or more highly significant studies when there are $n=$ 8 significant studies in total. This probability, the $p$-value of a one-sided binomial test, can be obtained using this formula: $$\text{P}(X\geq k) = \sum_{k=7}^{8}\frac{n!}{k!(n-k)!}p^k(1-p)^{n-k}$$ Where $p=$ 0.5, and the exclamation mark stands for the factorial. The sum symbol tells us that we have to sum up everything to the right for values of $k=$ 7 or higher, until $k=n$.].
Since we assume that small $p$-values are more frequent than high $p$-values, we can use a one-sided test by setting the `alternative` argument to `"greater"`.
```{r}
k <- 7 # number of studies p<0.025
n <- 8 # total number of significant studies
p <- 0.5 # assumed probability of k (null hypothesis)
binom.test(k, n, p, alternative = "greater")$p.value
```
We see that the binomial test is significant ($p<$ 0.05). This means that highly significant $p$-values (i.e. $p<$ 0.025) are significantly more frequent than barely significant ones in our example. Overall, this indicates that the $p$-curve is right-skewed, and that there is a true effect.
A drawback of the binomial test is that it requires us to dichotomize our $p$-values, while they are in fact continuous. To avoid information loss, we need a test which does not require us to transform our data into bins.
P-curve achieves this by calculating a $p$-value for each $p$-value, which results in a so-called $pp$-value for each study. The $pp$-value tells us how likely it is to get a value **at least as extreme** (i.e. as small) as $p$ when the $p$-curve is flat (i.e. when there is no true effect). It gives the probability of a $p$-value when **only** significant values are considered. Since $p$-values follow a uniform distribution when $\theta = 0$, $pp$-values are nothing but significant $p$-values which we project onto the $[0,1]$ range. For continuous outcome measures, this is achieved by multiplying the $p$-value by 20; for example, $p=$ 0.023 becomes $pp = 0.023\times20 = 0.46$.
\index{Fisher's Method}
\index{Stouffer's Method}
\index{History of Meta-Analysis}
Using the $pp_k$-value of each significant study $k$ in our meta-analysis, we can test for right-skewness using **Fisher's method**. This method is an "archaic" type of meta-analysis developed by R. A. Fisher in the early 20<sup>th</sup> century (see Chapter \@ref(history)). Fisher's method allows us to aggregate $p$-values from several studies, and to test if at least one of them measures a true effect (i.e. it tests if the distribution of submitted $p$-values is right-skewed). It entails log-transforming the $pp$-values, summing the result across all studies $k$, and then multiplying by -2.
The resulting value is a test statistic which follows a $\chi^2$ distribution (see Chapter \@ref(cochran-q)) with $2\times K$ degrees of freedom (where $K$ is the total number of $pp$-values)^[In newer versions, p-curve uses **Stouffer's method** instead of the one by Fisher to test for right-skewness [@simonsohn2015better]. Both methods are closely related, but Stouffer's method is based on $z$-scores instead of $p$-values.]:
\begin{equation}
\chi^2_{2K} = -2 \sum^K_{k=1} \log(pp_k)
(\#eq:pub14)
\end{equation}
Let us try out Fisher's method in a brief example. Imagine that our $p$-curve contains five $p$-values: $p=$ 0.001, 0.002, 0.003, 0.004 and 0.03. To test for right-skewness, we first have to transform these $p$-values into the $pp$-value:
```{r}
p <- c(0.001, 0.002, 0.003, 0.004, 0.03)
pp <- p*20
# Show pp values
pp
```
Using equation \@ref(eq:pub14), we can calculate the value of $\chi^2$ with the following code:
```{r}
chi2 <- -2*sum(log(pp))
chi2
```
This results in $\chi^2=$ 25.96. Since five studies were included, the degrees of freedom are $\text{d.f.}=2\times5=10$. We can use this information to check how likely our data are under the null hypothesis of no effect/no right-skewness. This can be done in _R_ using the `pchisq` function, which we have to provide with our value of $\chi^2$ as well as the number of d.f.:
```{r}
pchisq(chi2, df = 10, lower.tail = FALSE)
```
This gives us a $p$-value of about 0.0038. This means that the null hypothesis is very unlikely, and therefore rejected. The significant value of the $\chi^2$ test tells us that, in this example, the $p$-values are indeed right-skewed. This can be seen as evidence for the assumption that there is evidential value behind our data.
<br></br>
##### Test for Flatness
---
\index{Power}
We have seen that the right-skewness test can be used to determine if the distribution of significant $p$-values represents a true overall effect. The problem is that this test depends on the **statistical power** of our data. Therefore, when the right-skewness test is **not** significant, this does not automatically mean that there is no evidential value. Two things are possible: either there is indeed no true effect, or the number of values in our $p$-curve is simply too small to render the $\chi^2$ test significant--even if the data is in fact right-skewed.
Thus, we have to rule out lacking power as an explanation of a non-significant right-skewness test. The null hypothesis of the right-skewness test is that there is no evidential value. In the test, we essentially try to reject this null hypothesis by showing that our empirical $p$-curve is **not** flat.
Now, we have to turn this logic around to show that the $p$-curve **is** flat. We can do this by changing the null hypothesis. Instead of no effect, our new null assumes that the $p$-curve contains a **small** effect, and as a consequence is **slightly** right-skewed. In a test for flatness, the goal then becomes to show that our $p$-curve is **not** slightly right-skewed. Or, to put it differently, we want to confirm that the $p$-curve is significantly flatter than the one we expect for a very, very small effect. When this is the case, we can say that even a very small effect can be ruled out for the data at hand, and that there is likely no evidential value at all.
\index{t-Distribution}
P-curve analysis achieves this through a test of 33% power. The idea is to construct the **expected** $pp$-value (i.e. the probability of $p$) of each significant study $k$ when the true effect is very small. By very small, we mean an effect size that can be detected with a power of 33% using the study's sample size.
This 33% threshold is, to some extent, arbitrary; it was chosen by the inventors of p-curve as a rough indicator of an effect that is on the verge of being practically negligible. We will spare you the statistical details behind how the 33% power $pp$-values are determined, but it is important to know that it involves the use of a **non-central** distribution, such as the non-central $F$, $t$ and $\chi^2$ distribution, depending on the outcome measure^[In the p-curve _R_ function that we will use in this book, effect sizes are first transformed to $z$ scores (using the formula $z = \frac{\hat\theta}{SE_{\hat\theta}}$). To calculate expected $pp$-values for the 33% power tests, a non-central $\chi^2$ distribution with d.f. = 1 is used. This is possible because the distribution of $\chi^2_1$ and $z$ (or, more precisely: $z^2$) is equivalent.].
In the following section on effect size estimation using p-curve (Chapter \@ref(p-curve-es)), we will describe the concept behind a non-central distribution in greater detail.
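To give at least a flavor of the computations involved: under the simplifications mentioned in the footnote above ($z$-scores and a non-central $\chi^2$ distribution with one degree of freedom), the non-centrality parameter corresponding to 33% power can be found numerically. This is an illustrative sketch, not the exact internal code of the p-curve function:

```{r, eval=F}
# Illustrative sketch: find the non-centrality parameter (ncp) of a
# chi-square distribution (df = 1) for which the power to obtain
# p < 0.05 is exactly 33%
ncp33 <- uniroot(function(ncp) {
  pchisq(qchisq(0.95, df = 1), df = 1, ncp = ncp,
         lower.tail = FALSE) - 1/3
}, interval = c(0, 10))$root
ncp33
```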
In sum, the flatness test first involves calculating $pp$-values based on an effect detectable with 33% power, for each significant $p$-value. If the 33% power estimate fits the distribution of our $p$-values well, the 33% power $pp$-values will be uniformly distributed--just as $p$-values follow a uniform distribution when the data fits the null hypothesis $\theta = 0$ well. Thus, we can apply the same methods we also used in the right-skewness test, but this time, we use the 33% power $pp$-values for our calculations.
The only difference now is that we are not particularly keen on rejecting the null hypothesis: this would mean that we reject the notion that at least a small effect exists in our data. We would be forced to say that there is either an effect small enough to be negligible--or that there is no effect at all.
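To make the logic of the 33% power $pp$-values a little more tangible, here is a rough sketch for a single, made-up $z$ score. The actual implementation works with a non-central $\chi^2_1$ distribution, as mentioned in the footnote above, but the underlying idea is the same: we calculate the conditional probability of a result at least as extreme as the one observed, assuming an effect that the study could detect with only 33% power.

```{r, eval=F}
# Rough sketch with a hypothetical z score
z.obs <- 2.8                # made-up observed z score
z.crit <- qnorm(0.975)      # two-sided significance threshold (p = 0.05)

# Non-centrality parameter at which the power is exactly 33%
# (the negligible lower tail is ignored here)
ncp.33 <- uniroot(function(ncp) {
  pnorm(z.crit, mean = ncp, lower.tail = FALSE) - 1/3},
  interval = c(0, 10))$root

# Expected pp-value: probability of a result at least as extreme as z.obs,
# given that the study was significant
pnorm(z.obs, mean = ncp.33, lower.tail = FALSE) /
  pnorm(z.crit, mean = ncp.33, lower.tail = FALSE)
```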
<br></br>
##### Interpretation of P-Curve Results {#interpretation-p-curve-results}
---
We have now covered several tests that allow us to analyze an empirical $p$-curve. Do not worry too much if you found some of the statistical concepts difficult to understand. It takes some time to get a grasp of the methodology behind p-curve, and the following hands-on example will certainly be helpful in this respect. The most important part is to understand the **idea** behind the p-curve tests, and how their results can be interpreted. In this section, we will now focus on the latter.
When we interpret p-curve, we have to make sense of four test results: those of the binomial right-skewness and flatness tests, as well as those of the right-skewness and flatness tests based on $pp$-values. To make things worse, p-curve analysis also involves two additional tests that we have not covered yet: a right-skewness and flatness test based on the **half** $p$-curve. These tests are identical to the $pp$-value-based tests we covered before, but they are only applied to highly significant results (i.e. $p<$ 0.025).
\index{P-Hacking}
The half $p$-curve tests were introduced as a safeguard against **ambitious** $p$**-hacking** [@simonsohn2015better]. Although arguably less likely, it is possible that researchers may have $p$-hacked results until they became highly significant. This, however, may distort the shape of the $p$-curve: it may not appear left-skewed, but slightly right-skewed, even when there is no true effect. A test based on the half $p$-curve can control for this, because it becomes increasingly difficult, even for ambitious $p$-hackers, to obtain **very** small $p$-values (e.g. $p<$ 0.01), unless there is a true effect. Since, by definition, the half $p$-curve only contains values smaller than 0.025, no binomial test is performed.
When interpreting p-curve's results, we essentially try to answer two questions. The first one is: does our $p$-curve indicate the presence of evidential value? This can be evaluated using the right-skewness tests. In case we can not confirm the presence of evidential value, we turn to the second question: is evidential value absent or inadequate? This can be assessed using the flatness tests. In practice, this guideline may be used [@simonsohn2015better]:
* **Evidential value present**: The right-skewness test is significant for the half $p$-curve ($p<$ 0.05) **or** the $p$-value of the right-skewness test is <0.1 for **both** the half and full curve.
* **Evidential value absent or inadequate**: The flatness test is significant with $p<$ 0.05 for the full curve **or** the flatness test for the half curve **and** the binomial test are $p<$ 0.1.
```{block, type='boxinfo'}
**How to Interpret a "No-No" Case**
\vspace{2mm}
Every p-curve analysis eventually ends up with one of three outcomes. When the right-skewness test is significant, we conclude that evidential value is present. When the right-skewness test is not significant, but the flatness test is, this indicates that evidential value is absent (or the effect is very, very small).
The third and last outcome is the trickiest. For lack of a better word, we call it a "no-no" case. It arises when we can neither verify that evidential value is present, nor that it is absent (i.e. neither the right-skewness test, nor the flatness test is significant).
\vspace{2mm}
In terms of interpretation, a "no-no" case means that we can not confirm that a true effect is present, but that we are not able to rule out a relatively small effect either.
This third outcome frequently occurs when the $p$-curve only contains a few studies, and it is admittedly somewhat disappointing. This result often communicates that we do not know if a true effect exists when looking at the $p$-curve, and that more evidence is needed to clarify things.
```
<br></br>
##### P-Curve Analysis in _R_
---
By now, we have learned a lot about the theory behind p-curve analysis, so it is high time we start applying the technique in a real-world example. Luckily, Simonsohn, Simmons and Nelson, the inventors of p-curve, have developed an application which automatically conducts all the tests we previously discussed, and returns the relevant results. This **p-curve app** can also be found [online](http://p-curve.com/).
To use p-curve in _R_, we can resort to the `pcurve` function. This function emulates the behavior of the app and was designed specifically for meta-analysis objects created with the **{meta}** package.
```{block, type='boxdmetar'}
**The "pcurve" Function**
\vspace{2mm}
\index{dmetar Package}
The `pcurve` function is included in the **{dmetar}** package. Once **{dmetar}** is installed and loaded on your computer, the function is ready to be used. If you did **not** install **{dmetar}**, follow these instructions:
\vspace{2mm}
1. Access the source code of the function [online](https://raw.githubusercontent.com/MathiasHarrer/dmetar/master/R/pcurve2.R).
2. Let _R_ "learn" the function by copying and pasting the source code in its entirety into the console (bottom left pane of R Studio), and then hit "Enter".
3. Make sure that the **{stringr}** and **{poibin}** packages are installed and loaded.
```
\index{Outlier}
Using the `pcurve` function is easy. We only need to provide the function with a **{meta}** meta-analysis object that we created previously. In this example, we again use our `m.gen` meta-analysis object, which we created with the `metagen` function (Chapter \@ref(pre-calculated-es)).
However, before we run the analysis, we remove the two outlying studies that we identified previously (study 3 and study 16, see Chapter \@ref(outliers)). We will cover later why this is a good idea.
```{r, fig.align="center", fig.width=6, fig.height=5, out.width="70%", eval=F}
library(dmetar)
library(meta)
# Update m.gen to exclude outliers
m.gen_update <- update.meta(m.gen, subset = -c(3, 16))
# Run p-curve analysis
pcurve(m.gen_update)
```
\vspace{4mm}
```
## P-curve analysis
## -----------------------
## - Total number of provided studies: k = 16
## - Total number of p<0.05 studies included into the
## analysis: k = 9 (56.25%)
## - Total number of studies with p<0.025: k = 8 (50%)
##
## Results
## -----------------------
## pBinomial zFull pFull zHalf pHalf
## Right-skewness test 0.020 -3.797 0.000 -2.743 0.003
## Flatness test 0.952 1.540 0.938 3.422 1.000
## Note: p-values of 0 or 1 correspond to p<0.001 and p>0.999,
## respectively.
##
## Power Estimate: 66% (31.1%-87.8%)
##
## Evidential value
## -----------------------
## - Evidential value present: yes
## - Evidential value absent/inadequate: no
```
\vspace{4mm}
```{r, echo=F, fig.align="center", fig.width=7, fig.height=6, out.width="68%"}
library(poibin)
library(stringr)
source("data/pcurve.bw.R")
par(bg="#FFFEFA")
p <- pcurve(m.gen_update)
```
We see that running the `pcurve` function results in two outputs: our p-curve analysis results, and a plot of the observed $p$-curve.
The output tells us that the meta-analysis contained $k=$ 9 significant effects that were included in the $p$-curve. Most of these studies ($k=$ 8) had highly significant results (i.e. $p<$ 0.025). The `Results` section contains the main outcomes of the analysis. We see that all three right-skewness tests are significant: the binomial test (`pBinomial`; $p=$ 0.02), the full $p$-curve $pp$-value test (`pFull`; $p<$ 0.001) and the test based on the half $p$-curve (`pHalf`; $p=$ 0.003)^[In the output, we also see that the test statistic associated with the full and half $p$-curve tests is a $z$ score. This is because the `pcurve` function uses Stouffer's instead of Fisher's method to aggregate the results. As we mentioned in footnote 6 in this chapter, both methods are closely related.].
Applying the criteria we set up in Chapter \@ref(interpretation-p-curve-results), this indicates that our data contains evidential value. In the next line, we see that the three flatness tests are **not** significant, with $p$s ranging from 0.938 to 1. This, quite logically, tells us that evidential value is neither absent nor inadequate. The same interpretation is provided for us in the `Evidential value` section of the output.
The $p$-curve plot produced by the function contains three lines: a solid one, representing the empirical $p$-curve based on our data; a dashed line showing the expected $p$-value distribution assuming 33% power; and a dotted line, which shows the uniform distribution we would expect when there is no effect. The solid line is visibly right-skewed, just as we would expect when the studies measure a true effect.
Overall, these results indicate the presence of evidential value and that there is a true non-zero effect. We can still not rule out that publication bias has affected the results of our meta-analysis. But, based on p-curve's results, we can conclude that the pooled effect we found is not completely spurious; it is not just a "mirage" produced by selective reporting.
Interestingly, this finding does not really align with the results we obtained using some of the small-study effect methods. Both PET-PEESE (Chapter \@ref(pet-peese)) and the limit meta-analysis method (Chapter \@ref(rucker-ma)) estimated a corrected average effect of approximately zero^[An attentive reader may have recognized that for the small-study effect methods, outliers were not removed, and that this may have caused the different results. We checked for this possibility by re-running PET-PEESE and a limit meta-analysis without the statistical outliers, which led to largely the same results as before. If you want to, you can try to verify this yourself in _R_.].
<br></br>
#### P-Curve Effect Size Estimation {#p-curve-es}
---
We have now discovered how the analysis of an empirical $p$-curve can be used to determine if our meta-analysis contains evidential value. However, even when we reach conclusive results using this method, our insights will still be somewhat limited. It is certainly very helpful to know that our data contains a true effect, but it would be even better to know **how big** this true effect is. Luckily, p-curve can also help us with this question. When we know the sample size of our studies, it is possible to search for the true effect size that fits the shape of our $p$-curve best.
\index{t-Distribution}
To understand how this is feasible, we first have to discuss the concept of a **non-central** distribution, and how it relates to effect sizes. To exemplify what a non-central distribution is, we start with the arguably most common statistical test there is: the two-sample $t$-test. A $t$-test is commonly used to examine if the means of two groups differ.
The null hypothesis in a $t$-test is that both means $\mu_1$ and $\mu_2$ are identical, and that, therefore, their difference is zero. When the null hypothesis is true, we assume that the $t$ statistic follows a **central** $t$-distribution. The central $t$-distribution looks similar to a standard normal distribution. Since the null hypothesis assumes that there is no difference, the $t$-distribution is centered around zero.
\index{Non-Centrality Parameter}
Quite logically, this central $t$-distribution will not represent the reality well when the null hypothesis is incorrect. When there is a true difference between the means, we do not expect the $t$-values to be centered around zero. Instead, the $t$ statistics will follow a **non-central** $t$-distribution. This non-central distribution is usually asymmetric, and tends to have a wider spread.
Most importantly, however, its center is "shifted" away from zero. The magnitude of this shift is controlled by the **non-centrality parameter** $\delta$. The higher $\delta$, the further the peak of the non-central distribution will lie away from zero.
The graph below illustrates this behavior. On the left, we see a central $t$-distribution, for which $\delta=$ 0. To the right, a non-central $t$-distribution with $\delta=$ 5 is displayed. We see that the right curve is less symmetrical, and that it peaks around 5, whereas the center of the symmetrical, central $t$-distribution is zero.
\vspace{8mm}
```{r, echo=F, message=F, warning=F, out.width="70%", fig.align="center", fig.width=6, fig.height=3}
library(ggplot2)
bw = function(b, x) { b/bw.nrd0(x) }
set.seed(123)
df = data.frame(type = rep(c("ncp0", "ncp2"), each = 1e4),
value = c(rt(1e4, 9, 0), rt(1e4, 9, 5)))
ggplot(df, aes(x = value, fill = type, color = type)) +
geom_vline(xintercept = 0, linetype = "dashed") +
geom_density(alpha = 0.5, adjust=bw(1.5, df$value)) +
xlim(c(-5,13)) +
xlab(bquote(italic(t)["d.f. = 9"])) +
ylab(bquote("P("~italic(t)~")")) +
theme_classic() +
theme(legend.position = "none") +
scale_fill_manual(values=c("gray40", "gray77")) +
scale_color_manual(values=c("gray30", "gray67")) +
annotate("text", x = 2, y = 0.33,
label = bquote("Central"~italic(t)~"distribution ("~delta~"= 0 )"),
hjust = "left") +
annotate(geom = "curve", x = 1.9, y = 0.32, xend = 0.9, yend = 0.28,
curvature = .1, arrow = arrow(length = unit(2, "mm"))) +
annotate("text", x = 5.8, y = 0.23,
label = bquote("Non-central"~italic(t)~"distribution ("~delta~"= 5 )"),
hjust = "left") +
annotate(geom = "curve", x = 8, y = 0.22, xend = 6.5, yend = 0.16,
curvature = .1, arrow = arrow(length = unit(2, "mm"))) +
theme(panel.background = element_rect(fill = "#FFFEFA",
size = 0),
plot.background = element_rect(fill = "#FFFEFA",
size = 0))
```
\vspace{4mm}
While the left central distribution shows the expected $t$-values when the null hypothesis is true, the right one shows the expected $t$-values when the **alternative hypothesis** is correct.
Another way to think about this is to say that the left curve shows the $t$-distribution when there is **no effect**, while the right one shows the distribution when there **is** an effect. The central distribution could, for example, represent an SMD of 0, while the non-central distribution represents the expected $t$-value distribution for an effect of SMD = 1.3 (this value is made up). The higher the effect size (i.e. the bigger the difference between the two samples), the higher the non-centrality parameter $\delta$ will be, and the non-central distribution will move further and further away from zero.
As we mentioned, the non-central $t$-distribution can be used to model the alternative hypothesis in a $t$-test. The reason why it is so uncommon to see a non-central distribution in statistical textbooks, however, is because we usually do not **need** it. In statistical hypothesis testing, our alternative hypotheses are usually **non-specific**. When we calculate a two-sample $t$-test, we are only interested in the null hypothesis ("there is no difference between the groups"). When our data does not fit the null hypothesis well, we reject it and conclude that **some** effect exists.
The alternative hypothesis in such a test is simply the opposite of the null hypothesis: that the mean difference between the two samples is **not** zero; not that the effect has this or that size.
\index{Power}
A **specific** alternative hypothesis is usually only needed when we want to calculate the **power** of a statistical test. When setting up an experiment, it is conventional to plan for a sample size large enough to make sure that the probability of a **false negative** is 20% or lower. By using an adequate sample size, we want to make sure that a true effect can be detected, provided that it exists. The probability that a statistical test uncovers a true effect is its statistical power. It is defined as 1 minus the probability of a false negative, also known as $\beta$.
To calculate the required sample size for an adequately powered $t$-test, we need the non-central $t$-distribution, because it shows us the expected behavior of $t$-values when there is an effect. To calculate the sample size, we also need to assume a value for the true effect size, since it affects the non-centrality parameter $\delta$, and thus the shape of the non-central distribution.
When we put all these pieces together, we see that the shape of a non-central $t$-distribution, and thus the statistical power, is controlled by only two things: the size of our sample, and the true underlying effect. The same is also true for the shape of our $p$-curve: its right-skewness depends on the sample size and true effect in our data. This is an important observation: it means that once we know the sample sizes of the studies in our $p$-curve, we are also able to estimate their true effect size.
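As a brief illustration of this interplay, we can use the `power.t.test` function included in base _R_. The numbers below are arbitrary; they simply show how an assumed true effect and the sample size jointly determine the power of a two-sample $t$-test, and vice versa.

```{r, eval=F}
# Power of a two-sample t-test with n = 25 per group,
# assuming a true standardized mean difference of 0.6
power.t.test(n = 25, delta = 0.6, sd = 1, sig.level = 0.05)

# Conversely: sample size per group needed to reach 80% power
power.t.test(power = 0.8, delta = 0.6, sd = 1, sig.level = 0.05)
```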
To see how p-curve achieves this, let us go through a small example. For an independent two-sample $t$-test (assuming equal variances in both groups), the value of $t$ equals the between-group mean difference $\text{MD}_{\text{between}}$ divided through its standard error $SE_{\text{MD}_{\text{between}}}$:
\begin{equation}
t_{\text{d.f.}}= \frac{\text{MD}_{\text{between}}}{SE_{\text{MD}_{\text{between}}}}
(\#eq:pub15)
\end{equation}
When we insert the formula for the between-group mean difference and standard error (see equation 3.14 and 3.15 in Chapter \@ref(b-group-md)), we get the following formula:
\begin{equation}
t_{n_{1}+n_{2}-2} = \frac{\hat\mu_{1}-\hat\mu_{2}}{s_{\text{pooled}}\sqrt{\dfrac{1}{n_1}+\dfrac{1}{n_2}}}
(\#eq:pub16)
\end{equation}
We see in the equation that the degrees of freedom of $t$ are defined as the combined sample size ($n_1+n_2$) of both groups minus 2.
Using this formula, we can calculate the $t$-value based on data reported in a primary study. Imagine that a study contained $n_1=$ 30 participants in the experimental group, and $n_2=$ 20 participants in the control group. The study reports a mean of 13 and 10 for group 1 and 2, respectively, and that both groups had a standard deviation of 5. Based on this data, we can use the following code to calculate $t$:
```{r}
# Calculate mean difference
md <- 13-10
# Calculate SE of mean difference
n1 <- 30
n2 <- 20
s1 <- s2 <- 5
s_pooled <- sqrt((((n1-1)*s1^2) + ((n2-1)*s2^2))/
((n1-1)+(n2-1)))
se <- s_pooled*sqrt((n1+n2)/(n1*n2))
# Calculate t-value (equivalent to 2-sample t-test with equal variance)
md/se
```
The result is $t_{48}=$ 2.078. Does this result support the null hypothesis that both means are identical and that there is no effect?
To answer this question, we can use the `pt` function. This function gives us the probability of finding a $t$-value greater than 2.078 when d.f. = 48 **and** provided that the null hypothesis is true. This probability is equal to the $p$-value of a one-sided $t$-test.
```{r}
pt(2.078, df = 48, lower.tail = F)
```
The result is $p=$ 0.02, which means that the test is significant. Therefore, we reject the null hypothesis that the effect of the experimental group is zero (or negative). As a consequence, we accept the alternative hypothesis: there is a positive effect favoring the experimental groups (assuming that higher scores represent better outcomes).
We now know that the central $t$-distribution underlying the null hypothesis of the $t$ test does not suit our empirical data well. We also know that a non-central $t$-distribution fits our data better--but we do not know which one. For now, we can only guess which true effect size, and thus which non-centrality parameter $\delta$, really represents the population from which our empirical $t$-value was drawn.
As a first guess, we could assume that the true effect size behind our finding is a standardized mean difference of $\theta=$ 0.6. This would mean that we found $t=$ 2.078 because there was a medium-to-large effect in the experiment. Based on this value of $\theta$, the non-centrality parameter is calculated using this formula:
\begin{equation}
\delta = \frac{\theta}{\sqrt{\dfrac{n_{1}+n_{2}}{n_{1}n_{2}}}}
(\#eq:pub15b)
\end{equation}
Where $n_1$ and $n_2$ are the sample sizes in both groups. In our example, we can calculate the non-centrality parameter $\delta$ using this code:
```{r}
theta <- 0.6
delta <- theta/sqrt((n1+n2)/(n1*n2))
# Show delta
delta
```
To see what a non-central $t$-distribution with $\delta=$ 2.078 looks like, let us do a little simulation. Using the `rt` function, we draw one million random $t$-values with 48 degrees of freedom, twice: once assuming a non-centrality parameter of zero (which is equal to the null of no effect), and once using the value of $\delta$ that we just calculated (meaning that the true effect is SMD = 0.6).
Then, using a pipe and the `hist` function, we let _R_ draw a histogram of both simulations.
```{r, eval=F, fig.align="center", fig.width=6, fig.height=4, out.width="75%"}
# '1 with 6 zeros' can also be written as '1e6' in R
# Make sure that the "tidyverse" package is loaded (for pipe)
rt(n = 1e6, df = 48, ncp = 0) %>%
hist(breaks = 100,
col = "gray50",
xlim = c(-4,8),
ylim = c(0, 40000),
xlab = "t-value",
main = NULL)
rt(n = 1e6, df = 48, ncp = delta) %>%
hist(breaks = 100,
col = "gray95",
xlim = c(-4,8),
ylim = c(0, 40000),
add = T)
```
This is the resulting plot:
```{r, echo=F, fig.align="center", fig.width=6, fig.height=4, out.width="75%"}
par(bg="#FFFEFA")
# '1 with 6 zeros' can also be written as '1e6' in R
rt(n = 1e6, df = 48, ncp = 0) %>%
hist(breaks = 100,
col = "gray50",
xlim = c(-4,8),
ylim = c(0, 40000),
xlab = "t-value",
main = NULL)
rt(n = 1e6, df = 48, ncp = delta) %>%
hist(breaks = 100,
col = "gray95",
xlim = c(-4,8),
ylim = c(0, 40000),
add = T)
```
In it, we see the central $t$-distribution (no effect) on the left, and the non-central distribution ($\theta=$ 0.6) to the right. Because we already have a moderately sized sample ($N=$ 50), the non-central distribution looks less right-skewed than in the previous visualization. Nevertheless, it is clearly visible that the distribution of our assumed alternative hypothesis is shifted to the right, and peaks at the value of $\delta$.
The central question, of course, is: how likely is it to obtain a value greater than $t_{48}=$ 2.078 when this alternative distribution is indeed the correct one, and when the true effect is in fact $\theta=$ 0.6? To examine this question, we can use the `pt` function again, but this time also provide our assumed non-centrality parameter $\delta$. This information can be added using the `ncp` argument. Let us check what result we get.
```{r}
# Remember that t=2.078
pt(2.078, df = 48, ncp = delta, lower.tail = FALSE)
```
We see that the probability of obtaining a value greater than our result under the specified alternative hypothesis is roughly 50%. This means that about half of the values are expected to be higher, and the other half lower, than the $t$-value we found. All in all, this indicates that a non-central $t$-distribution with 48 degrees of freedom and a true effect of 0.6 approximates our finding very well. A true population effect of SMD = 0.6 is therefore highly compatible with the result we observed.
The steps we just made are, essentially, also the ones that p-curve uses to determine the true effect size. For every significant $p$-value in the $p$-curve, it calculates the probability of getting a value greater than $t$, assuming:
1. a certain effect size/non-centrality parameter;
2. that the $p$-value is based on $x$ degrees of freedom (which we can derive from the study's sample size); and
3. knowing that only significant values ($p<$ 0.05) are included in the $p$-curve.
This results in a $pp$-value for each significant study $k$. Based on all that we just covered, the formula for the $pp$-value of a study $k$ can be expressed like this:
\begin{equation}
pp(t_k) = \mathrm{P}(t>t_k~\vert~\delta,~\text{d.f.},~p<0.05)
(\#eq:pub16b)
\end{equation}
\index{Kolmogorov-Smirnov Test}
Since the degrees of freedom of a study are usually known, the only unknown in the equation is $\delta$, and thus the true effect size $\theta$. However, it is possible to **find** this true effect size, because we know that the distribution of $pp$-values will be **uniform** when we assume the correct true effect size/$\delta$-value. Just like $p$-values follow a uniform distribution when our findings conform with the null hypothesis, the $pp$-values are uniformly distributed when the results conform with the **correct** non-central distribution (i.e. the point alternative hypothesis).
Therefore, we only have to try out many, many possible **candidate effect sizes**, plug the resulting $\delta$-value into the equation above, and evaluate the skewness of the resulting $pp$-values. The candidate effect size that comes closest to a uniform distribution of $pp$-values then represents our estimate of the true effect. P-curve uses the $D$ distance metric of a so-called Kolmogorov-Smirnov (KS) test to capture how much a $pp$ distribution deviates from a uniform distribution.
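Using the $t$-value from our small example, we can sketch how such a $pp$-value is obtained for one candidate effect size, and how the distance to a uniform distribution could be quantified. This is only a rough illustration of the idea; the actual implementation in the `pcurve` function differs in its details.

```{r, eval=F}
# Conditional pp-value of t = 2.078 (d.f. = 48) for one candidate
# effect size, given that only significant results enter the p-curve.
# "delta" is the non-centrality parameter we calculated before.
t.obs <- 2.078
t.crit <- qt(0.975, df = 48)   # two-sided significance threshold

pt(t.obs, df = 48, ncp = delta, lower.tail = FALSE) /
  pt(t.crit, df = 48, ncp = delta, lower.tail = FALSE)

# For a whole vector of pp-values, the Kolmogorov-Smirnov D statistic
# quantifies the distance to a uniform distribution:
# ks.test(pp.values, "punif")$statistic
```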
\index{Cohen's \textit{d}}
P-curve's effect estimation method is also implemented in the `pcurve` function. To use it, we have to set the `effect.estimation` argument to `TRUE`. We also have to specify `N`, the sample size of each study. Lastly, we can control the search space for candidate effect sizes using `dmax` and `dmin`. Here, we tell `pcurve` to search for effect sizes between Cohen's $d=$ 0 and 1. Please note that `dmin` must always be zero or greater--the minimum that p-curve can detect is no effect.
```{r, eval=F}
# Add experimental (n1) and control group (n2) sample size
# Sample sizes of study 3 and 16 removed
n1 <- c(62, 72, 135, 103, 71, 69, 68, 95,
43, 79, 61, 62, 60, 43, 64, 63)
n2 <- c(51, 78, 115, 100, 79, 62, 72, 80,
44, 72, 67, 59, 54, 41, 66, 55)
# Run p-curve analysis with effect estimation
pcurve(m.gen_update,
effect.estimation = TRUE,
N = n1+n2,
dmin = 0,
dmax = 1)
```
```
## P-curve analysis
## -----------------------
## [...]
##
## P-curve's estimate of the true effect size: d=0.389
```
\vspace{4mm}
```{r, echo=F, fig.width=6, fig.height=5, fig.align="center", out.width="45%"}
source("data/pcurve.bw.es.R")
m.gen_update <- update.meta(m.gen, subset = -c(3, 16))
n1 <- c(62, 72, 135, 103, 71, 69, 68, 95,
43, 79, 61, 62, 60, 43, 64, 63)
n2 <- c(51, 78, 115, 100, 79, 62, 72, 80,
44, 72, 67, 59, 54, 41, 66, 55)
par(bg="#FFFEFA")
pcurve.bw.es(m.gen_update,
effect.estimation = TRUE,
N = n1+n2,
dmin = 0,
dmax = 1) -> d
```
The output now contains two new elements: an estimate of the true effect size, as well as a plot showing results of the effect size search. In the plot, the candidate effect sizes form a smooth, V-shaped gradient, which peaks at an effect size of $d=$ 0.389. At this point, the difference between the calculated $pp$ distribution and a uniform distribution (represented by the value of $D$ on the y-axis) is minimal, which means that it represents the best estimate of the true effect.
Importantly, p-curve's estimate of the effect size is **only** trustworthy when the plot shows a V-shape similar to the one here. Other, more erratic shapes indicate that p-curve may not have found the minimum. A plot with a smooth, descending line might indicate that our search space is simply too narrow. In this case, it makes sense to re-run the analysis with a higher `dmax` value.
Overall, p-curve's estimate of $d=$ 0.389 is somewhat lower than the pooled effect we found in the meta-analysis ($g=$ 0.45 when outliers are removed). However, it is still large enough to be in line with our previous finding, that the studies contain evidential value.
```{block, type='boxreport'}
**Reporting P-Curve Analysis Results**
\vspace{4mm}
When we report the results of a p-curve analysis, it is a good idea to at least include the $p$-values of the three right-skewness and flatness tests, as well as how these results are interpreted. When the true effect size was estimated, this should also be included. All of these results can be combined in a table that looks somewhat like this:
```
```{r, echo=F}
library(kableExtra)
dat = data.frame(pBinomial = c("0.020", "0.952"),
zFull = c(-3.797, 1.540),
pFull = c("<0.001", "0.938"),
zHalf = c(-2.743, 3.422),
pHalf = c("0.003", ">0.999"),
present = c("yes", "yes"),
absent = c("no", "no"),
d = c(0.39, 0.39))
rownames(dat) = c("Right-Skewness Test", "Flatness Test")
colnames(dat) = c("$p_{\\text{Binomial}}$", "$z_{\\text{Full}}$", "$p_{\\text{Full}}$", "$z_{\\text{Half}}$",
"$p_{\\text{Half}}$", "present", "absent", "$\\hat{d}$")
kable(dat, booktabs = T, digits = 2, escape = FALSE) %>%
kable_styling(latex_options = c("scale_down"),
bootstrap_options = c("condensed")) %>%
add_header_above(c(" " = 2, "Full Curve" = 2, "Half Curve" = 2, "Evidential Value" = 2, " ")) %>%
collapse_rows(columns = c(7:9), latex_hline = "major", valign = "middle") %>%
kable_styling(font_size = 14)
```
```{block, type='boxempty'}
The developers of p-curve also highly recommend creating a **disclosure table** for each analysis, describing from which part of an article the result was extracted, and how it was originally reported. An example of such a disclosure table, along with several other practical guidelines, can be found in Simonsohn et al. [-@simonsohn2014p].
```
```{block, type='boximportant'}
**P-Curve & Between-Study Heterogeneity**
\vspace{4mm}
We still owe you an explanation of why we excluded outlying studies from the p-curve analysis. With outliers included, our meta-analysis had a between-study heterogeneity of $I^2=$ 63%, which is quite substantial. This is problematic because it has been found that p-curve is not a robust method to estimate the true effect size when the between-study heterogeneity of our data is high.
\vspace{2mm}
Van Aert and colleagues [-@aert2016conducting] have therefore proposed to only use p-curve when the heterogeneity is small to moderate. They proposed a threshold of $I^2=$ 50% as a rule of thumb to determine if p-curve can be applied. When the between-study heterogeneity in our meta-analysis is higher than that, one workaround is to run the p-curve analysis without outliers, like we did in the example. An even better solution is to perform a separate analysis in sensible subgroups of studies, provided they exist (see the sketch after this box).
```
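The subgroup workaround mentioned in the box above is straightforward to implement. Here is a sketch, assuming that a grouping variable such as `RiskOfBias` is available in our data set:

```{r, eval=F}
# Sketch: run the p-curve analysis in a subgroup of studies.
# We assume here that "RiskOfBias" is a column in the ThirdWave data set.
m.low <- update.meta(m.gen, subset = ThirdWave$RiskOfBias == "low")
pcurve(m.low)
```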
<br></br>
### Selection Models {#selection-models}
---
\index{Selection Model}
The last type of publication bias method we cover are so-called **selection models**. Although selection models have been proposed to examine the impact of selective publication for some time [@hedges1992modeling; @iyengar1988selection; @hedges1996estimating; @hedges1984estimation], interest in their application has particularly increased in the last few years [@mcshane2016adjusting; @carter2019correcting].
All publication bias methods we covered previously are based on some kind of "theory", which is used to explain why and how selective publication affects the results of a meta-analysis. Small-study effect methods, for example, assume that a study's risk of non-publication is proportional to its sample and effect size. P-curve is based on the idea that a $p$-value of 0.05 serves as a "magic threshold", where results with $p \geq$ 0.05 are generally much more likely to be missing in our data than statistically significant findings.
Selection models can be seen as a generalized version of these methods. They allow us to model **any** kind of process through which we think that publication bias has affected our results. This makes them very versatile: selection models can be used to model our data based on very simple, or highly sophisticated, hypotheses concerning the genesis of publication bias.
The idea behind all selection models is to specify a distribution which predicts, often in a highly idealized way, how likely it is that some study is published (i.e. "selected"), depending on its results. Usually, this result is the study's $p$-value, and a selection model can be seen as a function that returns the probability of publication for varying values of $p$. Once such a selection function has been defined, it can be used to "remove" the assumed bias due to selective publication, and derive a corrected estimate of the true effect size.
Yet, this corrected effect will only be appropriate when the selection model we defined is indeed correct. We always have to keep in mind that our model is just one of many ways to explain the selection process--even if our model seems to fit the data well. The exact processes through which publication bias has shaped our results will inevitably remain unknown. Nevertheless, selection models can be enormously helpful to broadly assess if, and more importantly, **how** publication may have influenced our data.
\index{Step Function}
In this chapter, we will cover two types of (rather simple) selection models based on **step functions**. Therefore, let us first clarify what step functions are.
<br></br>
#### Step Function Selection Models {#step-function-selmodels}
---
To perform any kind of selection model analysis, we need two ingredients: an **effect size model**, and the **selection model** itself. We can think of both of these models as **functions**, which use some input value $x$ and then return the probability of that value.
The effect size model, described by the function $f(x_k)$, is identical to the random-effects model. It assumes that the observed effect sizes $\hat\theta_k$ are normally distributed around an average effect $\mu$, and deviate from $\mu$ due to sampling error and between-study heterogeneity variance $\tau^2$. Knowing $\mu$, $\tau^2$, a study's standard error, and that effect sizes are normally distributed, the function $f(x_k)$ predicts how likely it is to observe some effect size value $x_k$--assuming that there is no publication bias.
Yet, when there **is** publication bias, this effect size distribution, and thus $f(x)$ itself, is an incorrect representation of reality. Due to selective publication, some studies are over-represented in our data--presumably those with surprisingly high effect sizes and small samples. This means that we have, without knowing, given these studies a higher weight in our pooling model. We, therefore, need to derive a more "realistic" version of $f(x)$, which incorporates the fact that some results had a greater chance of being included than others; that they were given a higher "weight".
\index{Weight}
This is achieved through a **weight function** $w(p_k)$. The weight function tells us the selection probability of a study $k$, depending on its $p$-value. Based on this, we can define an adapted version of $f(x_k)$, which also incorporates the publication bias mechanism. This function $f^*(x_k)$ is symbolized by this formula [@vevea2005publication]:
\begin{equation}
f^*(x_k) = \frac{w(p_k)f(x_k)}{\int w(p_k) f(x_k) dx_k}
(\#eq:pub17)
\end{equation}
Where the denominator in the fraction stands for the integral of $w(p_k) f(x_k)$. The weight function $w(p_k)$ in this equation represents our assumed selection model.
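To get a feel for what this formula does, here is a small numeric sketch. All values (the effect size model parameters and the simple two-step weight function) are made up; the point is only to show how the weighted density can be obtained through numerical integration.

```{r, eval=F}
# Numeric sketch of the weighted density f*(x); all values are hypothetical
mu <- 0.3; tau2 <- 0.05; se.k <- 0.15

f <- function(x) dnorm(x, mean = mu, sd = sqrt(tau2 + se.k^2))
pval <- function(x) pnorm(x/se.k, lower.tail = FALSE)  # one-sided p-value
w <- function(p) ifelse(p <= 0.025, 1, 0.5)            # simple selection model

num <- function(x) w(pval(x)) * f(x)
denom <- integrate(num, lower = -Inf, upper = Inf)$value
f.star <- function(x) num(x)/denom

f.star(0.4)  # density of observing an effect of x = 0.4 under selection
```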
\index{Step Function}
Although $w(p_k)$ can technically have any shape, it is often implemented as a **step function** [@hedges1996estimating]. When $w(p_k)$ is a step function, this means that values $p_k$ which fall into the same interval (e.g. all $p$-values smaller than 0.05) are selected with the same probability. This interval-specific selection probability is denoted with $\omega_i$, and can differ from interval to interval. Essentially, we split up the range of possible $p$-values (0 to 1) into different segments, and give each segment its own selection probability $\omega_1$, $\omega_2$, ..., $\omega_c$.
The size of the segments is determined by several **cut-points** (which we denote with $a_i$). The number of cut-points, as well as their exact value, can be chosen by us. For example, when $w(p_k)$ contains four segments (and thus four cut-points), we can define its inner workings like so:
\begin{equation}
w(p_k) =\begin{cases}
\omega_1~~~\text{if}~~~0 \leq p_k \leq a_1 \\
\omega_2~~~\text{if}~~~a_1 < p_k \leq a_2 \\
\omega_3~~~\text{if}~~~a_2 < p_k \leq a_3 \\
\omega_4~~~\text{if}~~~a_3 < p_k \leq a_4~~~(\text{where}~~~a_4=1).
\end{cases}
(\#eq:pub18)
\end{equation}
We see that for any value of $p$, the function above returns a specific selection probability $\omega$, based on the $p$-value interval into which our value falls. To make this more concrete, we now define a selection model with actual values filled in for the cut-points $a_i$ and selection probabilities $\omega_i$.
We could assume, for example, that the publication bias mechanism in our meta-analysis can be described with three cut-points. First, there is $a_1=$ 0.025. This value equals a one-sided $p$-value of 0.025, and a two-sided $p$-value of 0.05. Since this is the conventional significance threshold used in most studies, it makes sense to assume that all $p$-values smaller than $a_1=$ 0.025 have a selection probability $\omega_1$ of 100%. After all, there is no reason to put a study into the file drawer when its results were positive. The next cut-point we define is $a_2=$ 0.05. For results in this range, we assume a selection probability of 80%: still high, but lower compared to results that are clearly significant. Then, we specify a large interval, ranging from $a_2=$ 0.05 to $a_3=$ 0.5, in which the selection probability is 60%. Lastly, for studies with a very high $p$-value of $\geq$ 0.5, we define an even lower probability of $\omega_4=$ 35%.
This results in a selection model as depicted in Figure \@ref(fig:stepcurve) below.
\vspace{2mm}
```{r stepcurve, warning = FALSE, message=F, echo=F, fig.align="center", out.width="56%",fig.cap="Selection model based on a step function.", fig.width=6, fig.height=4.5}
library(ggplot2)
df = data.frame(x = c(0, 0.025, 0.05, 0.5, 1),
y = c(1, 0.8, 0.6, 0.35, 0.35))
ggplot(data = df, aes(x = x, y = y)) +
geom_step(cex = 1) +
geom_vline(xintercept = 0.025, linetype = "dotted", color = "gray30") +
geom_vline(xintercept = 0.05, linetype = "dotted", color = "gray30") +
geom_vline(xintercept = 0.5, linetype = "dotted", color = "gray30") +
geom_vline(xintercept = 1, linetype = "dotted", color = "gray30") +
annotate("text", x = 0.10, y = 1.03, label = bquote(a[1]~"="~0.025), hjust = "left",
color = "gray30") +
annotate(geom = "curve", x = 0.095, y = 1.04, xend = 0.025, yend = 1.05,
curvature = .2, arrow = arrow(length = unit(2, "mm")), linetype = "solid",
color = "gray30") +
annotate("text", x = 0.15, y = 0.76, label = bquote(a[2]~"="~0.05), hjust = "left",
color = "gray30") +
annotate(geom = "curve", x = 0.145, y = 0.77, xend = 0.05, yend = 0.85,
curvature = .2, arrow = arrow(length = unit(2, "mm")), linetype = "solid",
color = "gray30") +
annotate("text", x = 0.6, y = 0.80, label = bquote(a[3]~"="~0.50), hjust = "left",
color = "gray30") +
annotate(geom = "curve", x = 0.59, y = 0.79, xend = 0.5, yend = 0.73,
curvature = .2, arrow = arrow(length = unit(2, "mm")), linetype = "solid",
color = "gray30") +
annotate("text", x = 0.86, y = 0.62, label = bquote(a[4]~"="~1), hjust = "right",
color = "gray30") +
annotate(geom = "curve", x = 0.87, y = 0.61, xend = 1, yend = 0.55,
curvature = .2, arrow = arrow(length = unit(2, "mm")), linetype = "solid",
color = "gray30") +
theme_classic() +
ylab(bquote("Probability of Selection ("~omega~")")) +
xlab("p-value") +
scale_x_continuous(breaks = c(0, 0.025, 0.05, 0.5, 1), expand = c(0, 0), limits = c(0, 1.05),
labels = c("0", "0.025", "0.05", "0.5", "1")) +
scale_y_continuous(expand = c(0,0), limits = c(0, 1.1),
labels = c("0%", "25%", "50%", "75%", "100%")) +
theme(axis.text.x = element_text(angle = 90, hjust = 1),
plot.background = element_rect(fill = "#FFFEFA", color = "#fbfbfb"),
panel.background = element_rect(fill = "#FFFEFA"))
```
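The selection model displayed in Figure \@ref(fig:stepcurve) can also be written down directly in _R_, for example using the base function `stepfun`. This is simply a convenient way to look up the assumed selection probability of any $p$-value in our made-up model:

```{r, eval=F}
# The step function from the figure, expressed in R
w <- stepfun(x = c(0.025, 0.05, 0.5),
             y = c(1, 0.80, 0.60, 0.35))

# Selection probability of a few exemplary p-values
w(c(0.01, 0.03, 0.20, 0.70))
```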
When we define a selection model based on a step function, we usually only specify the cut-points $a_i$. These are the only fixed parameters in our model, while the selection probabilities $\omega = \omega_1, \omega_2, \dots, \omega_c$ are estimated from the data. Based on the formula in equation 9.20, the selection model can then be fitted to our data. This involves using maximum likelihood procedures to jointly estimate $\omega$, as well as a corrected estimate of $\mu$ and $\tau^2$ which takes the disparate selection probabilities $\omega$ into account. The resulting corrected estimate of $\mu$ then represents the true average effect size when controlling for the assumed publication bias mechanism.
Previously, we expressed the selection probabilities $\omega$ as percentages ranging from 0% to 100%. However, when we fit the selection model, $\omega_i$ is not estimated as an absolute selection probability, but in terms of its **relative likelihood** of selection. This entails giving the first interval in the step function a reference value of 1, while all other values of $\omega$ represent the likelihood of selection **in relation** to this reference group. If, for example, we estimate a value of $\omega_2$=0.5 in the second interval, this means that studies in this segment were only half as likely to be selected compared to the first interval (for which $\omega_1 = 1$).
Of course, our corrected estimate of the true average effect $\mu$ will only be accurate when our selection model itself is appropriate. A rough indication of this is a significant likelihood ratio test (LRT) of the selection model parameters. The test is based on the null hypothesis that there is no selection, and that the relative selection likelihood is identical for all intervals (i.e. that $\omega_1 = \omega_2 = \dots = \omega_c$). It should be noted, however, that this significance test has been found to frequently produce anti-conservative results [@hedges1996estimating]. This means that its results should be interpreted cautiously.
In theory, the number of cut-points $a_i$ used in our selection model can be chosen **ad libitum**. Yet, with every additional cut-point, an additional value of $\omega_i$ has to be estimated. Depending on the size of our meta-analysis, this can soon lead to the problem that only few, if any, studies are available for each interval. This makes it increasingly difficult to estimate each $\omega_i$ properly. Complex selection models with many cut-points can therefore only be applied when the number of studies is large (i.e. $K \geq$ 100).
\index{Three-Parameter Selection Model}
Unfortunately, most meta-analyses only contain a small number of studies. This means that only simple selection models with very few cut-points can be applied. One variant of such a simple model is the **three-parameter selection model**, which we will discuss next. This model has the advantage of being applicable even when the number of included studies is small (e.g. $K=$ 15--20).
<br></br>
##### Three-Parameter Selection Model {#three-param-selmodel}
---
The three-parameter model is a selection model with only one cut-point [@mcshane2016adjusting]. It is called a **three-parameter** model because only three parameters need to be estimated: the true effect $\mu$, the between-study heterogeneity variance $\tau^2$, and the relative likelihood of the second interval $\omega_2$^[It is not necessary to estimate $\omega_1$, since this interval serves as the reference category with a selection likelihood fixed at 1.].
In the three-parameter selection model, the single cut-point $a_1$ is set to 0.025, which is equal to a one-sided $p$-value of 0.05. This divides the range of $p$-values into two bins: those which can be considered statistically significant, and those which are not significant. Thus, $\omega_2$ represents the probability that a non-significant result is selected for publication^[P-curve (Chapter \@ref(p-curve)) can be seen as a special type of three-parameter selection model. It also uses a $p$-value of 0.05 as the cut-point for selection, but only focuses on the significant results (for which the selection probability is assumed to be 100%); and it assumes that $\tau^2$ is zero. This means that for p-curve, only one parameter is actually estimated from the data: the true effect size $\mu$.].
\index{metafor Package}
The `selmodel` function in the **{metafor}** package allows us to fit various kinds of selection models in _R_ ^[At the time we are writing this, the `selmodel` function is only available through the **development version** of **{metafor}**. The development version of **{metafor}** can be downloaded by first installing the **{remotes}** package, and then running this code: `remotes::install_github("wviechtb/metafor")`. After that, **{metafor}** can be called from the library and the `selmodel` function should be available. Please note that it is likely that, by the time you are reading this, the `selmodel` function has already been integrated into the standard version of **{metafor}**, which means that installation of the development version is not necessary anymore.]. It can also be used for three-parameter selection models, which we will try out now. As before, we will use the `ThirdWave` data set for our example.
The `selmodel` function only accepts meta-analysis objects created by **{metafor}**'s `rma` function (see Chapter \@ref(multiple-metareg-R)). Therefore, we have to create such an object first. In our call to `rma`, we use settings identical to the ones we used for **{meta}**'s `metagen` function (Chapter \@ref(pre-calculated-es)).
```{r, message=F}
library(metafor)
# We name the new object 'm.rma'
m.rma <- rma(yi = TE,
sei = seTE,
data = ThirdWave,
slab = Author,
method = "REML",
test = "knha")
```
Using the `m.rma` object, we can now fit a three-parameter selection model using `selmodel`. To tell the function that we want to apply a step function, we have to set the `type` argument to `"stepfun"`. In the `steps` argument, we can specify the cut-point, which, in our model, is $a_1$=0.025. Let us have a look at the results:
\vspace{4mm}
```{r, eval=F}
selmodel(m.rma,
type = "stepfun",
steps = 0.025)
```
```
## [...]
##
## Model Results:
##
## estimate se zval pval ci.lb ci.ub
## 0.5893 0.1274 4.6260 <.0001 0.3396 0.8390 ***
##
## Test for Selection Model Parameters:
## LRT(df = 1) = 0.0337, p-val = 0.8544
##
## Selection Model Results:
##
## k estimate se pval ci.lb ci.ub
## 0 < p <= 0.025 11 1.0000 --- --- --- ---
## 0.025 < p <= 1 7 1.1500 0.8755 0.8639 0.0000 2.8660
##
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
```
Under `Model Results`, we can see that the selection model's estimate of the true average effect size is $g=$ 0.59 (95%CI: 0.34-0.84). Interestingly, this estimate is nearly identical to the pooled effect size that we obtained previously ($g=$ 0.58).
Overall, this does **not** indicate that our meta-analysis was substantially biased by a lower selection probability of non-significant results. This finding is corroborated by the `Test for Selection Model Parameters`, which is not significant ($\chi^2_1=$ 0.034, $p=$ 0.85), and thus tells us that $\omega_1$ and $\omega_2$ do not differ significantly from each other.
Under `Selection Model Results`, we can see an estimate of the relative selection likelihood in both bins. We see that, with $\omega_2=$ 1.15, the selection probability in the second segment is actually slightly higher than in the first one. In case of substantial publication bias, we would expect just the opposite: that the relative selection likelihood of a non-significant result is considerably **lower** compared to significant findings.
As a sensitivity analysis, we can change $a_1$ from 0.025 to 0.05, and then re-run the analysis. Setting the cut-point to 0.05 means that we assume that a two-sided $p$-value between 0.05 and 0.10 is just as "publishable" as one below 0.05. It could be, for example, that results which are significant on a "trend level" are still likely to be selected--or that some of the original studies used a one-sided test to evaluate if the study groups differ. Let us see if this altered cut-point changes the results:
```{r, eval=F}
selmodel(m.rma,
type = "stepfun",
steps = 0.05)
```
```
## [...]
##
## Model Results:
##
## estimate se zval pval ci.lb ci.ub
## 0.3661 0.1755 2.0863 0.0370 0.0222 0.7100 *
##
## Test for Selection Model Parameters:
## LRT(df = 1) = 3.9970, p-val = 0.0456
##
## Selection Model Results:
##
## k estimate se pval ci.lb ci.ub
## 0 < p <= 0.05 15 1.0000 --- --- --- ---
## 0.05 < p <= 1 3 0.1786 0.1665 <.0001 0.0000 0.5050 ***
##
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
```
\vspace{2mm}
Interestingly enough, we now see a different pattern. The new average effect size estimate is $g=$ 0.37, which is smaller than before. Furthermore, the likelihood test is significant, indicating that the intervals differ. We can see that, with $\omega_2=$ 0.18, the selection likelihood of values $p>$ 0.1 (two-sided) is much lower than the one of (marginally) significant $p$-values.
This indicates that our pooled effect may have been slightly distorted by selective reporting--particularly because studies with **clearly** non-significant results landed in the file-drawer.
<br></br>
##### Fixed Weights Selection Model {#fixed-weights-selmodel}
---
In the three-parameter selection model we just discussed, only a single cut-point $a_1$ is specified, while the selection likelihood is freely estimated by the model. As we mentioned, the fact that three-parameter models only use one cut-point makes them applicable even to meta-analyses with relatively few studies. This is because there is a lower chance that the model will "run out of studies" in some of the bins.
However, the large sample size requirement of selection models may be avoided if the selection likelihoods $\omega_i$ do not have to be estimated from the data. We can simply provide a fixed value of $\omega_i$ for each interval, and then check what the estimate of $\mu$ is under the imposed model. While this approach allows us to fit more complex selection models (i.e. models with more cut-points), it also comes with a drawback.
When imposing such a **fixed weights selection model**, we simply assume that all pre-specified $\omega_i$ values are correct. Yet, when the model is not appropriate, this means that the estimate of the average effect will not be trustworthy. A fixed weights model should therefore be seen as a way to check what the true effect size **would** look like if the assumed selection process applies.
Vevea and Woods [-@vevea2005publication] provide a few examples of how such multi-cutpoint, fixed weights selection models can look like. The plot below shows two illustrative examples of a step function representing moderate and severe selection:
\vspace{4mm}
```{r, echo=F, fig.width=4, fig.height=4, fig.align="center", out.width="55%"}
df = data.frame(x = rep(c(0.005, 0.01, 0.05, 0.10, 0.25, 0.35, 0.50, 0.65, 0.75, 0.90, 0.95, 0.99, 0.995, 1), 2),
y = c(1, 0.99, 0.95, 0.80, 0.75, 0.65, 0.60, 0.55, 0.50, 0.50, 0.50, 0.50, 0.50, 0.50, 1,
0.99, 0.90, 0.75, 0.60, 0.50, 0.40, 0.35, 0.30, 0.25, 0.10, 0.10, 0.10, 0.10),
Selection = c(rep("moderate", 14), rep("severe", 14)))
ggplot(data = df, aes(x = x, y = y, fill = Selection, color = Selection, linetype = Selection)) +
geom_step(cex = 1) +
theme_classic() +
scale_color_manual(values = c("gray15", "gray60")) +
ylab(bquote("Probability of Selection ("~omega~")")) +
xlab("p-value") +
scale_x_continuous(breaks = 1:20/20, expand = c(0, 0), limits = c(0, 1.05)) +
scale_y_continuous(expand = c(0,0), limits = c(0, 1),
labels = c("0%", "25%", "50%", "75%", "100%")) +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
theme(legend.position="top",
plot.background = element_rect(fill = "#FFFEFA", color = "#fbfbfb"),
panel.background = element_rect(fill = "#FFFEFA"))
```
As a sensitivity analysis, we could check how the estimate of $\mu$ changes when we assume that these selection models are appropriate for our meta-analysis. To do this, we have to define all the cut-points used in the models displayed above, as well as the likelihood $\omega_i$ given to each interval.
```{r}
# Define the cut-points
a <- c(0.005, 0.01, 0.05, 0.10, 0.25, 0.35, 0.50,
0.65, 0.75, 0.90, 0.95, 0.99, 0.995)
# Define the selection likelihood for each interval
# (moderate/severe selection)
w.moderate <- c(1, 0.99, 0.95, 0.80, 0.75, 0.65, 0.60,
0.55, 0.50, 0.50, 0.50, 0.50, 0.50, 0.50)
w.severe <- c(1, 0.99, 0.90, 0.75, 0.60, 0.50, 0.40, 0.35,
0.30, 0.25, 0.10, 0.10, 0.10, 0.10)
```
Once these parameters are defined, we can use them in our call to `selmodel`. The new cut-points must be provided to the `steps` arguments, and the fixed likelihoods to `delta`:
```{r, eval=F}
# Fit model assuming moderate selection
selmodel(m.rma, type = "stepfun", steps = a, delta = w.moderate)
```
```
## [...]
##
## Model Results:
##
## estimate se zval pval ci.lb ci.ub
## 0.5212 0.0935 5.5741 <.0001 0.3380 0.7045 ***
##
## [...]
```
```{r, eval=F}
# Fit model assuming severe selection
selmodel(m.rma, type = "stepfun", steps = a, delta = w.severe)
```
```
## [...]
## Model Results:
##
## estimate se zval pval ci.lb ci.ub
## 0.4601 0.1211 3.8009 0.0001 0.2229 0.6974 ***
## [...]
```
We see that, when imposing a selection model representative of moderate selection, the estimate of the pooled effect size is $g=$ 0.52. When a severe selection process is assumed, we obtain a slightly lower effect of $g=$ 0.46.
Both of these results indicate that our observed effect is quite robust, even when controlling for selective publication. Importantly, however, these estimates are valid if--and only if--the selection model we specified is representative of the reality.
```{block, type='boxinfo'}
**Other Selection Model Functions**
\vspace{2mm}
In the examples, we only discussed step functions as the basis of selection models. It should be noted, however, that this is not the only type of function one can use to model the selection process. The `selmodel` function also supports several selection functions based on **continuous** distributions, for example the half-normal, logistic, or negative-exponential selection model. These models can be chosen by changing the specification of the `type` argument.
It is beyond the scope of this guide to discuss all of these models, but the documentation of the `selmodel` function provides an excellent introduction. You can access the documentation by running `?selmodel` in _R_ once **{metafor}** is loaded.
```
<br></br>
## Which Method Should I Use? {#pub-bias-which-method}
---
This concludes our discussion of statistical methods for publication bias. This chapter has been quite long, and one might ask why we discussed so many different approaches. Is it not enough to simply choose one method, assess the risk of publication bias with it, and then move on?
The short answer is no. Publication bias methods remain a highly active research topic, and many studies have evaluated the performance of different approaches over the years [e.g. @simonsohn2014es; @stanley2017limitations; @aert2016conducting; @mcshane2016adjusting; @rucker2011treatment; @terrin2003adjusting; @peters2007performance]. Alas, no clear winner has yet become apparent. On the contrary, there is evidence that no publication bias method consistently outperforms all the others [@carter2019correcting].
It is possible, and in fact quite common, that different publication bias methods yield wildly different results. Our own hands-on exercises in this chapter are a very good example. Although we used the same data set each time, estimates of the true bias-corrected effect ranged from practically zero to $g=$ 0.59. This underlines that the choice of method can have a profound impact on the results, and thus on our conclusions. While some methods indicated that our pooled effect completely disappears once we control for small-study effects, others largely corroborated our initial finding.
To address this issue, we recommend to always use **several** methods when evaluating publication bias. It is often difficult, if not impossible, to know which approach is suited best for our data and if its results are trustworthy. As we mentioned before, the exact extent to which selective reporting has affected our results will always be unknown. However, by applying several publication bias techniques, we can produce something similar to a **range** of credible true effects.
\index{PET-PEESE}
\index{P-Curve}
\index{Three-Parameter Selection Model}
The size of this range can be used to guide our interpretation. If, for example, PET-PEESE, p-curve and the three-parameter selection model all arrive at estimates that are close to our initial pooled effect, this boosts our confidence in the robustness of our finding. Less so if we discover that the methods disagree. This means that the impact of publication bias and small-study effects is much more uncertain, as is the trustworthiness of our pooled effect.
In any case, results of publication bias methods should always be interpreted with caution. There are instances where the results of publication bias analyses have led to contentious debates--for example in the "ego-depletion" literature [@friese2019ego]. It is important to keep in mind that the best way to control for publication bias is to perform an adequate search for unpublished evidence, and to change publication practices altogether. Every statistical "proof" of publication bias that we as meta-analysts can come up with is weak at best.
To make it easier for you to decide which method may be applied when, we created a brief overview of the advantages and disadvantages of each approach (see Table \@ref(tab:pubtab)).
```{r pubtab, echo=F, message=F}
library(kableExtra)
library(dplyr)
library(stringr)
# Import the overview of advantages and disadvantages
dat = readxl::read_excel("data/pubbias_procon.xlsx")
# Remove internal "BREAK" markers used for manual line breaks
dat$Advantages = str_replace_all(dat$Advantages, "BREAK", "")
dat$Disadvantages = str_replace_all(dat$Disadvantages, "BREAK", "")
colnames(dat)[1] = " "
dat[1,1] <- c("Duval & Tweedie Trim-and-Fill")
kable(dat %>% mutate_all(linebreak), "html", booktabs = T, escape = FALSE,
align = "l", longtable = T,
caption = "Methods to estimate the true effect size corrected for publication bias: Overview of advantages and disadvantages.") %>%
kable_styling(latex_options = c("repeat_header"),
bootstrap_options = c("condensed", "striped"),
font_size = 15) %>%
row_spec(0, bold=TRUE) %>%
column_spec(1, width = "2cm", bold = T) %>%
column_spec(2, width = "4cm") %>%
column_spec(3, width = "5cm") %>%
column_spec(1, italic= FALSE)
```
The table contains both statistical and practical considerations, and should be seen as neither comprehensive nor final. Publication bias methods are an ongoing field of investigation, and it is likely that things will look different once more evidence has accumulated.
\index{Outlier}
\index{Heterogeneity}
\index{I$^2$, Higgins \& Thompson's}
Lastly, it is of note that there is currently no method providing acceptable results when the between-study heterogeneity is high [@aert2016conducting, i.e. $I^2 \approx$ 75%]. This means that publication bias analyses of meta-analyses with very high heterogeneity should be interpreted with great caution, or avoided altogether. Analyses without outliers, or in more homogeneous subgroups, can often serve as a practical workaround, but they do not solve the general problem.
$$\tag*{$\blacksquare$}$$
<br></br>
## Questions & Answers
```{block, type='boxquestion'}
**Test your knowledge!**
\vspace{4mm}
1. How can the term "publication bias" be defined? Why is publication bias problematic in meta-analyses?
\vspace{-2mm}
2. What other reporting biases are there? Name and explain at least three.
\vspace{-2mm}
3. Name two questionable research practices (QRPs), and explain how they can threaten the validity of our meta-analysis.
\vspace{-2mm}
4. Explain the core assumptions behind small-study effect methods.
\vspace{-2mm}
5. When we find out that our data displays small-study effects, does this automatically mean that there is publication bias?
\vspace{-2mm}
6. What does p-curve estimate: the true effect of all studies included in our meta-analysis, or just the true effect of all **significant** effect sizes?
\vspace{-2mm}
7. Which publication bias method has the best performance?
\vspace{4mm}
**Answers to these questions are listed in [Appendix A](https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/qanda.html#qanda9) at the end of this book.**
```
<br></br>
## Summary
* Publication bias occurs when some studies are systematically missing in the published literature, and thus in our meta-analysis. Strictly defined, publication bias exists when the probability of a study to get published depends on its results. However, there is also a range of other **reporting biases**. These reporting biases also influence how likely it is that a finding will end up in our meta-analysis. Examples are citation bias, language bias, or outcome reporting bias.
* It is also possible that **published** evidence is biased, for example due to questionable research practices (QRPs). Two common QRPs are $p$-hacking and HARKing, and both can increase the risk of overestimating effects in a meta-analysis.
* Many publication bias methods are based on the idea of **small-study effects**. These approaches assume that only small studies with a surprisingly high effect size obtain significant results and are therefore selected for publication. This leads to an asymmetric funnel plot, which can be a sign of publication bias. But it does not have to be. Various "benign" causes of small-study effects are also possible.
* A relatively novel method, **p-curve**, is based on the idea that we can control for evidential value just by looking at the pattern of significant ($p<$ 0.05) effects in our data. It can be used to test for both the presence and absence of a true effect, and can estimate its magnitude.
* **Selection models** are a very versatile method and can be used to model different publication bias processes. However, they only provide valid results when the assumed model is adequate, and often require a very large number of studies. A very simple selection model, the three-parameter model, can also be used for smaller data sets.
* No publication bias method consistently outperforms all the others. It is therefore advisable to always apply **several** techniques, and interpret the corrected effect size cautiously. Thorough searches for unpublished evidence mitigate the risk of publication bias in a much better way than current statistical approaches.
<link rel="stylesheet" href="https://code.cdn.mozilla.net/fonts/fira.css">
<style>
:root {
--background-color: #FFFEFA;
--text-color: #000;
--highlight-color: #277DB0;
--border-color: #999896;
--grey-color: #6C6C6C;
--line-color: #eee;
--bg-box: #f5f5f5;
--box-border-color: #eeeeee;
--code-bg: #f8f8f8;
--code-chunk-bg: linear-gradient(160deg,#f8f8f8 0, #f1f1f1 100%);
--code-text-color: #212529;
--hr-border-top: 1px solid rgba(0,0,0,0.1);
--table-text-color: #000;
--toc-active: #eee;
--form-color: #4D4C4B;
--form-background-color: #FFFEFA;
--form-border: 1px solid #999896;
--aa-cursor-background-color: #B2D7FF;
}
[data-theme="dark"] {
--background-color: #2b2b2b;
--text-color: #ffffff;
--highlight-color: #34a8ed;
--border-color: yellow;
--grey-color: #cfcfcf;
--line-color: #575757;
--bg-box: #474747;
--box-border-color: #474747;
--code-bg: #474747;
--code-chunk-bg: linear-gradient(160deg,#CDCDCD 0,#f1f1f1 100%);
--code-text-color: #ffffff;
--hr-border-top: 1px solid #575757;
--table-text-color: #ffffff;
--toc-active: #474747;
--form-color: #cfcfcf;
--form-background-color: #474747;
--form-border: 1px solid #575757;
--aa-cursor-background-color: #2b2b2b;
}
.algolia-autocomplete .aa-dropdown-menu .aa-suggestion {
color: var(--form-color);
border-bottom: 1px var(--form-border) solid;
background-color: var(--form-background-color);
}
.algolia-autocomplete .aa-dropdown-menu .aa-suggestion.aa-cursor {
background-color: var(--aa-cursor-background-color);
}
strong {
font-weight: 500;
}
.form-control {
color: var(--form-color);
background-color: var(--form-background-color);
border: var(--form-border);
}
.form-control:focus {
color: var(--text-color);
background-color: var(--form-background-color);
border: var(--form-border);
}
nav[data-toggle="toc"] .nav a.nav-link.active, nav[data-toggle="toc"] .nav .nav-link.active > li > a {
background-color: var(--toc-active);
}
.btn-circle.btn-md {
width: 50px;
height: 50px;
padding: 7px 10px;
border-radius: 25px;
font-size: 10px;
text-align: center;
}
.lightable-classic td {
color: var(--text-color);
}
.table {
color: var(--table-text-color);
}
hr {
border-top: var(--hr-border-top);
}
code {
background-color: var(--code-bg);
color: var(--code-text-color);
}
pre {
background-image: var(--code-chunk-bg);
}
body {
background-color: var(--background-color);
color: var(--text-color);
}
a {
color: var(--highlight-color);
}
.header-section-number {
color: var(--grey-color);
}
small.text-muted {
color: var(--grey-color) !important;
}
.firstcharacter {
color: var(--highlight-color);
}
p.caption {
color: var(--grey-color);
}
.figure {
border-top: 2px solid var(--line-color);
border-bottom: 2px solid var(--line-color);
}
.inline-figure {
border: 0;
box-shadow: none;
}
.boxinfo {
padding: 1em 1em 1em 3em;
margin-bottom: 10px;
background: var(--bg-box);
position:relative;
border-width: 3px;
border-style: solid;
border-color: var(--box-border-color);
border-radius: .25rem;
}
.boxinfo:before {
content: '\f0eb';
font-family: FontAwesome;
left:20px;
position:absolute;
font-size: 30px;
color: var(--highlight-color);
}
.boxempty {
padding: 1em 1em 1em 1.1em;
margin-bottom: 10px;
background: var(--bg-box);
position:relative;
border-width: 3px;
border-style: solid;
border-color: var(--box-border-color);
border-radius: .25rem;
}
.boxempty:before {
content: '\f0eb';
font-family: FontAwesome;
left:0px;
position:absolute;
font-size: 30px;
color: transparent;
}
.boximportant {
padding: 1em 1em 1em 3em;
margin-bottom: 10px;
background: var(--bg-box);
position:relative;
border-width: 3px;
border-style: solid;
border-color: var(--box-border-color);
border-radius: .25rem;
}
.boximportant:before {
content: '\f06a';
font-family: FontAwesome;
left:13px;
position:absolute;
font-size: 30px;
color: var(--highlight-color);
}
.boxreport {
padding: 1em 1em 1em 3em;
margin-bottom: 10px;
background: var(--bg-box);
position:relative;
border-width: 3px;
border-style: solid;
border-color: var(--box-border-color);
border-radius: .25rem;
}
.boxreport:before {
content: '\f570';
font-family: FontAwesome;
left:15px;
position:absolute;
font-size: 30px;
color: var(--highlight-color);
}
.boxdmetar {
padding: 1em 1em 1em 3em;
margin-bottom: 10px;
background: var(--bg-box);
position:relative;
border-width: 3px;
border-style: solid;
border-color: var(--box-border-color);
border-radius: .25rem;
}
.csl-entry {
margin-bottom: 20px;
padding-left: 1.5em;
text-indent:-1.5em;
}
.boxdmetar:before {
content: '\f4f7';
font-family: FontAwesome;
left:10px;
position:absolute;
font-size: 30px;
color: var(--highlight-color);
}
.boxquestion {
padding: 1em 1em 1em 3em;
margin-bottom: 10px;
background: var(--bg-box);
position:relative;
border-width: 3px;
border-style: solid;
border-color: var(--box-border-color);
border-radius: .25rem;
}
.boxquestion:before {
content: '\f059';
font-family: FontAwesome;
left:12px;
position:absolute;
font-size: 30px;
color: var(--highlight-color);
}
.mode-switch {
font-size: 0.9rem;
}
img {
max-width: 100%;
height: auto;
border-radius: .25rem;
}
.firstcharacter {
color: var(--highlight-color);
float: left;
font-size: 40px;
line-height: 50px;
padding-bottom: 2px;
padding-right: 8px;
padding-left: 2px;
}
.btn-circle.btn-sm {
width: 30px;
height: 30px;
padding: 6px 0px;
border-radius: 15px;
font-size: 20px;
text-align: center;
}
.btn-circle.btn-md {
width: 50px;
height: 50px;
padding: 7px 10px;
border-radius: 25px;
font-size: 20px;
text-align: center;
}
.btn-circle.btn-xl {
width: 60px;
height: 60px;
padding: 7px 7px;
border-radius: 35px;
font-size: 30px;
text-align: center;
}
#critical-btn.btn-secondary {
background-color: var(--grey-color);
border-color: var(--grey-color);
}
.sticky-item {
position: fixed;
bottom: 30px;
right: 30px;
}
</style>
<link rel="shortcut icon" href="favicon.ico" />
<script type="text/x-mathjax-config">
const popovers = document.querySelectorAll('a.footnote-ref[data-toggle="popover"]');
for (let popover of popovers){
const div = document.createElement('div');
div.setAttribute('style', 'position: absolute; top: 0, left:0; width:0, height:0, overflow: hidden; visibility: hidden;');
div.innerHTML = popover.getAttribute('data-content');
// Will this work with TeX on its own line?
var has_math = div.querySelector("span.math");
if (has_math) {
document.body.appendChild(div);
MathJax.Hub.Queue(["Typeset", MathJax.Hub, div]);
MathJax.Hub.Queue(function(){
popover.setAttribute('data-content', div.innerHTML);
})
}
}
</script>
<link href="https://cdn.jsdelivr.net/gh/gitbrent/[email protected]/css/bootstrap4-toggle.min.css" rel="stylesheet">
<script src="https://cdn.jsdelivr.net/gh/gitbrent/[email protected]/js/bootstrap4-toggle.min.js"></script>
<script type="text/javascript">
// Add switch element
document.addEventListener('DOMContentLoaded', function(event){
let darkSwitch = document.createElement('div');
darkSwitch.innerHTML = "<div class='sticky-item'><button type='button' id='critical-btn' class='btn btn-dark btn-circle btn-xl'>🌓</button></div>"
document.body.appendChild(darkSwitch);
})
$(document).ready(function() {
$("#critical-btn").click(function () {
if ($(this).hasClass('btn-dark')){
$('#critical-btn').removeClass('btn-dark').addClass('btn-secondary');
$(this).addClass('btn-secondary').removeClass('btn-dark');
}
else if ($(this).hasClass('btn-secondary')){
$('#critical-btn').removeClass('btn-secondary').addClass('btn-dark');
$(this).addClass('btn-dark').removeClass('btn-secondary');
}
})
})
</script>
<script type="text/javascript">
// Wait for document to load
document.addEventListener("DOMContentLoaded", function(event) {
document.documentElement.setAttribute("data-theme", "light");
// Get our button switcher
var themeSwitcher = document.getElementById("critical-btn");
// When our button gets clicked
themeSwitcher.onclick = function() {
// Get the current selected theme, on the first run
// it should be `light`
var currentTheme = document.documentElement.getAttribute("data-theme");
// Switch between `dark` and `light`
var switchToTheme = currentTheme === "dark" ? "light" : "dark"
// Set our current theme to the new one
document.documentElement.setAttribute("data-theme", switchToTheme);
// Save current selection
localStorage.setItem('themeMode', switchToTheme);
}
});
// keep selection across pages
document.addEventListener('DOMContentLoaded', function(event){
const currentTheme = localStorage.getItem('themeMode')
if (currentTheme === 'dark'){
document.documentElement.setAttribute("data-theme", 'dark');
if ($("#critical-btn").hasClass('btn-dark')) {
$('#critical-btn').removeClass('btn-dark').addClass('btn-secondary');
$('#critical-btn').addClass('btn-secondary').removeClass('btn-dark');
}
} else {
document.documentElement.setAttribute("data-theme", 'light');
if ($("#critical-btn").hasClass('btn-secondary')) {
$('#critical-btn').removeClass('btn-secondary').addClass('btn-dark');
$('#critical-btn').addClass('btn-dark').removeClass('btn-secondary');
}
}})
</script>
# (APPENDIX) Appendix {-}
<br></br>
# Questions & Answers {#qanda}
---
## Chapter 1: Introduction {#qanda1}
---
**1. How can meta-analysis be defined? What differentiates a meta-analysis from other types of literature reviews?**
Meta-analysis can be defined as an **analysis of analyses** (definition by Glass). In contrast to other types of (systematic) reviews, meta-analysis aims to synthesize evidence in a quantitative way. Usually, the goal is to derive a numerical estimate that describes a clearly circumscribed research field **as a whole**.
**2. Can you name one of the founding mothers and fathers of meta-analysis? What achievement can be attributed to her or him?**
Karl Pearson: combination of typhoid inoculation data across the British Empire; Ronald Fisher: approaches to synthesize data of agricultural research studies; Mary Smith and Gene Glass: coined the term "meta-analysis", first meta-analysis of psychotherapy trials; John Hunter and Frank Schmidt: meta-analysis with correction of measurement artifacts (psychometric meta-analysis); Rebecca DerSimonian and Nan Laird: method to calculate random-effects model meta-analyses; Peter Elwood and Archie Cochrane: pioneer meta-analysis in medicine.
**3. Name three common problems of meta-analyses and describe them in one or two sentences.**
"Apples and Oranges": studies are too different to be synthesized; "Garbage In, Garbage Out": invalid evidence is only reproduced by meta-analyses; "File Drawer": negative results are not published, leading to biased findings in meta-analyses; "Researcher Agenda": researchers can tweak meta-analyses to prove what they want to prove.
**4. Name qualities that define a good research question for a meta-analysis.**
FINER: feasible, interesting, novel, ethical, relevant; PICO: clearly defined population, intervention/exposure, control group/comparison, and analyzed outcome.
**5. Have a look again at the eligibility criteria of the meta-analysis on sleep interventions in college students (end of Chapter 1.4.1). Can you extract the PICO from the inclusion and exclusion criteria of this study?**
Population: tertiary education students; Intervention: sleep-focused psychological interventions; Comparison: passive control condition; Outcome: sleep disturbance, as measured by standardized symptom measures.
**6. Name a few important sources that can be used to search studies.**
Review articles, references in studies, "forward search" (searching for studies that have cited a relevant article), searching relevant journals, bibliographic database search.
**7. Describe the difference between "study quality" and "risk of bias" in one or two sentences.**
A study can fulfill all study quality criteria that are considered important in a research field and still have a high risk of bias (e.g. because bias is difficult to avoid for this type of study or research topic).
<br></br>
## Chapter 2: Discovering R {#qanda2}
---
**1. Show the variable `Author`.**
```{r, eval=F}
data$Author
```
**2. Convert `subgroup` to a factor.**
```{r, eval=F}
data$subgroup <- as.factor(data$subgroup)
```
**3. Select all the data of the "Jones" and "Martin" study.**
```{r, eval=F}
library(tidyverse)
data %>%
  filter(Author %in% c("Jones", "Martin"))
```
**4. Change the name of the study "Rose" to "Bloom".**
```{r, eval=F}
data[5,1] <- "Bloom"
```
**5. Create a new variable `TE_seTE_diff` by subtracting `seTE` from `TE`. Save the results in `data`.**
```{r, eval=F}
data$TE_seTE_diff <- data$TE - data$seTE
```
**6. Use a pipe to (1) filter all studies in subgroup "one" or "two", (2) select the variable `TE_seTE_diff`, (3) take the mean of the variable, and then apply the `exp` function to it.**
```{r, eval=F}
data %>%
  dplyr::filter(subgroup %in% c("one", "two")) %>%
  pull(TE_seTE_diff) %>%
  mean() %>%
  exp()
```
<br></br>
## Chapter 3: Effect Sizes {#qanda3}
---
**1. Is there a clear definition of the term "effect size"? What do people refer to when they speak of effect sizes?**
No, there is no universally accepted definition. Some reserve the term "effect size" for differences between intervention and control groups. Others use a more liberal definition, and only exclude "one-variable" measures (e.g. means and proportions).
**2. Name a primary reason why observed effect sizes deviate from the true effect size of the population. How can it be quantified?**
Observed effect sizes are assumed to deviate from the true effect size because of sampling error. The expected size of a study's sampling error can be expressed by its standard error.
**3. Why are large studies better estimators of the true effect than small ones?**
Because they are assumed to have a smaller sampling error, which leads to more precise effect estimates.
**4. What criteria does an effect size metric have to fulfill to be usable for meta-analyses?**
It needs to be comparable, computable, reliable and interpretable.
**5. What does a standardized mean difference of 1 represent?**
It represents that the means of the two groups differ by one pooled standard deviation.
**6. What kind of transformation is necessary to pool effect sizes based on ratios (e.g. an odds ratio)?**
The effect size needs to be log-transformed (in order to use the inverse-variance pooling method).
**7. Name three types of effect size corrections.**
Small sample bias correction of standardized mean differences (Hedges' $g$); correction for unreliability; correction for range restriction.
**8. When does the unit-of-analysis problem occur? How can it be avoided?**
When effect sizes in our data set are correlated (for example because they are part of the same study). The unit-of-analysis problem can be (partly or fully) avoided by (1) splitting the sample size of the shared group, (2) removing comparisons, (3) combining groups, or (4) using models that account for the effect size dependencies (e.g. three-level models).
<br></br>
## Chapter 4: Pooling Effect Sizes {#qanda4}
---
**1. What is the difference between a fixed-effect model and a random-effects model?**
The fixed-effect model assumes that all studies are estimators of the same true effect size. The random-effects model assumes that the true effect sizes of studies vary because of between-study heterogeneity (captured by the variance $\tau^2$), which needs to be estimated.
**2. Can you think of a case in which the results of the fixed- and random-effects model are identical?**
When the between-study heterogeneity variance $\tau^2$ is zero.
**3. What is $\tau^2$? How can it be estimated?**
The between-study heterogeneity variance. It can be estimated using different methods, for example restricted maximum likelihood (REML), the Paule-Mandel estimator, or the DerSimonian-Laird estimator.
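For illustration, here is a minimal sketch of how the estimator can be selected in **{meta}** via the `method.tau` argument. The objects `TE`, `seTE` and `data` are placeholders for your own effect size data:

```{r, eval=F}
library(meta)

# Random-effects pooling with different tau^2 estimators
metagen(TE, seTE, data = data, sm = "SMD", method.tau = "REML") # REML
metagen(TE, seTE, data = data, sm = "SMD", method.tau = "PM")   # Paule-Mandel
metagen(TE, seTE, data = data, sm = "SMD", method.tau = "DL")   # DerSimonian-Laird
```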
**4. Which distribution is the Knapp-Hartung adjustment based on? What effect does it have?**
It is based on a $t$-distribution. The Knapp-Hartung adjustment usually leads to more conservative (i.e. wider) confidence intervals.
**5. What does "inverse-variance" pooling mean? When is this method not the best solution?**
The method is called inverse-variance pooling because it uses the inverse of a study's variance as the pooling weight. The generic inverse-variance method is not the preferred option for meta-analyses of binary outcome data (e.g. risk or odds ratios).
**6. You want to meta-analyze binary outcome data. The number of observations in the study arms is roughly similar, the observed event is very rare, and you do not expect the treatment effect to be large. Which pooling method would you use?**
This is a scenario in which the Peto method may perform well.
**7. For which outcome measures can GLMMs be used?**
Proportions. It is also possible to use them for other binary outcome measures, but not generally recommended.
<br></br>
## Chapter 5: Between-Study Heterogeneity {#qanda5}
---
**1. Why is it important to examine the between-study heterogeneity of a meta-analysis?**
When the between-study heterogeneity is large, the true effect sizes can be assumed to vary considerably. In this case, a point estimate of the average true effect may not represent the data well in their totality. Between-study heterogeneity can also lead to effect estimates that are not robust, for example because a few outlying studies distort the overall result.
**2. Can you name the two types of heterogeneity? Which one is relevant in the context of calculating a meta-analysis?**
Baseline/design-related heterogeneity and statistical heterogeneity. Only statistical heterogeneity is assessed quantitatively in meta-analyses.
**3. Why is the significance of Cochran's $Q$ not a sufficient measure of between-study heterogeneity?**
Because the significance of the $Q$ test heavily depends on the number of studies included in our meta-analysis, and their size.
**4. What are the advantages of using prediction intervals to express the amount of heterogeneity in a meta-analysis?**
Prediction intervals allow us to express the impact of between-study heterogeneity on future studies, on the same scale as the summary measure.
**5. What is the difference between statistical outliers and influential studies?**
Statistical outliers are studies with **extreme** effect sizes. Studies are influential when their impact on the overall result is large. It is possible that a study can be defined as a statistical outlier without being very influential, and vice versa. For example, a large study may have a big impact on the pooled results, even though its effect size is not particularly small or large.
**6. For what can GOSH plots be used?**
GOSH plots can be used to explore patterns of heterogeneity in our data, and which studies contribute to them.
<br></br>
## Chapter 6: Forest Plots {#qanda6}
---
**1. What are the key components of a forest plot?**
Graphical representation of each study's observed effect size, with confidence intervals; the weight of each study, represented by the size of squares around the observed effect sizes; the numeric value of each study's observed effect and weight; the pooled effect, represented by a diamond; a reference line, usually representing no effect.
**2. What are the advantages of presenting a forest plot of our meta-analysis?**
They allow us to quickly examine the number, effect size, and precision of all included studies, and to see how the observed effects "add up" to the pooled effect.
**3. What are the limitations of forest plots, and how do drapery plots overcome this limitation?**
Forest plots can only show the confidence intervals of effects assuming a fixed significance threshold (usually $\alpha$ = 0.05). Drapery plots can be used to show the confidence intervals (and thus the significance) of effect sizes for varying $p$-values.
<br></br>
## Chapter 7: Subgroup Analyses {#qanda7}
---
**1. In the best case, what can a subgroup analysis tell us that influence and outlier analyses cannot?**
Subgroup analyses can potentially explain **why** certain heterogeneity patterns exist in our data, versus only telling us **that** they exist.
**2. Why is the model behind subgroup analyses called the fixed-effects (plural) model?**
Because it assumes that, while studies within subgroups follow a random-effects model, the subgroup levels themselves are fixed. There are several fixed subgroup effects.
**3. As part of your meta-analysis, you want to examine if the effect of an educational training program differs depending on the school district in which it was delivered. Is a subgroup analysis using the fixed-effects (plural) model appropriate to answer this question?**
Probably not. It makes more sense to assume that the school districts represent draws from a larger population of districts, rather than all school districts that exist.
**4. A friend of yours conducted a meta-analysis containing a total of nine studies. Five of these studies fall into one subgroup, four into the other. She asks you if it makes sense to perform a subgroup analysis. What would you recommend?**
It is probably not a good idea to conduct a subgroup analysis, since the total number of studies is smaller than ten.
**5. You found a meta-analysis in which the authors claim that the analyzed treatment is more effective in women than men. This finding is based on a subgroup analysis, in which studies were divided into subgroups based on the share of females included in the study population. Is this finding credible, and why (not)?**
The finding is based on a subgroup variable that has been created using aggregated study data. This may introduce ecological bias, and the results are therefore questionable.
<br></br>
## Chapter 8: Meta-Regression {#qanda8}
---
**1. What is the difference between a conventional regression analysis used in primary studies, and meta-regression?**
The units of analysis are studies (instead of persons), and their effect sizes are estimated with varying precision. In meta-regression, we have to build regression models that account for the fact that some studies should have a greater weight than others.
**2. Subgroup analyses and meta-regression are closely related. How can the meta-regression formula be adapted to subgroup data?**
By using dummy/categorical predictors.
**3. Which method is used in meta-regression to give individual studies a differing weight?**
Meta-regression uses **weighted least squares** to give studies with higher precision a greater weight.
**4. What characteristics mark a meta-regression model that fits our data well? Which index can be used to examine this?**
A "good" meta-regression model should lead to a large reduction in the amount of unexplained between-study heterogeneity variance. An index which covers this increase in explained variance is the $R^2$ analog.
**5. When we calculate a subgroup analysis using meta-regression techniques, do we assume a separate or common value of $\tau^2$ in the subgroups?**
A common estimate of $\tau^2$ is assumed in the subgroups.
**6. What are the limitations and pitfalls of (multiple) meta-regression?**
Overfitting in meta-regression can lead to false-positive results; multicollinearity can lead to parameter estimates that are not robust.
**7. Name two methods that can be used to improve the robustness of (multiple) meta-regression models, and why they are helpful.**
We can conduct a permutation test or use multi-model inference.
<br></br>
## Chapter 9: Publication Bias {#qanda9}
---
**1. How can the term "publication bias" be defined? Why is it problematic in meta-analyses?**
Publication bias exists when the probability of a study to get published depends on its results. This is problematic because it can lead to biased results in meta-analyses. Because not all evidence is considered, meta-analyses may result in findings that would not have materialized when all existing information had been considered.
**2. What other reporting biases are there? Name and explain at least three.**
Citation bias: studies with negative findings are less likely to be cited; time-lag bias: studies with negative findings are published later; multiple publication bias: studies with positive findings are more likely to be reported in several articles; language bias: evidence may be omitted because it is not published in English; outcome reporting bias: positive outcomes of a study are more likely to be reported than negative outcomes.
**3. Name two questionable research practices (QRPs), and explain how they can threaten the validity of our meta-analysis.**
P-hacking, HARKing. Both lead to an inflation of positive findings, even when there is no true effect.
**4. Explain the core assumptions behind small-study effect methods.**
Large studies (i.e. studies with a small standard error) are very likely to get published, no matter what their findings are. Smaller studies have lower precision, which means that very high effect sizes are needed to attain statistical significance. Therefore, only small studies with very high effects are published, while the rest ends up in the "file drawer".
**5. When we find out that our data displays small-study effects, does this automatically mean that there is publication bias?**
No. There are several other explanations why we find small-study effects, including between-study heterogeneity, effects of co-variates (e.g. treatment fidelity is higher in smaller studies), or chance.
**6. What does p-curve estimate: the true effect of all studies included in our meta-analysis, or just the true effect of all _significant_ effect sizes?**
P-curve only estimates the true effect of all significant effect sizes. This is one of the reasons why it does not perform well when there is between-study heterogeneity.
**7. Which publication bias method has the best performance?**
No publication bias method consistently outperforms all the others. Therefore, it is helpful to apply several methods, and see if their results align.
<br></br>
## Chapter 10: "Multilevel" Meta-Analysis {#qanda10}
---
**1. Why is it more accurate to speak of "three-level" instead of "multilevel" models?**
Because the "conventional" random-effects model is already a multilevel model. It assumes that participants are nested within studies, and that the studies themselves are drawn from a population of true effect sizes.
**2. When are three-level meta-analysis models useful?**
When we are dealing with correlated or nested data. Three-level models are particularly useful when studies contribute multiple effect sizes, or when there is good reason to believe that studies themselves fall into larger clusters.
**3. Name two common causes of effect size dependency.**
Dependence caused by the researchers involved in the primary studies; dependency created by the meta-analyst herself.
**4. How can the multilevel $I^2$ statistic be interpreted?**
It tells us the amount of variance not attributable to sampling error, and differentiates between heterogeneity variance **within** clusters, and heterogeneity variance **between** clusters.
**5. How can a three-level model be expanded to incorporate the effect of moderator variables?**
By adding a fixed-effect (moderator) term to the model formula.
<br></br>
## Chapter 11: Structural Equation Modeling Meta-Analysis {#qanda11}
---
**1. What is structural equation modeling, and what is used for?**
Structural equation modeling is a statistical method that can be used to test assumed relationships between manifest and latent variables.
**2. What are the two ways through which SEM can be represented?**
SEM can be represented graphically or through matrices.
**3. Describe a random-effects meta-analysis from a SEM perspective.**
From a SEM perspective, the true overall effect size in a random-effects meta-analysis can be seen as a latent variable. It is "influenced" by two sources of variation: the sampling error on level 1, and the true effect size heterogeneity variance on level 2.
**4. What is a multivariate meta-analysis, and when is it useful?**
Multivariate meta-analysis allows to simultaneously pool two (or more) outcomes of studies. An asset of jointly estimating the two outcome variables is that the correlation between outcomes can be taken into account.
**5. When we find that our proposed meta-analytic SEM fits the data well, does this automatically mean that this model is the "correct" one?**
No. Frequently, there is more than one model that fits the data well.
<br></br>
## Chapter 12: Network Meta-Analysis {#qanda12}
---
**1. When are network meta-analyses useful? What is their advantage compared to standard meta-analyses?**
Network meta-analyses are useful when there are several competing treatments for some problem area, and we want to estimate which one has the largest benefits. In contrast to conventional meta-analyses, network meta-analysis models can integrate both direct and indirect evidence.
**2. What is the difference between direct and indirect evidence in a treatment network? How can direct evidence be used to generate indirect evidence?**
Direct evidence is information provided by comparisons that have actually been investigated in the included studies. Indirect evidence is derived from direct evidence by subtracting the effect of one (directly observed) comparison from the one of a related comparison (e.g. a comparison that used the same control group).
**3. What is the main idea behind the assumption of transitivity in network meta-analyses?**
The assumption of transitivity stipulates that direct evidence can be used to infer unobserved, indirect evidence, and that direct and indirect evidence is consistent.
**4. What is the relationship between transitivity and consistency?**
Transitivity is a pre-requisite to conduct network meta-analyses, and cannot be tested directly. The statistical manifestation of transitivity is consistency, which is fulfilled when effect size estimates based on direct evidence are identical or similar to estimates based on indirect evidence.
**5. Name two modeling approaches that can be used to conduct network meta-analyses. Is one of them better than the other?**
Network meta-analysis can be conducted using a frequentist or Bayesian model. Both models are equivalent and produce converging results with increasing sample size.
**6. When we include several comparisons from one study (i.e. multi-arm studies), what problem does this cause?**
This means that the effect estimates are correlated, causing a unit-of-analysis error.
**7. What do we have to keep in mind when interpreting the P- or SUCRA score of different treatments?**
That the effect estimates of different treatments often overlap. This means that P-/SUCRA scores should always be interpreted with some caution.
<br></br>
## Chapter 13: Bayesian Meta-Analysis {#qanda13}
---
**1. What are differences and similarities between the "conventional" random-effects model and a Bayesian hierarchical model?**
The random-effects model underlying frequentist meta-analysis is conceptually identical to the Bayesian hierarchical model. The main difference is that the Bayesian hierarchical model includes (weakly informative) prior distributions for the overall true effect size $\mu$ and between-study heterogeneity $\tau$.
**2. Name three advantages of Bayesian meta-analyses compared to their frequentist counterpart.**
Uncertainty of the $\tau^2$ estimate is directly modeled; a posterior distribution for $\mu$ is produced, which can be used to calculate the probability of $\mu$ lying below a certain value; prior knowledge or beliefs can be integrated into the model.
**3. Explain the difference between a weakly informative and non-informative prior.**
Non-informative priors assume that all or a range of possible values are equally likely. Weakly informative priors represent a **weak** belief that some values are more probable than others.
**4. What is a Half-Cauchy distribution, and why is it useful for Bayesian meta-analysis?**
The Half-Cauchy distribution is a Cauchy distribution that is only defined for positive values. It is controlled by a location and scaling parameter, the latter determining how heavy the tails of the distribution are. Half-Cauchy distributions can be used as priors for $\tau$.
**5. What is an ECDF, and how can it be used in Bayesian meta-analyses?**
ECDF stands for **empirical cumulative distribution function**. ECDFs based on the posterior distribution of $\mu$ (or $\tau$) can be used to determine the (cumulative) probability that the estimated parameter is below or above some specified threshold.
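A minimal sketch in base _R_, where `post.mu` is a placeholder for a vector of posterior draws of $\mu$ extracted from a fitted Bayesian model:

```{r, eval=F}
# 'post.mu' is a placeholder for posterior draws of mu
p_mu <- ecdf(post.mu)

# Cumulative posterior probability that the true effect is below 0.2
p_mu(0.2)
```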
\qed
<br></br>
# Effect Size Formulas {#formula}
---
\renewcommand{\arraystretch}{2}
```{r esformula, echo=F, message=F, fig.align='center'}
library(kableExtra)
library(openxlsx)
dat = read.xlsx("data/estable2.xlsx")
colnames(dat) = c(" ", "Effect Size ($\\hat\\theta$)",
"Standard Error (SE)", "Function")
dat[1][is.na(dat[1])] = " "
dat[2][is.na(dat[2])] = " "
dat[3][is.na(dat[3])] = " "
dat[4][is.na(dat[4])] = " "
dat[1][dat[1] == "XXX"] = " "
#dat[5,2] = cell_spec(dat[5,2], "latex", font_size = 6, escape = FALSE) # cor2
dat[6,2] = cell_spec(dat[6,2], "latex", font_size = 4, escape = FALSE) # pb
dat[7,3] = cell_spec(dat[7,3], "latex", font_size = 6, escape = FALSE) # between md
dat[8,3] = cell_spec(dat[8,3], "latex", font_size = 6, escape = FALSE) # between smd
dat[9,3] = cell_spec(dat[9,3], "latex", font_size = 6, escape = FALSE) # within md
dat[10,2] = cell_spec(dat[10,2], "latex", font_size = 6, escape = FALSE) # within smd
dat[10,3] = cell_spec(dat[10,3], "latex", font_size = 6, escape = FALSE) # within smd
dat[11,3] = cell_spec(dat[11,3], "latex", font_size = 6, escape = FALSE) # rr 1
dat[26,2] = cell_spec(dat[26,2], "latex", font_size = 6, escape = FALSE) # range 2
dat[27,2] = cell_spec(dat[27,2], "latex", font_size = 6, escape = FALSE) # range 3
dat[1,1] = "Arithmetic Mean (\\@ref(means))"
dat[c(2,3),1] = "Proportion (\\@ref(props))"
dat[4:5,1] = "Product-Moment Correlation (\\@ref(pearson-cors))"
dat[6,1] = "Point-Biserial Correlation<sup>1</sup> (\\@ref(pb-cors))"
dat[7,1] = "Between-Group Mean Difference (\\@ref(b-group-md))"
dat[8,1] = "Between-Group Standardized Mean Difference (\\@ref(b-group-smd))"
dat[9,1] = "Within-Group Mean Difference (\\@ref(w-group-smd))"
dat[10,1] = "Within-Group Standardized Mean Difference (\\@ref(w-group-smd))"
dat[11:14,1] = "Risk Ratio (\\@ref(rr))"
dat[15:18,1] = "Odds Ratio (\\@ref(or))"
dat[19:20,1] = "Incidence Rate Ratio (\\@ref(irr))"
dat[21,1] = "Small Sample Bias (\\@ref(hedges-g))"
dat[22:24,1] = "Unreliability (\\@ref(unrealiable))"
dat[25:27,1] = "Range Restriction (\\@ref(range))"
kableExtra::kable(dat, "html",
#longtable = TRUE,
escape = FALSE,
#booktabs = TRUE,
align = "lccl",
linesep = "") %>%
kable_classic(font_size = 12,
html_font = "Roboto") %>%
#column_spec(1, width = "2cm") %>%
#column_spec(2, width = "3cm") %>%
#column_spec(3, width = "4cm") %>%
column_spec(4, monospace = T) %>%
collapse_rows(columns = 1, latex_hline = "none", valign = "top") %>%
pack_rows(" ", 1, 1, indent = FALSE) %>%
pack_rows(" ", 2, 3, indent = FALSE) %>%
pack_rows(" ", 4, 6, indent = FALSE) %>%
pack_rows(" ", 6, 6, indent = FALSE) %>%
pack_rows(" ", 8, 8, indent = FALSE) %>%
pack_rows(" ", 9, 9, indent = FALSE) %>%
pack_rows(" ", 10, 10, indent = FALSE) %>%
pack_rows(" ", 11, 15, indent = FALSE) %>%
pack_rows(" ", 15, 18, indent = FALSE) %>%
pack_rows(" ", 19, 20, indent = FALSE) %>%
pack_rows(" ", 21, 21, indent = FALSE) %>%
pack_rows(" ", 22, 24, indent = FALSE) %>%
pack_rows(" ", 25, 27, indent = FALSE) %>%
pack_rows("Correlation", 4, 6, indent = FALSE,
label_row_css = "background-color: #277DB0; color: #fff;") %>%
pack_rows("(Standardized) Mean Difference", 7, 10, hline_after = FALSE, indent = FALSE,
label_row_css = "background-color: #277DB0; color: #fff;") %>%
pack_rows("Binary Outcome Effect Size", 11, 20, hline_after = FALSE, indent = FALSE,
label_row_css = "background-color: #277DB0; color: #fff;") %>%
pack_rows("Effect Size Correction", 21, 27, hline_after = FALSE, indent = FALSE,
label_row_css = "background-color: #277DB0; color: #fff;") %>%
footnote(number = c("Point-biserial correlations may be converted to SMDs for meta-analysis (see Chapter \\@ref(pb-cors))."),
symbol = "The pooled standard deviation is defined as $s_{\\text{pooled}} = \\sqrt{\\dfrac{(n_1-1)s^2_1+(n_2-1)s^2_2}{(n_1-1)+(n_2-1)}}$.",
escape=FALSE) %>%
row_spec(0, background = "#277DB0", color = "#f5f5f5") %>%
row_spec(1, extra_css = "border-bottom: 1px solid") %>%
row_spec(2:3, background = "#f5f5f5") %>%
row_spec(6, extra_css = "border-top: 1px solid", background = "#f5f5f5") %>%
#row_spec(6, background = "#f5f5f5") %>%
row_spec(7, extra_css = "border-bottom: 1px solid") %>%
row_spec(8, background = "#f5f5f5", extra_css = "border-bottom: 1px solid") %>%
row_spec(9, extra_css = "border-bottom: 1px solid") %>%
row_spec(10, background = "#f5f5f5") %>%
row_spec(14, extra_css = "border-bottom: 1px solid") %>%
row_spec(15:18, background = "#f5f5f5") %>%
row_spec(19, extra_css = "border-top: 1px solid") %>%
row_spec(21, extra_css = "border-bottom: 1px solid") %>%
row_spec(22:24, background = "#f5f5f5") %>%
row_spec(25, extra_css = "border-top: 1px solid")
```
\renewcommand{\arraystretch}{1}
<br></br>
# List of Symbols {#symbollist}
---
```{r symbol, echo=F, message=F, fig.align='center', warning = F}
library(kableExtra)
library(openxlsx)
# dat = read.xlsx("data/symbols.xlsx")
dat = read.csv("data/symbols.csv")
dat = dat[2:5]
dat[14,4] = "Risk ratio, odds ratio, incidence rate ratio."
dat[2,3] = "$\\mathcal{HC}(x_0,s)$"
dat[11,3] = "$\\mathcal{N}(\\mu, \\sigma^2)$"
rbind(c("$~$", "$~$", "$~$", "$~$"), dat) -> dat
below = function(data, i){rbind(data[i,], c(" ", " ", " ", "$~$"))}
l = list()
for (i in 1:nrow(dat)){l[[i]] = below(dat, i)}
do.call(rbind, l) %>% as.data.frame() -> dat
rownames(dat) = 1:nrow(dat)
kableExtra::kable(dat, "html",
col.names = NULL, booktabs = TRUE,
longtable = T,
escape = FALSE,
linesep = "") %>%
kable_classic(font_size = 14,
html_font = "Roboto",
bootstrap_options = c("striped")) %>%
column_spec(1, width = "1cm") %>%
column_spec(2, width = "4cm") %>%
column_spec(3, width = "1cm") %>%
column_spec(4, width = "4cm")
```
**Note.** Vectors and matrices are written in **bold**. For example, we can denote all observed effect sizes in a meta-analysis with a vector $\boldsymbol{\hat\theta} = (\hat\theta_1, \hat\theta_2, \dots, \hat\theta_K)^\top$, where $K$ is the total number of studies. The $\top$ symbol indicates that the vector is **transposed**. This means that elements in the vector are arranged vertically instead of horizontally. This is sometimes necessary to do further operations with the vector, for example multiplying it with another matrix.
<br></br>
# _R_ & Package Information {#attr}
---
<br></br>
This book was compiled using _R_ version 4.2.0 ("Vigorous Calisthenics", 2022-04-22) running under macOS Catalina 10.15.4 (Apple Darwin 17.0 64-bit x86-64). The following package versions are used in the book:
\vspace{4mm}
```
brms 2.18.0 clubSandwich 0.5.6
dmetar 0.0.9000 dplyr 1.0.10
esc 0.5.1 extraDistr 1.9.1
forcats 0.5.1 gemtc 0.8-6
ggplot2 3.3.5 ggridges 0.5.2
glue 1.4.1 igraph 1.2.5
meta 6.1.0 metafor 3.8.1
metaSEM 1.3.0 metasens 1.5.1
netmeta 2.1.0 openxlsx 4.2.5
osfr 0.2.8 PerformanceAnalytics 2.0.4
rjags 4-10 robvis 0.3.0
semPlot 1.1.5 stringr 1.4.0
tidybayes 2.1.1 tidyverse 1.3.1
wildmeta 0.1.0
```
\vspace{4mm}
Attached base packages:
```
base 4.2.0 datasets 4.2.0 graphics 4.2.0
grDevices 4.2.0 methods 4.2.0 stats 4.2.0
utils 4.2.0
```
\vspace{4mm}
Locale: `en_US.UTF-8`
```{block, type='boxinfo'}
**Installing Specific Package Versions**
Are you having trouble running some of the code presented in the guide? Sometimes, this is caused by having an **older or newer version** of a package installed on your computer. If this is the case, it can be helpful to install the exact version of a package that we also used to compile this book. Some helpful tips on how to install specific versions of an R package can be found [here](https://support.posit.co/hc/en-us/articles/219949047-Installing-older-versions-of-packages).
```
\vspace{8mm}
<br></br>
**Attributions**
Figure \@ref(fig:eysenck): Sirswindon at [English Wikipedia](https://commons.wikimedia.org/wiki/File:Hans.Eysenck.jpg), [CC BY-SA 3.0](https://creativecommons.org/licenses/by-sa/3.0), via Wikimedia Commons. Desaturated from original.
<br></br>
# Corrections & Remarks {#corrections}
---
```{block2, type='boxinfo'}
Errata and remarks concerning the first edition print version of the book are displayed here.
```
Last updated: `r format(Sys.time(), '%d %B, %Y')`.
## Chapter 3.3.3 {-}
In the imaginary experiment in Figure 3.4, four participants experience the event during the 10-year observation period. In the calculation example, however, we use $E=3$ to calculate the incidence rate. In the online version, we have now corrected this so that $E=4$, like in the experiment.
## Chapter 4.2 {-}
A new version of **{meta}** (version 5.0-0) has recently been released. We adapted the code in this chapter accordingly to avoid deprecation messages:
- The `comb.fixed` and `comb.random` arguments are now called `fixed` and `random`, respectively (see the brief sketch below).
- To print all studies, one now has to use the `summary` method for **{meta}** meta-analysis objects.
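For illustration, a minimal sketch of the updated syntax; `TE`, `seTE` and `data` are placeholders for your own data set:

```{r, eval=F}
library(meta)

# meta >= 5.0-0: 'fixed'/'random' replace 'comb.fixed'/'comb.random'
m.gen <- metagen(TE, seTE, data = data, sm = "SMD",
                 fixed = FALSE, random = TRUE)

# The summary method prints the full results, including all studies
summary(m.gen)
```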
## Chapter 7.3 {-}
A new version of **{meta}** (version 5.0-0) has recently been released. We adapted the code in this chapter accordingly to avoid deprecation messages:
- The `byvar` argument is now called `subgroup`.
- To print all studies, one now has to use the `summary` method for **{meta}** meta-analysis objects.
## Chapter 12.2.1 {-}
The print version contains a factual error concerning the definition of full rank in non-square (rectangular) matrices. It is stated that a "matrix is not of full rank when its rows are not all independent". This, however, only applies to square matrices and non-square matrices with less rows than columns ($m < n$). In our example, there are more rows than columns; this means that $\boldsymbol{X}$ is not full rank because its **columns** are not all independent (in $m > n$ matrices, rows are always linearly dependent). This erratum has been corrected in the online version.
## Chapter 12.2.2 {-}
A new version of **{netmeta}** (version 2.0-0) has recently been released. We adapted the code in this chapter accordingly to avoid error messages:
- The latest version of **{netmeta}** resulted in non-convergence of the Fisher scoring algorithm implemented in `rma.mv`. This problem pertains to all versions of **{dmetar}** installed before 24-Oct-2021. To avoid the issue, simply [reinstall the latest version of **{dmetar}**](https://dmetar.protectlab.org/#installation).
<br></br>
# Reporting & Reproducibility {#reporting-reproducibility}
---
<img src="_figs/reporting.jpg" />
<br></br>
<span class="firstcharacter">I</span>
n the previous chapters, we discussed various techniques, approaches, and strategies we can use to conduct a meta-analysis in _R_. However, running the statistical analyses only makes up a small proportion of the entire meta-analysis "process" in practice. "In the wild", it is common that:
* We find an error in our _R_ code, and therefore have to redo parts of the analysis with a few changes.
* Collaborators or reviewers suggest using a different approach or model, or performing an additional sensitivity analysis.
* We need to delegate some parts of the analysis to one of our collaborators and have to send her the current status of our work.
* We had to stop working on our project for some time, which means that we have forgotten many things by the time we resume working on it.
* We want to share results of our analysis with project collaborators but they do not know _R_ and do not have R Studio installed.
These are just a few scenarios, but they illustrate that a **reproducible workflow** when conducting meta-analyses in _R_ is beneficial to you and the people you work with. Aiming for reproducibility is also a cornerstone of **open science** practices. Fully reproducible meta-analyses make it as transparent as possible to others how we ended up with our results.
\index{Markdown, _R_}
\index{Open Science Framework (OSF)}
R Studio is an optimal tool to create a reproducible workflow and to facilitate cooperation. In this chapter, we introduce three tools to reproduce, report and disseminate our analyses: _R_ Projects, **R Markdown**, and the **Open Science Framework**.
<br></br>
## Using _R_ Projects
---
\index{Project, _R_}
A good way to start with your analysis is to first set up an _R_ **project** in R Studio. _R_ projects create a new environment in a folder on our computer. In this folder, all the data and _R_ code you need for your analyses is saved. Conducting analyses in an _R_ project means that all objects we create are temporarily saved in the project environment and will be accessible the next time we reopen it. To create a new _R_ project, we can click on the **R project** field in the upper right corner of the R Studio window, and then on **New Project...** in the drop-down menu.
```{r, message = F, out.width = '60%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/rproj1_col.png')
```
Then we create a **New Directory**, a new folder on our computer, which will become the working directory of the project.
```{r, message = F, out.width = '45%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/rproj2_col.png')
```
Then, we click on **New Project**.
```{r, message = F, out.width = '45%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/rproj3_col.png')
```
We give our new project the name "Meta-Analysis Project". The project folder will be stored in **~Documents/R**.
```{r, message = F, out.width = '45%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/rproj4_col.png')
```
After clicking on **Create Project**, the _R_ project is set. A great feature of _R_ projects is that we do not have to use **absolute paths** to the files we want to reference. We only use the file name, or, if the file is in a (sub-)folder, the folder and file name. Suppose that we stored our data set **data.xlsx** in the sub-folder "data". Using the **{openxlsx}** package (Chapter \@ref(data-prep-R)), we can import the data set with a relative path.
```{r, eval=F}
read.xlsx("data/data.xlsx")
```
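If we are ever unsure where _R_ is currently looking for files, we can print the project's working directory and the contents of the sub-folder:

```{r, eval=F}
getwd()              # should point to the project folder
list.files("data")   # should list "data.xlsx"
```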
<br></br>
## Writing Reproducible Reports with R Markdown
---
\index{Markdown, _R_}
**Markdown** is a simple markup language for text formatting. **R Markdown** [@xie2018r] is an extension of Markdown and makes it easy to combine plain text, _R_ code, and _R_ output in one document. This makes R Markdown an extremely helpful reporting tool. Using R Markdown, we can create HTML or PDF files containing all code used in our analyses, the output produced by the code, and can add detailed information on what we did in each analysis step.
It is very easy to build R Markdown files in R Studio. We only have to click on the white symbol with the green "plus" sign in the top left corner of the R Studio window. Then, in the drop-down menu, we click on **R Markdown...**.
\vspace{2mm}
```{r, message = F, out.width = '35%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/rmd1_col.png')
```
\vspace{2mm}
After defining the name of the new R Markdown document, it should pop up in the upper-left corner of the R Studio window.
\vspace{2mm}
```{r, message = F, out.width = '55%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/rmd2_col.png')
```
\vspace{2mm}
The file already contains some exemplary content, which we can delete, except for the first six lines:
```
---
title: "Analysis"
author: "Author Name"
date: "10/16/2020"
output: html_document
---
```
This part is the so-called **YAML** header. It controls the title, author, date, and export format of the document. The output format we chose for our document is `html_document`, meaning that the document will be exported as an HTML page once it is rendered.
All _R Markdown documents_ consist of two parts: plain Markdown text, and so-called **R chunks**, shown in grey. We will not go into detail how the text parts in the R Markdown document are formatted, but there is an online [cheat sheet](https://rstudio.com/wp-content/uploads/2015/02/rmarkdown-cheatsheet.pdf), which is a great resource to start learning Markdown syntax (this should only take about twenty minutes). The _R_ code chunks, on the other hand, simply contain all the code we would usually type into the console. By clicking on the **Insert** field in the upper right corner of the document, we can add new code chunks. The code can be run by clicking on the little green triangle above each chunk.
\vspace{2mm}
```{r, message = F, out.width = '25%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/rmd3_col.png')
```
\vspace{2mm}
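For example, a first code chunk near the top of the document is often used to set global chunk options with **{knitr}** (a common convention, not a requirement):

```{r, eval=F}
# Show code, but suppress messages and warnings in the rendered document
knitr::opts_chunk$set(echo = TRUE, message = FALSE, warning = FALSE)
```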
Once we are finished writing our document, we can export it as an HTML, PDF, or MS Word document by clicking on the **Knit** symbol in the upper left corner. This renders the document, including all text, code, and output, and exports it in the defined format. The final document is automatically saved in our project folder.
\vspace{2mm}
```{r, message = F, out.width = '40%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/rmd4_col.png')
```
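Instead of clicking on **Knit**, the same document can also be rendered from the _R_ console using the **{rmarkdown}** package (here assuming our file is called "Analysis.rmd"):

```{r, eval=F}
# Render the R Markdown file; the output format can be changed here
rmarkdown::render("Analysis.rmd", output_format = "html_document")
```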
<br></br>
## OSF Repositories {#osf}
---
\index{Open Science Framework (OSF)}
The **Open Science Framework** ([OSF](https://www.osf.io)) is an open-source online platform to facilitate collaboration and reproducibility in research. The OSF includes an online **repository**, where researchers deposit their study material to collaborate and make all steps of the research process (more) transparent. The OSF is a spearhead of the open science movement, which has gathered much momentum in the last decade.
It is encouraged that all meta-analysts make their research and analysis process transparent to the public, by providing open access to the collected data and _R_ code used for their analyses. The OSF is a great tool to do this--all repositories you create are private by default, and it is up to you to decide if, when, and what you want to make public. In the following, we will show you how to set up an OSF repository in _R_, upload and download files, and how to add collaborators.
<br></br>
### Access Token
---
To start using the OSF, we first have to create a personal account on the [website](https://osf.io/register). After the account has been created, we also have to generate an **access token** so that we can manipulate our repository directly using _R_. To get the access token, we have to navigate to **Profile** > **Settings** > **Personal access tokens**. There, we click on **Create token**.
\vspace{4mm}
```{r, message = F, out.width = '60%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/osf1_col.png')
```
\vspace{4mm}
Then, under **Scopes**, we check all boxes, and click on **Create token** again. After that, our personal access token should appear. We copy the token and save it for later.
\vspace{4mm}
```{r, message = F, out.width = '60%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/osf2_col.png')
```
<br></br>
### The **{osfr}** Package & Authentication
---
To access our OSF repository directly via _R_, we can use the **{osfr}** package [@osfr]. Before we can use the functionality of this package, we first have to use our access token to authenticate. To do this, we use the `osf_auth` function, providing it with the access token we just received (the token displayed below is made up):
```{r, eval=F}
library(osfr)
osf_auth("AtmuMZ3pSuS7tceSMz2NNSAmVDNTzpm2Ud87")
```
<br></br>
### Repository Setup
---
Using **{osfr}**, we can now initialize an OSF repository using _R_. Imagine that we are working on a new meta-analysis project, and that we want to upload our data as well as an R Markdown script to an OSF repository. The name of the repository should be "Meta-Analysis Project".
To create a new repository, the `osf_create_project` function can be used. We save the new OSF repository in _R_ as `meta_analysis_project`.
\vspace{2mm}
```{r, eval=F}
meta_analysis_project <- osf_create_project("Meta-Analysis Project")
```
Using the `osf_open` function, we can then access the newly created repository online:
\vspace{2mm}
```{r, eval=F}
osf_open(meta_analysis_project)
```
```{r, message = F, out.width = '60%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/osf3_col.png')
```
Now that the repository has been created, we can proceed by adding **components** to it. In OSF, components work like folders on a computer. Suppose we want to create two components: one for our data sets, and one for our R Markdown scripts. To do this, we can use the `osf_create_component` function. We have to provide the function with the _R_ repository object (`meta_analysis_project`), and then set the title of the new component.
```{r, eval=F}
scripts <- osf_create_component(meta_analysis_project,
title = "Analysis Scripts")
datasets <- osf_create_component(meta_analysis_project,
title = "Datasets")
```
When we go to the online page of the repository now, we see that the two components have been added.
<br></br>
### Upload & Download
---
To upload data to the OSF repository, we can use the `osf_upload` function. The function requires us to specify the component to which we want to add the file, and the path to the file that should be uploaded. Suppose that we want to upload an R Markdown script called “Analysis.rmd”, which is currently saved in our _R_ project sub-folder "scripts". To upload, we can use the following code:
```{r, eval = F}
osf_upload(scripts, "scripts/Analysis.rmd")
```
To see if the file has been uploaded successfully, we can access the contents of the component using the `osf_ls_files` function.
```{r, eval=F}
osf_ls_files(scripts)
```
```
## # A tibble: 2 x 3
## name id meta
## <chr> <chr> <list>
## 1 Analysis.rmd 1db74s7bfcf91f0012567572l <named list [3]>
```
We see in the output that the upload was successful. To download a file, we can select a row from the `osf_ls_files` function output, and use it in the `osf_download` function to download the file back into the project folder on our computer.
```{r, eval = F}
osf_download(osf_ls_files(scripts)[1,])
```
<br></br>
### Collaboration, Open Access & Pre-Registration {#pre-registration}
---
\index{Preregistration}
On the OSF repository website, we can also add collaborators under the **Contributors** field.
```{r, message = F, out.width = '65%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/osf4_col.png')
```
At any time, it is possible to make the repository **public** by clicking on the **Make Public** button in the upper right corner of the website.
```{r, message = F, out.width = '40%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/osf5.png')
```
In Chapter \@ref(analysis-plan), we discussed that analysis plans and pre-registration are essential parts of a high-quality meta-analysis. The OSF makes it very convenient to also create an openly accessible pre-registration for our project. We simply have to click on the **Registrations** button on top, and then create a **New registration**. This leads us to the **OSF Registries** website, where we can provide detailed information on our planned study, including our analysis plan.
\vspace{2mm}
```{r, message = F, out.width = '57%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/osf6_col.png')
```
After specifying all the required details, the study can be registered. This creates a register entry that can be accessed through a unique ID (e.g. **osf.io/q2jp7**). After the registration is completed, it is not possible to change the stated search plan, hypotheses and/or analysis strategy anymore.
$$\tag*{$\blacksquare$}$$
| {
"repo_name": "MathiasHarrer/Doing-Meta-Analysis-in-R",
"stars": "272",
"repo_language": "R",
"file_name": "pcurve.bw.es.R",
"mime_type": "text/plain"
} |
#!/bin/sh
set -e

# Deploy the rendered book to the gh-pages branch (run on Travis CI).
# Skip deployment if no GitHub token is available or we are not on master.
[ -z "${GITHUB_PAT}" ] && exit 0
[ "${TRAVIS_BRANCH}" != "master" ] && exit 0

# Identify the committer for the automated commit.
git config --global user.email "[email protected]"
git config --global user.name "Yihui Xie"

# Clone the gh-pages branch, copy the freshly built book into it, and push.
git clone -b gh-pages https://${GITHUB_PAT}@github.com/${TRAVIS_REPO_SLUG}.git book-output
cd book-output
cp -r ../_book/* ./
git add --all *
git commit -m "Update the book" || true
git push -q origin gh-pages
| {
"repo_name": "MathiasHarrer/Doing-Meta-Analysis-in-R",
"stars": "272",
"repo_language": "R",
"file_name": "pcurve.bw.es.R",
"mime_type": "text/plain"
} |
Version: 1.0
RestoreWorkspace: Default
SaveWorkspace: Default
AlwaysSaveHistory: Default
EnableCodeIndexing: Yes
UseSpacesForTab: Yes
NumSpacesForTab: 2
Encoding: UTF-8
RnwWeave: knitr
LaTeX: pdfLaTeX
AutoAppendNewline: Yes
StripTrailingWhitespace: Yes
BuildType: Website
| {
"repo_name": "MathiasHarrer/Doing-Meta-Analysis-in-R",
"stars": "272",
"repo_language": "R",
"file_name": "pcurve.bw.es.R",
"mime_type": "text/plain"
} |
Version: 1.0
RestoreWorkspace: Default
SaveWorkspace: Default
AlwaysSaveHistory: Default
EnableCodeIndexing: Yes
UseSpacesForTab: Yes
NumSpacesForTab: 2
Encoding: UTF-8
RnwWeave: Sweave
LaTeX: pdfLaTeX
BuildType: Website
| {
"repo_name": "MathiasHarrer/Doing-Meta-Analysis-in-R",
"stars": "272",
"repo_language": "R",
"file_name": "pcurve.bw.es.R",
"mime_type": "text/plain"
} |
@import url('https://fonts.googleapis.com/css?family=Lato:700');
h1, h2, h3, h4, h5, h6, .book-summary {
font-family: 'Lato', sans-serif;
color: #277DB0;
}
#header .subtitle em {
font-style: normal;
font-size: 1em;
color: #777;
margin-bottom: -40px;
}
#header .title {
font-size: 3em;
margin-bottom: -40px;
}
#header .author em {
font-size: 1em;
font-style: normal;
}
#header .date em {
font-size: 0.9em;
}
p.caption {
color: #777;
margin-top: 10px;
}
p code {
white-space: inherit;
}
pre {
word-break: normal;
word-wrap: normal;
}
pre code {
white-space: inherit;
}
.rmdcomment {
padding: 1em 1em 1em 4em;
margin-bottom: 10px;
background: #f5f5f5;
position:relative;
}
.rmdcomment:before {
content: "\f075";
font-family: FontAwesome;
left:10px;
position:absolute;
font-size: 45px;
}
.rmdachtung {
padding: 1em 1em 1em 4em;
margin-bottom: 10px;
background: #f5f5f5;
position:relative;
}
.rmdachtung:before {
content: "\f06a";
font-family: FontAwesome;
left:10px;
position:absolute;
font-size: 45px;
}
.rmdinfo {
padding: 1em 1em 1em 4em;
margin-bottom: 10px;
background: #f5f5f5;
position:relative;
}
.rmdinfo:before {
content: "\f129";
font-family: FontAwesome;
left:10px;
position:absolute;
font-size: 45px;
}
blockquote {
position: relative;
}
blockquote:hover > .copy-to-clipboard-button {
visibility: visible;
}
| {
"repo_name": "MathiasHarrer/Doing-Meta-Analysis-in-R",
"stars": "272",
"repo_language": "R",
"file_name": "pcurve.bw.es.R",
"mime_type": "text/plain"
} |
# (PART) Advanced Methods {-}
# "Multilevel" Meta-Analysis {#multilevel-ma}
---
<img src="_figs/multilevel_felder.jpg" />
<br></br>
\index{Multilevel Meta-Analysis}
<span class="firstcharacter">W</span>
elcome to the advanced methods section. In the previous part of the guide, we took a deep dive into topics that we consider highly relevant for almost every meta-analysis. With this background, we can now proceed to somewhat more advanced techniques.
We consider the following methods "advanced" because their mathematical underpinnings are more involved, or because of their implementation in _R_. However, if you have worked yourself through the previous chapters of the guide, you should be more than well equipped to understand and implement the contents that are about to follow. Many of the following topics merit books of their own, and what we cover here should only be considered as a brief introduction. Where useful, we will therefore also provide literature for further reading.
This first chapter deals with the topic of "multilevel" meta-analyses. You probably wonder why we put the word "multilevel" into quotation marks. Describing a study as a "multilevel" meta-analysis insinuates that this is something special or extraordinary compared to "standard" meta-analyses.
Yet, that is not true. Every meta-analytic model presupposes a multilevel structure of our data to pool results [@pastor2018multilevel]. In the chapters before, we have already fitted a multilevel (meta-analysis) model several times--without even knowing.
When people talk about multilevel meta-analysis, what they think of are **three-level meta-analysis models**. Such models are indeed somewhat different to the fixed-effect and random-effects model we already know. In this chapter, we will therefore first describe why meta-analysis naturally implies a multilevel structure of our data, and how we can extend a conventional meta-analysis to a three-level model. As always, we will also have a look at how such models can be fitted in _R_ using a hands-on example.
<br></br>
## The Multilevel Nature of Meta-Analysis {#multilevel-nature}
---
To see why meta-analysis has multiple levels by default, let us go back to the formula of the random-effects model that we discussed in Chapter \@ref(rem):
\begin{equation}
\hat\theta_k = \mu + \epsilon_k + \zeta_k
(\#eq:mlm1)
\end{equation}
\index{Sampling Error}
We discussed that the terms $\epsilon_k$ and $\zeta_k$ are introduced in a random-effects model because we assume that there are two sources of variability. The first one is caused by the sampling error ($\epsilon_k$) of individual studies, which leads effect size estimates to deviate from the true effect size $\theta_k$.
The second one, $\zeta_k$, represents the between-study heterogeneity. This heterogeneity is caused by the fact that the true effect size of some study $k$ is again only part of an overarching **distribution of true effect sizes**. This distribution is from where the individual true effect size $\theta_k$ was drawn. Therefore, our aim in the random-effects model is to estimate the mean of the distribution of true effect sizes, denoted with $\mu$.
The two error terms $\epsilon_k$ and $\zeta_k$ correspond with the two levels in our meta-analysis data: the "participant" level (level 1) and the "study" level (level 2). Figure \@ref(fig:multilevel1) below symbolizes this structure.
\vspace{2mm}
```{r multilevel1, message = F, out.width = '100%', echo = F, fig.align='center', fig.cap="Multilevel structure of the conventional random-effects model."}
library(OpenImageR)
knitr::include_graphics('images/multilevel-model_col_sep.png')
```
\vspace{2mm}
At the lowest level (level 1) we have the participants (or patients, specimens, etc., depending on the research field). These participants are part of larger units: the studies included in our meta-analysis. This overlying layer of studies constitutes our second level.
When we conduct a meta-analysis, data on level 1 usually already reaches us in a "pooled" form (e.g. the authors of the paper provide us with the mean and standard deviation of their studied sample instead of the raw data). Pooling on level 2, the study level, however, has to be performed as part of the meta-analysis. Traditionally, this type of data is called **nested**: one can say that participants are "nested" within studies.
\index{Random-Effects Model}
Let us go back to the random-effects model formula in equation \@ref(eq:mlm1). Implicitly, this formula already describes the multilevel structure of our meta-analysis data. To make this more obvious, we have to split the equation into two formulas, where each corresponds to one of the two levels. If we do this, we get the following result:
\vspace{4mm}
**Level 1 (participants) model:**
\begin{equation}
\hat\theta_k = \theta_k + \epsilon_k
(\#eq:mlm2)
\end{equation}
\vspace{2mm}
**Level 2 (studies) model:**
\begin{equation}
\theta_k = \mu + \zeta_k
(\#eq:mlm3)
\end{equation}
\vspace{2mm}
You might have already detected that we can substitute $\theta_k$ in the first equation with its definition in the second equation. What we then obtain is a formula exactly identical to the one of the random-effects model from before. The fixed-effects model can also be written in this way--we only have to set $\zeta_k$ to zero. Evidently, our plain old meta-analysis model already has multilevel properties "built in". It exhibits this property because we assume that participants are nested within studies in our data.
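Spelled out, this substitution step looks like this:

\begin{equation*}
\hat\theta_k = \underbrace{\mu + \zeta_k}_{\theta_k} + \epsilon_k = \mu + \epsilon_k + \zeta_k.
\end{equation*}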
This makes it clear that meta-analysis naturally possesses a multilevel structure. It is possible to expand this structure even further in order to better capture certain mechanisms that generated our data. This is where **three-level models** [@cheung2014modeling; @assink2016fitting] come into play.
\index{Unit-of-Analysis Problem}
Statistical independence is one of the core assumptions when we pool effect sizes in a meta-analysis. If there is a dependency between effect sizes (i.e. effect sizes are correlated), this can artificially reduce heterogeneity and thus lead to false-positive results. This issue is known as the **unit-of-analysis error**, which we already covered before (see Chapter \@ref(unit-of-analysis)). Effect size dependence can stem from different sources [@cheung2014modeling]:
* **Dependence introduced by the authors of the individual studies**. For example, scientists conducting the study may have collected data from multiple sites, compared multiple interventions to one single control group, or used different questionnaires to measure the same outcome. In all of these scenarios, we can assume that some kind of dependency is introduced within the reported data.
* **Dependence introduced by the meta-analyst herself**. As an example, think of a meta-analysis that focuses on some psychological mechanism. This meta-analysis includes studies which were conducted in different cultural regions of the world (e.g. East Asian and Western European societies). Depending on the type of psychological mechanism, it could be that results of studies conducted in the same cultural region are more similar compared to those conducted in a different culture.
We can take such dependencies into account by integrating a third layer into the structure of our meta-analysis model. For example, one could model that effect sizes based on different questionnaires are nested within studies. Or one could create a model in which studies are nested within cultural regions. This creates a three-level meta-analysis model, as illustrated by the next figure.
\vspace{2mm}
```{r multilevel2, message = F, out.width = '100%', echo = F, fig.align='center'}
library(OpenImageR)
knitr::include_graphics('images/multilevel-model2_col_sep.png')
```
\vspace{2mm}
\index{Cluster Effect}
We see that a three-level model contains three pooling steps. First, researchers themselves "pool" the results of individual participants in their primary studies, and report the aggregated effect size. Then, on level 2, these effect sizes are nested within several **clusters**, denoted by $\kappa$. These clusters can either be individual studies (i.e. many effect sizes are nested in one study), or subgroups of studies (i.e. many studies are nested in one subgroup, where each study contributes only one effect size).
Lastly, pooling the aggregated cluster effects leads to the overall true effect size $\mu$. Conceptually, this average effect is very close to the pooled true effect $\mu$ in a fixed- or random-effects model. The difference, however, is that it is based on a model in which we explicitly account for dependent effect sizes in our data.
It is possible to write down the formula of the three-level model using the same level notation we used before. The greatest distinction is that now, we need to define three formulas instead of two:
\vspace{4mm}
**Level 1 model:**
\begin{equation}
\hat\theta_{ij} = \theta_{ij} + \epsilon_{ij}
(\#eq:mlm4)
\end{equation}
\vspace{2mm}
**Level 2 model:**
\begin{equation}
\theta_{ij} = \kappa_{j} + \zeta_{(2)ij}
(\#eq:mlm5)
\end{equation}
\vspace{2mm}
**Level 3 model:**
\begin{equation}
\kappa_{j} = \mu + \zeta_{(3)j}
(\#eq:mlm6)
\end{equation}
\vspace{2mm}
Where $\hat\theta_{ij}$ is an estimate of the true effect size $\theta_{ij}$. The term $ij$ can be read as "some effect size $i$ nested in cluster $j$". Parameter $\kappa_{j}$ is the average effect size in cluster $j$, and $\mu$ the overall average population effect. Like before, we can piece these formulas together and thus reduce the formula to one line:
\begin{equation}
\hat\theta_{ij} = \mu + \zeta_{(2)ij} + \zeta_{(3)j} + \epsilon_{ij}
(\#eq:mlm7)
\end{equation}
We see that, in contrast to the random-effects model, this formula now contains **two** heterogeneity terms. One is $\zeta_{(2)ij}$, which stands for the **within-cluster** heterogeneity on level 2 (i.e. the **true** effect sizes within cluster $j$ follow a distribution with mean $\kappa_j$). The other is $\zeta_{(3)j}$, the **between-cluster** heterogeneity on level 3. Consequentially, fitting a three-level meta-analysis model does not only involve the estimation of one heterogeneity variance parameter $\tau^2$. We have to estimate two $\tau^2$ values: one for level 2, and the other for level 3.
\index{meta Package}
\index{metafor Package}
The **{metafor}** package is particularly well suited for fitting meta-analytic three-level models. It uses (restricted) maximum likelihood procedures to do so. Previously, we primarily used functions of the **{meta}** package to run meta-analyses. We did this because this package is a little less technical, and thus better suited for beginners. Yet, the **{metafor}** package, as we have seen in Chapter \@ref(multiple-metareg-R), is also fairly easy to use once the data is prepared correctly. How exactly one can use **{metafor}** to fit three-level models in _R_ will be the topic of the next section^[Please note that the latest versions of **{meta}** now also allow us to fit three-level meta-analytic models. In all meta-analytic pooling functions that we covered in Chapter \@ref(pooling-es-r), there is now an argument called `cluster`. This argument lets us specify the name of a variable in our data set which contains the (level 3) cluster that each effect size belongs to. If the `cluster` argument is specified, a hierarchical three-level model is fitted automatically. For example, we could turn our meta-analysis from Chapter \@ref(pre-calculated-es) into a three-level model by running `metagen(TE, seTE, cluster = InterventionType, data = ThirdWave)`. Nevertheless, it makes a lot of sense to learn how to fit three-level models using **{metafor}**: first because **{meta}** also uses **{metafor}** in the background to fit these types of models, and secondly because the `rma.mv` function we are covering in this chapter is very versatile. It can be used for much more than just "simple" hierarchical three-level models, as we will see in Chapter \@ref(rve).].
<br></br>
## Fitting Three-Level Meta-Analysis Models in _R_ {#multilevel-R}
---
As mentioned before, we need the **{metafor}** package to fit three-level meta-analysis models. Therefore, we need to load it from our library first.
```{r, message=F, warning=F}
library(metafor)
```
In our hands-on example, we will use the `Chernobyl` data set. This data set is loosely based on a real meta-analysis which examined the correlation between ionizing radiation ("nuclear fallout") and mutation rates in humans, caused by the devastating [1986 Chernobyl reactor disaster](https://www.britannica.com/event/Chernobyl-disaster) [@moller2015strong].
\index{dmetar Package}
```{block, type='boxdmetar'}
**The "Chernobyl" Data Set**
\vspace{2mm}
The `Chernobyl` data set is part of the **{dmetar}** package. If you have installed **{dmetar}**, and loaded it from your library, running `data(Chernobyl)` automatically saves the data set in your _R_ environment. The data set is then ready to be used.
If you do not have **{dmetar}** installed, you can download the data set as an _.rda_ file from the [Internet](https://www.protectlab.org/meta-analysis-in-r/data/Chernobyl.rda), save it in your working directory, and then click on it in your R Studio window to import it.
```
```{r, message=F, warning=F}
# Load data set from 'dmetar'
library(dmetar)
data("Chernobyl")
```
To see the general structure of the data, we can use the `head` function. This prints the first six rows of the data frame that we just loaded into our global environment.
```{r, message=F, warning=F, eval=F}
head(Chernobyl)
```
```
## author cor n z se.z var.z radiation es.id
## 1 Aghajanyan & Suskov (2009) 0.20 91 0.20 0.10 0.01 low id_1
## 2 Aghajanyan & Suskov (2009) 0.26 91 0.27 0.10 0.01 low id_2
## 3 Aghajanyan & Suskov (2009) 0.20 92 0.20 0.10 0.01 low id_3
## 4 Aghajanyan & Suskov (2009) 0.26 92 0.27 0.10 0.01 low id_4
## 5 Alexanin et al. (2010) 0.93 559 1.67 0.04 0.00 low id_5
## 6 Alexanin et al. (2010) 0.44 559 0.47 0.04 0.00 low id_6
```
\index{Fisher's \textit{z}}
The data set contains eight columns. The first one, `author`, displays the name of the study. The `cor` column shows the (un-transformed) correlation between radiation exposure and mutation rates, while `n` stands for the sample size. The columns `z`, `se.z`, and `var.z` are the Fisher-$z$ transformed correlations (Chapter \@ref(pearson-cors)), as well as their standard error and variance. The `radiation` column serves as a moderator, dividing effect sizes into subgroups with low, medium, and high overall radiation exposure. The `es.id` column simply contains a unique ID for each effect size (i.e. each row in our data frame).
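As a quick sanity check, we can reproduce the Fisher-$z$ columns from the raw correlations and sample sizes ourselves. This is only a sketch; the pre-calculated columns in the data set are what we will actually use:

```{r, eval=F}
# Sketch: reproduce the Fisher-z columns from 'cor' and 'n'
z.check  <- atanh(Chernobyl$cor)        # Fisher-z transformation
se.check <- 1 / sqrt(Chernobyl$n - 3)   # standard error of z
head(cbind(Chernobyl[, c("z", "se.z")], z.check, se.check))
```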
A peculiar thing about this data set is that it contains repeated entries in `author`. This is because most studies in this meta-analysis contributed more than one observed effect size. Some studies used several methods to measure mutations or several types of index persons (e.g. exposed parents versus their offspring), all of which leads to multiple effects per study.
Looking at this structure, it is quite obvious that effect sizes in our data set are not independent. They follow a nested structure, where various effect sizes are nested in one study. Thus, it might be a good idea to fit a three-level meta-analysis in order to adequately model these dependencies in our data.
<br></br>
### Model Fitting
---
A three-level meta-analysis model can be fitted using the `rma.mv` function in **{metafor}**. Here is a list of the most important arguments for this function, and how they should be specified:
* **`yi`**. The name of the column in our data set which contains the calculated effect sizes. In our example, this is `z`, since Fisher-$z$ transformed correlations have better mathematical properties than "untransformed" correlations.
* **`V`**. The name of the column in our data set which contains the **variance** of the calculated effect sizes. In our case, this is `var.z`. It is also possible to use the **squared** standard error of the effect size, since $SE_k^2 = v_k$.
* **`slab`**. The name of the column in our data set which contains the study labels, similar to `studlab` in **{meta}**.
* **`data`**. The name of the data set.
* **`test`**. The test we want to apply for our regression coefficients. We can choose from `"z"` (default) and `"t"` (recommended; uses a test similar to the Knapp-Hartung method).
* **`method`**. The method used to estimate the model parameters. Both `"REML"` (recommended; restricted maximum-likelihood) and `"ML"` (maximum likelihood) are possible. Please note that other types of between-study heterogeneity estimators (e.g. Paule-Mandel) are not applicable here.
The most important argument, however, is **`random`**. Arguably, it is also the trickiest one. In this argument, we specify a formula which defines the (nested) random effects. For a three-level model, the formula always starts with `~ 1`, followed by a vertical bar `|`. Behind the vertical bar, we assign a **random effect** to a grouping variable (such as studies, measures, regions, etc.). This grouping variable is often called a **random intercept** because it tells our model to assume different effects (i.e. intercepts) for each group.
In a three-level model, there are two grouping variables: one on level 2, and another on level 3. We assume that these grouping variables are nested: several effects on level 2 together make up a larger cluster on level 3.
There is a special way through which we can tell `rma.mv` to assume such nested random effects. We do this using a slash (`/`) to separate the higher- and lower-level grouping variable. To the left of `/`, we put in the level 3 (cluster) variable. To the right, we insert the lower-order variable nested in the larger cluster. Therefore, the general structure of the formula looks like this: `~ 1 | cluster/effects_within_cluster`.
In our example, we assume that individual effect sizes (level 2; defined by `es.id`) are nested within studies (level 3; defined by `author`). This results in the following formula: `~ 1 | author/es.id`. The complete `rma.mv` function call looks like this:
\vspace{2mm}
```{r}
full.model <- rma.mv(yi = z,
V = var.z,
slab = author,
data = Chernobyl,
random = ~ 1 | author/es.id,
test = "t",
method = "REML")
```
We gave the output the name `full.model`. To print an overview of the results, we can use the `summary` function.
```{r, eval=F}
summary(full.model)
```
```
## Multivariate Meta-Analysis Model (k = 33; method: REML)
## [...]
## Variance Components:
##
## estim sqrt nlvls fixed factor
## sigma^2.1 0.1788 0.4229 14 no author
## sigma^2.2 0.1194 0.3455 33 no author/es.id
##
## Test for Heterogeneity:
## Q(df = 32) = 4195.8268, p-val < .0001
##
## Model Results:
##
## estimate se tval pval ci.lb ci.ub
## 0.5231 0.1341 3.9008 0.0005 0.2500 0.7963 ***
## [...]
```
First, have a look at the `Variance Components`. Here, we see the random-effects variances calculated for each level of our model. The first one, `sigma^2.1`, shows the level 3 **between-cluster** variance. In our example, this is equivalent to the between-study heterogeneity variance $\tau^2$ in a conventional meta-analysis (since clusters represent studies in our model).
The second variance component `sigma^2.2` shows the variance **within** clusters (level 2). In the `nlvls` column, we see the number of groups on each level. Level 3 has 14 groups, equal to the $K=$ 14 included studies. Together, these 14 studies contain 33 effect sizes, as shown in the second row.
\index{esc Package}
Under `Model Results`, we see the estimate of our pooled effect, which is $z=$ 0.52 (95%CI: 0.25--0.80). To facilitate the interpretation, it is advisable to transform the effect back to a normal correlation. This can be done using the `convert_z2r` function in the **{esc}** package:
```{r}
library(esc)
convert_z2r(0.52)
```
We see that this leads to a correlation of approximately $r \approx$ 0.48. This can be considered large. There seems to be a substantial association between mutation rates and exposure to radiation from Chernobyl.
The `Test for Heterogeneity` in the output points at true effect size differences in our data ($p<$ 0.001). This result, however, is not very informative. We are more interested in the precise amount of heterogeneity variance captured by each level in our model. It would be good to know how much of the heterogeneity is due to differences **within** studies (level 2), and how much is caused by **between**-study differences (level 3).
<br></br>
### Distribution of Variance Across Levels
---
\index{I$^2$, Higgins \& Thompson's}
We can answer this question by calculating a multilevel version of $I^2$ [@cheung2014modeling]. In conventional meta-analyses, $I^2$ represents the amount of variation not attributable to sampling error (see Chapter \@ref(i-squared); i.e. the between-study heterogeneity). In three-level models, this heterogeneity variance is split into two parts: one attributable to true effect size differences **within** clusters, and the other to **between**-cluster variation. Thus, there are two $I^2$ values, quantifying the percentage of total variation associated with either level 2 or level 3.
\index{dmetar Package}
```{block, type='boxdmetar'}
**The "var.comp" Function**
\vspace{4mm}
The `var.comp` function in **{dmetar}** can be used to calculate multilevel $I^2$ values. Once **{dmetar}** is installed and loaded on your computer, the function is ready to be used. If you did **not** install **{dmetar}**, follow these instructions:
\vspace{2mm}
1. Access the source code of the function [online](https://raw.githubusercontent.com/MathiasHarrer/dmetar/master/R/mlm.variance.distribution.R).
2. Let _R_ "learn" the function by copying and pasting the source code in its entirety into the console (bottom left pane of R Studio), and then hit "Enter".
3. Make sure that the **{ggplot2}** package is installed and loaded.
```
The `var.comp` function only needs a fitted `rma.mv` model as input. We save the output in `i2` and then use the `summary` function to print the results.
```{r}
i2 <- var.comp(full.model)
summary(i2)
```
In the output, we see the percentage of total variance attributable to each of the three levels. The sampling error variance on level 1 is very small, making up only roughly 1%. The value of $I^2_{\text{Level 2}}$, the amount of heterogeneity variance within clusters, is much higher, totaling roughly 40%. The largest share, however, falls to level 3. Between-cluster (here: between-study) heterogeneity makes up $I^2_{\text{Level 3}}=$ 59% of the total variation in our data.
Overall, this indicates that there is substantial between-study heterogeneity on the third level. Yet, we also see that a large proportion of the total variance, more than one third, can be explained by differences **within studies**.
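To retrace roughly where these percentages come from, here is a by-hand sketch of the calculation. It uses the simple mean of the sampling variances as a stand-in for the "typical" within-study variance, so the values can deviate somewhat from the `var.comp` output, which relies on a more refined estimate:

```{r, eval=F}
# Rough sketch: multilevel I^2 by hand
sigma2.l3 <- full.model$sigma2[1]    # level 3 (between-study) variance
sigma2.l2 <- full.model$sigma2[2]    # level 2 (within-study) variance
v.bar <- mean(Chernobyl$var.z)       # crude "typical" sampling variance
total <- sigma2.l3 + sigma2.l2 + v.bar
c(I2.level2 = sigma2.l2/total,
  I2.level3 = sigma2.l3/total)
```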
It is also possible to visualize this distribution of the total variance. We only have to plug the `var.comp` output into the `plot` function.
```{r, warning=F, message=F, fig.width=5, fig.height=5, out.width="55%", fig.align='center'}
plot(i2)
```
<br></br>
### Comparing Models
---
\index{Occam's Razor}
Fitting a three-level model only makes sense when it represents the variability in our data better than a two-level model. When we find that a two-level model provides a fit comparable to a three-level model, **Occam's razor** should be applied: we favor the two-level model over the three-level model, since it is less complex, but explains our data just as well.
Fortunately, the **{metafor}** package makes it possible to compare our three-level model to one in which a level is removed. To do this, we use the `rma.mv` function again; but this time, set the variance component of one level to zero. This can be done by specifying the `sigma2` parameter. We have to provide a vector with the generic form `c(level 3, level 2)`. In this vector, we fill in `0` when a variance component should be set to zero, while using `NA` to indicate that a parameter should be estimated from the data.
In our example, it makes sense to check if nesting individual effect sizes in studies has improved our model. Thus, we fit a model in which the level 3 variance, representing the between-study heterogeneity, is set to zero. This is equal to fitting a simple random-effects model in which we assume that all effect sizes are independent (which we know they are not). Since level 3 is held constant at zero, the input for `sigma2` is `c(0, NA)`. This results in the following call to `rma.mv`, the output of which we save under the name `l3.removed`.
\vspace{2mm}
```{r, echo=F}
l3.removed <- rma.mv(yi = z,
V = var.z,
slab = author,
data = Chernobyl,
random = ~ 1 | author/es.id,
test = "t",
method = "REML",
sigma2 = c(0, NA))
```
```{r,eval=F}
l3.removed <- rma.mv(yi = z,
V = var.z,
slab = author,
data = Chernobyl,
random = ~ 1 | author/es.id,
test = "t",
method = "REML",
sigma2 = c(0, NA))
summary(l3.removed)
```
```
## [...]
## Variance Components:
##
## estim sqrt nlvls fixed factor
## sigma^2.1 0.0000 0.0000 14 yes author
## sigma^2.2 0.3550 0.5959 33 no author/es.id
##
## Test for Heterogeneity:
## Q(df = 32) = 4195.8268, p-val < .0001
##
## Model Results:
##
## estimate se tval pval ci.lb ci.ub
## 0.5985 0.1051 5.6938 <.0001 0.3844 0.8126 ***
## [...]
```
\index{Analysis of Variance}
In the output, we see that `sigma^2.1` has been set to zero--just as intended. The overall effect has also changed. But is this result better than the one of the three-level model? To assess this, we can use the `anova` function to compare both models.
```{r, eval=F}
anova(full.model, l3.removed)
```
```
## df AIC BIC AICc logLik LRT pval QE
## Full 3 48.24 52.64 49.10 -21.12 4195.82
## Reduced 2 62.34 65.27 62.76 -29.17 16.10 <.0001 4195.82
```
We see that the `Full` (three-level) model, compared to the `Reduced` one with two levels, does indeed show a better fit. The Akaike information criterion (AIC) and the Bayesian information criterion (BIC) are lower for this model, which indicates favorable performance. The likelihood ratio test (`LRT`) comparing both models is significant ($\chi^2_1=$ 16.1, $p<$ 0.001), and thus points in the same direction.
We can say that, although the three-level model introduces one additional parameter (i.e. it has 3 degrees of freedom instead of 2), this added complexity seems to be justified. Modeling of the nested data structure was probably a good idea, and has improved our estimate of the pooled effect.
However, please note that there are often good reasons to stick with a three-level structure--even when it does **not** provide a significantly better fit. In particular, it makes sense to keep a three-level model when we think that it is based on a solid theoretical rationale.
When our data contains studies with multiple effect sizes, for example, we **know** that these effects cannot be independent. It thus makes sense to keep the nested model, since it more adequately represents how the data were "generated". If the results of `anova` in our example had favored a two-level solution, we would have concluded that effects within studies were **largely** homogeneous. But we likely would have reported results of the three-level model anyway. This is because we know that a three-level model represents the data-generating process better.
The situation is somewhat different when the importance of the cluster variable is unclear. Imagine, for example, that clusters on level 3 represent different cultural regions in a three-level model. When we find that the phenomenon under study shows no variation between cultures, it is perfectly fine to drop the third level and use a two-level model instead.
<br></br>
## Subgroup Analyses in Three-Level Models {#three-level-subgroup}
---
\index{Subgroup Analysis}
\index{Moderator Analysis}
Once our three-level model is set, it is also possible to assess putative moderators of the overall effect. Previously in this guide, we discovered that subgroup analyses can be expressed as a meta-regression model with a dummy-coded predictor (Chapter \@ref(the-metareg-model)). In a similar vein, we can add regression terms to a "multilevel" model, which leads to a **three-level mixed-effects model**:
\begin{equation}
\hat\theta_{ij} = \theta + \beta x_i + \zeta_{(2)ij} + \zeta_{(3)j} + \epsilon_{ij}
(\#eq:mlm8)
\end{equation}
Where $\theta$ is the intercept and $\beta$ the regression weight of a predictor variable $x$. When we replace $x_i$ with a dummy (Chapter \@ref(the-metareg-model)), we get a model that can be used for subgroup analyses. When $x$ is continuous, the formula above represents a three-level meta-regression model.
Categorical or continuous predictors can be specified in `rma.mv` using the `mods` argument. The argument requires a formula, starting with a tilde (`~`), and then the name of the predictor. Multiple meta-regression is also possible by providing more than one predictor (e.g. `~ var1 + var2`).
In our `Chernobyl` example, we want to check if correlations differ depending on the overall amount of radiation in the studied sample (low, medium, or high). This information is provided in the `radiation` column in our data set. We can fit a three-level moderator model using this code:
\vspace{2mm}
```{r, message=F, warning=F, eval=F}
mod.model <- rma.mv(yi = z, V = var.z,
slab = author, data = Chernobyl,
random = ~ 1 | author/es.id,
test = "t", method = "REML",
mods = ~ radiation)
summary(mod.model)
```
```
## [...]
## Test of Moderators (coefficients 2:3):
## F(df1 = 2, df2 = 28) = 0.4512, p-val = 0.6414
##
## Model Results:
## estimate se tval pval ci.lb ci.ub
## intrcpt 0.58 0.36 1.63 0.11 -0.14 1.32
## radiationlow -0.19 0.40 -0.48 0.63 -1.03 0.63
## radiationmedium 0.20 0.54 0.37 0.70 -0.90 1.31
## [...]
```
The first important output is the `Test of Moderators`. We see that $F_{2, 28}=$ 0.45, with $p=$ 0.64. This means that there is no significant difference between the subgroups.
The `Model Results` are printed within a meta-regression framework. This means that we cannot directly extract the estimates in order to obtain the pooled effect sizes within subgroups.
The first value, the intercept (`intrcpt`), shows the $z$ value when the overall radiation exposure was high ($z=$ 0.58). The effect in the low and medium group can be obtained by adding their `estimate` to the one of the intercept. Thus, the effect in the low radiation group is $z$ = 0.58 - 0.19 = 0.39, and the one in the medium exposure group is $z$ = 0.58 + 0.20 = 0.78.
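If we are mainly interested in the pooled effect within each subgroup, a convenient alternative is to refit the moderator model without an intercept, so that each radiation level receives its own coefficient. This is only a sketch of that approach:

```{r, eval=F}
# Sketch: remove the intercept so that each level of 'radiation'
# gets its own pooled effect estimate
mod.model.sg <- rma.mv(yi = z, V = var.z,
                       slab = author, data = Chernobyl,
                       random = ~ 1 | author/es.id,
                       test = "t", method = "REML",
                       mods = ~ 0 + radiation)
summary(mod.model.sg)
```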
```{block2, type='boxreport'}
**Reporting the Results of Three-Level (Moderator) Models**
\vspace{2mm}
When we report the results of a three-level model, we should at least mention the estimated variance components alongside the pooled effect. The `rma.mv` function denotes the random-effects variance on level 3 and 2 with $\sigma^2_1$ and $\sigma^2_2$, respectively.
When we report the estimated variance, however, using $\tau^2_{\text{Level 3}}$ and $\tau^2_{\text{Level 2}}$ may be preferable since this makes it clear that we are dealing with variances of **true (study) effects** (i.e. heterogeneity variance). Adding the multilevel $I^2$ values also makes sense, since they are easier for others to interpret--provided we first explain what they represent.
\vspace{2mm}
If you conducted a model comparison using `anova`, you should at least report the results of the likelihood ratio test. Results of moderator analyses can be reported in a table such as the one presented in Chapter \@ref(subgroup-R). Here is one way to report the results in our example:
> _"The pooled correlation based on the three-level meta-analytic model was $r=$ 0.48 (95%CI: 0.25-0.66; $p$ < 0.001). The estimated variance components were $\tau^2_{\text{Level 3}}=$ 0.179 and $\tau^2_{\text{Level 2}}=$ 0.119. This means that $I^2_{\text{Level 3}}=$ 58.22% of the total variation can be attributed to between-cluster, and $I^2_{\text{Level 2}}=$ 31.86% to within-cluster heterogeneity. We found that the three-level model provided a significantly better fit compared to a two-level model with level 3 heterogeneity constrained to zero ($\chi^2_1=$ 16.10; $p$< 0.001)."_
```
<br></br>
## Robust Variance Estimation {#rve}
---
In the last chapters, we introduced three-level meta-analytic models, and how they can be used to model dependencies between effect sizes in our data. The hierarchical model that we fitted before clearly provides a better representation of our data set than a "conventional" meta-analysis, which assumes that all effect sizes are completely independent. But it is still a **simplification of reality**. In practice, there are often forms of dependence between effect sizes that are **more complex** than what is currently captured by our nested model.
We already see this when we go back to our `Chernobyl` data set. In the data, most studies provide more than one effect size, but the **reason** for this **differs between studies**. Some studies compared the effect of radiation in different target populations, and therefore reported more than one effect size. Others used different methods on the same sample, which also means that the study provides more than one effect size.
When several effect sizes in one study are based on the same sample, we expect their sampling errors (the $\epsilon_{ij}$ terms in equation 10.7 and 10.8 in Chapters \@ref(multilevel-nature) and \@ref(three-level-subgroup), respectively) to be **correlated**. This, however, is not yet captured by our three-level model. Our model above assumes that, within clusters/studies, the correlation (and thus the covariance) between sampling errors is zero. Or, to put it differently, it assumes that, **within** one cluster or study, effect size estimates are **independent**.
```{r multilevel3, message = F, out.width = '100%', echo = F, fig.align='center', fig.cap="In its original form, the three-level (hierarchical) model assumes that effect size estimates within studies or clusters are independent."}
library(OpenImageR)
knitr::include_graphics('images/multilevel-model3_col_sep.png')
```
In this section, we will therefore devote some time to an extended three-level architecture, the so-called **Correlated and Hierarchical Effects** (CHE) model [@pustejovsky2021meta]. Like our previous (hierarchical) three-level model, the CHE model allows us to combine several effect sizes into larger clusters, based on certain commonalities (e.g. because they stem from the same study, work group, cultural region, etc.).
But in addition, this model also explicitly takes into account that some effect sizes within clusters are based on the same sample (e.g. because several measurements were taken), and that their sampling errors are therefore correlated. In many real-life scenarios, the CHE model should therefore provide a **good starting point**; especially when the dependence structure in our data is complex, or only partially known [@pustejovsky2021meta]^[Pustejovsky and Tipton [-@pustejovsky2021meta] also provide a decision-tree to determine if and when the CHE model is appropriate (see [Figure 1](https://link.springer.com/content/pdf/10.1007/s11121-021-01246-3.pdf)). You can use this heuristic as a way to check if the CHE model provides the best assumptions for your data, or if another working model is more reasonable.].
Along with the CHE model, we will also discuss **Robust Variance Estimation** (RVE) in meta-analytic contexts [@hedges2010robust; @tipton2015small; @tipton2015small2]. This is a set of methods which has been frequently used to handle dependent effect sizes in meta-analyses in the past. At its core, RVE revolves around the so-called **Sandwich estimator**. This estimator can be used in combination with the CHE model (as well as other meta-analytic models) to obtain robust confidence intervals and $p$-values; even when our selected model does not capture the intricate dependence structure of our data perfectly well.
Thus, before fitting our first CHE model, let us start with an overview of meta-analytic RVE as well as the Sandwich estimator, and explore why the latter bears such an appetizing name.
<br></br>
### The Sandwich-Type Variance Estimator {#sandwich}
---
In published meta-analyses, the term "robust variance estimation" is sometimes used in a peculiar way, which may lead one to believe that this is a specific method **only applicable** for meta-analytic data with dependent effect sizes. The opposite is true. The robust variance estimator has originally been developed as a method for **conventional regression models**, where it is used to calculate the variance of regression weights $\hat\beta$ [see e.g. @aronow2019foundations, chapter 4.2.2].
It is called a "robust" estimator because it provides a **consistent estimate** of the asymptotic standard error(s); even when usual assumptions of linear models are not met^[One of these assumptions is the homogeneity of the residual variance, known as **homoskedasticity**. Homoskedasticity, among other assumptions, is required for "classical" estimators of the coefficient variance to be valid.]. Robust estimates of the coefficient variance in a regression model are crucial. The variance estimate^[Or rather its **square root**, since this represents the **standard error** of the coefficient: $\sqrt{V_{\hat\beta}}={SE}_{\hat\beta}$.] is used to calculate **confidence intervals** around the estimated regression weight, as well as $p$-values, and thus has direct implications on the inferences we draw from a model.
The type of robust variance estimator we cover here is simply a **special version** of the original method used in "normal" regression models. Hedges, Tipton and Jackson [-@hedges2010robust] laid out an adapted type of RVE that can be used for **meta-regression models** with dependent effect sizes, and this approach has been extended in the last few years.
To understand it, we first have to look at the formula of a meta-regression again. Conceptually, this formula is very similar to equation 8.2 presented in Chapter \@ref(the-metareg-model). We simply display it differently using **matrix notation**^[In this chapter, we largely follow the notation of Hedges, Tipton and Jackson [-@hedges2010robust] and its follow-up papers when discussing meta-analytic RVE.]:
\begin{equation}
\boldsymbol{T}_{j}=\boldsymbol{X}_{j}\boldsymbol{\beta} + \boldsymbol{u}_j +\boldsymbol{e}_j
(\#eq:mlm9)
\end{equation}
This formula simply tells us that some effect sizes in $\boldsymbol{T}$ are predicted by regression weights $\beta$ associated with certain covariates in $\boldsymbol{X}$. It also tells us that, besides the sampling error (symbolized by $\boldsymbol{e}_j$), there are random effects for each study (denoted by $\boldsymbol{u}_j$), thus producing a (mixed-effects) meta-regression model.
The special thing are the subscript $j$'s in our formula. They, along with the letters in our formula being **bold**, symbolize that each study or cluster $j$ in our data set provides, or can provide, more than one effect size. Say that $n_j$ is the number of effect sizes in some study $j$. The effect sizes in $j$ can then be written down as the column vector we see in the formula: $\boldsymbol{T}_j = (T_{j,1}, \dots, T_{j,{n_j}})^\top$. Similarly, $\boldsymbol{X}_j$ is the **design matrix** containing the covariate values of some study $j$:
\begin{equation}
\boldsymbol{X}_j =
\begin{bmatrix}
x_{1,1} & \cdots & x_{1,p} \\
\vdots & \ddots & \vdots \\
x_{n_j,1} & \cdots & x_{n_j,p}
\end{bmatrix}
(\#eq:mlm10)
\end{equation}
Where $p-1$ is the total number of covariates^[In a linear regression model, a **design matrix** (or **model matrix**) contains all **covariate values** used to estimate the regression coefficients. In its easiest form, the design matrix can be seen like a **data frame of covariates**, with a **column of 1's** added to the first column. This first column is used to model the regression **intercept**. Imagine that, in our meta-regression, there are three covariates. If the fourth study in our dataset contributes three effect sizes, its design matrix could look like this: $$\boldsymbol{X}_4 = \begin{bmatrix} 1 & 4.5 & 0 & 2 \\ 1 & 7.3 & 1 & 2 \\ 1 & 2.4 & 0 & 2 \end{bmatrix}.$$]. The vector of regression coefficients $\boldsymbol{\beta} = (\beta_1, \dots, \beta_{p})^\top$ we want to estimate contains no subscript $j$, since it is assumed to be fixed across all studies.
Overall, this notation emphasizes that, when studies can contribute more than one effect size, our data looks like several smaller data sets **stacked on top of each other**, where $J$ is the total number of studies or clusters in our data:
\begin{equation}
\begin{bmatrix}
\boldsymbol{T}_1 \\
\boldsymbol{T}_2 \\
\vdots \\
\boldsymbol{T}_J
\end{bmatrix}
=
\begin{bmatrix}
\boldsymbol{X}_1 \\
\boldsymbol{X}_2 \\
\vdots \\
\boldsymbol{X}_J
\end{bmatrix}
\boldsymbol{\beta}
+
\begin{bmatrix}
\boldsymbol{u}_1 \\
\boldsymbol{u}_2 \\
\vdots \\
\boldsymbol{u}_J
\end{bmatrix}
+
\begin{bmatrix}
\boldsymbol{e}_1 \\
\boldsymbol{e}_2 \\
\vdots \\
\boldsymbol{e}_J
\end{bmatrix}.
(\#eq:mlm11)
\end{equation}
Based on this formula, we can estimate the meta-regression coefficients $\boldsymbol{\hat\beta}$. To calculate confidence intervals and conduct significance tests of the coefficients, we need an estimate of their variance $\boldsymbol{V_{\hat\beta}}$. This can be achieved using the robust sampling variance estimator. Its formula looks like this [@hedges2010robust; @pustejovsky2021meta, suppl. S1]:
\begin{equation}
\scriptsize\boldsymbol{V}^{\text{R}}_{\boldsymbol{\hat\beta}} =
\left(\sum^J_{j=1}\boldsymbol{X}_j^\top\boldsymbol{W}_j\boldsymbol{X}_j \right)^{-1}
\left(\sum^J_{j=1}\boldsymbol{X}_j^\top\boldsymbol{W}_j \boldsymbol{A}_j\Phi_j \boldsymbol{A}_j \boldsymbol{W}_j \boldsymbol{X}_j \right)
\left(\sum^J_{j=1}\boldsymbol{X}_j^\top\boldsymbol{W}_j\boldsymbol{X}_j \right)^{-1}
(\#eq:mlm12)
\end{equation}
This equation looks quite complicated, and it is not necessary to understand every detail. What matters for now is the **form** of the estimator and some of its **"ingredients"**.
First, we see that the formula has a tripartite structure. The bracketed components to the left and right are identical, surrounding the part in the middle. This looks like a sandwich, where the outer parts are the "bread" and the inner part is the "meat", and is the reason for the name **"Sandwich estimator"**. The crucial "ingredients" of the formula are the $\boldsymbol{\Phi}_j$, $\boldsymbol{W}_j$ and $\boldsymbol{A}_j$ matrices:
- The first one, $\boldsymbol{\Phi}_j=\text{Var}(\boldsymbol{u}_j +\boldsymbol{e}_j)$, is a variance-covariance matrix with $n_j$ rows and $n_j$ columns. This matrix describes the **true dependence structure** of the effect sizes in some study $j$ [@pustejovsky2021meta, Suppl. S1]. Unfortunately, it is rarely known how and to what extent effect sizes are correlated within a study; and it is even more difficult to know this for **all** studies in our meta-analysis. Therefore, it is necessary to make a few simplifying assumptions in our model^[In the original approach by Hedges, Tipton and Jackson [-@hedges2010robust], for example, $\boldsymbol{\Phi}_j$ is replaced by the **cross-product of the model residuals**, $\boldsymbol{\Phi}_j=\boldsymbol{e}_j{\boldsymbol{e}_j}^\top$, because these are readily available. This is a crude estimate of the true dependence structure, but works as a "best guess" if the number of studies in a meta-analysis is high.]. The CHE model, for example, assumes that there is a **known correlation** $\rho$ between effect sizes in the same study; and that $\rho$ has the same value within and across all studies in our meta-analysis [the "constant sampling correlation" assumption, @pustejovsky2021meta]. A small numerical illustration of this assumption is shown after this list.
- The $\boldsymbol{W}_j$ matrix contains the **weights** of each effect size. In previous chapters (see \@ref(fem) and \@ref(metareg-model-fit)), we already learned that we need to take the precision of effect size estimates into account before we can pool them. The optimal way to do this is to take the inverse of the variance, which would mean that $\boldsymbol{W}_j = \boldsymbol{\Phi}^{-1}_j$. As we mentioned, the true values of $\boldsymbol{\Phi}_j$ are hardly ever known, so an estimate based on our model, $(\boldsymbol{\hat\Phi}_j)^{-1}$, is used^[The original approach by Hedges, Tipton and Jackson [-@hedges2010robust] employs a different method using simplified diagonal weight matrices, which are approximately efficient.].
- The last part, $\boldsymbol{A}_j$, is an **adjustment matrix**, and ensures that the estimator provides valid results even when the number of studies in our meta-analysis is small [say 40 or less, @hedges2010robust; @tipton2015small]. The recommended approach is to use a matrix based on the bias-reduced linearization, or **"CR2" method** [@tipton2015small]^[The CR2 adjustment matrix looks like this: $$\scriptsize \boldsymbol{A}^{\text{CR2}}_j = \boldsymbol{W}_j^{-1/2}\left\{\boldsymbol{W}_j^{-1/2} \left[\boldsymbol{W}_j^{-1}-\boldsymbol{X}_j \left(\sum^J_{j=1}\boldsymbol{X}_j^\top\boldsymbol{W}_j\boldsymbol{X}_j \right)^{-1} \boldsymbol{X}_j^\top \right] \boldsymbol{W}_j^{-1/2}\right\}^{-1/2} \boldsymbol{W}_j^{-1/2}.$$ It entails taking the symmetric square root of the weight matrix $\boldsymbol{W}_j$.].
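To make the "constant sampling correlation" assumption from the first point above more tangible, here is a small toy example (with made-up standard errors) of the covariance block that is assumed for a single study contributing three effect sizes. This is essentially the kind of block that the `impute_covariance_matrix` function, which we use in the next section, constructs for each study:

```{r, eval=F}
# Toy example (hypothetical standard errors): assumed sampling
# covariance block for one study with three effect sizes,
# under a constant correlation of rho = 0.6
se  <- c(0.10, 0.12, 0.15)
rho <- 0.6
R   <- matrix(rho, nrow = 3, ncol = 3)
diag(R) <- 1                          # correlation matrix
Phi <- diag(se) %*% R %*% diag(se)    # covariance matrix
round(Phi, 4)
```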
<br></br>
### Fitting a CHE Model With Robust Variance Estimation {#fit-rve}
---
It is now time to fit our first correlated and hierarchical effects model with _R_, while employing robust variance estimation to guard our model against misspecification. Like before, we can use the `rma.mv` function in **{metafor}** to run our model. This time, we also need some additional functions provided by the **{clubSandwich}** package [@clubSandwich]. Therefore, make sure to install the package, and then load it from your library.
```{r, eval=F}
library(clubSandwich)
```
As mentioned above, the CHE model assumes that effect sizes within studies or clusters are **correlated**; and that this correlation is identical within and across studies.
Thus, we have to **define a correlation coefficient** to be used within our model. For our `Chernobyl` data, let us assume that the correlation is large, so that $\rho$=0.6. This is no more than a guess, and it is highly recommended to run **several sensitivity analyses** for varying values of $\rho$.
```{r, eval=F}
# constant sampling correlation assumption
rho <- 0.6
```
Now, using this correlation, we can calculate an assumed variance-covariance matrix for each of our studies. We do this using the `impute_covariance_matrix` function in **{clubSandwich}**:
- For the `vi` argument, we specify the name of the variable in our data set that contains the *variance* of each effect size (i.e., the squared standard error).
- The `cluster` argument defines the variable associating each effect size with a **study** or **cluster**. In the `Chernobyl` dataset, this is `author`.
- The `r` argument takes the **constant correlation coefficient** between effect sizes that we assume.
```{r, eval=F}
# constant sampling correlation working model
V <- with(Chernobyl,
impute_covariance_matrix(vi = var.z,
cluster = author,
r = rho))
```
Using the prepared variance-covariance matrices in `V`, we can now fit our `rma.mv` model. Let us say that we want to analyze the same meta-regression model as in Chapter \@ref(three-level-subgroup), in which `radiation` was used as a covariate.
The **arguments** look a **little different now**: the first argument is a `formula` object, in which we tell the function that our effect sizes `z` should be predicted by an intercept (`1`) and the `radiation` covariate. The `V` argument takes our list of variance-covariance matrices that we just created; and the `sparse` argument can be set to `TRUE` to speed up our computations.
Only the `random` and `data` arguments stay the same. We save the results under the name `che.model`.
```{r, eval=F}
che.model <- rma.mv(z ~ 1 + radiation,
V = V,
random = ~ 1 | author/es.id,
data = Chernobyl,
sparse = TRUE)
```
To calculate the **confidence intervals** of our meta-regression coefficients, we can use the `conf_int` function in **{clubSandwich}**. We only have to provide the fitted model, and specify the **small-sample adjustment** to be used under `vcov`. As recommended, we use the `"CR2"` adjustment (see Chapter \@ref(sandwich)).
```{r, eval=F}
conf_int(che.model,
vcov = "CR2")
```
```
## Coef. Estimate SE d.f. Lower 95% CI Upper 95% CI
## intrcpt 0.584 0.578 1.00 -6.76 7.93
## radiationlow -0.190 0.605 1.60 -3.52 3.14
## radiationmedium 0.207 0.603 1.98 -2.41 2.83
```
We see that the point estimates under `Estimate` are similar to the ones we obtained in Chapter \@ref(three-level-subgroup). The estimated standard errors and confidence intervals, however, are much larger. It is also possible to **calculate the** $p$**-values** of the regression weights using the `coef_test` function:
```{r, eval=F}
coef_test(che.model,
vcov = "CR2")
```
```
## Coef. Estimate SE t-stat d.f. (Satt) p-val (Satt) Sig.
## intrcpt 0.584 0.578 1.010 1.00 0.497
## radiationlow -0.190 0.605 -0.315 1.60 0.789
## radiationmedium 0.207 0.603 0.344 1.98 0.764
```
We see that none of the coefficients are significant when robust variance estimation is used^[By default, the `conf_int` and `coef_test` functions use the **Satterthwaite-corrected** degrees of freedom [@tipton2015small2]. It is recommended to keep this default setting.].
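Earlier, we stressed that our value of $\rho=0.6$ is no more than an educated guess, and that sensitivity analyses for varying values of $\rho$ are recommended. A minimal sketch of such a sensitivity analysis is shown below; it simply repeats the steps from above for a few plausible correlation values (the object names `rhos` and `sens.res` are ours, everything else follows the code we already used).
```{r, eval=F}
# Sensitivity analysis: refit the CHE model for several plausible
# values of the assumed sampling correlation rho.
rhos <- c(0.2, 0.4, 0.6, 0.8)
sens.res <- lapply(rhos, function(r) {
  V.r <- with(Chernobyl,
              impute_covariance_matrix(vi = var.z,
                                       cluster = author,
                                       r = r))
  m <- rma.mv(z ~ 1 + radiation,
              V = V.r,
              random = ~ 1 | author/es.id,
              data = Chernobyl,
              sparse = TRUE)
  conf_int(m, vcov = "CR2")
})
names(sens.res) <- paste("rho =", rhos)
sens.res
```
If the estimates and confidence intervals barely change across values of $\rho$, this increases our confidence that the conclusions do not hinge on the assumed correlation.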
```{block2, type='boxinfo'}
**Robust Variance Estimation & Model Misspecification**
Some readers may wonder why we make such a big fuss about using robust variance estimation for our model. The main reason is that multivariate and multilevel models can **easily be misspecified**. We already learned that even the CHE model is somewhat of a simplification, since it assumes that correlations are identical within and across studies. Often, it will be somewhat **unclear** whether our model **approximates the complex dependencies** in our data **reasonably well**.
Robust variance estimates are helpful in this respect because they allow us to **guard our inferences** (i.e., the confidence intervals and $p$-values we calculate) against potential **misspecification** of our model.
```
```{block2, type='boxinfo'}
**The {robumeta} package**
In this section, we covered robust variance estimation in combination with the correlated and hierarchical effects model. This model, together with a few other innovations, was proposed by Pustejovsky and Tipton [-@pustejovsky2021meta].
The **"original" RVE approach** by Hedges, Tipton and Jackson [-@hedges2010robust], along with a few small-sample extensions, can be applied using the **{robumeta}** package [@robumeta]. The package allows to fit a meta-regression using the two model types originally proposed by Hedges, Tipton and Jackson: the **hierarchical**, as well as the **correlated effects** model (but not both combined).
```
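For readers who want to try this "original" approach, here is a minimal sketch (not run here) of a correlated effects model fitted with **{robumeta}**; the object name `robu.model` and the value of `rho` are our own choices, while the variables are the ones used throughout this chapter.
```{r, eval=F}
# Sketch: "correlated effects" RVE model with {robumeta}
library(robumeta)
robu.model <- robu(z ~ radiation,          # meta-regression formula
                   data = Chernobyl,
                   studynum = author,      # clustering variable
                   var.eff.size = var.z,   # sampling variances
                   rho = 0.6,              # assumed within-study correlation
                   modelweights = "CORR",  # correlated effects weights
                   small = TRUE)           # small-sample corrections
print(robu.model)
```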
<br></br>
## Cluster Wild Bootstrapping {#cwb}
---
In the last chapter, we learned how to fit a correlated and hierarchical effects model, and how to calculate confidence intervals and coefficient tests using robust variance estimation.
Another, and sometimes preferable, way to test coefficients in our model is to use bootstrapping procedures; a special variant designed for dependent effect sizes is so-called **cluster wild bootstrapping** [@joshi2021clusterwild]. This method is particularly well suited if the total number of studies $J$ in our meta-analysis is **small**, especially compared to RVE, which can lead to overly conservative results in small samples (as we have seen in our own `Chernobyl` example).
This method is also sensible whenever we want to test so-called **multiple-contrast hypotheses**. Multiple-contrast hypotheses are needed, for example, if we want to test the overall effect of a dummy-coded categorical covariate.
```{block2, type='boxinfo'}
**The Cluster Wild Bootstrapping Algorithm**
The wild bootstrap is a method based on the residuals of a null model (i.e. a model fitted without any additional covariates). In cluster wild bootstrapping, residuals are transformed using an adjustment matrix $\boldsymbol{A}_j$, for example based on the CR2 method (see Chapter \@ref(sandwich)), to handle dependent effect sizes. A general algorithm for wild bootstrapping looks like this [@joshi2021clusterwild]:
1. Fit the full model based on the original data, and derive the test statistic of interest (e.g. a $t$ or $F$ value).
2. Fit a null model based on the original data and extract its residuals $\boldsymbol{e}$.
3. For each study or cluster $j$, draw a random value from a distribution^[In **{wildmeta}**, the **R** package we use in this guide, a **Rademacher** distribution is used for this.]; multiply the residuals of $j$ by this random value.
4. Generate new, bootstrapped effect sizes by adding the transformed residuals to the predicted values of the null model based on the original data.
5. Fit the full model again, using the bootstrapped effect size values; calculate the test statistic again.
Steps 3 to 5 are then repeated $R$ times. The bootstrap $p$-value can be derived as the **proportion of times** the bootstrap test statistic was **more extreme** than the one based on the original data. A hand-rolled toy version of these steps is sketched right after this box.
```
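Since all of these steps are hidden inside **{wildmeta}**, it can help to see them once in miniature. The following toy sketch applies the wild bootstrap to a simple linear model with simulated, clustered data; all data and object names are made up, and the CR2 adjustment of the residuals is omitted for simplicity.
```{r, eval=F}
# Toy illustration of the cluster wild bootstrap steps, using a plain
# linear model instead of a meta-analytic model.
set.seed(123)
J <- 8                                          # number of clusters
dat <- data.frame(cluster = factor(rep(1:J, each = 4)),
                  x = rnorm(J * 4))
dat$y <- 0.2 * dat$x + rnorm(J * 4)
# Step 1: full model and observed test statistic
t.obs <- coef(summary(lm(y ~ x, data = dat)))["x", "t value"]
# Step 2: null model (covariate removed) and its residuals
null <- lm(y ~ 1, data = dat)
e <- resid(null); yhat <- fitted(null)
# Steps 3-5, repeated R times
t.boot <- replicate(1999, {
  eta <- sample(c(-1, 1), J, replace = TRUE)    # one Rademacher draw per cluster
  dat$y.star <- yhat + eta[dat$cluster] * e     # bootstrapped outcomes
  coef(summary(lm(y.star ~ x, data = dat)))["x", "t value"]
})
# Bootstrap p-value: proportion of bootstrap statistics at least as extreme
mean(abs(t.boot) >= abs(t.obs))
```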
To test multiple-contrast hypotheses using the bootstrap, we can use the **{wildmeta}** package [@wildmeta]. This package needs to be installed and loaded from the library for our next example. Furthermore, using functions of the **{tidyverse}**, we generate a new variable in our `Chernobyl` data set, in which we save the year of each study.
```{r, eval=F}
# Make sure {wildmeta} and {tidyverse} are loaded
library(wildmeta)
library(tidyverse)
# Add year as extra variable
Chernobyl$year <- str_extract(Chernobyl$author,
"[0-9]{4}") %>% as.numeric()
```
Next, we use this variable as a **new predictor** in our `rma.mv` meta-regression model. We simply add `year` to the formula in the first argument, while applying the `scale` function to center and scale the covariate. There is also another thing we change in the formula: we replace the `1` of the intercept with a `0`. This means that the model contains no intercept, so that each level of `radiation` receives its own pooled effect estimate while adjusting for `year`^[This change has an impact on the way the different levels of `radiation` are **presented**: we do not get regression weights with one level serving as the reference group, but three separate pooled effect size estimates, one for each level.]. We save the result as `che.model.bs`.
```{r, eval=F}
che.model.bs <- rma.mv(z ~ 0 + radiation + scale(year),
V = V,
random = ~ 1 | author/es.id,
data = Chernobyl,
sparse = TRUE)
```
Before we start bootstrapping, we need to define a **linear contrast** for the test we want to conduct. Let us say that we want to test the **overall moderation effect** of our `radiation` variable. To do this, we have to use the `constrain_equal` function in **{clubSandwich}** to create a constraint matrix for our test. The null hypothesis is that effects are equal among all three levels of our `radiation` variable, so we set the `constraints` argument to `1:3`. Furthermore, we specify the `coefs` argument by providing the coefficients of the model we just fitted. We save the results as `rad.constraints`.
```{r, eval=F}
rad.constraints <- constrain_equal(constraints = 1:3,
coefs = coef(che.model.bs))
rad.constraints
```
```
## [,1] [,2] [,3] [,4]
## [1,] -1 1 0 0
## [2,] -1 0 1 0
```
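To see what this matrix encodes, write $\boldsymbol{\beta} = (\beta_1, \beta_2, \beta_3, \beta_4)^\top$ for the model coefficients in the order returned by `coef(che.model.bs)` (the three `radiation` levels, followed by `scale(year)`). The two rows then translate into the following null hypothesis:
$$\boldsymbol{C\beta} = \begin{bmatrix} -1 & 1 & 0 & 0 \\ -1 & 0 & 1 & 0 \end{bmatrix} \begin{bmatrix} \beta_1 \\ \beta_2 \\ \beta_3 \\ \beta_4 \end{bmatrix} = \begin{bmatrix} \beta_2 - \beta_1 \\ \beta_3 - \beta_1 \end{bmatrix} \overset{H_0}{=} \begin{bmatrix} 0 \\ 0 \end{bmatrix}.$$
This is just another way of saying that, under the null hypothesis, the pooled effects of all three radiation levels are equal, while the `year` coefficient is left unconstrained.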
We can now calculate the bootstrap $p$-value of the multiple-contrast hypothesis, using the `Wald_test_cwb` function in **{wildmeta}**. We have to specify our fitted full model, our constraint matrix, the type of small-sample adjustment we want to use, as well as `R`, the number of bootstrap replications. It is recommended to use a **high number of replications** (e.g. 1000 or more), since this improves our power. In our example, we use 2000 replications, and save the results as `cw.boot`. Please note that, depending on the number of iterations, this process can **take several minutes to finish**.
```{r, eval=F}
cw.boot <- Wald_test_cwb(full_model = che.model.bs,
constraints = rad.constraints,
adjust = "CR2",
R = 2000)
cw.boot
```
```
## Test Adjustment CR_type Statistic R p_val
## 1 CWB Adjusted CR2 CR0 Naive-F 2000 0.3595
```
We see that the $p$-value of our test of moderation is 0.36, which is **not significant**. We had a similar finding in our previous moderator analysis of the radiation strength in Chapter \@ref(three-level-subgroup).
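For comparison, the same multiple-contrast hypothesis can also be tested without bootstrapping, using the RVE-based `Wald_test` function in **{clubSandwich}** together with a small-sample adjustment. A quick sketch, reusing the objects created above:
```{r, eval=F}
# RVE-based (non-bootstrap) test of the same constraints, for comparison
Wald_test(che.model.bs,
          constraints = rad.constraints,
          vcov = "CR2")
```
As discussed at the beginning of this section, the bootstrap version is often preferable when the number of studies is small.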
Using the `plot` function, we can also visualize the **density of the test statistics** across all bootstrap replications.
```{r, eval=F}
plot(cw.boot,
fill = "lightblue",
alpha = 0.5)
```
```{r, fig.width=5, fig.height=3, out.width="75%", fig.align='center', echo=F, message=FALSE, warning=FALSE}
library(wildmeta)
load("data/cw.boot.rda")
plot(cw.boot, fill = "lightblue", alpha = 0.5) +
ggplot2::theme(plot.background = element_rect(fill = "#FFFEFA",
color = "#FFFEFA"),
panel.background = element_blank(),
panel.border = element_blank())
```
\qed
<br></br>
## Questions & Answers
```{block, type='boxquestion'}
**Test your knowledge!**
\vspace{4mm}
1. Why is it more accurate to speak of "three-level" instead of "multilevel" models?
\vspace{-2mm}
2. When are three-level meta-analysis models useful?
\vspace{-2mm}
3. Name two common causes of effect size dependency.
\vspace{-2mm}
4. How can the multilevel $I^2$ statistic be interpreted?
\vspace{-2mm}
5. How can a three-level model be expanded to incorporate the effect of moderator variables?
\vspace{4mm}
**Answers to these questions are listed in [Appendix A](https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/qanda.html#qanda10) at the end of this book.**
```
$$\tag*{$\blacksquare$}$$
<br></br>
## Summary
* All random-effects meta-analyses are based on a multilevel model. When a third layer is added, we speak of a three-level meta-analysis model. Such models are well suited to handle **clustered** effect size data.
* Three-level models can be used for dependent effect sizes. When a study contributes more than one effect size, for example, we typically cannot assume that these results are independent. Three-level models address this problem by assuming that effect sizes are **nested** within larger clusters (e.g. studies).
* In contrast to a conventional meta-analysis, three-level models estimate two heterogeneity variances: the random-effects variance **within** clusters, and the **between**-cluster heterogeneity variance.
* It is also possible to test categorical or continuous predictors using a three-level model. This results in a three-level mixed-effects model.
| {
"repo_name": "MathiasHarrer/Doing-Meta-Analysis-in-R",
"stars": "272",
"repo_language": "R",
"file_name": "pcurve.bw.es.R",
"mime_type": "text/plain"
} |
#TOC ul,
#TOC li,
#TOC span,
#TOC a {
margin: 0;
padding: 0;
position: relative;
}
#TOC {
line-height: 1;
border-radius: 5px 5px 0 0;
background: #141414;
background: linear-gradient(to bottom, #333333 0%, #141414 100%);
border-bottom: 2px solid #0fa1e0;
width: auto;
}
#TOC:after,
#TOC ul:after {
content: '';
display: block;
clear: both;
}
#TOC a {
background: #141414;
background: linear-gradient(to bottom, #333333 0%, #141414 100%);
color: #ffffff;
display: block;
padding: 19px 20px;
text-decoration: none;
text-shadow: none;
}
#TOC ul {
list-style: none;
}
#TOC > ul > li {
display: inline-block;
float: left;
margin: 0;
}
#TOC > ul > li > a {
color: #ffffff;
}
#TOC > ul > li:hover:after {
content: '';
display: block;
width: 0;
height: 0;
position: absolute;
left: 50%;
bottom: 0;
border-left: 10px solid transparent;
border-right: 10px solid transparent;
border-bottom: 10px solid #0fa1e0;
margin-left: -10px;
}
#TOC > ul > li:first-child > a {
border-radius: 5px 0 0 0;
}
#TOC.align-right > ul > li:first-child > a,
#TOC.align-center > ul > li:first-child > a {
border-radius: 0;
}
#TOC.align-right > ul > li:last-child > a {
border-radius: 0 5px 0 0;
}
#TOC > ul > li.active > a,
#TOC > ul > li:hover > a {
color: #ffffff;
box-shadow: inset 0 0 3px #000000;
background: #070707;
background: linear-gradient(to bottom, #262626 0%, #070707 100%);
}
#TOC .has-sub {
z-index: 1;
}
#TOC .has-sub:hover > ul {
display: block;
}
#TOC .has-sub ul {
display: none;
position: absolute;
width: 200px;
top: 100%;
left: 0;
}
#TOC .has-sub ul li a {
background: #0fa1e0;
border-bottom: 1px dotted #31b7f1;
filter: none;
display: block;
line-height: 120%;
padding: 10px;
color: #ffffff;
}
#TOC .has-sub ul li:hover a {
background: #0c7fb0;
}
#TOC ul ul li:hover > a {
color: #ffffff;
}
#TOC .has-sub .has-sub:hover > ul {
display: block;
}
#TOC .has-sub .has-sub ul {
display: none;
position: absolute;
left: 100%;
top: 0;
}
#TOC .has-sub .has-sub ul li a {
background: #0c7fb0;
border-bottom: 1px dotted #31b7f1;
}
#TOC .has-sub .has-sub ul li a:hover {
background: #0a6d98;
}
#TOC ul ul li.last > a,
#TOC ul ul li:last-child > a,
#TOC ul ul ul li.last > a,
#TOC ul ul ul li:last-child > a,
#TOC .has-sub ul li:last-child > a,
#TOC .has-sub ul li.last > a {
border-bottom: 0;
}
#TOC ul {
font-size: 1.2rem;
}
| {
"repo_name": "MathiasHarrer/Doing-Meta-Analysis-in-R",
"stars": "272",
"repo_language": "R",
"file_name": "pcurve.bw.es.R",
"mime_type": "text/plain"
} |