
Commit c4346a2, authored Jun 22, 2023
Add Workflow and CBMAWorkflow classes. Support pairwise CBMA workflows (#809)
* Convert cbma_workflow into a class. Support pairwise estimators
* Add support for reports and diagnostics with pairwise estimator
* Fix docstring
* Add deprecation warning
* Fix test for tables == None
* Add version changes to docstring
* Fix documentation
* Improve coverage
* Use specificity maps instead
* Merge _preprocess_input
* Remove Workflow from __init__
* Add pairwise estimator report
* Update diagnostics
* Reduce the number of iterations; we are running out of time/memory in RTD
* See if using FocusCounter reduces the time for building the documentation
* New parameter display_second_group
* Update 08_plot_cbma_subtraction_conjunction.py
* Add dataset 2 to summary
* Update diagnostics.py
* Update versionchanged
* Reorder matrix only if more than 1 cluster/experiment
* display_second_group in the example
* Fix #814
* Use iframe only for connectome
* Set the size of the heatmap proportional to rows and columns
* Separate positive from negative tail contribution table for pairwise estimator
* Add subsubtitle
* Test a realistic scenario with different dset1 and dset2
* Apply @jdkent code review
* Consider the length of the study label in the figure size
* Fix issues with figure sizes
* Define "PositiveTail" and "NegativeTail" as variables
* Update diagnostics.py
* Make a distinction between studies and experiments in report
* Restore the diagnostics summary
* Limit the colormap to the total number of clusters
* Update 08_plot_cbma_subtraction_conjunction.py
1 parent 3801b67 commit c4346a2

File tree: 15 files changed (+765, -415 lines)

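For orientation, the sketch below assembles the new class-based API from the updated examples in this commit. It is illustrative only: `dset`, `dset1`, and `dset2` stand in for `nimare.dataset.Dataset` objects you have already loaded, and the output directory is a placeholder.

# Minimal sketch (not part of the commit itself) of the new workflow classes.
from pathlib import Path

from nimare.diagnostics import FocusCounter
from nimare.meta.cbma import ALESubtraction
from nimare.reports.base import run_reports
from nimare.workflows import CBMAWorkflow, PairwiseCBMAWorkflow

# One-sample CBMA: estimator, corrector, and diagnostics run in a single fit call.
result = CBMAWorkflow(corrector="fdr").fit(dset)

# Pairwise CBMA, now supported, with diagnostics optionally run on both groups.
pair_result = PairwiseCBMAWorkflow(
    estimator=ALESubtraction(n_iters=10, n_cores=1),
    corrector="fdr",
    diagnostics=FocusCounter(voxel_thresh=0.01, display_second_group=True),
).fit(dset1, dset2)

# Either MetaResult can be turned into an HTML report.
out_dir = Path("output") / "report"  # placeholder path
out_dir.mkdir(parents=True, exist_ok=True)
run_reports(pair_result, out_dir)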
 

‎docs/api.rst

Lines changed: 2 additions & 1 deletion
@@ -334,7 +334,8 @@ For more information about fetching data from the internet, see :ref:`fetching t

workflows.ale_sleuth_workflow
workflows.macm_workflow
-workflows.cbma_workflow
+workflows.cbma.CBMAWorkflow
+workflows.cbma.PairwiseCBMAWorkflow

:mod:`nimare.reports`: NiMARE report
--------------------------------------------------

‎examples/02_meta-analyses/08_plot_cbma_subtraction_conjunction.py

Lines changed: 36 additions & 18 deletions
@@ -20,6 +20,7 @@
6. Compare the two within-sample meta-analyses with a conjunction analysis.
"""
import os
+from pathlib import Path

import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map
@@ -104,19 +105,20 @@
target_image="z_desc-size_level-cluster_corr-FWE_method-montecarlo",
voxel_thresh=None,
)
-knowledge_corrected_results = counter.transform(knowledge_corrected_results)
+knowledge_diagnostic_results = counter.transform(knowledge_corrected_results)

###############################################################################
# Clusters table.
-knowledge_clusters_table = knowledge_corrected_results.tables[
+knowledge_clusters_table = knowledge_diagnostic_results.tables[
"z_desc-size_level-cluster_corr-FWE_method-montecarlo_tab-clust"
]
knowledge_clusters_table.head(10)

###############################################################################
# Contribution table. Here ``PostiveTail`` refers to clusters with positive statistics.
-knowledge_count_table = knowledge_corrected_results.tables[
-"z_desc-size_level-cluster_corr-FWE_method-montecarlo_diag-FocusCounter_tab-counts"
+knowledge_count_table = knowledge_diagnostic_results.tables[
+"z_desc-size_level-cluster_corr-FWE_method-montecarlo_diag-FocusCounter"
+"_tab-counts_tail-positive"
]
knowledge_count_table.head(10)

@@ -127,9 +129,9 @@
target_image="z_desc-size_level-cluster_corr-FWE_method-montecarlo",
voxel_thresh=None,
)
-related_corrected_results = jackknife.transform(related_corrected_results)
-related_jackknife_table = related_corrected_results.tables[
-"z_desc-size_level-cluster_corr-FWE_method-montecarlo_diag-Jackknife_tab-counts"
+related_diagnostic_results = jackknife.transform(related_corrected_results)
+related_jackknife_table = related_diagnostic_results.tables[
+"z_desc-size_level-cluster_corr-FWE_method-montecarlo_diag-Jackknife_tab-counts_tail-positive"
]
related_jackknife_table.head(10)

@@ -138,20 +140,36 @@
# -----------------------------------------------------------------------------
# Typically, one would use at least 10000 iterations for a subtraction analysis.
# However, we have reduced this to 100 iterations for this example.
+# Similarly here we use a voxel-level z-threshold of 0.01, but in practice one would
+# use a more stringent threshold (e.g., 1.65).
from nimare.meta.cbma import ALESubtraction
+from nimare.reports.base import run_reports
+from nimare.workflows import PairwiseCBMAWorkflow

-sub = ALESubtraction(n_iters=100, n_cores=1)
-res_sub = sub.fit(knowledge_dset, related_dset)
-img_sub = res_sub.get_map("z_desc-group1MinusGroup2")
-
-plot_stat_map(
-img_sub,
-cut_coords=4,
-display_mode="z",
-title="Subtraction",
-cmap="RdBu_r",
-vmax=4,
+workflow = PairwiseCBMAWorkflow(
+estimator=ALESubtraction(n_iters=10, n_cores=1),
+corrector="fdr",
+diagnostics=FocusCounter(voxel_thresh=0.01, display_second_group=True),
)
+res_sub = workflow.fit(knowledge_dset, related_dset)
+
+###############################################################################
+# Report
+# -----------------------------------------------------------------------------
+# Finally, a NiMARE report is generated from the MetaResult.
+# root_dir = Path(os.getcwd()).parents[1] / "docs" / "_build"
+# Use the previous root to run the documentation locally.
+root_dir = Path(os.getcwd()).parents[1] / "_readthedocs"
+html_dir = root_dir / "html" / "auto_examples" / "02_meta-analyses" / "08_subtraction"
+html_dir.mkdir(parents=True, exist_ok=True)
+
+run_reports(res_sub, html_dir)
+
+####################################
+# .. raw:: html
+#
+# <iframe src="./08_subtraction/report.html" style="border:none;" seamless="seamless" \
+# width="100%" height="1000px"></iframe>

###############################################################################
# Conjunction analysis

‎examples/02_meta-analyses/10_plot_cbma_workflow.py

Lines changed: 14 additions & 13 deletions
@@ -20,7 +20,7 @@
from nimare.dataset import Dataset
from nimare.reports.base import run_reports
from nimare.utils import get_resource_path
-from nimare.workflows import cbma_workflow
+from nimare.workflows.cbma import CBMAWorkflow

###############################################################################
# Load Dataset
@@ -32,25 +32,26 @@
###############################################################################
# Run CBMA Workflow
# -----------------------------------------------------------------------------
-# The CBMA workflow function runs the following steps:
+# The fit method of a CBMA workflow class runs the following steps:
#
# 1. Runs a meta-analysis using the specified method (default: ALE)
# 2. Applies a corrector to the meta-analysis results (default: FWECorrector, montecarlo)
# 3. Generates cluster tables and runs diagnostics on the corrected results (default: Jackknife)
#
-# All in one function call!
+# All in one call!
#
-# result = cbma_workflow(dset)
+# result = CBMAWorkflow().fit(dset)
#
# For this example, we use an FDR correction because the default corrector (FWE correction with
# Monte Carlo simulation) takes a long time to run due to the high number of iterations that
# are required
-result = cbma_workflow(dset, corrector="fdr")
+workflow = CBMAWorkflow(corrector="fdr")
+result = workflow.fit(dset)

###############################################################################
# Plot Results
# -----------------------------------------------------------------------------
-# The CBMA workflow function returns a :class:`~nimare.results.MetaResult` object,
+# The fit method of the CBMA workflow class returns a :class:`~nimare.results.MetaResult` object,
# where you can access the corrected results of the meta-analysis and diagnostics tables.
#
# Corrected map:
@@ -73,22 +74,22 @@
###############################################################################
# Contribution table
# ``````````````````````````````````````````````````````````````````````````````
-result.tables["z_corr-FDR_method-indep_diag-Jackknife_tab-counts"]
+result.tables["z_corr-FDR_method-indep_diag-Jackknife_tab-counts_tail-positive"]

###############################################################################
# Report
# -----------------------------------------------------------------------------
# Finally, a NiMARE report is generated from the MetaResult.
-root_dir = Path(os.getcwd()).parents[1]
-# Use the following path to run the documentation locally:
-# html_dir = root_dir / "docs" / "_build" / "html" / "auto_examples" / "02_meta-analyses"
-html_dir = root_dir / "_readthedocs" / "html" / "auto_examples" / "02_meta-analyses"
+# root_dir = Path(os.getcwd()).parents[1] / "docs" / "_build"
+# Use the previous root to run the documentation locally.
+root_dir = Path(os.getcwd()).parents[1] / "_readthedocs"
+html_dir = root_dir / "html" / "auto_examples" / "02_meta-analyses" / "10_plot_cbma_workflow"
html_dir.mkdir(parents=True, exist_ok=True)

run_reports(result, html_dir)

####################################
# .. raw:: html
#
-# <iframe src="./report.html" style="border:none;" seamless="seamless" width="100%"\
-# height="1000px"></iframe>
+# <iframe src="./10_plot_cbma_workflow/report.html" style="border:none;" seamless="seamless"\
+# width="100%" height="1000px"></iframe>

‎nimare/diagnostics.py

Lines changed: 104 additions & 56 deletions
@@ -12,17 +12,26 @@
from tqdm.auto import tqdm

from nimare.base import NiMAREBase
+from nimare.meta.cbma.base import PairwiseCBMAEstimator
+from nimare.meta.ibma import IBMAEstimator
from nimare.utils import _check_ncores, get_masker, mm2vox, tqdm_joblib

LGR = logging.getLogger(__name__)

+POSTAIL_LBL = "PositiveTail" # Label assigned to positive tail clusters
+NEGTAIL_LBL = "NegativeTail" # Label assigned to negative tail clusters
+

class Diagnostics(NiMAREBase):
"""Base class for diagnostic methods.

+.. versionchanged:: 0.1.2
+
+* New parameter display_second_group, which controls whether the second group is displayed.
+
.. versionchanged:: 0.1.0

-- Transform now returns a MetaResult object.
+* Transform now returns a MetaResult object.

.. versionadded:: 0.0.14

@@ -51,11 +60,13 @@ def __init__(
target_image="z_desc-size_level-cluster_corr-FWE_method-montecarlo",
voxel_thresh=None,
cluster_threshold=None,
+display_second_group=False,
n_cores=1,
):
self.target_image = target_image
self.voxel_thresh = voxel_thresh
self.cluster_threshold = cluster_threshold
+self.display_second_group = display_second_group
self.n_cores = _check_ncores(n_cores)

@abstractmethod
@@ -109,19 +120,10 @@ def transform(self, result):
correspond to positive and negative tails.
If no clusters are found, this list will be empty.
"""
+self._is_pairwaise_estimator = issubclass(type(result.estimator), PairwiseCBMAEstimator)
masker = result.estimator.masker
diag_name = self.__class__.__name__

-none_contribution_table = False
-if not hasattr(result.estimator, "dataset"):
-LGR.warning(
-"MetaResult was not generated by an Estimator with a `dataset` attribute. "
-"This may be because the Estimator was a pairwise Estimator. The "
-"Jackknife/FocusCounter method does not currently work with pairwise Estimators. "
-"The ``contribution_table`` will be returned as ``None``."
-)
-none_contribution_table = True
-
# Collect the thresholded cluster map
if self.target_image in result.maps:
target_img = result.get_map(self.target_image, return_type="image")
@@ -152,15 +154,15 @@ def transform(self, result):
clusters_table = clusters_table.astype({"Cluster ID": "str"})
# Rename the clusters_table cluster IDs to match the contribution table columns
clusters_table["Cluster ID"] = [
-f"PositiveTail {row['Cluster ID']}"
+f"{POSTAIL_LBL} {row['Cluster ID']}"
if row["Peak Stat"] > 0
-else f"NegativeTail {row['Cluster ID']}"
+else f"{NEGTAIL_LBL} {row['Cluster ID']}"
for _, row in clusters_table.iterrows()
]

# Define bids-like names for tables and maps
image_name = "_".join(self.target_image.split("_")[1:])
-image_name = "_" + image_name if image_name else image_name
+image_name = f"_{image_name}" if image_name else image_name
clusters_table_name = f"{self.target_image}_tab-clust"
contribution_table_name = f"{self.target_image}_diag-{diag_name}_tab-counts"
label_map_names = (
@@ -170,23 +172,44 @@ def transform(self, result):
)

# Check number of clusters
-if (n_clusters == 0) or none_contribution_table:
+if n_clusters == 0:
result.tables[clusters_table_name] = clusters_table
result.tables[contribution_table_name] = None
result.maps[label_map_names[0]] = None

result.diagnostics.append(self)
return result

+tables_dict = {clusters_table_name: clusters_table}
+maps_dict = {
+label_map_name: np.squeeze(masker.transform(label_map))
+for label_map_name, label_map in zip(label_map_names, label_maps)
+}
+
# Use study IDs in inputs_ instead of dataset, because we don't want to try fitting the
# estimator to a study that might have been filtered out by the estimator's criteria.
-meta_ids = result.estimator.inputs_["id"]
-rows = list(meta_ids)
+# For pairwise estimators, use id1 for positive tail and id2 for negative tail.
+# Run diagnostics with id2 for pairwise estimators and display_second_group=True.
+if self._is_pairwaise_estimator:
+if self.display_second_group and len(label_maps) == 2:
+meta_ids_lst = [result.estimator.inputs_["id1"], result.estimator.inputs_["id2"]]
+signs = [POSTAIL_LBL, NEGTAIL_LBL]
+else:
+meta_ids_lst = [result.estimator.inputs_["id1"]]
+signs = [POSTAIL_LBL]
+elif len(label_maps) == 2:
+# Non pairwise estimator with two tails (IBMA estimators)
+meta_ids_lst = [result.estimator.inputs_["id"], result.estimator.inputs_["id"]]
+signs = [POSTAIL_LBL, NEGTAIL_LBL]
+else:
+# Non pairwise estimator with one tail (CBMA estimators)
+meta_ids_lst = [result.estimator.inputs_["id"]]
+signs = [POSTAIL_LBL]

contribution_tables = []
-signs = ["PositiveTail", "NegativeTail"] if len(label_maps) == 2 else ["PositiveTail"]
-for sign, label_map in zip(signs, label_maps):
+for sign, label_map, meta_ids in zip(signs, label_maps, meta_ids_lst):
cluster_ids = sorted(list(np.unique(label_map.get_fdata())[1:]))
+rows = list(meta_ids)

# Create contribution table
cols = [f"{sign} {int(c_id)}" for c_id in cluster_ids]
@@ -195,7 +218,7 @@ def transform(self, result):

with tqdm_joblib(tqdm(total=len(meta_ids))):
contributions = Parallel(n_jobs=self.n_cores)(
-delayed(self._transform)(expid, label_map, result) for expid in meta_ids
+delayed(self._transform)(expid, label_map, sign, result) for expid in meta_ids
)

# Add results to table
@@ -204,21 +227,21 @@ def transform(self, result):

contribution_tables.append(contribution_table.reset_index())

-# Concat PositiveTail and NegativeTail tables
-contribution_table = pd.concat(contribution_tables, ignore_index=True, sort=False)
+tails = ["positive", "negative"] if len(contribution_tables) == 2 else ["positive"]
+if not self._is_pairwaise_estimator and len(contribution_tables) == 2:
+# Merge POSTAIL_LBL and NEGTAIL_LBL tables for IBMA
+contribution_table = (
+contribution_tables[0].merge(contribution_tables[1], how="outer").fillna(0)
+)
+tables_dict[contribution_table_name] = contribution_table
+else:
+# Plot separate tables for CBMA
+for tail, contribution_table in zip(tails, contribution_tables):
+tables_dict[f"{contribution_table_name}_tail-{tail}"] = contribution_table

# Save tables and maps to result
-diag_tables_dict = {
-clusters_table_name: clusters_table,
-contribution_table_name: contribution_table,
-}
-diag_maps_dict = {
-label_map_name: np.squeeze(masker.transform(label_map))
-for label_map_name, label_map in zip(label_map_names, label_maps)
-}
-
-result.tables.update(diag_tables_dict)
-result.maps.update(diag_maps_dict)
+result.tables.update(tables_dict)
+result.maps.update(maps_dict)

# Add diagnostics class to result, since more than one can be run
result.diagnostics.append(self)
@@ -228,14 +251,18 @@ def transform(self, result):
class Jackknife(Diagnostics):
"""Run a jackknife analysis on a meta-analysis result.

+.. versionchanged:: 0.1.2
+
+* Support for pairwise meta-analyses.
+
.. versionchanged:: 0.0.14

* New parameter: `cluster_threshold`.
* Return clusters table.

.. versionchanged:: 0.0.13

-Change cluster neighborhood from faces+edges to faces, to match Nilearn.
+* Change cluster neighborhood from faces+edges to faces, to match Nilearn.

.. versionadded:: 0.0.11

@@ -246,14 +273,9 @@ class Jackknife(Diagnostics):
statistic for all experiments *except* the target experiment, dividing the resulting test
summary statistics by the summary statistics from the original meta-analysis, and finally
averaging the resulting proportion values across all voxels in each cluster.
-
-Warnings
---------
-Pairwise meta-analyses, like ALESubtraction and MKDAChi2, are not yet supported in this
-method.
"""

-def _transform(self, expid, label_map, result):
+def _transform(self, expid, label_map, sign, result):
"""Apply transform to study ID and label map.

Parameters
@@ -262,6 +284,8 @@ def _transform(self, expid, label_map, result):
Study ID.
label_map : :class:`nibabel.Nifti1Image`
The cluster label map image.
+sign : :obj:`str`
+The sign of the label map.
result : :obj:`~nimare.results.MetaResult`
A MetaResult produced by a coordinate- or image-based meta-analysis.

@@ -274,29 +298,39 @@ def _transform(self, expid, label_map, result):
# with one missing a study in its inputs.
estimator = copy.deepcopy(result.estimator)

-dset = estimator.dataset
+if self._is_pairwaise_estimator:
+all_ids = estimator.inputs_["id1"] if sign == POSTAIL_LBL else estimator.inputs_["id2"]
+else:
+all_ids = estimator.inputs_["id"]
+
original_masker = estimator.masker
-all_ids = estimator.inputs_["id"]

# Mask using a labels masker, so that we can easily get the mean value for each cluster
cluster_masker = input_data.NiftiLabelsMasker(label_map)
cluster_masker.fit(label_map)

-# CBMAs have "stat" maps, while most IBMAs have "est" maps.
+# CBMAs have "stat" maps, while most IBMAs have "est" maps. ALESubtraction has
+# stat_desc-group1MinusGroup2" maps, while MKDAChi2 has "z_desc-specificity" maps.
# Fisher's and Stouffer's only have "z" maps though.
-if "est" in result.maps:
-target_value_map = "est"
-elif "stat" in result.maps:
-target_value_map = "stat"
-else:
-target_value_map = "z"
+target_value_keys = {"stat", "est", "stat_desc-group1MinusGroup2", "z_desc-specificity"}
+avail_value_keys = set(result.maps.keys())
+union_value_keys = list(target_value_keys & avail_value_keys)
+target_value_map = union_value_keys[0] if union_value_keys else "z"

stat_values = result.get_map(target_value_map, return_type="array")

# Fit Estimator to all studies except the target study
other_ids = [id_ for id_ in all_ids if id_ != expid]
-temp_dset = dset.slice(other_ids)
-temp_result = estimator.fit(temp_dset)
+if self._is_pairwaise_estimator:
+if sign == POSTAIL_LBL:
+temp_dset = estimator.dataset1.slice(other_ids)
+temp_result = estimator.fit(temp_dset, estimator.dataset2)
+else:
+temp_dset = estimator.dataset2.slice(other_ids)
+temp_result = estimator.fit(estimator.dataset1, temp_dset)
+else:
+temp_dset = estimator.dataset.slice(other_ids)
+temp_result = estimator.fit(temp_dset)

# Collect the target values (e.g., ALE values) from the N-1 meta-analysis
temp_stat_img = temp_result.get_map(target_value_map, return_type="image")
@@ -317,6 +351,10 @@ def _transform(self, expid, label_map, result):
class FocusCounter(Diagnostics):
"""Run a focus-count analysis on a coordinate-based meta-analysis result.

+.. versionchanged:: 0.1.2
+
+* Support for pairwise meta-analyses.
+
.. versionchanged:: 0.0.14

* New parameter: `cluster_threshold`.
@@ -337,12 +375,9 @@ class FocusCounter(Diagnostics):
Warnings
--------
This method only works for coordinate-based meta-analyses.
-
-Pairwise meta-analyses, like ALESubtraction and MKDAChi2, are not yet supported in this
-method.
"""

-def _transform(self, expid, label_map, result):
+def _transform(self, expid, label_map, sign, result):
"""Apply transform to study ID and label map.

Parameters
@@ -351,6 +386,8 @@ def _transform(self, expid, label_map, result):
Study ID.
label_map : :class:`nibabel.Nifti1Image`
The cluster label map image.
+sign : :obj:`str`
+The sign of the label map.
result : :obj:`~nimare.results.MetaResult`
A MetaResult produced by a coordinate- or image-based meta-analysis.

@@ -359,11 +396,22 @@ def _transform(self, expid, label_map, result):
stat_prop_values : 1D :obj:`numpy.ndarray`
1D array with the contribution of `expid` in each cluster of `label_map`.
"""
+if issubclass(type(result.estimator), IBMAEstimator):
+raise ValueError("This method only works for coordinate-based meta-analyses.")
+
affine = label_map.affine
label_arr = label_map.get_fdata()
clust_ids = sorted(list(np.unique(label_arr)[1:]))

-coordinates_df = result.estimator.inputs_["coordinates"]
+if self._is_pairwaise_estimator:
+coordinates_df = (
+result.estimator.inputs_["coordinates1"]
+if sign == POSTAIL_LBL
+else result.estimator.inputs_["coordinates2"]
+)
+else:
+coordinates_df = result.estimator.inputs_["coordinates"]
+
coords = coordinates_df.loc[coordinates_df["id"] == expid]
ijk = mm2vox(coords[["x", "y", "z"]], affine)

‎nimare/reports/base.py

Lines changed: 92 additions & 74 deletions
@@ -29,7 +29,8 @@
import jinja2
from pkg_resources import resource_filename as pkgrf

-from nimare.meta.cbma.base import CBMAEstimator
+from nimare.meta.cbma.base import CBMAEstimator, PairwiseCBMAEstimator
+from nimare.meta.ibma import IBMAEstimator
from nimare.reports.figures import (
gen_table,
plot_clusters,
@@ -40,31 +41,6 @@
plot_static_brain,
)

-SVG_SNIPPET = [
-"""\
-<object class="svg-reportlet" type="image/svg+xml" data="./{0}">
-Problem loading figure {0}. If the link below works, please try \
-reloading the report in your browser.</object>
-</div>
-<div class="elem-filename">
-Get figure file: <a href="./{0}" target="_blank">{0}</a>
-</div>
-""",
-"""\
-<img class="svg-reportlet" src="./{0}" style="width: 100%" />
-</div>
-<div class="elem-filename">
-Get figure file: <a href="./{0}" target="_blank">{0}</a>
-</div>
-""",
-]
-
-IFRAME_SNIPPET = """\
-<div class="igraph-container">
-<iframe class="igraph" src="./{0}"></iframe>
-</div>
-"""
-
PARAMETERS_DICT = {
"kernel_transformer__fwhm": "FWHM",
"kernel_transformer__sample_size": "Sample size",
@@ -77,23 +53,39 @@
"fdr": "False discovery rate (FDR) correction",
"method": "Method",
"alpha": "Alpha",
+"prior": "Prior",
}

+PNG_SNIPPET = """\
+<img class="png-reportlet" src="./{0}" style="width: 100%" /></div>
+<div class="elem-filename">
+Get figure file: <a href="./{0}" target="_blank">{0}</a>
+</div>
+"""
+
+IFRAME_SNIPPET = """\
+<div class="igraph-container">
+<iframe class="igraph" src="./{0}"></iframe>
+</div>
+"""
+
SUMMARY_TEMPLATE = """\
<ul class="elem-desc">
-<li>Number of studies: {n_exps:d}</li>
-<li>Number of studies included: {n_exps_sel:d}</li>
+<li>Number of studies: {n_studies:d}</li>
+<li>Number of experiments: {n_exps:d}</li>
+<li>Number of experiments included: {n_exps_sel:d}</li>
<li>Number of foci: {n_foci:d} </li>
<li>Number of foci outside the mask: {n_foci_nonbrain:d} </li>
</ul>
<details>
-<summary>Studies excluded</summary><br />
+<summary>Experiments excluded</summary><br />
<p>{exc_ids}</p>
</details>
"""

ESTIMATOR_TEMPLATE = """\
<ul class="elem-desc">
+<li>Estimator: {est_name}</li>
<li>Kernel Transformer: {kernel_transformer}{ker_params_text}</li>
{est_params_text}
</ul>
@@ -124,17 +116,17 @@ def _gen_est_summary(obj, out_filename):
ker_params = {k: v for k, v in params_dict.items() if k.startswith("kernel_transformer__")}

ker_params_text = ["<ul>"]
-for k, v in ker_params.items():
-ker_params_text.append(f"<li>{PARAMETERS_DICT[k]}: {v}</li>")
+ker_params_text.extend(f"<li>{PARAMETERS_DICT[k]}: {v}</li>" for k, v in ker_params.items())
ker_params_text.append("</ul>")
ker_params_text = "".join(ker_params_text)

-est_params_text = []
-for k, v in est_params.items():
-est_params_text.append(f"<li>{PARAMETERS_DICT[k]}: {v}</li>")
+est_params_text = [f"<li>{PARAMETERS_DICT[k]}: {v}</li>" for k, v in est_params.items()]
est_params_text = "".join(est_params_text)

+est_name = obj.__class__.__name__
+
summary_text = ESTIMATOR_TEMPLATE.format(
+est_name=est_name,
kernel_transformer=str(params_dict["kernel_transformer"]),
ker_params_text=ker_params_text,
est_params_text=est_params_text,
@@ -146,14 +138,13 @@ def _gen_cor_summary(obj, out_filename):
"""Generate html with parameter use in obj (e.g., corrector)."""
params_dict = obj.get_params()

-cor_params_text = []
-for k, v in params_dict.items():
-cor_params_text.append(f"<li>{PARAMETERS_DICT[k]}: {v}</li>")
+cor_params_text = [f"<li>{PARAMETERS_DICT[k]}: {v}</li>" for k, v in params_dict.items()]
cor_params_text = "".join(cor_params_text)

ext_params_text = ["<ul>"]
-for k, v in obj.parameters.items():
-ext_params_text.append(f"<li>{PARAMETERS_DICT[k]}: {v}</li>")
+ext_params_text.extend(
+f"<li>{PARAMETERS_DICT[k]}: {v}</li>" for k, v in obj.parameters.items()
+)
ext_params_text.append("</ul>")
ext_params_text = "".join(ext_params_text)

@@ -200,6 +191,7 @@ def _gen_fig_summary(img_key, threshold, out_filename):
def _gen_summary(dset, out_filename):
"""Generate preliminary checks from dataset for the report."""
mask = dset.masker.mask_img
+n_studies = len(dset.coordinates["study_id"].unique())
sel_ids = dset.get_studies_by_mask(mask)
sel_dset = dset.slice(sel_ids)

@@ -213,6 +205,7 @@ def _gen_summary(dset, out_filename):
exc_ids_str = ", ".join(exc_ids)

summary_text = SUMMARY_TEMPLATE.format(
+n_studies=n_studies,
n_exps=n_exps,
n_exps_sel=n_exps_sel,
n_foci=n_foci,
@@ -237,28 +230,45 @@ def _gen_figures(results, img_key, diag_name, threshold, fig_dir):
if cluster_table is not None and not cluster_table.empty:
gen_table(cluster_table, fig_dir / "diagnostics_tab-clust_table.html")

-# Get label maps
+# Get label maps and contribution_table
+contribution_tables = []
+heatmap_names = []
lbl_name = "_".join(img_key.split("_")[1:])
-lbl_name = "_" + lbl_name if lbl_name else lbl_name
+lbl_name = f"_{lbl_name}" if lbl_name else lbl_name
for tail in ["positive", "negative"]:
lbl_key = f"label{lbl_name}_tail-{tail}"
if lbl_key in results.maps:
label_map = results.get_map(lbl_key)
plot_clusters(label_map, fig_dir / f"diagnostics_tail-{tail}_figure.png")

+contribution_table_name = f"{img_key}_diag-{diag_name}_tab-counts_tail-{tail}"
+if contribution_table_name in results.tables:
+contribution_table = results.tables[contribution_table_name]
+if contribution_table is not None and not contribution_table.empty:
+contribution_table = contribution_table.set_index("id")
+contribution_tables.append(contribution_table)
+heatmap_names.append(
+f"diagnostics_diag-{diag_name}_tab-counts_tail-{tail}_figure.html"
+)
+
+# For IBMA plot only one heatmap with both positive and negative tails
+contribution_table_name = f"{img_key}_diag-{diag_name}_tab-counts"
+if contribution_table_name in results.tables:
+contribution_table = results.tables[contribution_table_name]
+if contribution_table is not None and not contribution_table.empty:
+contribution_table = contribution_table.set_index("id")
+contribution_tables.append(contribution_table)
+heatmap_names.append(f"diagnostics_diag-{diag_name}_tab-counts_figure.html")
+
+# Plot heatmaps
+[
+plot_heatmap(contribution_table, fig_dir / heatmap_name)
+for heatmap_name, contribution_table in zip(heatmap_names, contribution_tables)
+]
+
else:
_no_clusts_found(fig_dir / "diagnostics_tab-clust_table.html")

-# Plot heatmap if contribution_table is not empty
-if f"{img_key}_diag-{diag_name}_tab-counts" in results.tables:
-contribution_table = results.tables[f"{img_key}_diag-{diag_name}_tab-counts"]
-if contribution_table is not None and not contribution_table.empty:
-contribution_table = contribution_table.set_index("id")
-plot_heatmap(
-contribution_table,
-fig_dir / f"diagnostics_diag-{diag_name}_tab-counts_figure.html",
-)
-

class Element(object):
"""Just a basic component of a report."""
@@ -292,6 +302,7 @@ def __init__(self, out_dir, config=None):
self.name = config.get("name", bids_name)
self.title = config.get("title")
self.subtitle = config.get("subtitle")
+self.subsubtitle = config.get("subsubtitle")
self.description = config.get("description")

files = glob(str(out_dir / "figures" / f"{self.name}.*"))
@@ -308,7 +319,7 @@ def __init__(self, out_dir, config=None):
if ext == ".html":
contents = IFRAME_SNIPPET.format(html_anchor) if iframe else src.read_text()
elif ext == ".png":
-contents = SVG_SNIPPET[config.get("static", True)].format(html_anchor)
+contents = PNG_SNIPPET.format(html_anchor)

if contents:
self.components.append((contents, desc_text))
@@ -351,6 +362,9 @@ def __init__(
out_filename="report.html",
):
self.results = results
+self._is_pairwise_estimator = issubclass(
+type(self.results.estimator), PairwiseCBMAEstimator
+)

# Initialize structuring elements
self.sections = []
@@ -360,35 +374,41 @@
self.fig_dir = self.out_dir / "figures"
self.fig_dir.mkdir(parents=True, exist_ok=True)

-# Generate summary text
-_gen_summary(self.results.estimator.dataset, self.fig_dir / "preliminary_summary.html")
-
-# Plot mask
-plot_mask(
-self.results.estimator.dataset.masker.mask_img,
-self.fig_dir / "preliminary_figure-mask.png",
+datasets = (
+[self.results.estimator.dataset1, self.results.estimator.dataset2]
+if self._is_pairwise_estimator
+else [self.results.estimator.dataset]
)
-
-if issubclass(type(self.results.estimator), CBMAEstimator):
-# Plot coordinates for CBMA estimators
-plot_coordinates(
-self.results.estimator.dataset.coordinates,
-self.fig_dir / "preliminary_figure-static.png",
-self.fig_dir / "preliminary_figure-interactive.html",
-self.fig_dir / "preliminary_figure-legend.png",
+for dset_i, dataset in enumerate(datasets):
+# Generate summary text
+_gen_summary(dataset, self.fig_dir / f"preliminary_dset-{dset_i+1}_summary.html")
+
+# Plot mask
+plot_mask(
+dataset.masker.mask_img,
+self.fig_dir / f"preliminary_dset-{dset_i+1}_figure-mask.png",
)

+if issubclass(type(self.results.estimator), CBMAEstimator):
+# Plot coordinates for CBMA estimators
+plot_coordinates(
+dataset.coordinates,
+self.fig_dir / f"preliminary_dset-{dset_i+1}_figure-static.png",
+self.fig_dir / f"preliminary_dset-{dset_i+1}_figure-interactive.html",
+self.fig_dir / f"preliminary_dset-{dset_i+1}_figure-legend.png",
+)
+elif issubclass(type(self.results.estimator), IBMAEstimator):
+raise NotImplementedError
+
_gen_est_summary(self.results.estimator, self.fig_dir / "estimator_summary.html")
_gen_cor_summary(self.results.corrector, self.fig_dir / "corrector_summary.html")
for diagnostic in self.results.diagnostics:
img_key = diagnostic.target_image
diag_name = diagnostic.__class__.__name__
threshold = diagnostic.voxel_thresh

-_gen_fig_summary(img_key, threshold, self.fig_dir / "corrector_fig-summary.html")
-_gen_diag_summary(
-diagnostic, self.fig_dir / f"diagnostics_diag-{diag_name}_summary.html"
-)
+_gen_fig_summary(img_key, threshold, self.fig_dir / "corrector_figure-summary.html")
+_gen_diag_summary(diagnostic, self.fig_dir / "diagnostics_summary.html")
_gen_figures(self.results, img_key, diag_name, threshold, self.fig_dir)

# Default template from nimare
@@ -412,9 +432,7 @@ def index(self, config):
for subrep_cfg in config:
reportlets = [Reportlet(self.out_dir, config=cfg) for cfg in subrep_cfg["reportlets"]]

-# Filter out empty reportlets
-reportlets = [r for r in reportlets if not r.is_empty()]
-if reportlets:
+if reportlets := [r for r in reportlets if not r.is_empty()]:
sub_report = SubReport(
subrep_cfg["name"],
isnested=False,

‎nimare/reports/default.yml

Lines changed: 33 additions & 8 deletions
@@ -41,13 +41,24 @@ package: nimare
sections:
- name: Summary
reportlets:
-- bids: {value: preliminary, suffix: summary}
-- bids: {value: preliminary, suffix: figure-mask}
+- bids: {value: preliminary, dset: 1, suffix: summary}
+title: Dataset 1
+- bids: {value: preliminary, dset: 1, suffix: figure-mask}
title: Mask
-- bids: {value: preliminary, suffix: figure-static}
+- bids: {value: preliminary, dset: 1, suffix: figure-static}
title: Peak coordinates
-- bids: {value: preliminary, suffix: figure-legend}
-- bids: {value: preliminary, suffix: figure-interactive}
+- bids: {value: preliminary, dset: 1, suffix: figure-legend}
+- bids: {value: preliminary, dset: 1, suffix: figure-interactive}
+title: Explorer
+iframe: True
+- bids: {value: preliminary, dset: 2, suffix: summary}
+title: Dataset 2
+- bids: {value: preliminary, dset: 2, suffix: figure-mask}
+title: Mask
+- bids: {value: preliminary, dset: 2, suffix: figure-static}
+title: Peak coordinates
+- bids: {value: preliminary, dset: 2, suffix: figure-legend}
+- bids: {value: preliminary, dset: 2, suffix: figure-interactive}
title: Explorer
iframe: True
- name: Meta-Analysis
@@ -58,7 +69,7 @@ sections:
- bids: {value: corrector, suffix: summary}
title: Corrector
description: Parameters use to fit the corrector.
-- bids: {value: corrector, suffix: fig-summary}
+- bids: {value: corrector, suffix: figure-summary}
- bids: {value: corrector, suffix: figure-non}
- bids: {value: corrector, suffix: figure-interactive}
subtitle: Explorer
@@ -67,7 +78,7 @@ sections:
description: This panel shows the the corrrected meta-analytic map.
- name: Diagnostics
reportlets:
-- bids: {value: diagnostics, suffix: diag-summary}
+- bids: {value: diagnostics, suffix: summary}
- bids: {value: diagnostics, tab: clust, suffix: table}
subtitle: Significant clusters
- bids: {value: diagnostics, tail: positive, suffix: figure}
@@ -81,4 +92,18 @@ sections:
- bids: {value: diagnostics, diag: Jackknife, tab: counts, suffix: figure}
subtitle: Jackknife
caption: *heatmap_text
-description: *jackknife_text
+description: *jackknife_text
+- bids: {value: diagnostics, diag: FocusCounter, tab: counts, tail: positive, suffix: figure}
+subtitle: FocusCounter
+subsubtitle: "Heatmap: positive tail"
+caption: *heatmap_text
+description: *focuscounter_text
+- bids: {value: diagnostics, diag: FocusCounter, tab: counts, tail: negative, suffix: figure}
+subsubtitle: "Heatmap: negative tail"
+- bids: {value: diagnostics, diag: Jackknife, tab: counts, tail: positive, suffix: figure}
+subtitle: Jackknife
+subsubtitle: "Heatmap: positive tail"
+caption: *heatmap_text
+description: *jackknife_text
+- bids: {value: diagnostics, diag: Jackknife, tab: counts, tail: negative, suffix: figure}
+subsubtitle: "Heatmap: negative tail"

‎nimare/reports/figures.py

Lines changed: 29 additions & 24 deletions
@@ -81,7 +81,7 @@ def _reorder_matrix(mat, row_labels, col_labels, reorder):

# Order columns
col_linkage_matrix = linkage(mat.T, method=reorder)
-col_ordered_linkage = optimal_leaf_ordering(col_linkage_matrix, mat)
+col_ordered_linkage = optimal_leaf_ordering(col_linkage_matrix, mat.T)
col_index = leaves_list(col_ordered_linkage)

# Make sure labels is an ndarray and copy it
@@ -124,7 +124,6 @@ def plot_static_brain(img, out_filename, threshold=1e-06):
black_bg=False,
draw_cross=False,
threshold=threshold,
-vmax=4,
display_mode="mosaic",
)
fig.savefig(out_filename, dpi=300)
@@ -191,7 +190,7 @@ def plot_coordinates(
adjacency_matrix = np.zeros((n_coords, n_coords))

# Generate dictionary and array of colors for each unique ID
-ids = coordinates_df["id"].to_list()
+ids = coordinates_df["study_id"].to_list()
unq_ids = np.unique(ids)
cmap = plt.cm.get_cmap("tab20", len(unq_ids))
colors_dict = {unq_id: mcolors.to_hex(cmap(i)) for i, unq_id in enumerate(unq_ids)}
@@ -207,9 +206,10 @@ def plot_coordinates(
]

# Plot legeng
-ncol = 10
+max_len_per_page = 200
+max_legend_len = max(len(id_) for id_ in unq_ids)
+ncol = 1 if max_legend_len > max_len_per_page else int(max_len_per_page / max_legend_len)
labl_fig, ax = plt.subplots(1, 1)
-labl_fig.set_size_inches(ncol, len(patches_lst) / ncol**2)
labl_fig.legend(
handles=patches_lst,
ncol=ncol,
@@ -251,7 +251,7 @@ def plot_interactive_brain(img, out_filename, threshold=1e-06):
_check_extention(out_filename, [".html"])

template = datasets.load_mni152_template(resolution=1)
-html_view = view_img(img, bg_img=template, black_bg=False, threshold=threshold, vmax=4)
+html_view = view_img(img, bg_img=template, black_bg=False, threshold=threshold)
html_view.save_as_html(out_filename)


@@ -271,26 +271,27 @@ def plot_heatmap(contribution_table, out_filename):
"""
_check_extention(out_filename, [".html"])

-mat = contribution_table.to_numpy()
-row_labels, col_labels = (
-contribution_table.index.to_list(),
-contribution_table.columns.to_list(),
-)
+n_studies, n_clusters = contribution_table.shape
+if (n_studies > 2) and (n_clusters > 2):
+# Reorder matrix only if more than 1 cluster/experiment
+mat = contribution_table.to_numpy()
+row_labels, col_labels = (
+contribution_table.index.to_list(),
+contribution_table.columns.to_list(),
+)
+new_mat, new_row_labels, new_col_labels = _reorder_matrix(
+mat,
+row_labels,
+col_labels,
+"single",
+)
+contribution_table = pd.DataFrame(new_mat, columns=new_col_labels, index=new_row_labels)

-new_mat, new_row_labels, new_col_labels = _reorder_matrix(
-mat,
-row_labels,
-col_labels,
-"single",
-)
-new_df = pd.DataFrame(new_mat, columns=new_col_labels, index=new_row_labels)
+fig = px.imshow(contribution_table, color_continuous_scale="Reds", aspect="equal")

pxs_per_sqr = 50 # Number of pixels per square in the heatmap
-plot2bar_space = 2 # Number of pixels between the heatmap and the barplot
-width, height = (len(col_labels) + plot2bar_space) * pxs_per_sqr, len(row_labels) * pxs_per_sqr
-
-fig = px.imshow(new_df, color_continuous_scale="Reds", aspect="auto")
-fig.update_layout(autosize=False, width=width, height=height)
+height = n_studies * pxs_per_sqr
+fig.update_layout(autosize=True, height=height)
fig.write_html(out_filename, full_html=True, include_plotlyjs=True)


@@ -340,12 +341,16 @@ def plot_clusters(img, out_filename):

template = datasets.load_mni152_template(resolution=1)

+# Define cmap depending on the number of clusters
+clust_ids = list(np.unique(img.get_fdata())[1:])
+cmap = plt.cm.get_cmap("tab20", len(clust_ids))
+
fig = plot_roi(
img,
bg_img=template,
black_bg=False,
draw_cross=False,
-cmap="tab20",
+cmap=cmap,
alpha=0.8,
colorbar=True,
display_mode="mosaic",
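As a hypothetical usage example of the updated plot_heatmap (not part of the commit): with a 2 x 2 contribution table the hierarchical reordering is skipped, since reordering now only runs when there are more than two rows and two columns, and the figure height is derived from the number of rows. The table values and file name below are made up.

import pandas as pd

from nimare.reports.figures import plot_heatmap

# Toy contribution table: rows are experiments, columns are clusters.
table = pd.DataFrame(
    {"PositiveTail 1": [0.6, 0.4], "PositiveTail 2": [0.1, 0.9]},
    index=["study-01", "study-02"],
)
plot_heatmap(table, "contribution_heatmap.html")  # output must be an .html file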

‎nimare/reports/report.tpl

Lines changed: 6 additions & 3 deletions
@@ -10,7 +10,8 @@
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" integrity="sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO" crossorigin="anonymous">
<style type="text/css">
.sub-report-title {}
-.run-title {}
+.sub-title {}
+.sub-sub-title {}

h1 { padding-top: 35px; }
h2 { padding-top: 20px; }
@@ -28,7 +29,7 @@ div.elem-image {
page-break-before:always;
}

-.elem-image object.svg-reportlet {
+.elem-image object.png-reportlet {
width: 100%;
padding-bottom: 5px;
}
@@ -42,6 +43,7 @@ body {
width: 100%;
padding-top: 50%;
}
+
.igraph {
position: absolute;
border: none;
@@ -108,11 +110,12 @@ div#boilerplate pre {
{% for run_report in sub_report.reportlets %}
<div id="{{run_report.name}}">
{% if run_report.title %}<h2 class="sub-report-group">{{ run_report.title }}</h2>{% endif %}
-{% if run_report.subtitle %}<h3 class="run-title">{{ run_report.subtitle }}</h3>{% endif %}
+{% if run_report.subtitle %}<h3 class="sub-title">{{ run_report.subtitle }}</h3>{% endif %}
{% if run_report.description %}<p class="elem-desc">{{ run_report.description }}</p>{% endif %}
{% for elem in run_report.components %}
{% if elem[0] %}
{% if elem[1] %}<p class="elem-caption">{{ elem[1] }}</p>{% endif %}
+{% if run_report.subsubtitle %}<h4 class="sub-sub-title">{{ run_report.subsubtitle }}</h4>{% endif %}
{{ elem[0] }}
{% endif %}
{% endfor %}

‎nimare/tests/test_diagnostics.py

Lines changed: 52 additions & 42 deletions
@@ -10,17 +10,17 @@


@pytest.mark.parametrize(
-"estimator,meta_type,n_samples,target_image",
+"estimator,meta_type,n_samples,target_image,voxel_thresh",
[
-(cbma.ALE, "cbma", "onesample", "z"),
-(cbma.MKDADensity, "cbma", "onesample", "z"),
-(cbma.KDA, "cbma", "onesample", "z"),
-(cbma.MKDAChi2, "cbma", "twosample", "z_desc-consistency"),
-(ibma.Fishers, "ibma", "onesample", "z"),
-(ibma.Stouffers, "ibma", "onesample", "z"),
-(ibma.WeightedLeastSquares, "ibma", "onesample", "z"),
-(ibma.DerSimonianLaird, "ibma", "onesample", "z"),
-(ibma.Hedges, "ibma", "onesample", "z"),
+(cbma.ALE, "cbma", "onesample", "z", 1.65),
+(cbma.MKDADensity, "cbma", "onesample", "z", 1.65),
+(cbma.KDA, "cbma", "onesample", "z", 1.65),
+(cbma.MKDAChi2, "cbma", "twosample", "z_desc-consistency", 1.65),
+(ibma.Fishers, "ibma", "onesample", "z", 0.1),
+(ibma.Stouffers, "ibma", "onesample", "z", 0.1),
+(ibma.WeightedLeastSquares, "ibma", "onesample", "z", 0.1),
+(ibma.DerSimonianLaird, "ibma", "onesample", "z", 0.1),
+(ibma.Hedges, "ibma", "onesample", "z", 0.1),
# (ibma.SampleSizeBasedLikelihood, "ibma", "onesample", "z"),
# (ibma.VarianceBasedLikelihood, "ibma", "onesample", "z"),
# (ibma.PermutedOLS, "ibma", "onesample", "z"),
@@ -33,31 +33,36 @@ def test_jackknife_smoke(
meta_type,
n_samples,
target_image,
+voxel_thresh,
):
"""Smoke test the Jackknife method."""
+dset1 = testdata_cbma_full.slice(testdata_cbma_full.ids[:10])
+dset2 = testdata_cbma_full.slice(testdata_cbma_full.ids[10:])
+
meta = estimator()
testdata = testdata_ibma if meta_type == "ibma" else testdata_cbma_full
-if n_samples == "twosample":
-res = meta.fit(testdata, testdata)
-else:
-res = meta.fit(testdata)
+res = meta.fit(dset1, dset2) if n_samples == "twosample" else meta.fit(testdata)

-jackknife = diagnostics.Jackknife(target_image=target_image, voxel_thresh=1.65)
+jackknife = diagnostics.Jackknife(target_image=target_image, voxel_thresh=voxel_thresh)
results = jackknife.transform(res)

image_name = "_".join(target_image.split("_")[1:])
-image_name = "_" + image_name if image_name else image_name
-contribution_table = results.tables[f"{target_image}_diag-Jackknife_tab-counts"]
+image_name = f"_{image_name}" if image_name else image_name
+
+# For ibma.WeightedLeastSquares we have both positive and negative tail combined.
+contribution_table = (
+results.tables[f"{target_image}_diag-Jackknife_tab-counts"]
+if estimator == ibma.WeightedLeastSquares
+else results.tables[f"{target_image}_diag-Jackknife_tab-counts_tail-positive"]
+)
+
clusters_table = results.tables[f"{target_image}_tab-clust"]
label_maps = results.maps[f"label{image_name}_tail-positive"]
-if n_samples == "twosample":
-assert contribution_table is None
-assert not clusters_table.empty
-assert label_maps is None
-else:
-assert contribution_table.shape[0] == len(meta.inputs_["id"])
-assert clusters_table.shape[0] >= contribution_table.shape[1] - 1
-assert len(label_maps) > 0
+ids_ = meta.inputs_["id"] if n_samples == "onesample" else meta.inputs_["id1"]
+
+assert contribution_table.shape[0] == len(ids_)
+assert clusters_table.shape[0] >= contribution_table.shape[1] - 1
+assert len(label_maps) > 0


def test_jackknife_with_zero_clusters(testdata_cbma_full):
@@ -90,7 +95,7 @@ def test_jackknife_with_custom_masker_smoke(testdata_ibma):

jackknife = diagnostics.Jackknife(target_image="z", voxel_thresh=0.5)
results = jackknife.transform(res)
-contribution_table = results.tables["z_diag-Jackknife_tab-counts"]
+contribution_table = results.tables["z_diag-Jackknife_tab-counts_tail-positive"]
assert contribution_table.shape[0] == len(meta.inputs_["id"])

# A Jackknife with a target_image that isn't present in the MetaResult raises a ValueError.
@@ -106,6 +111,7 @@ def test_jackknife_with_custom_masker_smoke(testdata_ibma):
(cbma.MKDADensity, "cbma", "onesample", "z"),
(cbma.KDA, "cbma", "onesample", "z"),
(cbma.MKDAChi2, "cbma", "twosample", "z_desc-consistency"),
+(ibma.Stouffers, "ibma", "onesample", "z"),
],
)
def test_focuscounter_smoke(
@@ -117,27 +123,31 @@ def test_focuscounter_smoke(
target_image,
):
"""Smoke test the FocusCounter method."""
+dset1 = testdata_cbma_full.slice(testdata_cbma_full.ids[:10])
+dset2 = testdata_cbma_full.slice(testdata_cbma_full.ids[10:])
+
meta = estimator()
testdata = testdata_ibma if meta_type == "ibma" else testdata_cbma_full
-if n_samples == "twosample":
-res = meta.fit(testdata, testdata)
-else:
-res = meta.fit(testdata)
+res = meta.fit(dset1, dset2) if n_samples == "twosample" else meta.fit(testdata)

counter = diagnostics.FocusCounter(target_image=target_image, voxel_thresh=1.65)
-results = counter.transform(res)
-
-image_name = "_".join(target_image.split("_")[1:])
-image_name = "_" + image_name if image_name else image_name
-contribution_table = results.tables[f"{target_image}_diag-FocusCounter_tab-counts"]
-clusters_table = results.tables[f"{target_image}_tab-clust"]
-label_maps = results.maps[f"label{image_name}_tail-positive"]
-if n_samples == "twosample":
-assert contribution_table is None
-assert not clusters_table.empty
-assert label_maps is None
+if meta_type == "ibma":
+with pytest.raises(ValueError):
+counter.transform(res)
else:
-assert contribution_table.shape[0] == len(meta.inputs_["id"])
+results = counter.transform(res)
+
+image_name = "_".join(target_image.split("_")[1:])
+image_name = f"_{image_name}" if image_name else image_name
+
+contribution_table = results.tables[
+f"{target_image}_diag-FocusCounter_tab-counts_tail-positive"
+]
+clusters_table = results.tables[f"{target_image}_tab-clust"]
+label_maps = results.maps[f"label{image_name}_tail-positive"]
+ids_ = meta.inputs_["id"] if n_samples == "onesample" else meta.inputs_["id1"]
+
+assert contribution_table.shape[0] == len(ids_)
assert clusters_table.shape[0] >= contribution_table.shape[1] - 1
assert len(label_maps) > 0

‎nimare/tests/test_reports.py

Lines changed: 33 additions & 8 deletions
@@ -3,16 +3,29 @@

import pytest

-from nimare import workflows
from nimare.correct import FWECorrector
+from nimare.diagnostics import FocusCounter, Jackknife
+from nimare.meta.cbma import ALESubtraction
+from nimare.meta.cbma.base import PairwiseCBMAEstimator
from nimare.reports.base import run_reports
+from nimare.workflows import CBMAWorkflow, PairwiseCBMAWorkflow


@pytest.mark.parametrize(
"estimator,corrector,diagnostics",
[
("ale", FWECorrector(method="montecarlo", n_iters=10), "jackknife"),
("kda", "fdr", "focuscounter"),
+(
+"mkdachi2",
+FWECorrector(method="montecarlo", n_iters=10),
+Jackknife(voxel_thresh=0.1),
+),
+(
+ALESubtraction(n_iters=10),
+"fdr",
+FocusCounter(voxel_thresh=0.01, display_second_group=True),
+),
],
)
def test_reports_function_smoke(
@@ -24,13 +37,25 @@ def test_reports_function_smoke(
):
"""Run smoke test for CBMA workflow."""
tmpdir = tmp_path_factory.mktemp("test_reports_function_smoke")
-results = workflows.cbma_workflow(
-testdata_cbma_full,
-estimator=estimator,
-corrector=corrector,
-diagnostics=diagnostics,
-output_dir=tmpdir,
-)
+if estimator == "mkdachi2" or issubclass(type(estimator), PairwiseCBMAEstimator):
+dset1 = testdata_cbma_full.slice(testdata_cbma_full.ids[:10])
+dset2 = testdata_cbma_full.slice(testdata_cbma_full.ids[10:])
+
+workflow = PairwiseCBMAWorkflow(
+estimator=estimator,
+corrector=corrector,
+diagnostics=diagnostics,
+output_dir=tmpdir,
+)
+results = workflow.fit(dset1, dset2)
+else:
+workflow = CBMAWorkflow(
+estimator=estimator,
+corrector=corrector,
+diagnostics=diagnostics,
+output_dir=tmpdir,
+)
+results = workflow.fit(testdata_cbma_full)

run_reports(results, tmpdir)

‎nimare/tests/test_workflows.py

Lines changed: 71 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -7,9 +7,10 @@
 from nimare import cli, workflows
 from nimare.correct import FWECorrector
 from nimare.diagnostics import FocusCounter, Jackknife
-from nimare.meta.cbma import ALE, MKDAChi2
+from nimare.meta.cbma import ALE, ALESubtraction, MKDAChi2
 from nimare.meta.ibma import Fishers
 from nimare.tests.utils import get_test_data_path
+from nimare.workflows import CBMAWorkflow, PairwiseCBMAWorkflow
 
 
 def test_ale_workflow_function_smoke(tmp_path_factory):
@@ -102,7 +103,7 @@ def test_ale_workflow_cli_smoke_2(tmp_path_factory):
         (Fishers, "montecarlo", "jackknife"),
     ],
 )
-def test_cbma_workflow_function_smoke(
+def test_cbma_workflow_smoke(
     tmp_path_factory,
     testdata_cbma_full,
     estimator,
@@ -114,51 +115,93 @@ def test_cbma_workflow_function_smoke(
 
     if estimator == MKDAChi2:
         with pytest.raises(AttributeError):
-            workflows.cbma_workflow(
-                testdata_cbma_full,
-                estimator=estimator,
-                corrector=corrector,
-                diagnostics=diagnostics,
-            )
+            CBMAWorkflow(estimator=estimator, corrector=corrector, diagnostics=diagnostics)
     elif estimator == Fishers:
         with pytest.raises((AttributeError, ValueError)):
-            workflows.cbma_workflow(
-                testdata_cbma_full,
-                estimator=estimator,
-                corrector=corrector,
-                diagnostics=diagnostics,
-            )
+            CBMAWorkflow(estimator=estimator, corrector=corrector, diagnostics=diagnostics)
     elif estimator == "ales":
         with pytest.raises(ValueError):
-            workflows.cbma_workflow(
-                testdata_cbma_full,
-                estimator=estimator,
-                corrector=corrector,
-                diagnostics=diagnostics,
-            )
+            CBMAWorkflow(estimator=estimator, corrector=corrector, diagnostics=diagnostics)
     else:
-        cres = workflows.cbma_workflow(
-            testdata_cbma_full,
+        workflow = CBMAWorkflow(
             estimator=estimator,
             corrector=corrector,
             diagnostics=diagnostics,
             output_dir=tmpdir,
         )
+        cres = workflow.fit(testdata_cbma_full)
 
         assert isinstance(cres, nimare.results.MetaResult)
         assert op.isfile(op.join(tmpdir, "boilerplate.txt"))
         assert op.isfile(op.join(tmpdir, "references.bib"))
 
         for imgtype in cres.maps.keys():
-            filename = imgtype + ".nii.gz"
+            filename = f"{imgtype}.nii.gz"
             outpath = op.join(tmpdir, filename)
-            # For estimator == ALE, maps are None
-            if estimator != ALE:
+            # For ALE maps are None
+            if not cres.maps[imgtype] is None:
                 assert op.isfile(outpath)
 
         for tabletype in cres.tables.keys():
-            filename = tabletype + ".tsv"
+            filename = f"{tabletype}.tsv"
             outpath = op.join(tmpdir, filename)
-            # For estimator == ALE, tables are None
-            if estimator != ALE:
+            # For ALE tables are None
+            if not cres.tables[tabletype] is None:
+                assert op.isfile(outpath)
+
+
+@pytest.mark.parametrize(
+    "estimator,corrector,diagnostics",
+    [
+        (MKDAChi2, FWECorrector(method="montecarlo", n_iters=10), [FocusCounter]),
+        ("mkdachi", "bonferroni", FocusCounter),
+        ("mkdachi2", "bonferroni", "jackknife"),
+        (ALESubtraction(n_iters=10), "fdr", Jackknife(voxel_thresh=0.01)),
+        (ALE, "montecarlo", None),
+        (Fishers, "montecarlo", "jackknife"),
+    ],
+)
+def test_pairwise_cbma_workflow_smoke(
+    tmp_path_factory,
+    testdata_cbma_full,
+    estimator,
+    corrector,
+    diagnostics,
+):
+    """Run smoke test for CBMA workflow."""
+    tmpdir = tmp_path_factory.mktemp("test_cbma_workflow_function_smoke")
+
+    dset1 = testdata_cbma_full.slice(testdata_cbma_full.ids[:10])
+    dset2 = testdata_cbma_full.slice(testdata_cbma_full.ids[10:])
+    if estimator in [ALE, "mkdachi"]:
+        with pytest.raises(ValueError):
+            PairwiseCBMAWorkflow(estimator=estimator, corrector=corrector, diagnostics=diagnostics)
+    elif estimator == Fishers:
+        with pytest.raises((AttributeError, ValueError)):
+            PairwiseCBMAWorkflow(estimator=estimator, corrector=corrector, diagnostics=diagnostics)
+    else:
+        workflow = PairwiseCBMAWorkflow(
+            estimator=estimator,
+            corrector=corrector,
+            diagnostics=diagnostics,
+            output_dir=tmpdir,
+        )
+        cres = workflow.fit(dset1, dset2)
+
+        assert isinstance(cres, nimare.results.MetaResult)
+        assert op.isfile(op.join(tmpdir, "boilerplate.txt"))
+        assert op.isfile(op.join(tmpdir, "references.bib"))
+
+        for imgtype in cres.maps.keys():
+            filename = f"{imgtype}.nii.gz"
+            outpath = op.join(tmpdir, filename)
+            # For MKDAChi2 maps are None
+            if cres.maps[imgtype] is not None:
+                assert op.isfile(outpath)
+
+        for tabletype in cres.tables.keys():
+            filename = f"{tabletype}.tsv"
+            outpath = op.join(tmpdir, filename)
+            # For MKDAChi2 tables are None
+            if cres.tables[tabletype] is not None:
                 assert op.isfile(outpath)
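As the updated tests show, the workflow is now constructed once and then fit to a Dataset. A minimal usage sketch (not part of this commit; the dataset path is a placeholder):

from nimare.correct import FWECorrector
from nimare.dataset import Dataset
from nimare.workflows import CBMAWorkflow

dset = Dataset("my_dataset.json")  # placeholder path for any coordinate-based Dataset

# Estimator, corrector, and diagnostics accept either strings or initialized objects.
workflow = CBMAWorkflow(
    estimator="mkdadensity",
    corrector=FWECorrector(method="montecarlo", n_iters=10),
    diagnostics="jackknife",
    output_dir="outputs",  # maps, tables, boilerplate.txt, and references.bib are written here
)
cres = workflow.fit(dset)  # returns a nimare.results.MetaResult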

‎nimare/workflows/__init__.py

Lines changed: 7 additions & 2 deletions
@@ -1,7 +1,12 @@
 """Common meta-analytic workflows."""
 
 from .ale import ale_sleuth_workflow
-from .cbma import cbma_workflow
+from .cbma import CBMAWorkflow, PairwiseCBMAWorkflow
 from .macm import macm_workflow
 
-__all__ = ["ale_sleuth_workflow", "cbma_workflow", "macm_workflow"]
+__all__ = [
+    "ale_sleuth_workflow",
+    "CBMAWorkflow",
+    "PairwiseCBMAWorkflow",
+    "macm_workflow",
+]
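In other words, the package-level import changes from the removed function to the new classes (illustrative sketch, not part of the diff):

# from nimare.workflows import cbma_workflow  # removed by this commit
from nimare.workflows import CBMAWorkflow, PairwiseCBMAWorkflow  # new class-based API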

‎nimare/workflows/ale.py

Lines changed: 11 additions & 3 deletions
@@ -25,6 +25,11 @@ def ale_sleuth_workflow(
     n_cores=1,
 ):
     """Perform ALE meta-analysis from Sleuth text file."""
+    LGR.warning(
+        "The ale_sleuth_workflow function is deprecated and will be removed in release 0.1.3. "
+        "Use CBMAWorkflow or PairwiseCBMAWorkflow instead."
+    )
+
     LGR.info("Loading coordinates...")
 
     if not sleuth_file2:
@@ -50,7 +55,8 @@
         )
         cres = fcounter.transform(cres)
         count_df = cres.tables[
-            "z_desc-size_level-cluster_corr-FWE_method-montecarlo_diag-FocusCounter_tab-counts"
+            "z_desc-size_level-cluster_corr-FWE_method-montecarlo_diag-FocusCounter"
+            "_tab-counts_tail-positive"
         ]
         boilerplate = cres.description_
         bibtex = cres.bibtex_
@@ -85,15 +91,17 @@
         )
         cres1 = fcounter.transform(cres1)
         count_df1 = cres1.tables[
-            "z_desc-size_level-cluster_corr-FWE_method-montecarlo_diag-FocusCounter_tab-counts"
+            "z_desc-size_level-cluster_corr-FWE_method-montecarlo_diag-FocusCounter"
+            "_tab-counts_tail-positive"
         ]
 
         cres2 = corr.transform(res2)
         boilerplate += "\n" + cres2.description_
 
         cres2 = fcounter.transform(cres2)
         count_df2 = cres2.tables[
-            "z_desc-size_level-cluster_corr-FWE_method-montecarlo_diag-FocusCounter_tab-counts"
+            "z_desc-size_level-cluster_corr-FWE_method-montecarlo_diag-FocusCounter"
+            "_tab-counts_tail-positive"
         ]
 
         sub = ALESubtraction(n_iters=n_iters, kernel__fwhm=fwhm)
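The table key now carries a tail suffix because FocusCounter contribution tables are split into positive and negative tails. A small helper (a sketch, not from the commit; the key is copied verbatim from the diff above) shows how the positive-tail counts would be pulled from a montecarlo-corrected result:

def get_positive_tail_counts(cres):
    """Return the FocusCounter counts table for the positive tail of a MetaResult."""
    return cres.tables[
        "z_desc-size_level-cluster_corr-FWE_method-montecarlo_diag-FocusCounter"
        "_tab-counts_tail-positive"
    ]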

‎nimare/workflows/base.py

Lines changed: 178 additions & 0 deletions
@@ -0,0 +1,178 @@
+"""Base class for workflow."""
+import copy
+import itertools
+import logging
+import os.path as op
+from abc import abstractmethod
+
+from nimare.base import NiMAREBase
+from nimare.correct import Corrector, FDRCorrector, FWECorrector
+from nimare.diagnostics import Diagnostics, FocusCounter, Jackknife
+from nimare.meta import ALE, KDA, SCALE, ALESubtraction, MKDAChi2, MKDADensity
+from nimare.meta.cbma.base import CBMAEstimator, PairwiseCBMAEstimator
+from nimare.utils import _check_ncores, _check_type
+
+LGR = logging.getLogger(__name__)
+
+
+def _str_to_class(str_name):
+    """Match a string to a class name without initializing the class."""
+    classes = {
+        "ale": ALE,
+        "scale": SCALE,
+        "mkdadensity": MKDADensity,
+        "kda": KDA,
+        "mkdachi2": MKDAChi2,
+        "alesubtraction": ALESubtraction,
+        "montecarlo": FWECorrector,
+        "fdr": FDRCorrector,
+        "bonferroni": FWECorrector,
+        "jackknife": Jackknife,
+        "focuscounter": FocusCounter,
+    }
+    return classes[str_name]
+
+
+def _check_input(obj, clss, options, **kwargs):
+    """Check input for workflow functions."""
+    if isinstance(obj, str):
+        if obj not in options:
+            raise ValueError(f'"{obj}" of kind string must be {", ".join(options)}')
+
+        # Get the class from the string
+        obj_str = obj
+        obj = _str_to_class(obj_str)
+
+        # Add the method to the kwargs if it's a FWECorrector
+        if obj == FWECorrector:
+            kwargs["method"] = obj_str
+
+    return _check_type(obj, clss, **kwargs)
+
+
+class Workflow(NiMAREBase):
+    """Base class for workflow methods.
+
+    .. versionadded:: 0.1.2
+    """
+
+    def __init__(
+        self,
+        estimator=None,
+        corrector=None,
+        diagnostics=None,
+        voxel_thresh=1.65,
+        cluster_threshold=10,
+        output_dir=None,
+        n_cores=1,
+    ):
+        self.voxel_thresh = voxel_thresh
+        self.cluster_threshold = cluster_threshold
+        self.output_dir = output_dir
+        self.n_cores = _check_ncores(n_cores)
+        self._preprocess_input(estimator, corrector, diagnostics)
+
+    def _preprocess_input(self, estimator, corrector, diagnostics):
+        pairwaise_workflow = self.__class__.__name__ == "PairwiseCBMAWorkflow"
+        estm_base = PairwiseCBMAEstimator if pairwaise_workflow else CBMAEstimator
+
+        if not isinstance(diagnostics, list) and diagnostics is not None:
+            diagnostics = [diagnostics]
+
+        # Check inputs and set defaults if input is None
+        default_esimator = (
+            MKDAChi2(n_cores=self.n_cores) if pairwaise_workflow else ALE(n_cores=self.n_cores)
+        )
+        estimator = (
+            default_esimator
+            if estimator is None
+            else _check_input(estimator, estm_base, self._estm_options, n_cores=self.n_cores)
+        )
+
+        corrector = (
+            FWECorrector(method="montecarlo", n_cores=self.n_cores)
+            if corrector is None
+            else _check_input(corrector, Corrector, self._corr_options, n_cores=self.n_cores)
+        )
+
+        diag_kwargs = {
+            "voxel_thresh": self.voxel_thresh,
+            "cluster_threshold": self.cluster_threshold,
+            "n_cores": self.n_cores,
+        }
+        if diagnostics is None:
+            diagnostics = [Jackknife(**diag_kwargs)]
+        else:
+            diagnostics = [
+                _check_input(diagnostic, Diagnostics, self._diag_options, **diag_kwargs)
+                for diagnostic in diagnostics
+            ]
+
+        if (not pairwaise_workflow) and isinstance(estimator, PairwiseCBMAEstimator):
+            raise AttributeError('"CBMAWorkflow" does not work with pairwise Estimators.')
+
+        self.estimator = estimator
+        self.corrector = corrector
+        self.diagnostics = diagnostics
+
+    @abstractmethod
+    def fit(self, dataset):
+        """Apply estimation to dataset and output results."""
+
+    def _transform(self, result):
+        """Implement the correction procedure and perform diagnostics.
+
+        Parameters
+        ----------
+        result : :obj:`~nimare.results.MetaResult`
+            MetaResult object from which to extract the p value map and Estimator.
+
+        Returns
+        -------
+        :obj:`~nimare.results.MetaResult`
+            Results of Estimator, Corrector, and Diagnostics fitting with label maps,
+            cluster and diagnostic tables.
+        """
+        LGR.info("Performing correction on meta-analysis...")
+        corr_result = self.corrector.transform(result)
+
+        LGR.info("Performing diagnostics on corrected meta-analyses...")
+        # Perform diagnostic only on desc-mass when using montecarlo correction
+        corr_method = corr_result.get_params()["corrector__method"]
+
+        if issubclass(type(result.estimator), PairwiseCBMAEstimator):
+            modalities = (
+                ["_desc-specificityMass", "_corr-"]
+                if corr_method == "montecarlo"
+                else ["_desc-", "_corr-"]
+            )
+        else:
+            modalities = ["_desc-mass", "_corr-"] if corr_method == "montecarlo" else ["_corr-"]
+
+        img_keys = [
+            img_key
+            for img_key in corr_result.maps.keys()
+            if img_key.startswith("z_") and all(mod in img_key for mod in modalities)
+        ]
+
+        for img_key, diagnostic in itertools.product(img_keys, self.diagnostics):
+            # Work on copy of diagnostic:
+            diagnostic_cp = copy.deepcopy(diagnostic)
+            diagnostic_cp = diagnostic_cp.set_params(target_image=img_key)
+            corr_result = diagnostic_cp.transform(corr_result)
+
+        if self.output_dir is not None:
+            LGR.info(f"Saving meta-analytic maps, tables and boilerplate to {self.output_dir}...")
+            corr_result.save_maps(output_dir=self.output_dir)
+            corr_result.save_tables(output_dir=self.output_dir)
+
+            boilerplate = corr_result.description_
+            with open(op.join(self.output_dir, "boilerplate.txt"), "w") as fo:
+                fo.write(boilerplate)
+
+            bibtex = corr_result.bibtex_
+            with open(op.join(self.output_dir, "references.bib"), "w") as fo:
+                fo.write(bibtex)
+
+        LGR.info("Workflow completed.")
+        return corr_result
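To make the string handling concrete, the sketch below (an illustration, not from the commit) shows roughly what `_check_input` does with a corrector given as a string: the name is mapped to its class by `_str_to_class`, and because that class is `FWECorrector` the string is reused as the `method` keyword before the object is initialized.

from nimare.correct import Corrector
from nimare.workflows.base import _check_input

# Resolves "montecarlo" -> FWECorrector and initializes it, roughly equivalent to
# FWECorrector(method="montecarlo", n_cores=2).
corrector = _check_input("montecarlo", Corrector, ("montecarlo", "fdr", "bonferroni"), n_cores=2)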

‎nimare/workflows/cbma.py

Lines changed: 97 additions & 135 deletions
@@ -1,73 +1,27 @@
 """Workflow for running an coordinates-based meta-analysis from a NiMARE database."""
-import copy
-import itertools
 import logging
-import os.path as op
 
-from nimare.correct import Corrector, FDRCorrector, FWECorrector
 from nimare.dataset import Dataset
-from nimare.diagnostics import Diagnostics, FocusCounter, Jackknife
-from nimare.meta import ALE, KDA, SCALE, MKDADensity
-from nimare.meta.cbma.base import CBMAEstimator, PairwiseCBMAEstimator
-from nimare.utils import _check_ncores, _check_type
+from nimare.utils import _check_type
+from nimare.workflows.base import Workflow
 
 LGR = logging.getLogger(__name__)
 
 
-def _str_to_class(str_name):
-    """Match a string to a class name without initializing the class."""
-    classes = {
-        "ale": ALE,
-        "scale": SCALE,
-        "mkdadensity": MKDADensity,
-        "kda": KDA,
-        "montecarlo": FWECorrector,
-        "fdr": FDRCorrector,
-        "bonferroni": FWECorrector,
-        "jackknife": Jackknife,
-        "focuscounter": FocusCounter,
-    }
-    return classes[str_name]
-
-
-def _check_input(obj, clss, options, **kwargs):
-    """Check input for workflow functions."""
-    if isinstance(obj, str):
-        if obj not in options:
-            raise ValueError(f'"{obj}" of kind string must be {", ".join(options)}')
-
-        # Get the class from the string
-        obj_str = obj
-        obj = _str_to_class(obj_str)
-
-        # Add the method to the kwargs if it's a FWECorrector
-        if obj == FWECorrector:
-            kwargs["method"] = obj_str
-
-    return _check_type(obj, clss, **kwargs)
-
-
-def cbma_workflow(
-    dataset,
-    estimator=None,
-    corrector=None,
-    diagnostics=None,
-    voxel_thresh=1.65,
-    cluster_threshold=10,
-    output_dir=None,
-    n_cores=1,
-):
+class CBMAWorkflow(Workflow):
     """Compose a coordinate-based meta-analysis workflow.
 
+    .. versionchanged:: 0.1.2
+
+        - `cbma_workflow` function was converted to CBMAWorkflow class.
+
     .. versionadded:: 0.0.14
 
     This workflow performs a coordinate-based meta-analysis, multiple comparison corrections,
     and diagnostics analyses on corrected meta-analytic z-score maps.
 
     Parameters
     ----------
-    dataset : :obj:`~nimare.dataset.Dataset`
-        Dataset for which to run meta-analyses to generate maps.
     estimator : :class:`~nimare.base.CBMAEstimator`, :obj:`str` {'ale', 'scale', 'mkdadensity', \
         'kda'}, or optional
         Meta-analysis estimator. Default is :class:`~nimare.meta.cbma.ale.ALE`.
@@ -99,90 +53,98 @@ def cbma_workflow(
         If estimator, corrector, or diagnostics are passed as initialized objects, this parameter
         will be ignored.
         Default is 1.
-
-    Returns
-    -------
-    :obj:`~nimare.results.MetaResult`
-        Results of Estimator, Corrector, and Diagnostics fitting with label maps,
-        cluster and diagnostic tables.
     """
-    n_cores = _check_ncores(n_cores)
 
-    if not isinstance(diagnostics, list) and diagnostics is not None:
-        diagnostics = [diagnostics]
+    # Options allows for string input
+    _estm_options = ("ale", "scale", "mkdadensity", "kda")
+    _corr_options = ("montecarlo", "fdr", "bonferroni")
+    _diag_options = ("jackknife", "focuscounter")
+
+    def fit(self, dataset, drop_invalid=True):
+        """Fit Workflow to a Dataset.
+
+        Parameters
+        ----------
+        dataset : :obj:`~nimare.dataset.Dataset`
+            Dataset to analyze.
+
+        Returns
+        -------
+        :obj:`~nimare.results.MetaResult`
+            Results of Estimator fitting.
+        """
+        # Check dataset type
+        dataset = _check_type(dataset, Dataset)
+
+        LGR.info("Performing meta-analysis...")
+        results = self.estimator.fit(dataset, drop_invalid=drop_invalid)
+
+        return self._transform(results)
+
+
+class PairwiseCBMAWorkflow(Workflow):
+    """Base class for pairwise coordinate-based meta-analysis workflow methods.
 
-    # Check dataset type
-    dataset = _check_type(dataset, Dataset)
+    .. versionadded:: 0.1.2
+
+    Parameters
+    ----------
+    estimator : :class:`~nimare.base.CBMAEstimator`, :obj:`str` {'alesubtraction', 'mkdachi2', \
+        or optional
+        Meta-analysis estimator. Default is :class:`~nimare.meta.cbma.kda.MKDAChi2`.
+    corrector : :class:`~nimare.correct.Corrector`, :obj:`str` {'montecarlo', 'fdr', \
+        'bonferroni'} or optional
+        Meta-analysis corrector. Default is :class:`~nimare.correct.FWECorrector`.
+    diagnostics : :obj:`list` of :class:`~nimare.diagnostics.Diagnostics`, \
+        :class:`~nimare.diagnostics.Diagnostics`, :obj:`str` {'jackknife', 'focuscounter'}, \
+        or optional
+        List of meta-analysis diagnostic classes. A single diagnostic class can also be passed.
+        Default is :class:`~nimare.diagnostics.FocusCounter`.
+    voxel_thresh : :obj:`float` or None, optional
+        An optional voxel-level threshold that may be applied to the ``target_image`` in the
+        :class:`~nimare.diagnostics.Diagnostics` class to define clusters. This can be None or 0
+        if the ``target_image`` is already thresholded (e.g., a cluster-level corrected map).
+        If diagnostics are passed as initialized objects, this parameter will be ignored.
+        Default is 1.65, which corresponds to p-value = .05, one-tailed.
+    cluster_threshold : :obj:`int` or None, optional
+        Cluster size threshold, in :term:`voxels<voxel>`.
+        If None, then no cluster size threshold will be applied.
+        If diagnostics are passed as initialized objects, this parameter will be ignored.
+        Default is 10.
+    output_dir : :obj:`str`, optional
+        Output directory in which to save results. If the directory doesn't
+        exist, it will be created. Default is None (the results are not saved).
+    n_cores : :obj:`int`, optional
+        Number of cores to use for parallelization.
+        If <=0, defaults to using all available cores.
+        If estimator, corrector, or diagnostics are passed as initialized objects, this parameter
+        will be ignored.
+        Default is 1.
+    """
 
     # Options allows for string input
-    estm_options = ("ale", "scale", "mkdadensity", "kda")
-    corr_options = ("montecarlo", "fdr", "bonferroni")
-    diag_options = ("jackknife", "focuscounter")
-
-    # Check inputs and set defaults if input is None
-    estimator = (
-        ALE(n_cores=n_cores)
-        if estimator is None
-        else _check_input(estimator, CBMAEstimator, estm_options, n_cores=n_cores)
-    )
-    corrector = (
-        FWECorrector(method="montecarlo", n_cores=n_cores)
-        if corrector is None
-        else _check_input(corrector, Corrector, corr_options, n_cores=n_cores)
-    )
-
-    diag_kwargs = {
-        "voxel_thresh": voxel_thresh,
-        "cluster_threshold": cluster_threshold,
-        "n_cores": n_cores,
-    }
-    if diagnostics is None:
-        diagnostics = [Jackknife(**diag_kwargs)]
-    else:
-        diagnostics = [
-            _check_input(diagnostic, Diagnostics, diag_options, **diag_kwargs)
-            for diagnostic in diagnostics
-        ]
-
-    if isinstance(estimator, PairwiseCBMAEstimator):
-        raise AttributeError(
-            'The "cbma_workflow" function does not currently work with pairwise Estimators.'
-        )
-
-    LGR.info("Performing meta-analysis...")
-    results = estimator.fit(dataset)
-
-    LGR.info("Performing correction on meta-analysis...")
-    corr_results = corrector.transform(results)
-
-    LGR.info("Generating clusters tables and performing diagnostics on corrected meta-analyses...")
-    # Perform diagnostic only on desc-mass when using montecarlo correction
-    corr_method = corr_results.get_params()["corrector__method"]
-    modalities = ["_desc-mass", "_corr-"] if corr_method == "montecarlo" else ["_corr-"]
-    img_keys = [
-        img_key
-        for img_key in corr_results.maps.keys()
-        if img_key.startswith("z_") and all(mod in img_key for mod in modalities)
-    ]
-
-    for img_key, diagnostic in itertools.product(img_keys, diagnostics):
-        # Work on copy of diagnostic:
-        diagnostic_cp = copy.deepcopy(diagnostic)
-        diagnostic_cp = diagnostic_cp.set_params(target_image=img_key)
-        corr_results = diagnostic_cp.transform(corr_results)
-
-    if output_dir is not None:
-        LGR.info(f"Saving meta-analytic maps, tables and boilerplate to {output_dir}...")
-        corr_results.save_maps(output_dir=output_dir)
-        corr_results.save_tables(output_dir=output_dir)
-
-        boilerplate = corr_results.description_
-        with open(op.join(output_dir, "boilerplate.txt"), "w") as fo:
-            fo.write(boilerplate)
-
-        bibtex = corr_results.bibtex_
-        with open(op.join(output_dir, "references.bib"), "w") as fo:
-            fo.write(bibtex)
-
-    LGR.info("Workflow completed.")
-    return corr_results
+    _estm_options = ("alesubtraction", "mkdachi2")
+    _corr_options = ("montecarlo", "fdr", "bonferroni")
+    _diag_options = ("jackknife", "focuscounter")
+
+    def fit(self, dataset1, dataset2, drop_invalid=True):
+        """Fit Workflow to two Datasets.
+
+        Parameters
+        ----------
+        dataset1/dataset2 : :obj:`~nimare.dataset.Dataset`
+            Dataset objects to analyze.
+
+        Returns
+        -------
+        :obj:`~nimare.results.MetaResult`
+            Results of Estimator fitting.
+        """
+        # Check dataset type
+        dataset1 = _check_type(dataset1, Dataset)
+        dataset2 = _check_type(dataset2, Dataset)
+
+        LGR.info("Performing meta-analysis...")
+        results = self.estimator.fit(dataset1, dataset2, drop_invalid=drop_invalid)
+
+        return self._transform(results)
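A minimal pairwise run mirroring the tests (a sketch, not part of this commit; the dataset path is a placeholder):

from nimare.dataset import Dataset
from nimare.workflows import PairwiseCBMAWorkflow

dset = Dataset("my_dataset.json")  # placeholder: any coordinate-based NiMARE Dataset
dset1 = dset.slice(dset.ids[:10])
dset2 = dset.slice(dset.ids[10:])

workflow = PairwiseCBMAWorkflow(
    estimator="mkdachi2",  # or "alesubtraction", or an initialized PairwiseCBMAEstimator
    corrector="fdr",
    diagnostics="focuscounter",
    output_dir="outputs",
)
results = workflow.fit(dset1, dset2)  # MetaResult with corrected maps and diagnostic tables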
