[DATALAD RUNCMD] run codespell throughout fixing typo automagically
=== Do not change lines below ===
{
 "chain": [],
 "cmd": "codespell -w",
 "exit": 0,
 "extra_inputs": [],
 "inputs": [],
 "outputs": [],
 "pwd": "."
}
^^^ Do not change lines above ^^^
yarikoptic committed Nov 17, 2023
1 parent 5504535 commit 7b09601
Showing 43 changed files with 128 additions and 128 deletions.
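The JSON block in the commit message above is a DataLad run record: it stores the exact command so the change can be re-executed and audited. A minimal sketch of how such a record is produced through DataLad's Python API (assuming a DataLad-managed dataset; `datalad.api.run` mirrors the `datalad run` CLI):

```python
import datalad.api as dl

# Execute the command inside the dataset; DataLad embeds the command and
# its inputs/outputs in the commit message, yielding a record like the
# JSON block above.
dl.run(cmd="codespell -w",
       message="run codespell throughout fixing typo automagically")
```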
2 changes: 1 addition & 1 deletion CONTRIBUTING.md
@@ -31,7 +31,7 @@ The workflow for contributing to Caiman is roughly illustrated by the numbers in
Below we have instructions on how to do all of the above steps. While all of this may seem like a lot, some of the steps are extremely simple. Also, once you have done it once, you will have the recipe and it will be pretty easy. Finally, it is a very rewarding experience to contribute to an open source project -- we hope you'll take the plunge!

## First, create a dedicated development environment
-If you have downloaded Caiman for standard use, you probably installed it using `conda` or `mamba` as described on the README page. As a contributor, you will want to set up a dedicated development environment. This means you will be setting up a version of Caiman you will edit and tweak, uncoupled from your main installation for everyday use. To set up a development environment so you can follow the worflow outlined above, do the following:
+If you have downloaded Caiman for standard use, you probably installed it using `conda` or `mamba` as described on the README page. As a contributor, you will want to set up a dedicated development environment. This means you will be setting up a version of Caiman you will edit and tweak, uncoupled from your main installation for everyday use. To set up a development environment so you can follow the workflow outlined above, do the following:

1. Fork and clone the caiman repository
Go to the [Caiman repo](https://github.com/flatironinstitute/CaImAn) and hit the `Fork` button at the top right of the page. You now have Caiman on your own GitHub page! On your computer, in your conda prompt, go to a directory where you want Caiman to download, and clone your personal Caiman repo: `git clone https://github.com/<your-username>/CaImAn.git` where <your-username> is replaced by your github username.
2 changes: 1 addition & 1 deletion bin/caiman_gui.py
@@ -109,7 +109,7 @@ def selectionchange(self,i):
estimates.Cn = cm.local_correlations(mov, swap_dim=False)
#Cn = estimates.Cn

-# We rotate our components 90 degrees right because of incompatiability of pyqtgraph and pyplot
+# We rotate our components 90 degrees right because of incompatibility of pyqtgraph and pyplot
def rotate90(img, right=None, vector=None, sparse=False):
# rotate the img 90 degrees
# we first transpose the img then flip axis
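The `rotate90` helper documented in this hunk rotates components 90 degrees right to reconcile pyqtgraph's and pyplot's axis conventions, by transposing and then flipping an axis. A minimal numpy sketch of that idea (a hypothetical standalone helper; the GUI's version also handles vectors and sparse inputs):

```python
import numpy as np

def rotate90_right(img: np.ndarray) -> np.ndarray:
    # Clockwise rotation: transpose the image, then reverse each row.
    return np.flip(img.T, axis=1)

a = np.arange(6).reshape(2, 3)
assert np.array_equal(rotate90_right(a), np.rot90(a, k=-1))  # same result
```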
12 changes: 6 additions & 6 deletions caiman/base/movies.py
@@ -129,7 +129,7 @@ def motion_correct(self,
Returns:
-self: motion corected movie, it might change the object itself
+self: motion corrected movie, it might change the object itself
shifts : tuple, contains x & y shifts and correlation with template
@@ -208,7 +208,7 @@ def motion_correct_3d(self,
Returns:
-self: motion corected movie, it might change the object itself
+self: motion corrected movie, it might change the object itself
shifts : tuple, contains x, y, and z shifts and correlation with template
@@ -535,7 +535,7 @@ def removeBL(self, windowSize:int=100, quantilMin:int=8, in_place:bool=False, re
window size over which to compute the baseline (the larger the faster the algorithm and the less granular
quantilMin: float
-percentil to be used as baseline value
+percentile to be used as baseline value
in_place: bool
update movie in place
returnBL:
@@ -764,7 +764,7 @@ def IPCA(self, components: int = 50, batch: int = 1000) -> tuple[np.ndarray, np.
frame_size = h * w
frame_samples = np.reshape(self, (num_frames, frame_size)).T

-# run IPCA to approxiate the SVD
+# run IPCA to approximate the SVD
ipca_f = sklearn.decomposition.IncrementalPCA(n_components=components, batch_size=batch)
ipca_f.fit(frame_samples)

@@ -1783,7 +1783,7 @@ def sbxread(filename: str, k: int = 0, n_frames=np.inf) -> np.ndarray:
# Determine number of frames in whole file
max_idx = os.path.getsize(filename + '.sbx') / info['recordsPerBuffer'] / info['sz'][1] * factor / 4 - 1

-# Paramters
+# Parameters
N = max_idx + 1 # Last frame
N = np.minimum(N, n_frames)

@@ -1834,7 +1834,7 @@ def sbxreadskip(filename: str, subindices: slice) -> np.ndarray:
# Determine number of frames in whole file
max_idx = int(os.path.getsize(filename + '.sbx') / info['recordsPerBuffer'] / info['sz'][1] * factor / 4 - 1)

-# Paramters
+# Parameters
if isinstance(subindices, slice):
if subindices.start is None:
start = 0
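For context on the `IPCA` hunk: the movie is reshaped to a (pixels x frames) matrix and scikit-learn's `IncrementalPCA` is fit batch by batch to approximate the SVD without loading everything into memory at once. A self-contained sketch with invented shapes:

```python
import numpy as np
import sklearn.decomposition

num_frames, h, w = 100, 16, 16
mov = np.random.rand(num_frames, h, w).astype(np.float32)

# Flatten each frame, then transpose: rows are pixels, columns are frames.
frame_samples = mov.reshape(num_frames, h * w).T

# Fit incrementally, 20 pixel rows at a time, to approximate the SVD.
ipca_f = sklearn.decomposition.IncrementalPCA(n_components=5, batch_size=20)
ipca_f.fit(frame_samples)
print(ipca_f.components_.shape)  # (5, num_frames): temporal eigenvectors
```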
10 changes: 5 additions & 5 deletions caiman/base/rois.py
@@ -720,7 +720,7 @@ def distance_masks(M_s:list, cm_s: list[list], max_dist: float, enclosed_thr: Op
for gt_comp, test_comp, cmgt_comp, cmtest_comp in zip(M_s[:-1], M_s[1:], cm_s[:-1], cm_s[1:]):

# todo : better with a function that calls itself
-# not to interfer with M_s
+# not to interfere with M_s
gt_comp = gt_comp.copy()[:, :]
test_comp = test_comp.copy()[:, :]

@@ -753,8 +753,8 @@ def distance_masks(M_s:list, cm_s: list[list], max_dist: float, enclosed_thr: Op
# if we don't have even a union this is pointless
if union > 0:

-# intersection is removed from union since union contains twice the overlaping area
-# having the values in this format 0-1 is helpfull for the hungarian algorithm that follows
+# intersection is removed from union since union contains twice the overlapping area
+# having the values in this format 0-1 is helpful for the hungarian algorithm that follows
D[i, j] = 1 - 1. * intersection / \
(union - intersection)
if enclosed_thr is not None:
@@ -791,7 +791,7 @@ def find_matches(D_s, print_assignment: bool = False) -> tuple[list, list]:
matches.append(indexes)
DD = D.copy()
total = []
-# we want to extract those informations from the hungarian algo
+# we want to extract those information from the hungarian algo
for row, column in indexes2:
value = DD[row, column]
if print_assignment:
@@ -1095,7 +1095,7 @@ def extract_binary_masks_blob(A,
Args:
A: scipy.sparse matrix
-contains the components as outputed from the CNMF algorithm
+contains the components as outputted from the CNMF algorithm
neuron_radius: float
neuronal radius employed in the CNMF settings (gSiz)
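The `distance_masks`/`find_matches` hunks describe the component-matching pipeline: each cost-matrix entry is a Jaccard distance (in the code above, `union` sums both mask areas, counting the overlap twice, hence the subtraction of `intersection`), and the Hungarian algorithm extracts the optimal assignment from the 0-1 cost matrix. A hedged sketch on toy masks (not the repository's API):

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

def jaccard_distance(m1, m2):
    intersection = np.logical_and(m1, m2).sum()
    union = np.logical_or(m1, m2).sum()   # overlap counted once here
    return 1.0 if union == 0 else 1.0 - intersection / union

# Two ground-truth masks and the same masks in swapped order as "test".
gt = np.zeros((2, 8, 8), dtype=bool)
gt[0, :4, :4] = True
gt[1, 4:, 4:] = True
test = gt[::-1]

# Distances in [0, 1] keep the cost matrix well scaled for the matching.
D = np.array([[jaccard_distance(g, t) for t in test] for g in gt])
rows, cols = linear_sum_assignment(D)     # Hungarian algorithm
print(list(zip(rows, cols)))              # [(0, 1), (1, 0)]
```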
4 changes: 2 additions & 2 deletions caiman/cluster.py
@@ -386,14 +386,14 @@ def setup_cluster(backend:str = 'multiprocessing',
'You have configured the cluster setup to not raise an exception.')
else:
raise Exception(
-'A cluster is already runnning. Terminate with dview.terminate() if you want to restart.')
+'A cluster is already running. Terminate with dview.terminate() if you want to restart.')
if platform.system() == 'Darwin':
try:
if 'kernel' in get_ipython().trait_names(): # type: ignore
# If you're on OSX and you're running under Jupyter or Spyder,
# which already run the code in a forkserver-friendly way, this
# can eliminate some setup and make this a reasonable approach.
-# Otherwise, seting VECLIB_MAXIMUM_THREADS=1 or using a different
+# Otherwise, setting VECLIB_MAXIMUM_THREADS=1 or using a different
# blas/lapack is the way to avoid the issues.
# See https://github.com/flatironinstitute/CaImAn/issues/206 for more
# info on why we're doing this (for now).
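The `setup_cluster` hunk touches the macOS caveat quoted above; outside Jupyter or Spyder, the suggested workaround is to pin vecLib's thread count before numpy is imported. An illustration (not code from the repository):

```python
import os

# Cap Apple's vecLib/Accelerate BLAS at one thread *before* importing
# numpy, so forked worker processes don't deadlock or oversubscribe cores.
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"

import numpy as np  # noqa: E402  -- deliberately imported after the tweak
```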
6 changes: 3 additions & 3 deletions caiman/components_evaluation.py
@@ -64,7 +64,7 @@ def compute_event_exceptionality(traces: np.ndarray,
value estimate of the quality of components (the lesser the better)
erfc: ndarray
-probability at each time step of observing the N consequtive actual trace values given the distribution of noise
+probability at each time step of observing the N consecutive actual trace values given the distribution of noise
noise_est: ndarray
the components ordered according to the fitness
@@ -394,10 +394,10 @@ def evaluate_components(Y: np.ndarray,
value estimate of the quality of components (the lesser the better) on diff(trace)
erfc_raw: ndarray
-probability at each time step of observing the N consequtive actual trace values given the distribution of noise on the raw trace
+probability at each time step of observing the N consecutive actual trace values given the distribution of noise on the raw trace
erfc_raw: ndarray
-probability at each time step of observing the N consequtive actual trace values given the distribution of noise on diff(trace)
+probability at each time step of observing the N consecutive actual trace values given the distribution of noise on diff(trace)
r_values: list
float values representing correlation between component and spatial mask obtained by averaging important points
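The corrected docstrings describe the core of `compute_event_exceptionality`: the probability of observing N consecutive trace values under the noise distribution; the smaller that probability, the less likely the transient is noise. A simplified sketch under a Gaussian noise model (the noise estimator and window handling are invented, not Caiman's):

```python
import numpy as np
from scipy.special import log_ndtr

def event_exceptionality(trace: np.ndarray, N: int = 5) -> np.ndarray:
    # z-score each sample against a crude noise estimate.
    z = (trace - np.median(trace)) / np.std(trace)
    # log P(noise >= value) per sample; log_ndtr(-z) = log(1 - Phi(z)).
    log_tail = log_ndtr(-z)
    # Joint log-probability over N consecutive samples (independence assumed).
    return np.convolve(log_tail, np.ones(N), mode="valid")

trace = np.random.randn(1000)
trace[500:505] += 6.0                      # inject a calcium-like event
print(event_exceptionality(trace).min())   # very negative => unlikely noise
```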
8 changes: 4 additions & 4 deletions caiman/mmapping.py
@@ -358,7 +358,7 @@ def save_memmap(filenames:list[str],
x,y, and z downsampling factors (0.5 means downsampled by a factor 2)
remove_init: int
-number of frames to remove at the begining of each tif file
+number of frames to remove at the beginning of each tif file
(used for resonant scanning images if laser in rutned on trial by trial)
idx_xy: tuple size 2 [or 3 for 3D data]
@@ -449,11 +449,11 @@ def save_memmap(filenames:list[str],
if slices is not None:
Yr = Yr[tuple(slices)]
else:
-if idx_xy is None: #todo remove if not used, superceded by the slices parameter
+if idx_xy is None: #todo remove if not used, superseded by the slices parameter
Yr = Yr[remove_init:]
-elif len(idx_xy) == 2: #todo remove if not used, superceded by the slices parameter
+elif len(idx_xy) == 2: #todo remove if not used, superseded by the slices parameter
Yr = Yr[remove_init:, idx_xy[0], idx_xy[1]]
-else: #todo remove if not used, superceded by the slices parameter
+else: #todo remove if not used, superseded by the slices parameter
Yr = Yr[remove_init:, idx_xy[0], idx_xy[1], idx_xy[2]]

else:
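The `save_memmap` hunks concern Caiman's memory-mapped movie files; the mechanism underneath is `numpy.memmap`, sketched here with a made-up file name and shape:

```python
import numpy as np

fname = "example_d1_16_d2_16_frames_100.mmap"   # hypothetical file name
shape = (16 * 16, 100)                          # (pixels, frames)

# Write once: the data lands on disk rather than staying in RAM.
Yr = np.memmap(fname, dtype=np.float32, mode="w+", shape=shape)
Yr[:] = np.random.rand(*shape)
Yr.flush()

# Reopen read-only; slicing only pages in what is actually touched.
Yr_ro = np.memmap(fname, dtype=np.float32, mode="r", shape=shape)
print(Yr_ro[:5, 0])
```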
20 changes: 10 additions & 10 deletions caiman/motion_correction.py
@@ -113,7 +113,7 @@ def __init__(self, fname, min_mov=None, dview=None, max_shifts=(6, 6), niter_rig
will quickly initialize a template with the first frames
splits_rig': int
-for parallelization split the movies in num_splits chuncks across time
+for parallelization split the movies in num_splits chunks across time
num_splits_to_process_rig: list,
if none all the splits are processed and the movie is saved, otherwise at each iteration
@@ -129,7 +129,7 @@ def __init__(self, fname, min_mov=None, dview=None, max_shifts=(6, 6), niter_rig
flag for performing motion correction when calling motion_correct
splits_els':list
-for parallelization split the movies in num_splits chuncks across time
+for parallelization split the movies in num_splits chunks across time
num_splits_to_process_els: list,
if none all the splits are processed and the movie is saved otherwise at each iteration
@@ -404,7 +404,7 @@ def apply_shifts_movie(self, fname, rigid_shifts:bool=None, save_memmap:bool=Fal
rigid_shifts: bool (True)
apply rigid or pw-rigid shifts (must exist in the mc object)
-deprectated (read directly from mc.pw_rigid)
+deprecated (read directly from mc.pw_rigid)
save_memmap: bool (False)
flag for saving the resulting file in memory mapped form
@@ -2118,7 +2118,7 @@ def tile_and_correct(img, template, strides, overlaps, max_shifts, newoverlaps=N
strides of the patches in which the FOV is subdivided
overlaps: tuple
-amount of pixel overlaping between patches along each dimension
+amount of pixel overlapping between patches along each dimension
max_shifts: tuple
max shifts in x and y
@@ -2127,7 +2127,7 @@ def tile_and_correct(img, template, strides, overlaps, max_shifts, newoverlaps=N
strides between patches along each dimension when upsampling the vector fields
newoverlaps:tuple
-amount of pixel overlaping between patches along each dimension when upsampling the vector fields
+amount of pixel overlapping between patches along each dimension when upsampling the vector fields
upsample_factor_grid: int
if newshapes or newstrides are not specified this is inferred upsampling by a constant factor the cvector field
@@ -2306,7 +2306,7 @@ def tile_and_correct(img, template, strides, overlaps, max_shifts, newoverlaps=N

new_img = new_img / normalizer

-else: # in case the difference in shift between neighboring patches is larger than 0.5 pixels we do not interpolate in the overlaping area
+else: # in case the difference in shift between neighboring patches is larger than 0.5 pixels we do not interpolate in the overlapping area
half_overlap_x = int(newoverlaps[0] / 2)
half_overlap_y = int(newoverlaps[1] / 2)
for (x, y), (idx_0, idx_1), im, (_, _), weight_mat in zip(start_step, xy_grid, imgs, total_shifts, weight_matrix):
@@ -2364,7 +2364,7 @@ def tile_and_correct_3d(img:np.ndarray, template:np.ndarray, strides:tuple, over
strides of the patches in which the FOV is subdivided
overlaps: tuple
-amount of pixel overlaping between patches along each dimension
+amount of pixel overlapping between patches along each dimension
max_shifts: tuple
max shifts in x, y, and z
@@ -2373,7 +2373,7 @@ def tile_and_correct_3d(img:np.ndarray, template:np.ndarray, strides:tuple, over
strides between patches along each dimension when upsampling the vector fields
newoverlaps:tuple
-amount of pixel overlaping between patches along each dimension when upsampling the vector fields
+amount of pixel overlapping between patches along each dimension when upsampling the vector fields
upsample_factor_grid: int
if newshapes or newstrides are not specified this is inferred upsampling by a constant factor the cvector field
@@ -2570,7 +2570,7 @@ def tile_and_correct_3d(img:np.ndarray, template:np.ndarray, strides:tuple, over

new_img = new_img / normalizer

-else: # in case the difference in shift between neighboring patches is larger than 0.5 pixels we do not interpolate in the overlaping area
+else: # in case the difference in shift between neighboring patches is larger than 0.5 pixels we do not interpolate in the overlapping area
half_overlap_x = int(newoverlaps[0] / 2)
half_overlap_y = int(newoverlaps[1] / 2)
half_overlap_z = int(newoverlaps[2] / 2)
@@ -2964,7 +2964,7 @@ def motion_correct_batch_pwrigid(fname, max_shifts, strides, overlaps, add_to_mo
list of produced templates, one per batch
shifts: list
-inferred rigid shifts to corrrect the movie
+inferred rigid shifts to correct the movie
Raises:
Exception 'You need to initialize the template with a good estimate. See the motion'
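Most of the hunks in this file annotate `tile_and_correct`, which splits the field of view into overlapping patches and estimates a shift per patch, blending patches on the overlaps unless neighboring shifts disagree by more than 0.5 pixels. The per-patch building block is FFT-based cross-correlation; a bare-bones rigid version (no subpixel upsampling or patch blending, unlike the real code):

```python
import numpy as np

def estimate_rigid_shift(img, template):
    # The peak of the circular cross-correlation gives the displacement.
    cross = np.fft.ifft2(np.fft.fft2(img) * np.conj(np.fft.fft2(template)))
    peak = np.array(np.unravel_index(np.argmax(np.abs(cross)), cross.shape),
                    dtype=float)
    dims = np.array(img.shape)
    peak[peak > dims / 2] -= dims[peak > dims / 2]  # unwrap to signed shifts
    return peak

template = np.random.rand(64, 64)
img = np.roll(template, (3, -2), axis=(0, 1))  # displace by a known amount
print(estimate_rigid_shift(img, template))     # ~ [ 3. -2.]
```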
8 changes: 4 additions & 4 deletions caiman/source_extraction/cnmf/deconvolution.py
@@ -151,7 +151,7 @@ def constrained_foopsi(fluor, bl=None, c1=None, g=None, sn=None, p=None, metho

c1 = c[0]

-# remove intial calcium to align with the other foopsi methods
+# remove initial calcium to align with the other foopsi methods
# it is added back in function constrained_foopsi_parallel of temporal.py
c -= c1 * g**np.arange(len(fluor))
elif p == 2:
@@ -323,7 +323,7 @@ def cvxpy_foopsi(fluor, g, sn, b=None, c1=None, bas_nonneg=True, solvers=None):
should the baseline be estimated
solvers: tuple of two strings
-primary and secondary solvers to be used. Can be choosen between ECOS, SCS, CVXOPT
+primary and secondary solvers to be used. Can be chosen between ECOS, SCS, CVXOPT
Returns:
c: estimated calcium trace
@@ -332,7 +332,7 @@ def cvxpy_foopsi(fluor, g, sn, b=None, c1=None, bas_nonneg=True, solvers=None):
c1: esimtated initial calcium value
-g: esitmated parameters of the autoregressive model
+g: estimated parameters of the autoregressive model
sn: estimated noise level
@@ -501,7 +501,7 @@ def _nnls(KK, Ky, s=None, mask=None, tol=1e-9, max_iter=None):
w = np.argmax(l)
P[w] = True

-try: # likely unnnecessary try-except-clause for robustness sake
+try: # likely unnecessary try-except-clause for robustness sake
#mu = np.linalg.inv(KK[P][:, P]).dot(Ky[P])
mu = np.linalg.solve(KK[P][:, P], Ky[P])
except:
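The `constrained_foopsi` hunk subtracts `c1 * g**np.arange(len(fluor))`, the geometrically decaying trace of the initial calcium concentration under the AR(1) model. A toy forward simulation (invented parameters) of what that term represents:

```python
import numpy as np

T, g, c1 = 200, 0.95, 2.0
s = np.zeros(T)
s[[40, 120]] = 1.0                     # two spikes

# AR(1) calcium dynamics: c[t] = g * c[t-1] + s[t]
c = np.zeros(T)
for t in range(1, T):
    c[t] = g * c[t - 1] + s[t]

# The initial calcium decays as c1 * g**t; this is the term the hunk
# above removes so that p=1 output aligns with the other foopsi methods.
initial_decay = c1 * g ** np.arange(T)
fluor = c + initial_decay + 0.1 * np.random.randn(T)
```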
6 changes: 3 additions & 3 deletions caiman/source_extraction/cnmf/estimates.py
@@ -473,7 +473,7 @@ def nb_view_components_3d(self, Yr=None, image_type='mean', dims=None,
image_type: 'mean'|'max'|'corr'
image to be overlaid to neurons (average of shapes,
-maximum of shapes or nearest neigbor correlation of raw data)
+maximum of shapes or nearest neighbor correlation of raw data)
max_projection: bool
plot max projection along specified axis if True, o/w plot layers
@@ -981,7 +981,7 @@ class label for neuron shapes
Returns:
self: Estimates object
self.idx_components contains the indeced of components above
-the required treshold.
+the required threshold.
"""
dims = params.get('data', 'dims')
gSig = params.get('init', 'gSig')
@@ -1415,7 +1415,7 @@ def remove_small_large_neurons(self, min_size_neuro, max_size_neuro,
Returns:
neurons_to_keep: np.array
-indeces of components with size within the acceptable range
+indices of components with size within the acceptable range
'''
if self.A_thr is None:
raise Exception('You need to compute thresholded components before calling remove_duplicates: use the threshold_components method')
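`remove_small_large_neurons` keeps the components whose thresholded footprint falls within an acceptable size range. A hedged sketch of that filter (the names mirror the docstring; the data is random):

```python
import numpy as np

A_thr = (np.random.rand(256, 10) > 0.8).astype(np.float32)  # pixels x comps
min_size_neuro, max_size_neuro = 30, 80

sizes = (A_thr > 0).sum(axis=0)        # nonzero pixels per component
neurons_to_keep = np.where((sizes >= min_size_neuro)
                           & (sizes <= max_size_neuro))[0]
print(neurons_to_keep)                 # indices within the acceptable range
```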