-
Notifications
You must be signed in to change notification settings - Fork 1
/
velocity.py
19846 lines (17402 loc) · 819 KB
/
velocity.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# coding=utf-8
import copy
import cv2
import glob
import h5py
import itertools
import math
import matplotlib.cbook
import matplotlib.path as mpltPath
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import os
import pickle
import re
import subprocess
import sys
import time as time_mod
import warnings
from scipy import fftpack, signal, integrate
from scipy import ndimage, interpolate, signal, special
from scipy.interpolate import interp2d, griddata, UnivariateSpline, RegularGridInterpolator, LinearNDInterpolator
from scipy.optimize import minimize, curve_fit
from scipy.signal import butter, medfilt, filtfilt # filters for signal processing
from scipy.stats import binned_statistic, binned_statistic_2d, binned_statistic_dd, multivariate_normal
from tqdm import tqdm
warnings.filterwarnings("ignore", category=matplotlib.cbook.mplDeprecation)
warnings.simplefilter('ignore', RuntimeWarning)
warnings.simplefilter('ignore', FutureWarning)
import tflow.vector as vec
import tflow.graph as graph
global bezier_installed
try:
    import bezier  # required to plot bezier curves (not critical)
    bezier_installed = True
except ImportError:
    # Narrowed from a bare `except:`: only a missing/broken package install is
    # expected here; anything else should surface normally.
    bezier_installed = False
    print('velocity module: bezier is missing in a current environment. pip install bezier')
global useNotebook
useNotebook = True  # default for the `notebook` kwargs below (tqdm_notebook vs tqdm)
# Absolute path of this module and its parent directory (used to locate bundled resources)
path_mod = os.path.abspath(__file__)
mod_dir_path = os.path.dirname(path_mod)
"""
Module designed to process a planar/volumetric velocity field
- energy, enstrophy, vorticity fields
- energy spectra
- n-th order structure functions
- dissipation rate
- turbulent length scales
Philosophy:
Prepare a velocity field array "udata".
Pass it to functions in this module to obtain any quantities related to turbulence.
It should require a single line to obtain the desired quantity from a velocity field
unless an intermediate step is computationally expensive.
The primary example for this is an autocorrelation function which is used for various quantities like Taylor microscale.
udata = (ux, uy, uz) or (ux, uy)
each ui has a shape (height, width, (depth), duration)
If ui-s are individually given, make udata like
udata = np.stack((ux, uy))
Dependencies:
h5py, tqdm, numpy, scipy, matplotlib
author: takumi matsuzawa
"""
########## Fundamental operations ##########
def get_duidxj_tensor(udata, dx=1., dy=1., dz=1., xyz_orientations=np.asarray([1, -1, 1]),
                      xx=None, yy=None, zz=None):
    """
    Computes the velocity-gradient tensor s_ij = d u_i / d x_j of a 2D/3D velocity field.

    Assumes udata has a shape (dim, nrows, ncols[, nstacks][, duration]);
    one can easily make udata by np.stack((ux, uy)).

    Parameters
    ----------
    udata: numpy array, (ux, uy) or (ux, uy, uz)
        ... each component has a shape (nrows, ncols[, duration]) or (nrows, ncols, nstacks[, duration])
        ... a missing temporal axis is handled by inserting a singleton one
    dx, dy, dz: float, grid spacings (ignored if xx/yy are given)
    xyz_orientations: 1d array-like with shape (3,), [djdx, didy, dkdz]
        ... signs relating the index-space axes (j, i, k) to the physical axes (x, y, z):
            element 0 scales x-derivatives, element 1 scales y-derivatives,
            element 2 scales z-derivatives.
        ... needed because the array convention udata[dim, y, x, z, t] does not by
            itself fix whether +i/+j/+k point along +y/+x/+z or the opposite way.
        ... if unsure, use `xyz_orientations = get_jacobian_xyz_ijk(xx, yy, zz)`
    xx, yy, zz: positional grids; if xx and yy are given, both the orientations and
        the grid spacings are inferred from them (overriding dx/dy/dz)

    Returns
    -------
    sij: numpy array with shape (nrows, ncols, duration, dim, dim) (2D)
        or (nrows, ncols, nstacks, duration, dim, dim) (3D)
        ... sij[spatial coordinates, time, i, j] = d u_i / d x_j

    Raises
    ------
    ValueError: if udata does not represent a 2D or 3D vector field
    """
    if xx is not None and yy is not None:
        # Infer orientations and spacings directly from the positional grids
        xyz_orientations = get_jacobian_xyz_ijk(xx, yy, zz)
        if zz is None:
            dx, dy = get_grid_spacing(xx, yy)
        else:
            dx, dy, dz = get_grid_spacing(xx, yy, zz)
    shape = udata.shape  # shape=(dim, nrows, ncols[, nstacks][, duration])
    dim = shape[0]
    if dim == 2:
        ux, uy = udata[0, ...], udata[1, ...]
        try:
            dim, nrows, ncols, duration = udata.shape
        except ValueError:  # no temporal axis: add a singleton one below
            dim, nrows, ncols = udata.shape
            duration = 1
        ux = ux.reshape((ux.shape[0], ux.shape[1], duration))
        uy = uy.reshape((uy.shape[0], uy.shape[1], duration))

        duxdx = np.gradient(ux, dx, axis=1) * xyz_orientations[0]
        # +y may be anti-parallel to +i (np.gradient differentiates by going DOWN
        # the column), hence the orientation factor
        duxdy = np.gradient(ux, dy, axis=0) * xyz_orientations[1]
        duydx = np.gradient(uy, dx, axis=1) * xyz_orientations[0]
        duydy = np.gradient(uy, dy, axis=0) * xyz_orientations[1]
        sij = np.zeros((nrows, ncols, duration, dim, dim))
        sij[..., 0, 0] = duxdx
        sij[..., 0, 1] = duxdy
        sij[..., 1, 0] = duydx
        sij[..., 1, 1] = duydy
    elif dim == 3:
        ux, uy, uz = udata[0, ...], udata[1, ...], udata[2, ...]
        try:
            nrows, ncols, nstacks, duration = ux.shape
        except ValueError:  # no temporal axis: add a singleton one below
            nrows, ncols, nstacks = ux.shape
            duration = 1
        ux = ux.reshape((ux.shape[0], ux.shape[1], ux.shape[2], duration))
        uy = uy.reshape((uy.shape[0], uy.shape[1], uy.shape[2], duration))
        uz = uz.reshape((uz.shape[0], uz.shape[1], uz.shape[2], duration))

        duxdx = np.gradient(ux, dx, axis=1) * xyz_orientations[0]
        duxdy = np.gradient(ux, dy, axis=0) * xyz_orientations[1]
        duxdz = np.gradient(ux, dz, axis=2) * xyz_orientations[2]
        duydx = np.gradient(uy, dx, axis=1) * xyz_orientations[0]
        duydy = np.gradient(uy, dy, axis=0) * xyz_orientations[1]
        duydz = np.gradient(uy, dz, axis=2) * xyz_orientations[2]
        duzdx = np.gradient(uz, dx, axis=1) * xyz_orientations[0]
        duzdy = np.gradient(uz, dy, axis=0) * xyz_orientations[1]
        duzdz = np.gradient(uz, dz, axis=2) * xyz_orientations[2]
        sij = np.zeros((nrows, ncols, nstacks, duration, dim, dim))
        sij[..., 0, 0] = duxdx
        sij[..., 0, 1] = duxdy
        sij[..., 0, 2] = duxdz
        sij[..., 1, 0] = duydx
        sij[..., 1, 1] = duydy
        sij[..., 1, 2] = duydz
        sij[..., 2, 0] = duzdx
        sij[..., 2, 1] = duzdy
        sij[..., 2, 2] = duzdz
    else:
        # BUG FIX: the old code only raised for dim > 3, so dim == 1 fell through
        # and crashed with a NameError on the undefined `sij`.
        raise ValueError('udata should represent either a 2D or 3D v-field. ')
    return sij
def decompose_duidxj(sij):
    """
    Decompose a duidxj tensor into its symmetric and antisymmetric parts.

    eij = (s_ij + s_ji) / 2 is the rate-of-strain tensor;
    gij = (s_ij - s_ji) / 2 is the rotation tensor (carries the vorticity).

    Parameters
    ----------
    sij: 5d or 6d numpy array, (y, x, t, i, j) or (y, x, z, t, i, j)
        ... output of get_duidxj_tensor()

    Returns
    -------
    eij: numpy array with the same shape as sij, symmetric part
    gij: numpy array with the same shape as sij, anti-symmetric part
    """
    # Vectorized over all leading axes: swapping the last two axes transposes the
    # tensor indices (i, j) -> (j, i). This replaces the old per-frame/per-index
    # Python loops (which also crashed for tensors that were neither 2D nor 3D,
    # because `duration` was left undefined).
    sji = np.swapaxes(sij, -1, -2)
    eij = 0.5 * (sij + sji)
    gij = sij - eij
    return eij, gij
def reynolds_decomposition(udata, t0=0, t1=None, udata_mean=None):
    """
    Apply the Reynolds decomposition to a velocity field.

    Returns a mean field (time-averaged) and a fluctuating field.

    Parameters
    ----------
    udata: numpy array, (ux, uy) or (ux, uy, uz)
        ... each component has a shape (height, width[, depth][, duration])
    t0: int, default: 0
        ... if udata_mean is None, the temporal average of udata over [t0, t1) is the mean flow
    t1: int, default: None
        ... if udata_mean is None, the temporal average of udata over [t0, t1) is the mean flow
    udata_mean: numpy array, default: None
        ... if given, it is used as the mean flow; its shape must be either
            udata.shape[:-1] (time-independent mean) or udata.shape (time-dependent mean)

    Returns
    -------
    udata_mean: nd array, mean velocity field
    u_turb: nd array, fluctuating velocity field

    Raises
    ------
    ValueError: if udata_mean has an incompatible shape
    """
    udata = fix_udata_shape(udata)
    if udata_mean is None:
        udata_mean = np.nanmean(udata[..., t0:t1], axis=-1)
    # zeros_like + in-place assignment preserves the dtype (and mask) of udata,
    # matching the old element-wise behavior. The shape check used to sit inside
    # a dim x duration double loop; it is loop-invariant, so it is done once and
    # the subtraction is vectorized via broadcasting.
    u_turb = np.zeros_like(udata)
    if udata.shape[:-1] == udata_mean.shape:
        u_turb[...] = udata - udata_mean[..., np.newaxis]
    elif udata.shape == udata_mean.shape:
        u_turb[...] = udata - udata_mean
    else:
        raise ValueError('udata and udata_m must have the same shape')
    return udata_mean, u_turb
def reynolds_decomposition_using_phase_matched_mean_flow(udata, phase, udata_pavg, phase_ref, notebook=useNotebook):
    """
    Conduct a Reynolds decomposition against a phase-matched mean flow.

    For each frame, the phase-averaged mean flow whose reference phase is closest
    to the frame's phase is subtracted from the instantaneous field.

    Parameters
    ----------
    udata: nd array, velocity field
    phase: 1d array, phase of each frame in udata
    udata_pavg: nd array, phase-averaged mean flow (last axis indexes phase_ref)
    phase_ref: 1d array, phases at which udata_pavg was computed
    notebook: bool, use tqdm_notebook progress bars if True

    Returns
    -------
    udata_pavg: nd array, the phase-averaged mean flow (returned unchanged)
    u_turb: nd array, fluctuating velocity field
    """
    if notebook:
        from tqdm import tqdm_notebook as tqdm
        print('Using tqdm_notebook. If this is a mistake, set notebook=False')
    else:
        from tqdm import tqdm
    udata = fix_udata_shape(udata)
    dim = len(udata)
    u_turb = np.zeros_like(udata)
    for frame, frame_phase in enumerate(tqdm(phase)):
        nearest_idx, _ = find_nearest(phase_ref, frame_phase)
        u_turb[..., frame] = udata[..., frame] - udata_pavg[..., nearest_idx]
    if notebook:
        from tqdm import tqdm  # restore the standard tqdm name before returning
    return udata_pavg, u_turb
def get_mean_flow_field_using_udatapath(udatapath, x0=0, x1=None, y0=0, y1=None, z0=0, z1=None,
                                        t0=0, t1=None, inc=1,
                                        clean=True, cutoff=np.inf, method='nn', median_filter=False, verbose=False,
                                        replace_zeros=True,
                                        notebook=useNotebook):
    """
    Returns the time-averaged (mean) velocity field computed directly from a udata h5 file.
    ... recommended to use if udata is large (> half of your memory size)
    ... only a snapshot of data exists on memory while time-averaging is performed

    Parameters
    ----------
    udatapath: str
        ... path to udata; the h5 file is expected to contain a dataset 'ux'
            with shape (height, width[, depth], duration)
    x0, x1, y0, y1, z0, z1: int
        ... indices to specify the volume of data to load (udata[:, y0:y1, x0:x1, z0:z1])
    t0: int, default: 0
        ... start of the temporal range used for the average (udata[..., t0:t1:inc])
    t1: int, default: None
        ... end of the temporal range; defaults to the full duration
    inc: int, default: 1
        ... time increment within [t0, t1)
    clean: bool, default: True
        ... if True, each snapshot is passed through clean_udata() before accumulation
    cutoff: float, default: np.inf
        ... passed to clean_udata (presumably a magnitude threshold for spurious
            vectors — confirm against clean_udata)
    method: str, default: 'nn'
        ... interpolation method passed to clean_udata
    median_filter: bool, default: False
        ... passed to clean_udata
    verbose: bool, default: False
        ... verbosity flag passed through to the helpers
    replace_zeros: bool, default: True
        ... passed to clean_udata
    notebook: bool, default: useNotebook
        ... use a tqdm_notebook progress bar if True

    Returns
    -------
    udata_m: array with shape (dim, height, width[, depth]), time-averaged velocity field
    """
    if notebook:
        from tqdm import tqdm_notebook as tqdm
        print('Using tqdm_notebook. If this is a mistake, set notebook=False')
    else:
        from tqdm import tqdm
    # Peek at the dataset shape only; the snapshots themselves are streamed below.
    with h5py.File(udatapath, mode='r') as f:
        try:
            # 3D data: 'ux' is (height, width, depth, duration)
            height, width, depth, duration = f['ux'].shape
            height, width, depth = f['ux'][y0:y1, x0:x1, z0:z1, 0].shape  # cropped extent
            dim = 3
            shape = (dim, height, width, depth)
        except:
            # 2D data: 'ux' is (height, width, duration)
            height, width, duration = f['ux'].shape
            height, width = f['ux'][y0:y1, x0:x1, 0].shape
            dim = 2
            shape = (dim, height, width)
    if t1 is None:
        t1 = duration
    udata_m = np.zeros(shape)
    counter = 0
    # Accumulate one snapshot at a time to keep the memory footprint small
    for t in tqdm(range(t0, t1, inc), desc='mean flow'):
        udata_tmp = get_udata_from_path(udatapath,
                                        x0=x0, x1=x1, y0=y0, y1=y1, z0=z0, z1=z1,
                                        t0=t, t1=t + 1, verbose=verbose)
        if clean:
            udata_tmp = clean_udata(udata_tmp, cutoff=cutoff, method=method, median_filter=median_filter,
                                    replace_zeros=replace_zeros, verbose=verbose, showtqdm=verbose)
            udata_tmp = fix_udata_shape(udata_tmp)
        else:
            udata_tmp = fix_udata_shape(udata_tmp)
        udata_m += udata_tmp[..., 0]
        counter += 1
    udata_m /= counter
    if notebook:
        from tqdm import tqdm  # restore the standard tqdm name before returning
    return udata_m
def get_phase_averaged_udata_from_path(dpath, freq, time, deltaT=None,
                                       x0=0, x1=None, y0=0, y1=None, z0=0, z1=None,
                                       t0=0, t1=None, notebook=useNotebook, use_masked_array=True, verbose=True):
    """
    Returns the phase-averaged udata (velocity field) without loading the entire udata from dpath.

    Parameters
    ----------
    dpath: str, path to the udata (h5)
        ... the h5 file must contain ux and uy at /ux and /uy, respectively
    freq: float, frequency of the periodicity in data
    time: 1D array, time stamps of the frames
        ... time unit must be the same as the inverse of the unit of 'freq';
            its length must equal the duration of the stored udata
    deltaT: float, default: None
        ... width of a phase bin used for averaging; defaults to the frame
            spacing time[1] - time[0]
    x0, x1, y0, y1, z0, z1: int
        ... indices to specify the volume of data to load (udata[:, y0:y1, x0:x1, z0:z1])
    t0, t1: int
        ... temporal range of frames used for the averaging
    notebook: bool, use a tqdm_notebook progress bar if True
    use_masked_array: bool, wrap the result in a masked array if True
    verbose: bool, print the chosen deltaT

    Returns
    -------
    tp: 1D array, times within one period, tp = [0, period)
        ... phase = tp * freq
    udata_pavg: nd array, phase-averaged udata (a phase-dependent mean flow)
        ... bins that received no frame are filled with np.nan
    """
    duration = get_udata_dim(dpath)[-1]
    if len(time) != duration:
        raise ValueError("get_phase_averaged_udata_from_path: the length of t must match the duration of the udata")
    if notebook:
        from tqdm import tqdm_notebook as tqdm
        print('Using tqdm_notebook. If this is a mistake, set notebook=False')
    else:
        from tqdm import tqdm
    if t1 is None:
        t1 = duration
    time = np.asarray(time[t0:t1])  # NOTE: index 0 of `time` now corresponds to frame t0
    period = 1. / freq  # period T
    if deltaT is None:
        deltaT = time[1] - time[0]
    if verbose:
        print(f'get_phase_averaged_udata_from_path: deltaT- {deltaT}')
    time_mod_period = time % period
    nt = int(np.floor(period / deltaT))  # number of phase bins
    tp = np.arange(nt) * deltaT
    # Load a single frame to learn the (cropped) field shape; 3D data returns zz as well
    try:
        dummy, xxx, yyy, zzz = get_udata_from_path(dpath, x0=x0, x1=x1, y0=y0, y1=y1, z0=z0, z1=z1, t0=0, t1=1,
                                                   return_xy=True, verbose=False)
    except ValueError:  # 2D data
        dummy, xxx, yyy = get_udata_from_path(dpath, x0=x0, x1=x1, y0=y0, y1=y1, z0=z0, z1=z1, t0=0, t1=1,
                                              return_xy=True, verbose=False)
    shape = dummy.shape[:-1] + (nt,)
    udata_pavg = np.zeros(shape)
    for i in tqdm(range(nt), desc='phase-avg udata'):
        # frames whose phase falls in the i-th bin [i*deltaT, (i+1)*deltaT)
        cond = (time_mod_period >= i * deltaT) & (time_mod_period < (i + 1) * deltaT)
        counter = 0
        for t in range(t0, t1):
            # BUG FIX: `time` was sliced to [t0:t1], so frame t maps to index t - t0.
            # The old code indexed cond[t], which was misaligned (and out of range
            # near the end) whenever t0 > 0.
            if cond[t - t0]:
                udata_pavg[..., i] += \
                    get_udata_from_path(dpath, x0=x0, x1=x1, y0=y0, y1=y1, z0=z0, z1=z1, t0=t, t1=t + 1,
                                        verbose=False)[..., 0]
                counter += 1
        if counter > 0:
            udata_pavg[..., i] /= float(counter)
        else:
            # No frame fell into this bin: mark it invalid instead of dividing by zero
            udata_pavg[..., i] = np.nan
    if use_masked_array:
        udata_pavg = np.ma.masked_array(udata_pavg)
    if notebook:
        from tqdm import tqdm  # restore the standard tqdm name before returning
    return tp, udata_pavg
def subtract_mean_flow(udata, udata_m, notebook=useNotebook):
    """
    Returns the fluctuating velocity field u(x, t) = U(x, t) - <U>(x).

    ... U: raw velocity field (udata)
    ... <U>: mean velocity field (udata_m), typically a temporal average <U>_t

    Parameters
    ----------
    udata: nd array, velocity field
    udata_m: nd array with shape udata.shape[:-1], mean velocity field
    notebook: bool, use a tqdm_notebook progress bar if True

    Returns
    -------
    udata_t: nd array, fluctuating velocity field
    """
    if notebook:
        from tqdm import tqdm_notebook as tqdm
    udata = fix_udata_shape(udata)
    udata_t = np.empty_like(udata)
    n_frames = udata.shape[-1]
    # Subtract the (time-independent) mean frame by frame
    for frame in tqdm(range(n_frames)):
        udata_t[..., frame] = udata[..., frame] - udata_m
    if notebook:
        from tqdm import tqdm as tqdm  # restore the standard tqdm name
    return udata_t
def subtract_phase_dependent_mean_flow(udata, udata_pavg, time, freq, phase_ref=None, phase_shift=0.,
                                       notebook=useNotebook):
    """
    Returns a phase-dependent, fluctuating velocity field

        u(x, t) = U(x, t) - <U>_p(x, theta),    theta = (t mod T) * freq + phase_shift

    ... U: raw velocity field (udata)
    ... <U>_p: phase-dependent mean velocity field (udata_pavg)

    Parameters
    ----------
    udata: nd array, velocity field
    udata_pavg: nd array, phase-dependent mean velocity field (last axis indexes phase)
    time: 1d array, time stamps corresponding to udata
    freq: float, frequency of the periodicity (period T = 1/freq)
    phase_ref: 1d array, default: None
        ... phases associated with udata_pavg's last axis; defaults to a uniform
            grid on [0, 1) with udata_pavg.shape[-1] points
    phase_shift: float, default: 0., phase shift in [0, 1)
    notebook: bool, use a tqdm_notebook progress bar if True

    Returns
    -------
    udata_t: nd array, fluctuating velocity field
    """
    if notebook:
        from tqdm import tqdm_notebook as tqdm
    udata = fix_udata_shape(udata)
    udata_t = np.empty_like(udata)
    period = 1. / float(freq)
    phase = (np.asarray(time) % period) * freq + phase_shift
    if phase_ref is None:
        phase_ref = np.linspace(0, 1, udata_pavg.shape[-1], endpoint=False)
    # For each frame, subtract the mean-flow slice at the nearest (lower) reference phase
    for frame, theta in enumerate(tqdm(phase, desc='subtract PDMF')):
        ref_idx, _ = find_nearest(phase_ref, theta, option='less')
        udata_t[..., frame] = udata[..., frame] - udata_pavg[..., ref_idx]
    if notebook:
        from tqdm import tqdm as tqdm  # restore the standard tqdm name
    return udata_t
########## vector operations ##########
def div(udata, dx=1., dy=1., dz=1., xyz_orientations=np.asarray([1, -1, 1]), xx=None, yy=None, zz=None):
    """
    Computes the divergence of a velocity field, div u = d u_i / d x_i (summed over i).

    Parameters
    ----------
    udata: numpy array, (ux, uy) or (ux, uy, uz)
        ... each component has a shape (height, width[, depth][, duration])
    dx, dy, dz: float, grid spacings (ignored if xx/yy are given)
    xyz_orientations: 1d array, orientation of +x/+y/+z w.r.t. the array axes
        (see get_duidxj_tensor)
    xx, yy, zz: positional grids; if given, spacings and orientations are inferred

    Returns
    -------
    div_u: numpy array with shape (height, width[, depth], duration)
    """
    duidxj = get_duidxj_tensor(udata, dx=dx, dy=dy, dz=dz, xyz_orientations=xyz_orientations,
                               xx=xx, yy=yy, zz=zz)
    # The divergence is the trace of the velocity-gradient tensor (its last two axes)
    return np.trace(duidxj, axis1=-2, axis2=-1)
def grad(U, dx=1., dy=1., dz=1., xyz_orientations=np.asarray([1, -1, 1]), xx=None, yy=None, zz=None):
    """
    Computes the gradient of a scalar field U.
    (The old docstring said "divergence" — a copy-paste error.)

    Parameters
    ----------
    U: numpy array, scalar field
        ... shape (height, width[, duration]) (2D) or (height, width, depth[, duration]) (3D)
        ... a missing temporal axis is handled by inserting a singleton one
    dx, dy, dz: float, grid spacings (ignored if xx/yy are given)
    xyz_orientations: 1d array with shape (3,)
        ... signs relating the index-space axes to the physical axes
            (element 0 scales x-derivatives, 1 y-derivatives, 2 z-derivatives;
            see get_duidxj_tensor)
    xx, yy, zz: positional grids; if xx and yy are given, spacings and orientations
        are inferred from them. Whether U is treated as 2D or 3D is decided by the
        presence of zz.

    Returns
    -------
    grad_U: numpy array
        ... shape (2, height, width, duration) (2D) or (3, height, width, depth, duration) (3D)
    """
    if xx is not None and yy is not None:
        xyz_orientations = get_jacobian_xyz_ijk(xx, yy, zz)
        if zz is None:
            dx, dy = get_grid_spacing(xx, yy)
        else:
            dx, dy, dz = get_grid_spacing(xx, yy, zz)
    dim = 3 if zz is not None else 2  # dimensionality is decided by the presence of zz
    if dim == 2:
        try:
            nrows, ncols, duration = U.shape
        except ValueError:  # no temporal axis: (nrows, ncols) -> add a singleton time axis
            nrows, ncols = U.shape
            duration = 1
            U = U.reshape((nrows, ncols, duration))
        dUdx = np.gradient(U, dx, axis=1) * xyz_orientations[0]
        dUdy = np.gradient(U, dy, axis=0) * xyz_orientations[1]
        grad_U = np.stack((dUdx, dUdy))
    else:
        try:
            nrows, ncols, nstacks, duration = U.shape
        except ValueError:  # no temporal axis: add a singleton time axis
            nrows, ncols, nstacks = U.shape
            duration = 1
            U = U.reshape((nrows, ncols, nstacks, duration))
        dUdx = np.gradient(U, dx, axis=1) * xyz_orientations[0]
        dUdy = np.gradient(U, dy, axis=0) * xyz_orientations[1]
        dUdz = np.gradient(U, dz, axis=2) * xyz_orientations[2]
        grad_U = np.stack((dUdx, dUdy, dUdz))
    return grad_U
def curl(udata, dx=1., dy=1., dz=1., xyz_orientations=np.asarray([1, -1, 1]),
         xx=None, yy=None, zz=None, verbose=False):
    """
    Computes curl of a velocity field using a rate of strain tensor
    ... if you already have velocity data as ux = array with shape (m, n) and uy = array with shape (m, n),
        udata = np.stack((ugrid1, vgrid1))
        omega = vec.curl(udata)

    Parameters
    ----------
    udata: (ux, uy, uz) or (ux, uy)
    dx, dy, dz: float, spatial spacing of a 2D/3D grid (ignored if xx/yy are given)
    xyz_orientations: 1d array
        ... Signs relating differentiation in the index space (i, j, k) to the
            physical space (x, y, z). The array convention udata[dim, y, x, z, t]
            does not by itself fix whether +i/+j/+k point along +y/+x/+z or the
            opposite way, so the choice must be supplied (or inferred from xx/yy/zz).
        ... e.g.
            xyz_orientations = [1, 1, 1]... means +x // +i, +y // +j, +z // +k
            xyz_orientations = [-1, -1, -1]... means +x // -i, +y // -j, +z // -k
            xyz_orientations = [1, -1, 1]... means +x // +i, +y // -j, +z // +k
    xx: 2d/3d array, positional grid
        ... If given, whether +x and +i point in the same direction is figured out
            automatically, and the curl is computed based on that
    yy: 2d/3d array, positional grid
        ... If given, whether +y and +j point in the same direction is figured out
            automatically, and the curl is computed based on that
    zz: 2d/3d array, positional grid
        ... If given, whether +z and +k point in the same direction is figured out
            automatically, and the curl is computed based on that
    verbose: bool, print a troubleshooting hint if True

    Returns
    -------
    omega: numpy array
        shape: (height, width, duration) (2D) or (3, height, width, depth, duration) (3D)
        (NOTE: for 3D input the three vorticity components are stacked along a new
        leading axis; the old docstring omitted that axis)
    """
    if verbose:
        print('... curl(): If the result is not sensible, consult altering xyz_orientations.\n'
              'A common mistake is that udata is not origanized properly such that '
              '+x direction is not equal to +direction along the row of an array'
              'or +y direction is not equal to +direction along the column of an array')
    sij = get_duidxj_tensor(udata, dx=dx, dy=dy, dz=dz, xyz_orientations=xyz_orientations, xx=xx, yy=yy, zz=zz)
    dim = len(sij.shape) - 3  # spatial dim
    # gij is the antisymmetric (rotation) part of the velocity-gradient tensor:
    # 2 * g_ij = du_i/dx_j - du_j/dx_i
    eij, gij = decompose_duidxj(sij)
    if dim == 2:
        # out-of-plane component: omega_z = duy/dx - dux/dy = 2 * g_10
        omega = 2 * gij[..., 1, 0]  # checked. this is correct.
    elif dim == 3:
        # omega_x = 2 g_21, omega_y = 2 g_02, omega_z = 2 g_10
        # sign issue was checked. this is correct.
        omega1, omega2, omega3 = 2. * gij[..., 2, 1], 2. * gij[..., 0, 2], 2. * gij[..., 1, 0]
        # omega1, omega2, omega3 = -2. * gij[..., 2, 1], 2. * gij[..., 0, 2], -2. * gij[..., 1, 0]
        omega = np.stack((omega1, omega2, omega3))
    else:
        # defensive: get_duidxj_tensor only produces 2D/3D tensors, so this branch
        # should be unreachable
        print('Not implemented yet!')
        return None
    return omega
def curl_2d(ux, uy, dx=1., dy=1., xyz_orientations=np.asarray([1, -1]), xx=None, yy=None, zz=None):
    """
    Calculate the curl (out-of-plane vorticity) of a 2D (or 2D+1) field:
    omega = duy/dx - dux/dy

    Parameters
    ----------
    ux: 2D array
        x component of a 2D field
    uy: 2D array
        y component of a 2D field
    dx: float
        data spacing (mm/px)
    dy: float
        data spacing (mm/px)
    xyz_orientations: 1d array [djdx, didy]
        ... signs relating the array axes to the physical axes:
            element 0 scales x-derivatives, element 1 scales y-derivatives
            (same convention as get_duidxj_tensor / curl)
    xx, yy: 2D arrays, positional grids; if given, the orientations are inferred
    zz: unused for 2D data (kept for interface compatibility)

    Returns
    -------
    omega: 2D numpy array
        vorticity field
    """
    if xx is not None and yy is not None:
        xyz_orientations = get_jacobian_xyz_ijk(xx, yy, zz)
    # BUG FIX (consistency): xyz_orientations[0] must scale the x-derivative and
    # xyz_orientations[1] the y-derivative, as everywhere else in this module
    # (get_duidxj_tensor, curl). The old code applied the factors to the wrong
    # derivatives, which flipped the sign of omega relative to curl() for the
    # default orientations.
    duxdy = np.gradient(ux, dy, axis=0) * xyz_orientations[1]
    duydx = np.gradient(uy, dx, axis=1) * xyz_orientations[0]
    omega = duydx - duxdy
    return omega
def dot(udata, vdata):
    """
    Pointwise inner product of two vector fields.

    Parameters
    ----------
    udata: nd array, vector field data, udata[dim, y0:y1, x0:x1, (optional: z0:z1), t0:t1]
    vdata: nd array, vector field data with the same number of components

    Returns
    -------
    scaData: (n-1)d array, inner product at every point
        ... if one field carries a time axis and the other does not, the static
            field is broadcast along time

    Raises
    ------
    ValueError: if the component counts differ, or the shapes are incompatible
    """
    n_comp = udata.shape[0]
    if n_comp != vdata.shape[0]:
        raise ValueError('udata and vdata must have the same number of components')
    # Ensure udata is the higher-dimensional field of the two
    if len(udata.shape) < len(vdata.shape):
        udata, vdata = vdata, udata
    if udata.shape == vdata.shape or len(vdata.shape) == 1:
        # Matching shapes (or vdata is a plain vector): straight component-wise sum
        scaData = np.zeros_like(udata[0, ...])
        for c in range(n_comp):
            scaData += udata[c, ...] * vdata[c, ...]
        return scaData
    if udata.shape[:-1] != vdata.shape:
        raise ValueError(
            'udata must have the same shape as vdata OR udata[..., 0] must have the shape as vdata')
    # vdata is static: broadcast it along the temporal axis of udata
    scaData = np.zeros_like(udata[0, ...])
    for c in range(n_comp):
        for t in range(udata.shape[-1]):
            scaData[..., t] += udata[c, ..., t] * vdata[c, ...]
    return scaData
def cross(udata, vdata):
    """
    Pointwise cross product of two vector fields.

    Parameters
    ----------
    udata: nd array, vector field data, udata[dim, y0:y1, x0:x1, (optional: z0:z1), t0:t1]
    vdata: nd array, vector field data with the same number of components (2 or 3)

    Returns
    -------
    vecData: nd array
        ... for 3D inputs: a vector field with the same shape as udata
        ... for 2D inputs: a scalar field (the out-of-plane component)
        ... if one field carries a time axis and the other does not, the static
            field is broadcast along time

    Raises
    ------
    ValueError: if the component counts differ, or the shapes are incompatible
    """
    udim, vdim = udata.shape[0], vdata.shape[0]
    if udim != vdim:
        raise ValueError('udata and vdata must have the same number of components')
    if len(udata.shape) < len(vdata.shape):
        # udata must be the higher-dimensional field; swapping the operands of a
        # cross product flips the sign, hence the negation
        udata, vdata = vdata, -udata
    if udim == 3:
        if udata.shape == vdata.shape or len(
                vdata.shape) == 1:  # preferred (both fields share one shape, or vdata is a plain vector)
            vecData = np.empty_like(udata[...])
            vecData[0, ...] = udata[1, ...] * vdata[2, ...] - udata[2, ...] * vdata[1, ...]
            vecData[1, ...] = udata[2, ...] * vdata[0, ...] - udata[0, ...] * vdata[2, ...]
            vecData[2, ...] = udata[0, ...] * vdata[1, ...] - udata[1, ...] * vdata[0, ...]
            return vecData
        if udata.shape[:-1] != vdata.shape:
            raise ValueError(
                'udata must have the same shape as vdata OR udata[..., 0] must have the shape as vdata')
        # vdata is static: broadcast along the temporal axis of udata.
        # (FIX: the old code wrapped this in a redundant `for d in range(udim)` loop
        # whose body never used d, recomputing every frame udim times.)
        vecData = np.empty_like(udata[...])
        for t in range(udata.shape[-1]):
            vecData[0, ..., t] = udata[1, ..., t] * vdata[2, ...] - udata[2, ..., t] * vdata[1, ...]
            vecData[1, ..., t] = udata[2, ..., t] * vdata[0, ...] - udata[0, ..., t] * vdata[2, ...]
            vecData[2, ..., t] = udata[0, ..., t] * vdata[1, ...] - udata[1, ..., t] * vdata[0, ...]
        return vecData
    elif udim == 2:
        if udata.shape == vdata.shape or len(
                vdata.shape) == 1:  # preferred (both fields share one shape, or vdata is a plain vector)
            vecData = udata[0, ...] * vdata[1, ...] - udata[1, ...] * vdata[0, ...]
            return vecData
        if udata.shape[:-1] != vdata.shape:
            raise ValueError(
                'udata must have the same shape as vdata OR udata[..., 0] must have the shape as vdata')
        # vdata is static: broadcast along the temporal axis (redundant d-loop removed)
        vecData = np.empty_like(udata[0, ...])
        for t in range(udata.shape[-1]):
            vecData[..., t] = udata[0, ..., t] * vdata[1, ...] - udata[1, ..., t] * vdata[0, ...]
        return vecData
def mag(udata):
    """
    Return the magnitude of a vector field at every point and frame.

    Parameters
    ----------
    udata: nd array, vector field data with the same format as udata[dim, y0:y1, x0:x1, (optional: z0:z1), t0:t1]

    Returns
    -------
    umag: (n-1)d array, magnitude of each vector in the field
    """
    n_components = udata.shape[0]
    # Accumulate the squared components, then take the square root once.
    sq_sum = np.zeros_like(udata[0, ...])
    for component in range(n_components):
        sq_sum = sq_sum + udata[component, ...] ** 2
    return np.sqrt(sq_sum)
########## Elementary analysis ##########
def get_energy(udata):
    r"""
    Returns energy(\vec{x}, t) of udata
    ... energy = np.nansum(udata**2, axis=0) / 2

    Parameters
    ----------
    udata: nd array, velocity field with components along axis 0
        (same format as udata[dim, y0:y1, x0:x1, (optional: z0:z1), t0:t1])

    Returns
    -------
    energy: nd array with shape udata.shape[1:]
        kinetic energy density (per unit mass); for a masked udata, points
        where any velocity component is masked are returned as np.nan
    """
    dim = udata.shape[0]
    energy = np.zeros(udata.shape[1:])
    for d in range(dim):
        energy += udata[d, ...] ** 2
    energy /= 2.
    if isinstance(udata, np.ma.MaskedArray):
        # Mask the energy wherever ANY velocity component is masked.
        # BUGFIX: the previous code used mask[1:], which drops the first
        # *component* and has shape (dim-1, ...) — it does not match
        # energy's shape shape[1:]. Combine the mask across components.
        mask = np.any(np.ma.getmaskarray(udata), axis=0)
        energy = np.ma.masked_array(energy, mask=mask)
        energy = energy.filled(np.nan)
    return energy
def get_enstrophy(udata, dx=1., dy=1., dz=1., xx=None, yy=None, zz=None,
                  gaussian_blur=False, sigma=2, mode='nearest'):
    r"""
    Returns enstrophy(\vec{x}, t) of udata
    ... enstrophy = omega ** 2 where omega = curl(udata)

    Parameters
    ----------
    udata: nd array, velocity field
    dx: float, data spacing along x
    dy: float, data spacing along y
    dz: float, data spacing along z
    xx, yy, zz: nd arrays, optional coordinate grids passed through to curl
    gaussian_blur: bool, if True, smooth the vorticity before squaring
    sigma: float, Gaussian kernel width used when gaussian_blur is True
    mode: str, boundary mode for the Gaussian filter

    Returns
    -------
    enstrophy: nd array
    """
    ndim = udata.shape[0]
    omega = curl(udata, dx=dx, dy=dy, dz=dz, xx=xx, yy=yy, zz=zz)
    if gaussian_blur:
        omega = gaussian_blur_scalar_field(omega, sigma=sigma, mode=mode)
    if ndim == 2:
        # 2D vorticity is a scalar field; enstrophy is simply its square.
        enstrophy = omega ** 2
    elif ndim == 3:
        # 3D vorticity is a vector field; sum the squared components.
        enstrophy = np.zeros(omega.shape[1:])
        for axis in range(ndim):
            enstrophy += omega[axis, ...] ** 2
    return enstrophy
def get_q_criterion(udata, xx, yy, zz):
    """
    Returns the Q criterion of a 3D velocity field.
    ... Q = 0.5 (g_ij g_ij - e_ij e_ij)
    where dui_dxj = eij + gij
    (eij: the symmetric part of the velocity gradient tensor — the rate-of-strain tensor,
    gij: the antisymmetric part — the vorticity tensor)

    Parameters
    ----------
    udata: 4d numpy array
    xx: 3d array, x-coordinate
    yy: 3d array, y-coordinate
    zz: 3d array, z-coordinate

    Returns
    -------
    q: 4d array, (y, x, z, t), q-criterion; Q(x, y, z) = 0.5 (gij gij - eij eij) = 0.5 ( |Omega|^2 - |S|^2 )
    """
    velocity_gradient = get_duidxj_tensor(udata, xx=xx, yy=yy, zz=zz)
    strain, rotation = decompose_duidxj(velocity_gradient)
    # Contract each tensor with itself over the last two (tensor) axes.
    return 0.5 * np.nansum(rotation ** 2 - strain ** 2, axis=(-2, -1))
def get_lambda2_criterion(udata, xx, yy, zz):
    """
    Returns the lambda2 criterion.
    lambda2 is the second (middle) eigenvalue of S^2 + Omega^2 at each point (x, y, z),
    where dui_dxj = eij + gij
    (eij: rate-of-strain tensor, gij: vorticity tensor)

    Parameters
    ----------
    udata: 4d numpy array
    xx: 3d array, x-coordinate
    yy: 3d array, y-coordinate
    zz: 3d array, z-coordinate

    Returns
    -------
    lambda2: 4d array, (y, x, z, t), lambda2 criterion
    """
    duidxj = get_duidxj_tensor(udata, xx=xx, yy=yy, zz=zz)
    eij, gij = decompose_duidxj(duidxj)  # eij: rate-of-strain tensor, gij: vorticity tensor
    # S^2 + Omega^2 is symmetric (S is symmetric, Omega antisymmetric, so both
    # squares are symmetric). @ batches the matmul over the leading axes.
    aij = eij @ eij + gij @ gij
    # BUGFIX: np.linalg.eig returns eigenvalues in no guaranteed order, so
    # taking w[1] did NOT reliably give the middle eigenvalue. eigvalsh is
    # valid here (aij is symmetric) and returns them in ascending order,
    # so index 1 is the true second eigenvalue. It also batches, removing
    # the quadruple Python loop.
    w = np.linalg.eigvalsh(aij)  # shape (..., 3), ascending
    lambda2 = w[..., 1]
    return lambda2
def get_time_avg_energy(udata):
    """
    Returns a time-averaged energy field.

    Parameters
    ----------
    udata: nd array, velocity field (last axis is time)

    Returns
    -------
    energy_avg: nd array, energy field averaged over the time axis (NaNs ignored)
    """
    return np.nanmean(get_energy(udata), axis=-1)
def get_time_avg_enstrophy(udata, dx=1., dy=1., dz=1., xx=None, yy=None, zz=None):
"""
Returns a time-averaged enstrophy field
Parameters
----------
udata: nd array, velocity field
dx: float, spacing along x
dy: float, spacing along y
dz: float, spacing along z (optional, applicable to 3D data)