diff --git a/M2.png b/M2.png
index e795093..3c22d97 100644
Binary files a/M2.png and b/M2.png differ
diff --git a/README.md b/README.md
index 99d1315..2a9a002 100644
--- a/README.md
+++ b/README.md
@@ -70,7 +70,9 @@ The included JavaScript `bench` provides a method to evaluate performance. This
 
 The graph below shows the time to load the [left inferior fronto-occipital fasciculus (IFOF)](https://brain.labsolver.org/hcp_trk_atlas.html) from the HCP1065 Population-Averaged Tractography Atlas (Yeh, 2022). This has 21209 streamlines and 4915166 vertices. The different formats were generated with the [tff_convert_tractogram.py](https://github.com/tee-ar-ex/trx-python) script. The benchmark was run on a MacBook laptop with an M2 Pro CPU. The ideal format would be fast to load (to the left on the horizontal axis) and have a small file size (toward the bottom on the right axis). However, compression typically trades load time for file size. Here all data is loaded from a local solid state drive, whereas smaller files would show a larger benefit if data were loaded over a slow internet connection. The following file formats are illustrated (except where noted, both positions and indices are stored with 32-bit precision):
 
- - vtk: streamlines saved by an [undocumented](https://discourse.vtk.org/t/upcoming-changes-to-vtkcellarray/2066) extension to the [VTK legacy file format](https://vtk.org/wp-content/uploads/2015/04/file-formats.pdf).
+ - tt: The [DSI Studio format](https://dsi-studio.labsolver.org/doc/cli_data.html) is very compact. The vertex position is stored as 1/32nd of a voxel, so it may be slightly lossy for some data.
+ - tt.gz: Gzip compressed DSI-Studio.
+ - vtk: streamlines saved by an [undocumented](https://discourse.vtk.org/t/upcoming-changes-to-vtkcellarray/2066) extension to the [VTK legacy file format](https://vtk.org/wp-content/uploads/2015/04/file-formats.pdf) (converted with `--offsets_dtype uint32`).
  - tck: [MRtrix format](https://mrtrix.readthedocs.io/en/latest/getting_started/image_data.html#tracks-file-format-tck).
  - trk: The popular [TrackVis format](http://trackvis.org/docs/?subsect=fileformat).
  - trk.gz: Gzip compressed TrackVis.
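Aside (not part of the patch): the `tt` entry added above notes that vertex positions are stored as 1/32nd of a voxel, which is why the format can be slightly lossy. A minimal sketch of that round trip, with hypothetical helper names `quantizeToTT`/`dequantizeFromTT` (this is an illustration of fixed-point quantization, not DSI Studio's actual reader):

```js
// Hypothetical illustration of 1/32nd-voxel fixed-point storage.
// Coordinates are scaled by 32 and rounded to integers, so decoding
// can be off by up to half a step (1/64 voxel).
function quantizeToTT(voxelCoord) {
  return Math.round(voxelCoord * 32); // stored as an integer count of 1/32-voxel steps
}

function dequantizeFromTT(stored) {
  return stored / 32; // recovered voxel coordinate
}

const original = 12.3456; // voxel units
console.log(dequantizeFromTT(quantizeToTT(original))); // 12.34375 (quantization error ~0.0019 voxels)
```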
diff --git a/graphM2.py b/graphM2.py
index aae754d..1860266 100755
--- a/graphM2.py
+++ b/graphM2.py
@@ -4,9 +4,9 @@
 
 # Create a dataframe
 data = {
-    'format': ['vtk', 'tck', 'trk', 'trk.gz', 'trk.zst', 'trx', 'z.trx', '16.trx', '16z.trx'],
-    'time': [459, 866, 924, 3881, 4012, 159, 2763, 373, 2708],
-    'bytes': [113218694, 59236654, 59067828, 28746432, 28291562, 59152204, 28502799, 29661208, 24245538]
+    'format': ['tt', 'tt.gz', 'vtk', 'tck', 'trk', 'trk.gz', 'trk.zst', 'trx', 'z.trx', '16.trx', '16z.trx'],
+    'time': [2068, 3894, 416, 395, 573, 3483, 3827, 159, 2763, 373, 2708],
+    'bytes': [15022315, 7249360, 93473172, 59236654, 59067828, 28746432, 28291562, 59152204, 28502799, 29661208, 24245538]
 }
 
 df = pd.DataFrame(data)
diff --git a/streamlineIO.mjs b/streamlineIO.mjs
index cfe1413..ceb7e43 100644
--- a/streamlineIO.mjs
+++ b/streamlineIO.mjs
@@ -99,35 +99,36 @@ function readTRK(buffer) {
   //read and transform vertex positions
   let i = 0;
   let npt = 0;
-  let offsetPt0 = [];
-  let pts = [];
+  //over-provision offset array to store number of segments
+  let offsetPt0 = new Uint32Array(i32.length);
+  let noffset = 0;
+  //over-provision points array to store vertex positions
+  let npt3 = 0;
+  let pts = new Float32Array(i32.length);
   while (i < ntracks) {
     let n_pts = i32[i];
     i = i + 1; // read 1 32-bit integer for number of points in this streamline
-    offsetPt0.push(npt); //index of first vertex in this streamline
+    offsetPt0[noffset++] = npt; //index of first vertex in this streamline
     for (let j = 0; j < n_pts; j++) {
       let ptx = f32[i + 0];
       let pty = f32[i + 1];
       let ptz = f32[i + 2];
       i += 3; //read 3 32-bit floats for XYZ position
-      pts.push(
+      pts[npt3++] =
         ptx * vox2mmMat[0] +
         pty * vox2mmMat[1] +
         ptz * vox2mmMat[2] +
-        vox2mmMat[3]
-      );
-      pts.push(
+        vox2mmMat[3];
+      pts[npt3++] =
         ptx * vox2mmMat[4] +
         pty * vox2mmMat[5] +
         ptz * vox2mmMat[6] +
-        vox2mmMat[7]
-      );
-      pts.push(
+        vox2mmMat[7];
+      pts[npt3++] =
         ptx * vox2mmMat[8] +
         pty * vox2mmMat[9] +
         ptz * vox2mmMat[10] +
-        vox2mmMat[11]
-      );
+        vox2mmMat[11];
       if (n_scalars > 0) {
         for (let s = 0; s < n_scalars; s++) {
           dpv[s].vals.push(f32[i]);
@@ -143,7 +144,11 @@
       }
     }
   } //for each streamline: while i < n_count
-  offsetPt0.push(npt); //add 'first index' as if one more line was added (fence post problem)
+  //add 'first index' as if one more line was added (fence post problem)
+  offsetPt0[noffset++] = npt;
+  //resize offset/vertex arrays that were initially over-provisioned
+  pts = pts.slice(0, npt3);
+  offsetPt0 = offsetPt0.slice(0, noffset);
   return {
     pts,
     offsetPt0,
@@ -176,9 +181,13 @@ function readTCK(buffer) {
   let reader = new DataView(buffer);
   //read and transform vertex positions
   let npt = 0;
-  let offsetPt0 = [];
-  offsetPt0.push(npt); //1st streamline starts at 0
-  let pts = [];
+  //over-provision offset array to store number of segments
+  let offsetPt0 = new Uint32Array(len / 4);
+  let noffset = 0;
+  //over-provision points array to store vertex positions
+  let npt3 = 0;
+  let pts = new Float32Array(len / 4);
+  offsetPt0[0] = 0; //1st streamline starts at 0
   while (pos + 12 < len) {
     let ptx = reader.getFloat32(pos, true);
     pos += 4;
@@ -188,17 +197,20 @@
     pos += 4;
     if (!isFinite(ptx)) {
       //both NaN and Infinity are not finite
-      offsetPt0.push(npt);
+      offsetPt0[noffset++] = npt;
       if (!isNaN(ptx))
         //terminate if infinity
         break;
     } else {
-      pts.push(ptx);
-      pts.push(pty);
-      pts.push(ptz);
+      pts[npt3++] = ptx;
+      pts[npt3++] = pty;
+      pts[npt3++] = ptz;
       npt++;
     }
   }
+  //resize offset/vertex arrays that were initially over-provisioned
+  pts = pts.slice(0, npt3);
+  offsetPt0 = offsetPt0.slice(0, noffset);
   return {
     pts,
     offsetPt0,
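Aside (not part of the patch): both `readTRK` and `readTCK` now follow the same pattern, namely allocate a typed array to a safe upper bound (here, the number of 32-bit words in the buffer), write into it with a running counter, then `slice()` down to the length actually used. A minimal sketch of that pattern in isolation, with a hypothetical `collectEvens` helper (illustrative only, not code from this repository):

```js
// Over-provision-then-slice: allocate an upper-bound Float32Array, fill it with a
// running counter, and trim with slice() once the true element count is known.
function collectEvens(values) {
  const out = new Float32Array(values.length); // upper bound: every input could match
  let n = 0; // number of slots actually written
  for (const v of values) {
    if (v % 2 === 0) out[n++] = v;
  }
  return out.slice(0, n); // copies only the used prefix
}

console.log(collectEvens([1, 2, 3, 4, 6])); // Float32Array(3) [2, 4, 6]
```

Note that `slice()` copies into a right-sized buffer, whereas `subarray()` would return a view that keeps the oversized allocation alive, which is presumably why the patch trims with `slice()`.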