Open3D (C++ API)  0.15.1
VoxelBlockGridImpl.h
1// ----------------------------------------------------------------------------
2// - Open3D: www.open3d.org -
3// ----------------------------------------------------------------------------
4// The MIT License (MIT)
5//
6// Copyright (c) 2018-2021 www.open3d.org
7//
8// Permission is hereby granted, free of charge, to any person obtaining a copy
9// of this software and associated documentation files (the "Software"), to deal
10// in the Software without restriction, including without limitation the rights
11// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12// copies of the Software, and to permit persons to whom the Software is
13// furnished to do so, subject to the following conditions:
14//
15// The above copyright notice and this permission notice shall be included in
16// all copies or substantial portions of the Software.
17//
18// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24// IN THE SOFTWARE.
25// ----------------------------------------------------------------------------
26
27#include <atomic>
28#include <cmath>
29
31#include "open3d/core/Dtype.h"
34#include "open3d/core/Tensor.h"
42
43namespace open3d {
44namespace t {
45namespace geometry {
46namespace kernel {
47namespace voxel_grid {
48
49using index_t = int;
50using ArrayIndexer = TArrayIndexer<index_t>;
51
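// Every kernel below is compiled twice from this header: the CUDA compiler
// (__CUDACC__) builds the *CUDA entry points, where core::ParallelFor launches
// the OPEN3D_DEVICE lambdas on the GPU, while the host compiler builds the
// *CPU entry points, where the same lambdas run in a parallel CPU loop.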
52#if defined(__CUDACC__)
53void GetVoxelCoordinatesAndFlattenedIndicesCUDA
54#else
55void GetVoxelCoordinatesAndFlattenedIndicesCPU
56#endif
57 (const core::Tensor& buf_indices,
58 const core::Tensor& block_keys,
59 core::Tensor& voxel_coords,
60 core::Tensor& flattened_indices,
61 index_t resolution,
62 float voxel_size) {
63 core::Device device = buf_indices.GetDevice();
64
65 const index_t* buf_indices_ptr = buf_indices.GetDataPtr<index_t>();
66 const index_t* block_key_ptr = block_keys.GetDataPtr<index_t>();
67
68 float* voxel_coords_ptr = voxel_coords.GetDataPtr<float>();
69 int64_t* flattened_indices_ptr = flattened_indices.GetDataPtr<int64_t>();
70
71 index_t n = flattened_indices.GetLength();
72 ArrayIndexer voxel_indexer({resolution, resolution, resolution});
73 index_t resolution3 = resolution * resolution * resolution;
74
75 core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t workload_idx) {
76 index_t block_idx = buf_indices_ptr[workload_idx / resolution3];
77 index_t voxel_idx = workload_idx % resolution3;
78
79 index_t block_key_offset = block_idx * 3;
80 index_t xb = block_key_ptr[block_key_offset + 0];
81 index_t yb = block_key_ptr[block_key_offset + 1];
82 index_t zb = block_key_ptr[block_key_offset + 2];
83
84 index_t xv, yv, zv;
85 voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
86
87 float x = (xb * resolution + xv) * voxel_size;
88 float y = (yb * resolution + yv) * voxel_size;
89 float z = (zb * resolution + zv) * voxel_size;
90
91 flattened_indices_ptr[workload_idx] =
92 block_idx * resolution3 + voxel_idx;
93
94 index_t voxel_coords_offset = workload_idx * 3;
95 voxel_coords_ptr[voxel_coords_offset + 0] = x;
96 voxel_coords_ptr[voxel_coords_offset + 1] = y;
97 voxel_coords_ptr[voxel_coords_offset + 2] = z;
98 });
99}
100
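// Maps a voxel coordinate (xo, yo, zo), which may lie one voxel outside the
// current block, to a flat index into the per-voxel value arrays. Coordinates
// that spill over are wrapped into the corresponding neighbor block using the
// precomputed 3x3x3 neighbor mask/index tables; returns -1 if that neighbor
// block is not allocated.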
101inline OPEN3D_DEVICE index_t DeviceGetLinearIdx(
102        index_t xo,
103        index_t yo,
104 index_t zo,
105 index_t curr_block_idx,
106 index_t resolution,
107 const ArrayIndexer& nb_block_masks_indexer,
108 const ArrayIndexer& nb_block_indices_indexer) {
109 index_t xn = (xo + resolution) % resolution;
110 index_t yn = (yo + resolution) % resolution;
111 index_t zn = (zo + resolution) % resolution;
112
113 index_t dxb = Sign(xo - xn);
114 index_t dyb = Sign(yo - yn);
115 index_t dzb = Sign(zo - zn);
116
117 index_t nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;
118
119 bool block_mask_i =
120 *nb_block_masks_indexer.GetDataPtr<bool>(curr_block_idx, nb_idx);
121 if (!block_mask_i) return -1;
122
123 index_t block_idx_i = *nb_block_indices_indexer.GetDataPtr<index_t>(
124 curr_block_idx, nb_idx);
125
126 return (((block_idx_i * resolution) + zn) * resolution + yn) * resolution +
127 xn;
128}
129
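// Estimates the TSDF gradient at voxel (xo, yo, zo) with central differences
// along x, y, and z, writing the three components into n. A component is left
// unchanged when either of its neighbors falls outside the allocated blocks.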
130template <typename tsdf_t>
131inline OPEN3D_DEVICE void DeviceGetNormal(
132        const tsdf_t* tsdf_base_ptr,
133 index_t xo,
134 index_t yo,
135 index_t zo,
136 index_t curr_block_idx,
137 float* n,
138 index_t resolution,
139 const ArrayIndexer& nb_block_masks_indexer,
140 const ArrayIndexer& nb_block_indices_indexer) {
141 auto GetLinearIdx = [&] OPEN3D_DEVICE(index_t xo, index_t yo,
142 index_t zo) -> index_t {
143 return DeviceGetLinearIdx(xo, yo, zo, curr_block_idx, resolution,
144 nb_block_masks_indexer,
145 nb_block_indices_indexer);
146 };
147 index_t vxp = GetLinearIdx(xo + 1, yo, zo);
148 index_t vxn = GetLinearIdx(xo - 1, yo, zo);
149 index_t vyp = GetLinearIdx(xo, yo + 1, zo);
150 index_t vyn = GetLinearIdx(xo, yo - 1, zo);
151 index_t vzp = GetLinearIdx(xo, yo, zo + 1);
152 index_t vzn = GetLinearIdx(xo, yo, zo - 1);
153 if (vxp >= 0 && vxn >= 0) n[0] = tsdf_base_ptr[vxp] - tsdf_base_ptr[vxn];
154 if (vyp >= 0 && vyn >= 0) n[1] = tsdf_base_ptr[vyp] - tsdf_base_ptr[vyn];
155 if (vzp >= 0 && vzn >= 0) n[2] = tsdf_base_ptr[vzp] - tsdf_base_ptr[vzn];
156};
157
158template <typename input_depth_t,
159 typename input_color_t,
160 typename tsdf_t,
161 typename weight_t,
162 typename color_t>
163#if defined(__CUDACC__)
164void IntegrateCUDA
165#else
166void IntegrateCPU
167#endif
168 (const core::Tensor& depth,
169 const core::Tensor& color,
170 const core::Tensor& indices,
171 const core::Tensor& block_keys,
172 TensorMap& block_value_map,
173 const core::Tensor& depth_intrinsic,
174 const core::Tensor& color_intrinsic,
175 const core::Tensor& extrinsics,
176 index_t resolution,
177 float voxel_size,
178 float sdf_trunc,
179 float depth_scale,
180 float depth_max) {
181 // Parameters
182 index_t resolution2 = resolution * resolution;
183 index_t resolution3 = resolution2 * resolution;
184
185 TransformIndexer transform_indexer(depth_intrinsic, extrinsics, voxel_size);
186 TransformIndexer colormap_indexer(
187 color_intrinsic,
188            core::Tensor::Eye(4, core::Float64, core::Device("CPU:0")));
189
190 ArrayIndexer voxel_indexer({resolution, resolution, resolution});
191
192 ArrayIndexer block_keys_indexer(block_keys, 1);
193 ArrayIndexer depth_indexer(depth, 2);
194 core::Device device = block_keys.GetDevice();
195
196 const index_t* indices_ptr = indices.GetDataPtr<index_t>();
197
198 if (!block_value_map.Contains("tsdf") ||
199 !block_value_map.Contains("weight")) {
200        utility::LogError(
201                "TSDF and/or weight not allocated in blocks, please implement "
202 "customized integration.");
203 }
204 tsdf_t* tsdf_base_ptr = block_value_map.at("tsdf").GetDataPtr<tsdf_t>();
205 weight_t* weight_base_ptr =
206 block_value_map.at("weight").GetDataPtr<weight_t>();
207
208 bool integrate_color =
209 block_value_map.Contains("color") && color.NumElements() > 0;
210 color_t* color_base_ptr = nullptr;
211 ArrayIndexer color_indexer;
212
213 float color_multiplier = 1.0;
214 if (integrate_color) {
215 color_base_ptr = block_value_map.at("color").GetDataPtr<color_t>();
216 color_indexer = ArrayIndexer(color, 2);
217
218 // Float32: [0, 1] -> [0, 255]
219 if (color.GetDtype() == core::Float32) {
220 color_multiplier = 255.0;
221 }
222 }
223
224 index_t n = indices.GetLength() * resolution3;
225 core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t workload_idx) {
226 // Natural index (0, N) -> (block_idx, voxel_idx)
227 index_t block_idx = indices_ptr[workload_idx / resolution3];
228 index_t voxel_idx = workload_idx % resolution3;
229
231 // block_idx -> (x_block, y_block, z_block)
232 index_t* block_key_ptr =
233 block_keys_indexer.GetDataPtr<index_t>(block_idx);
234 index_t xb = block_key_ptr[0];
235 index_t yb = block_key_ptr[1];
236 index_t zb = block_key_ptr[2];
237
238 // voxel_idx -> (x_voxel, y_voxel, z_voxel)
239 index_t xv, yv, zv;
240 voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
241
242 // coordinate in world (in voxel)
243 index_t x = xb * resolution + xv;
244 index_t y = yb * resolution + yv;
245 index_t z = zb * resolution + zv;
246
247 // coordinate in camera (in voxel -> in meter)
248 float xc, yc, zc, u, v;
249 transform_indexer.RigidTransform(static_cast<float>(x),
250 static_cast<float>(y),
251 static_cast<float>(z), &xc, &yc, &zc);
252
253 // coordinate in image (in pixel)
254 transform_indexer.Project(xc, yc, zc, &u, &v);
255 if (!depth_indexer.InBoundary(u, v)) {
256 return;
257 }
258
259 index_t ui = static_cast<index_t>(u);
260 index_t vi = static_cast<index_t>(v);
261
262 // Associate image workload and compute SDF and
263 // TSDF.
264 float depth =
265 *depth_indexer.GetDataPtr<input_depth_t>(ui, vi) / depth_scale;
266
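        // Projective SDF: signed distance from the voxel to the observed
        // surface along the camera ray. Values are rejected below if the voxel
        // is more than sdf_trunc behind the surface, clamped to +sdf_trunc in
        // front of it, and normalized to [-1, 1].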
267 float sdf = depth - zc;
268 if (depth <= 0 || depth > depth_max || zc <= 0 || sdf < -sdf_trunc) {
269 return;
270 }
271 sdf = sdf < sdf_trunc ? sdf : sdf_trunc;
272 sdf /= sdf_trunc;
273
274 index_t linear_idx = block_idx * resolution3 + voxel_idx;
275
276 tsdf_t* tsdf_ptr = tsdf_base_ptr + linear_idx;
277 weight_t* weight_ptr = weight_base_ptr + linear_idx;
278
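        // Running weighted average: tsdf_new = (w * tsdf_old + sdf) / (w + 1).
        // The same weights are reused for the color average below; the weight
        // itself is incremented once at the end of this iteration.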
279 float inv_wsum = 1.0f / (*weight_ptr + 1);
280 float weight = *weight_ptr;
281 *tsdf_ptr = (weight * (*tsdf_ptr) + sdf) * inv_wsum;
282
283 if (integrate_color) {
284 color_t* color_ptr = color_base_ptr + 3 * linear_idx;
285
286 // Unproject ui, vi with depth_intrinsic, then project back with
287 // color_intrinsic
288 float x, y, z;
289 transform_indexer.Unproject(ui, vi, 1.0, &x, &y, &z);
290
291 float uf, vf;
292 colormap_indexer.Project(x, y, z, &uf, &vf);
293 if (color_indexer.InBoundary(uf, vf)) {
294 ui = round(uf);
295 vi = round(vf);
296
297 input_color_t* input_color_ptr =
298 color_indexer.GetDataPtr<input_color_t>(ui, vi);
299
300 for (index_t i = 0; i < 3; ++i) {
301 color_ptr[i] = (weight * color_ptr[i] +
302 input_color_ptr[i] * color_multiplier) *
303 inv_wsum;
304 }
305 }
306 }
307 *weight_ptr = weight + 1;
308 });
309
310#if defined(__CUDACC__)
311    core::cuda::Synchronize();
312#endif
313}
314
315#if defined(__CUDACC__)
316void EstimateRangeCUDA
317#else
318void EstimateRangeCPU
319#endif
320 (const core::Tensor& block_keys,
321 core::Tensor& range_minmax_map,
322 const core::Tensor& intrinsics,
323 const core::Tensor& extrinsics,
324 int h,
325 int w,
326 int down_factor,
327 int64_t block_resolution,
328 float voxel_size,
329 float depth_min,
330 float depth_max) {
331
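    // Two-pass estimation on a down-sampled range map: pass 0 projects the 8
    // corners of every block into the image, covers the resulting rectangle
    // with 16x16 fragments, and stores per-fragment (z_min, z_max); pass 1
    // rasterizes the fragments and keeps the per-pixel min/max depth.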
332 // TODO(wei): reserve it in a reusable buffer
333
334 // Every 2 channels: (min, max)
335 int h_down = h / down_factor;
336 int w_down = w / down_factor;
337 range_minmax_map = core::Tensor({h_down, w_down, 2}, core::Float32,
338 block_keys.GetDevice());
339 NDArrayIndexer range_map_indexer(range_minmax_map, 2);
340
341    // Every 6 channels: (z_min, z_max, v_min, u_min, v_max, u_max)
342 const int fragment_size = 16;
343 const int frag_buffer_size = 65535;
344
345 // TODO(wei): explicit buffer
346 core::Tensor fragment_buffer = core::Tensor(
347 {frag_buffer_size, 6}, core::Float32, block_keys.GetDevice());
348
349 NDArrayIndexer frag_buffer_indexer(fragment_buffer, 1);
350 NDArrayIndexer block_keys_indexer(block_keys, 1);
351 TransformIndexer w2c_transform_indexer(intrinsics, extrinsics);
352#if defined(__CUDACC__)
353 core::Tensor count(std::vector<int>{0}, {1}, core::Int32,
354 block_keys.GetDevice());
355 int* count_ptr = count.GetDataPtr<int>();
356#else
357 std::atomic<int> count_atomic(0);
358 std::atomic<int>* count_ptr = &count_atomic;
359#endif
360
361#ifndef __CUDACC__
362 using std::max;
363 using std::min;
364#endif
365
366    // Pass 0: iterate over blocks, fill in a rendering fragment array
367    core::ParallelFor(
368            block_keys.GetDevice(), block_keys.GetLength(),
369 [=] OPEN3D_DEVICE(int64_t workload_idx) {
370 int* key = block_keys_indexer.GetDataPtr<int>(workload_idx);
371
372 int u_min = w_down - 1, v_min = h_down - 1, u_max = 0,
373 v_max = 0;
374 float z_min = depth_max, z_max = depth_min;
375
376 float xc, yc, zc, u, v;
377
378 // Project 8 corners to low-res image and form a rectangle
379 for (int i = 0; i < 8; ++i) {
380 float xw = (key[0] + ((i & 1) > 0)) * block_resolution *
381 voxel_size;
382 float yw = (key[1] + ((i & 2) > 0)) * block_resolution *
383 voxel_size;
384 float zw = (key[2] + ((i & 4) > 0)) * block_resolution *
385 voxel_size;
386
387 w2c_transform_indexer.RigidTransform(xw, yw, zw, &xc, &yc,
388 &zc);
389 if (zc <= 0) continue;
390
391 // Project to the down sampled image buffer
392 w2c_transform_indexer.Project(xc, yc, zc, &u, &v);
393 u /= down_factor;
394 v /= down_factor;
395
396 v_min = min(static_cast<int>(floorf(v)), v_min);
397 v_max = max(static_cast<int>(ceilf(v)), v_max);
398
399 u_min = min(static_cast<int>(floorf(u)), u_min);
400 u_max = max(static_cast<int>(ceilf(u)), u_max);
401
402 z_min = min(z_min, zc);
403 z_max = max(z_max, zc);
404 }
405
406 v_min = max(0, v_min);
407 v_max = min(h_down - 1, v_max);
408
409 u_min = max(0, u_min);
410 u_max = min(w_down - 1, u_max);
411
412 if (v_min >= v_max || u_min >= u_max || z_min >= z_max) return;
413
414 // Divide the rectangle into small 16x16 fragments
415 int frag_v_count =
416 ceilf(float(v_max - v_min + 1) / float(fragment_size));
417 int frag_u_count =
418 ceilf(float(u_max - u_min + 1) / float(fragment_size));
419
420 int frag_count = frag_v_count * frag_u_count;
421                int frag_count_start = OPEN3D_ATOMIC_ADD(count_ptr, frag_count);
422 int frag_count_end = frag_count_start + frag_count;
423 if (frag_count_end >= frag_buffer_size) {
424 printf("Fragment count exceeding buffer size, abort!\n");
425 }
426
427 int offset = 0;
428 for (int frag_v = 0; frag_v < frag_v_count; ++frag_v) {
429 for (int frag_u = 0; frag_u < frag_u_count;
430 ++frag_u, ++offset) {
431 float* frag_ptr = frag_buffer_indexer.GetDataPtr<float>(
432 frag_count_start + offset);
433 // zmin, zmax
434 frag_ptr[0] = z_min;
435 frag_ptr[1] = z_max;
436
437 // vmin, umin
438 frag_ptr[2] = v_min + frag_v * fragment_size;
439 frag_ptr[3] = u_min + frag_u * fragment_size;
440
441 // vmax, umax
442 frag_ptr[4] = min(frag_ptr[2] + fragment_size - 1,
443 static_cast<float>(v_max));
444 frag_ptr[5] = min(frag_ptr[3] + fragment_size - 1,
445 static_cast<float>(u_max));
446 }
447 }
448 });
449#if defined(__CUDACC__)
450 int frag_count = count[0].Item<int>();
451#else
452 int frag_count = (*count_ptr).load();
453#endif
454
455 // Pass 0.5: Fill in range map to prepare for atomic min/max
456 core::ParallelFor(block_keys.GetDevice(), h_down * w_down,
457 [=] OPEN3D_DEVICE(int64_t workload_idx) {
458 int v = workload_idx / w_down;
459 int u = workload_idx % w_down;
460 float* range_ptr =
461 range_map_indexer.GetDataPtr<float>(u, v);
462 range_ptr[0] = depth_max;
463 range_ptr[1] = depth_min;
464 });
465
466    // Pass 1: iterate over the rendering fragment array, fill in ranges
467    core::ParallelFor(
468            block_keys.GetDevice(), frag_count * fragment_size * fragment_size,
469 [=] OPEN3D_DEVICE(int64_t workload_idx) {
470 int frag_idx = workload_idx / (fragment_size * fragment_size);
471 int local_idx = workload_idx % (fragment_size * fragment_size);
472 int dv = local_idx / fragment_size;
473 int du = local_idx % fragment_size;
474
475 float* frag_ptr =
476 frag_buffer_indexer.GetDataPtr<float>(frag_idx);
477 int v_min = static_cast<int>(frag_ptr[2]);
478 int u_min = static_cast<int>(frag_ptr[3]);
479 int v_max = static_cast<int>(frag_ptr[4]);
480 int u_max = static_cast<int>(frag_ptr[5]);
481
482 int v = v_min + dv;
483 int u = u_min + du;
484 if (v > v_max || u > u_max) return;
485
486 float z_min = frag_ptr[0];
487 float z_max = frag_ptr[1];
488 float* range_ptr = range_map_indexer.GetDataPtr<float>(u, v);
489#ifdef __CUDACC__
490 atomicMinf(&(range_ptr[0]), z_min);
491 atomicMaxf(&(range_ptr[1]), z_max);
492#else
493#pragma omp critical(EstimateRangeCPU)
494 {
495 range_ptr[0] = min(z_min, range_ptr[0]);
496 range_ptr[1] = max(z_max, range_ptr[1]);
497 }
498#endif
499 });
500#if defined(__CUDACC__)
501    core::cuda::Synchronize();
502#endif
503}
504
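// Per-ray cache of the most recently visited block key and its buffer index,
// used during ray casting to skip repeated hash map lookups when consecutive
// samples fall into the same block.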
505struct MiniVecCache {
506    index_t x;
507    index_t y;
508    index_t z;
509    index_t block_idx;
510
511    inline index_t OPEN3D_DEVICE Check(index_t xin, index_t yin, index_t zin) {
512        return (xin == x && yin == y && zin == z) ? block_idx : -1;
513 }
514
515 inline void OPEN3D_DEVICE Update(index_t xin,
516 index_t yin,
517 index_t zin,
518 index_t block_idx_in) {
519 x = xin;
520 y = yin;
521 z = zin;
522 block_idx = block_idx_in;
523 }
524};
525
526template <typename tsdf_t, typename weight_t, typename color_t>
527#if defined(__CUDACC__)
528void RayCastCUDA
529#else
530void RayCastCPU
531#endif
532 (std::shared_ptr<core::HashMap>& hashmap,
533 const TensorMap& block_value_map,
534 const core::Tensor& range,
535 TensorMap& renderings_map,
536 const core::Tensor& intrinsic,
537 const core::Tensor& extrinsics,
538 index_t h,
539 index_t w,
540 index_t block_resolution,
541 float voxel_size,
542 float depth_scale,
543 float depth_min,
544 float depth_max,
545 float weight_threshold,
546 float trunc_voxel_multiplier,
547 int range_map_down_factor) {
548    using Key = utility::MiniVec<index_t, 3>;
549    using Hash = utility::MiniVecHash<index_t, 3>;
550    using Eq = utility::MiniVecEq<index_t, 3>;
551
552 auto device_hashmap = hashmap->GetDeviceHashBackend();
553#if defined(__CUDACC__)
554 auto cuda_hashmap =
555 std::dynamic_pointer_cast<core::StdGPUHashBackend<Key, Hash, Eq>>(
556 device_hashmap);
557 if (cuda_hashmap == nullptr) {
558        utility::LogError(
559                "Unsupported backend: CUDA raycasting only supports STDGPU.");
560 }
561 auto hashmap_impl = cuda_hashmap->GetImpl();
562#else
563 auto cpu_hashmap =
564 std::dynamic_pointer_cast<core::TBBHashBackend<Key, Hash, Eq>>(
565 device_hashmap);
566 if (cpu_hashmap == nullptr) {
567        utility::LogError(
568                "Unsupported backend: CPU raycasting only supports TBB.");
569 }
570 auto hashmap_impl = *cpu_hashmap->GetImpl();
571#endif
572
573 core::Device device = hashmap->GetDevice();
574
575 ArrayIndexer range_indexer(range, 2);
576
577 // Geometry
578 ArrayIndexer depth_indexer;
579 ArrayIndexer vertex_indexer;
580 ArrayIndexer normal_indexer;
581
582 // Diff rendering
583 ArrayIndexer index_indexer;
584 ArrayIndexer mask_indexer;
585 ArrayIndexer interp_ratio_indexer;
586 ArrayIndexer interp_ratio_dx_indexer;
587 ArrayIndexer interp_ratio_dy_indexer;
588 ArrayIndexer interp_ratio_dz_indexer;
589
590 // Color
591 ArrayIndexer color_indexer;
592
593 if (!block_value_map.Contains("tsdf") ||
594 !block_value_map.Contains("weight")) {
595        utility::LogError(
596                "TSDF and/or weight not allocated in blocks, please implement "
597 "customized integration.");
598 }
599 const tsdf_t* tsdf_base_ptr =
600 block_value_map.at("tsdf").GetDataPtr<tsdf_t>();
601 const weight_t* weight_base_ptr =
602 block_value_map.at("weight").GetDataPtr<weight_t>();
603
604 // Geometry
605 if (renderings_map.Contains("depth")) {
606 depth_indexer = ArrayIndexer(renderings_map.at("depth"), 2);
607 }
608 if (renderings_map.Contains("vertex")) {
609 vertex_indexer = ArrayIndexer(renderings_map.at("vertex"), 2);
610 }
611 if (renderings_map.Contains("normal")) {
612 normal_indexer = ArrayIndexer(renderings_map.at("normal"), 2);
613 }
614
615 // Diff rendering
616 if (renderings_map.Contains("index")) {
617 index_indexer = ArrayIndexer(renderings_map.at("index"), 2);
618 }
619 if (renderings_map.Contains("mask")) {
620 mask_indexer = ArrayIndexer(renderings_map.at("mask"), 2);
621 }
622 if (renderings_map.Contains("interp_ratio")) {
623 interp_ratio_indexer =
624 ArrayIndexer(renderings_map.at("interp_ratio"), 2);
625 }
626 if (renderings_map.Contains("interp_ratio_dx")) {
627 interp_ratio_dx_indexer =
628 ArrayIndexer(renderings_map.at("interp_ratio_dx"), 2);
629 }
630 if (renderings_map.Contains("interp_ratio_dy")) {
631 interp_ratio_dy_indexer =
632 ArrayIndexer(renderings_map.at("interp_ratio_dy"), 2);
633 }
634 if (renderings_map.Contains("interp_ratio_dz")) {
635 interp_ratio_dz_indexer =
636 ArrayIndexer(renderings_map.at("interp_ratio_dz"), 2);
637 }
638
639 // Color
640 bool render_color = false;
641 if (block_value_map.Contains("color") && renderings_map.Contains("color")) {
642 render_color = true;
643 color_indexer = ArrayIndexer(renderings_map.at("color"), 2);
644 }
645 const color_t* color_base_ptr =
646 render_color ? block_value_map.at("color").GetDataPtr<color_t>()
647 : nullptr;
648
649 bool visit_neighbors = render_color || normal_indexer.GetDataPtr() ||
650 mask_indexer.GetDataPtr() ||
651 index_indexer.GetDataPtr() ||
652 interp_ratio_indexer.GetDataPtr() ||
653 interp_ratio_dx_indexer.GetDataPtr() ||
654 interp_ratio_dy_indexer.GetDataPtr() ||
655 interp_ratio_dz_indexer.GetDataPtr();
656
657 TransformIndexer c2w_transform_indexer(
658 intrinsic, t::geometry::InverseTransformation(extrinsics));
659 TransformIndexer w2c_transform_indexer(intrinsic, extrinsics);
660
661 index_t rows = h;
662 index_t cols = w;
663 index_t n = rows * cols;
664
665 float block_size = voxel_size * block_resolution;
666 index_t resolution2 = block_resolution * block_resolution;
667 index_t resolution3 = resolution2 * block_resolution;
668
669#ifndef __CUDACC__
670 using std::max;
671 using std::sqrt;
672#endif
673
674 core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t workload_idx) {
675 auto GetLinearIdxAtP = [&] OPEN3D_DEVICE(
676 index_t x_b, index_t y_b, index_t z_b,
677 index_t x_v, index_t y_v, index_t z_v,
678 core::buf_index_t block_buf_idx,
679 MiniVecCache & cache) -> index_t {
680 index_t x_vn = (x_v + block_resolution) % block_resolution;
681 index_t y_vn = (y_v + block_resolution) % block_resolution;
682 index_t z_vn = (z_v + block_resolution) % block_resolution;
683
684 index_t dx_b = Sign(x_v - x_vn);
685 index_t dy_b = Sign(y_v - y_vn);
686 index_t dz_b = Sign(z_v - z_vn);
687
688 if (dx_b == 0 && dy_b == 0 && dz_b == 0) {
689 return block_buf_idx * resolution3 + z_v * resolution2 +
690 y_v * block_resolution + x_v;
691 } else {
692 Key key(x_b + dx_b, y_b + dy_b, z_b + dz_b);
693
694 index_t block_buf_idx = cache.Check(key[0], key[1], key[2]);
695 if (block_buf_idx < 0) {
696 auto iter = hashmap_impl.find(key);
697 if (iter == hashmap_impl.end()) return -1;
698 block_buf_idx = iter->second;
699 cache.Update(key[0], key[1], key[2], block_buf_idx);
700 }
701
702 return block_buf_idx * resolution3 + z_vn * resolution2 +
703 y_vn * block_resolution + x_vn;
704 }
705 };
706
707 auto GetLinearIdxAtT = [&] OPEN3D_DEVICE(
708 float x_o, float y_o, float z_o,
709 float x_d, float y_d, float z_d, float t,
710 MiniVecCache& cache) -> index_t {
711 float x_g = x_o + t * x_d;
712 float y_g = y_o + t * y_d;
713 float z_g = z_o + t * z_d;
714
715 // MiniVec coordinate and look up
716 index_t x_b = static_cast<index_t>(floorf(x_g / block_size));
717 index_t y_b = static_cast<index_t>(floorf(y_g / block_size));
718 index_t z_b = static_cast<index_t>(floorf(z_g / block_size));
719
720 Key key(x_b, y_b, z_b);
721 index_t block_buf_idx = cache.Check(x_b, y_b, z_b);
722 if (block_buf_idx < 0) {
723 auto iter = hashmap_impl.find(key);
724 if (iter == hashmap_impl.end()) return -1;
725 block_buf_idx = iter->second;
726 cache.Update(x_b, y_b, z_b, block_buf_idx);
727 }
728
729 // Voxel coordinate and look up
730 index_t x_v = index_t((x_g - x_b * block_size) / voxel_size);
731 index_t y_v = index_t((y_g - y_b * block_size) / voxel_size);
732 index_t z_v = index_t((z_g - z_b * block_size) / voxel_size);
733
734 return block_buf_idx * resolution3 + z_v * resolution2 +
735 y_v * block_resolution + x_v;
736 };
737
738 index_t y = workload_idx / cols;
739 index_t x = workload_idx % cols;
740
741 const float* range = range_indexer.GetDataPtr<float>(
742 x / range_map_down_factor, y / range_map_down_factor);
743
744 float* depth_ptr = nullptr;
745 float* vertex_ptr = nullptr;
746 float* color_ptr = nullptr;
747 float* normal_ptr = nullptr;
748
749 int64_t* index_ptr = nullptr;
750 bool* mask_ptr = nullptr;
751 float* interp_ratio_ptr = nullptr;
752 float* interp_ratio_dx_ptr = nullptr;
753 float* interp_ratio_dy_ptr = nullptr;
754 float* interp_ratio_dz_ptr = nullptr;
755
756 if (vertex_indexer.GetDataPtr()) {
757 vertex_ptr = vertex_indexer.GetDataPtr<float>(x, y);
758 vertex_ptr[0] = 0;
759 vertex_ptr[1] = 0;
760 vertex_ptr[2] = 0;
761 }
762 if (depth_indexer.GetDataPtr()) {
763 depth_ptr = depth_indexer.GetDataPtr<float>(x, y);
764 depth_ptr[0] = 0;
765 }
766 if (normal_indexer.GetDataPtr()) {
767 normal_ptr = normal_indexer.GetDataPtr<float>(x, y);
768 normal_ptr[0] = 0;
769 normal_ptr[1] = 0;
770 normal_ptr[2] = 0;
771 }
772
773 if (mask_indexer.GetDataPtr()) {
774 mask_ptr = mask_indexer.GetDataPtr<bool>(x, y);
775#ifdef __CUDACC__
776#pragma unroll
777#endif
778 for (int i = 0; i < 8; ++i) {
779 mask_ptr[i] = false;
780 }
781 }
782 if (index_indexer.GetDataPtr()) {
783 index_ptr = index_indexer.GetDataPtr<int64_t>(x, y);
784#ifdef __CUDACC__
785#pragma unroll
786#endif
787 for (int i = 0; i < 8; ++i) {
788 index_ptr[i] = 0;
789 }
790 }
791 if (interp_ratio_indexer.GetDataPtr()) {
792 interp_ratio_ptr = interp_ratio_indexer.GetDataPtr<float>(x, y);
793#ifdef __CUDACC__
794#pragma unroll
795#endif
796 for (int i = 0; i < 8; ++i) {
797 interp_ratio_ptr[i] = 0;
798 }
799 }
800 if (interp_ratio_dx_indexer.GetDataPtr()) {
801 interp_ratio_dx_ptr =
802 interp_ratio_dx_indexer.GetDataPtr<float>(x, y);
803#ifdef __CUDACC__
804#pragma unroll
805#endif
806 for (int i = 0; i < 8; ++i) {
807 interp_ratio_dx_ptr[i] = 0;
808 }
809 }
810 if (interp_ratio_dy_indexer.GetDataPtr()) {
811 interp_ratio_dy_ptr =
812 interp_ratio_dy_indexer.GetDataPtr<float>(x, y);
813#ifdef __CUDACC__
814#pragma unroll
815#endif
816 for (int i = 0; i < 8; ++i) {
817 interp_ratio_dy_ptr[i] = 0;
818 }
819 }
820 if (interp_ratio_dz_indexer.GetDataPtr()) {
821 interp_ratio_dz_ptr =
822 interp_ratio_dz_indexer.GetDataPtr<float>(x, y);
823#ifdef __CUDACC__
824#pragma unroll
825#endif
826 for (int i = 0; i < 8; ++i) {
827 interp_ratio_dz_ptr[i] = 0;
828 }
829 }
830
831 if (color_indexer.GetDataPtr()) {
832 color_ptr = color_indexer.GetDataPtr<float>(x, y);
833 color_ptr[0] = 0;
834 color_ptr[1] = 0;
835 color_ptr[2] = 0;
836 }
837
838 float t = range[0];
839 const float t_max = range[1];
840 if (t >= t_max) return;
841
842 // Coordinates in camera and global
843 float x_c = 0, y_c = 0, z_c = 0;
844 float x_g = 0, y_g = 0, z_g = 0;
845 float x_o = 0, y_o = 0, z_o = 0;
846
847 // Iterative ray intersection check
848 float t_prev = t;
849
850 float tsdf_prev = -1.0f;
851 float tsdf = 1.0;
852 float sdf_trunc = voxel_size * trunc_voxel_multiplier;
853 float w = 0.0;
854
855 // Camera origin
856 c2w_transform_indexer.RigidTransform(0, 0, 0, &x_o, &y_o, &z_o);
857
858 // Direction
859 c2w_transform_indexer.Unproject(static_cast<float>(x),
860 static_cast<float>(y), 1.0f, &x_c, &y_c,
861 &z_c);
862 c2w_transform_indexer.RigidTransform(x_c, y_c, z_c, &x_g, &y_g, &z_g);
863 float x_d = (x_g - x_o);
864 float y_d = (y_g - y_o);
865 float z_d = (z_g - z_o);
866
867 MiniVecCache cache{0, 0, 0, -1};
868 bool surface_found = false;
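        // March along the ray: skip a full block when no block is allocated at
        // the sample, otherwise advance by max(voxel_size, tsdf * sdf_trunc)
        // until the TSDF changes sign from positive to negative, i.e. the ray
        // crosses a front-facing surface.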
869 while (t < t_max) {
870 index_t linear_idx =
871 GetLinearIdxAtT(x_o, y_o, z_o, x_d, y_d, z_d, t, cache);
872
873 if (linear_idx < 0) {
874 t_prev = t;
875 t += block_size;
876 } else {
877 tsdf_prev = tsdf;
878 tsdf = tsdf_base_ptr[linear_idx];
879 w = weight_base_ptr[linear_idx];
880 if (tsdf_prev > 0 && w >= weight_threshold && tsdf <= 0) {
881 surface_found = true;
882 break;
883 }
884 t_prev = t;
885 float delta = tsdf * sdf_trunc;
886 t += delta < voxel_size ? voxel_size : delta;
887 }
888 }
889
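        // Refine the hit by linearly interpolating the zero crossing between
        // the last two samples (t_prev, t), then recover the surface point in
        // world coordinates.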
890 if (surface_found) {
891 float t_intersect =
892 (t * tsdf_prev - t_prev * tsdf) / (tsdf_prev - tsdf);
893 x_g = x_o + t_intersect * x_d;
894 y_g = y_o + t_intersect * y_d;
895 z_g = z_o + t_intersect * z_d;
896
897 // Trivial vertex assignment
898 if (depth_ptr) {
899 *depth_ptr = t_intersect * depth_scale;
900 }
901 if (vertex_ptr) {
902 w2c_transform_indexer.RigidTransform(
903 x_g, y_g, z_g, vertex_ptr + 0, vertex_ptr + 1,
904 vertex_ptr + 2);
905 }
906 if (!visit_neighbors) return;
907
908 // Trilinear interpolation
909 // TODO(wei): simplify the flow by splitting the
910 // functions given what is enabled
911 index_t x_b = static_cast<index_t>(floorf(x_g / block_size));
912 index_t y_b = static_cast<index_t>(floorf(y_g / block_size));
913 index_t z_b = static_cast<index_t>(floorf(z_g / block_size));
914 float x_v = (x_g - float(x_b) * block_size) / voxel_size;
915 float y_v = (y_g - float(y_b) * block_size) / voxel_size;
916 float z_v = (z_g - float(z_b) * block_size) / voxel_size;
917
918 Key key(x_b, y_b, z_b);
919
920 index_t block_buf_idx = cache.Check(x_b, y_b, z_b);
921 if (block_buf_idx < 0) {
922 auto iter = hashmap_impl.find(key);
923 if (iter == hashmap_impl.end()) return;
924 block_buf_idx = iter->second;
925 cache.Update(x_b, y_b, z_b, block_buf_idx);
926 }
927
928 index_t x_v_floor = static_cast<index_t>(floorf(x_v));
929 index_t y_v_floor = static_cast<index_t>(floorf(y_v));
930 index_t z_v_floor = static_cast<index_t>(floorf(z_v));
931
932 float ratio_x = x_v - float(x_v_floor);
933 float ratio_y = y_v - float(y_v_floor);
934 float ratio_z = z_v - float(z_v_floor);
935
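            // Visit the 8 voxels surrounding the hit point: accumulate the
            // trilinear weight r = rx * ry * rz of each corner for color
            // interpolation, and the analytic derivatives of r for the TSDF
            // gradient that forms the surface normal.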
936 float sum_r = 0.0;
937 for (index_t k = 0; k < 8; ++k) {
938 index_t dx_v = (k & 1) > 0 ? 1 : 0;
939 index_t dy_v = (k & 2) > 0 ? 1 : 0;
940 index_t dz_v = (k & 4) > 0 ? 1 : 0;
941
942 index_t linear_idx_k = GetLinearIdxAtP(
943 x_b, y_b, z_b, x_v_floor + dx_v, y_v_floor + dy_v,
944 z_v_floor + dz_v, block_buf_idx, cache);
945
946 if (linear_idx_k >= 0 && weight_base_ptr[linear_idx_k] > 0) {
947 float rx = dx_v * (ratio_x) + (1 - dx_v) * (1 - ratio_x);
948 float ry = dy_v * (ratio_y) + (1 - dy_v) * (1 - ratio_y);
949 float rz = dz_v * (ratio_z) + (1 - dz_v) * (1 - ratio_z);
950 float r = rx * ry * rz;
951
952 if (interp_ratio_ptr) {
953 interp_ratio_ptr[k] = r;
954 }
955 if (mask_ptr) {
956 mask_ptr[k] = true;
957 }
958 if (index_ptr) {
959 index_ptr[k] = linear_idx_k;
960 }
961
962 float tsdf_k = tsdf_base_ptr[linear_idx_k];
963 float interp_ratio_dx = ry * rz * (2 * dx_v - 1);
964 float interp_ratio_dy = rx * rz * (2 * dy_v - 1);
965 float interp_ratio_dz = rx * ry * (2 * dz_v - 1);
966
967 if (interp_ratio_dx_ptr) {
968 interp_ratio_dx_ptr[k] = interp_ratio_dx;
969 }
970 if (interp_ratio_dy_ptr) {
971 interp_ratio_dy_ptr[k] = interp_ratio_dy;
972 }
973 if (interp_ratio_dz_ptr) {
974 interp_ratio_dz_ptr[k] = interp_ratio_dz;
975 }
976
977 if (normal_ptr) {
978 normal_ptr[0] += interp_ratio_dx * tsdf_k;
979 normal_ptr[1] += interp_ratio_dy * tsdf_k;
980 normal_ptr[2] += interp_ratio_dz * tsdf_k;
981 }
982
983 if (color_ptr) {
984 index_t color_linear_idx = linear_idx_k * 3;
985 color_ptr[0] +=
986 r * color_base_ptr[color_linear_idx + 0];
987 color_ptr[1] +=
988 r * color_base_ptr[color_linear_idx + 1];
989 color_ptr[2] +=
990 r * color_base_ptr[color_linear_idx + 2];
991 }
992
993 sum_r += r;
994 }
995 } // loop over 8 neighbors
996
997 if (sum_r > 0) {
998 sum_r *= 255.0;
999 if (color_ptr) {
1000 color_ptr[0] /= sum_r;
1001 color_ptr[1] /= sum_r;
1002 color_ptr[2] /= sum_r;
1003 }
1004
1005 if (normal_ptr) {
1006 constexpr float EPSILON = 1e-5f;
1007 float norm = sqrt(normal_ptr[0] * normal_ptr[0] +
1008 normal_ptr[1] * normal_ptr[1] +
1009 normal_ptr[2] * normal_ptr[2]);
1010 norm = std::max(norm, EPSILON);
1011 w2c_transform_indexer.Rotate(
1012 -normal_ptr[0] / norm, -normal_ptr[1] / norm,
1013 -normal_ptr[2] / norm, normal_ptr + 0,
1014 normal_ptr + 1, normal_ptr + 2);
1015 }
1016 }
1017 } // surface-found
1018 });
1019
1020#if defined(__CUDACC__)
1021    core::cuda::Synchronize();
1022#endif
1023}
1024
1025template <typename tsdf_t, typename weight_t, typename color_t>
1026#if defined(__CUDACC__)
1027void ExtractPointCloudCUDA
1028#else
1029void ExtractPointCloudCPU
1030#endif
1031 (const core::Tensor& indices,
1032 const core::Tensor& nb_indices,
1033 const core::Tensor& nb_masks,
1034 const core::Tensor& block_keys,
1035 const TensorMap& block_value_map,
1036         core::Tensor& points,
1037         core::Tensor& normals,
1038 core::Tensor& colors,
1039 index_t resolution,
1040 float voxel_size,
1041 float weight_threshold,
1042 int& valid_size) {
1043 core::Device device = block_keys.GetDevice();
1044
1045 // Parameters
1046 index_t resolution2 = resolution * resolution;
1047 index_t resolution3 = resolution2 * resolution;
1048
1049 // Shape / transform indexers, no data involved
1050 ArrayIndexer voxel_indexer({resolution, resolution, resolution});
1051
1052 // Real data indexer
1053 ArrayIndexer block_keys_indexer(block_keys, 1);
1054 ArrayIndexer nb_block_masks_indexer(nb_masks, 2);
1055 ArrayIndexer nb_block_indices_indexer(nb_indices, 2);
1056
1057    // Plain arrays that do not require indexers
1058 const index_t* indices_ptr = indices.GetDataPtr<index_t>();
1059
1060 if (!block_value_map.Contains("tsdf") ||
1061 !block_value_map.Contains("weight")) {
1062        utility::LogError(
1063                "TSDF and/or weight not allocated in blocks, please implement "
1064 "customized integration.");
1065 }
1066 const tsdf_t* tsdf_base_ptr =
1067 block_value_map.at("tsdf").GetDataPtr<tsdf_t>();
1068 const weight_t* weight_base_ptr =
1069 block_value_map.at("weight").GetDataPtr<weight_t>();
1070 const color_t* color_base_ptr = nullptr;
1071 if (block_value_map.Contains("color")) {
1072 color_base_ptr = block_value_map.at("color").GetDataPtr<color_t>();
1073 }
1074
1075 index_t n_blocks = indices.GetLength();
1076 index_t n = n_blocks * resolution3;
1077
1078 // Output
1079#if defined(__CUDACC__)
1080 core::Tensor count(std::vector<index_t>{0}, {1}, core::Int32,
1081 block_keys.GetDevice());
1082 index_t* count_ptr = count.GetDataPtr<index_t>();
1083#else
1084 std::atomic<index_t> count_atomic(0);
1085 std::atomic<index_t>* count_ptr = &count_atomic;
1086#endif
1087
1088 if (valid_size < 0) {
1089        utility::LogDebug(
1090                "No estimated max point cloud size provided, using a 2-pass "
1091 "estimation. Surface extraction could be slow.");
1092 // This pass determines valid number of points.
1093
1094 core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t workload_idx) {
1095 auto GetLinearIdx = [&] OPEN3D_DEVICE(
1096 index_t xo, index_t yo, index_t zo,
1097 index_t curr_block_idx) -> index_t {
1098 return DeviceGetLinearIdx(xo, yo, zo, curr_block_idx,
1099 resolution, nb_block_masks_indexer,
1100 nb_block_indices_indexer);
1101 };
1102
1103 // Natural index (0, N) -> (block_idx,
1104 // voxel_idx)
1105 index_t workload_block_idx = workload_idx / resolution3;
1106 index_t block_idx = indices_ptr[workload_block_idx];
1107 index_t voxel_idx = workload_idx % resolution3;
1108
1109 // voxel_idx -> (x_voxel, y_voxel, z_voxel)
1110 index_t xv, yv, zv;
1111 voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
1112
1113 index_t linear_idx = block_idx * resolution3 + voxel_idx;
1114 float tsdf_o = tsdf_base_ptr[linear_idx];
1115 float weight_o = weight_base_ptr[linear_idx];
1116 if (weight_o <= weight_threshold) return;
1117
1118 // Enumerate x-y-z directions
1119 for (index_t i = 0; i < 3; ++i) {
1120 index_t linear_idx_i =
1121 GetLinearIdx(xv + (i == 0), yv + (i == 1),
1122 zv + (i == 2), workload_block_idx);
1123 if (linear_idx_i < 0) continue;
1124
1125 float tsdf_i = tsdf_base_ptr[linear_idx_i];
1126 float weight_i = weight_base_ptr[linear_idx_i];
1127 if (weight_i > weight_threshold && tsdf_i * tsdf_o < 0) {
1128 OPEN3D_ATOMIC_ADD(count_ptr, 1);
1129 }
1130 }
1131 });
1132
1133#if defined(__CUDACC__)
1134 valid_size = count[0].Item<index_t>();
1135 count[0] = 0;
1136#else
1137 valid_size = (*count_ptr).load();
1138 (*count_ptr) = 0;
1139#endif
1140 }
1141
1142 if (points.GetLength() == 0) {
1143 points = core::Tensor({valid_size, 3}, core::Float32, device);
1144 }
1145 ArrayIndexer point_indexer(points, 1);
1146
1147 // Normals
1148 ArrayIndexer normal_indexer;
1149 normals = core::Tensor({valid_size, 3}, core::Float32, device);
1150 normal_indexer = ArrayIndexer(normals, 1);
1151
1152 // This pass extracts exact surface points.
1153
1154 // Colors
1155 ArrayIndexer color_indexer;
1156 if (color_base_ptr) {
1157 colors = core::Tensor({valid_size, 3}, core::Float32, device);
1158 color_indexer = ArrayIndexer(colors, 1);
1159 }
1160
1161 core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t workload_idx) {
1162 auto GetLinearIdx = [&] OPEN3D_DEVICE(
1163 index_t xo, index_t yo, index_t zo,
1164 index_t curr_block_idx) -> index_t {
1165 return DeviceGetLinearIdx(xo, yo, zo, curr_block_idx, resolution,
1166 nb_block_masks_indexer,
1167 nb_block_indices_indexer);
1168 };
1169
1170 auto GetNormal = [&] OPEN3D_DEVICE(index_t xo, index_t yo, index_t zo,
1171 index_t curr_block_idx, float* n) {
1172 return DeviceGetNormal<tsdf_t>(
1173 tsdf_base_ptr, xo, yo, zo, curr_block_idx, n, resolution,
1174 nb_block_masks_indexer, nb_block_indices_indexer);
1175 };
1176
1177 // Natural index (0, N) -> (block_idx, voxel_idx)
1178 index_t workload_block_idx = workload_idx / resolution3;
1179 index_t block_idx = indices_ptr[workload_block_idx];
1180 index_t voxel_idx = workload_idx % resolution3;
1181
1183 // block_idx -> (x_block, y_block, z_block)
1184 index_t* block_key_ptr =
1185 block_keys_indexer.GetDataPtr<index_t>(block_idx);
1186 index_t xb = block_key_ptr[0];
1187 index_t yb = block_key_ptr[1];
1188 index_t zb = block_key_ptr[2];
1189
1190 // voxel_idx -> (x_voxel, y_voxel, z_voxel)
1191 index_t xv, yv, zv;
1192 voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
1193
1194 index_t linear_idx = block_idx * resolution3 + voxel_idx;
1195 float tsdf_o = tsdf_base_ptr[linear_idx];
1196 float weight_o = weight_base_ptr[linear_idx];
1197 if (weight_o <= weight_threshold) return;
1198
1199 float no[3] = {0}, ne[3] = {0};
1200
1201 // Get normal at origin
1202 GetNormal(xv, yv, zv, workload_block_idx, no);
1203
1204 index_t x = xb * resolution + xv;
1205 index_t y = yb * resolution + yv;
1206 index_t z = zb * resolution + zv;
1207
1208 // Enumerate x-y-z axis
1209 for (index_t i = 0; i < 3; ++i) {
1210 index_t linear_idx_i =
1211 GetLinearIdx(xv + (i == 0), yv + (i == 1), zv + (i == 2),
1212 workload_block_idx);
1213 if (linear_idx_i < 0) continue;
1214
1215 float tsdf_i = tsdf_base_ptr[linear_idx_i];
1216 float weight_i = weight_base_ptr[linear_idx_i];
1217 if (weight_i > weight_threshold && tsdf_i * tsdf_o < 0) {
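                // Zero crossing between this voxel and its +i neighbor: place
                // a point at the linear interpolation of the two TSDF samples
                // and blend normals/colors with the same ratio.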
1218 float ratio = (0 - tsdf_o) / (tsdf_i - tsdf_o);
1219
1220 index_t idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
1221 if (idx >= valid_size) {
1222 printf("Point cloud size larger than "
1223 "estimated, please increase the "
1224 "estimation!\n");
1225 return;
1226 }
1227
1228 float* point_ptr = point_indexer.GetDataPtr<float>(idx);
1229 point_ptr[0] = voxel_size * (x + ratio * int(i == 0));
1230 point_ptr[1] = voxel_size * (y + ratio * int(i == 1));
1231 point_ptr[2] = voxel_size * (z + ratio * int(i == 2));
1232
1233 // Get normal at edge and interpolate
1234 float* normal_ptr = normal_indexer.GetDataPtr<float>(idx);
1235 GetNormal(xv + (i == 0), yv + (i == 1), zv + (i == 2),
1236 workload_block_idx, ne);
1237 float nx = (1 - ratio) * no[0] + ratio * ne[0];
1238 float ny = (1 - ratio) * no[1] + ratio * ne[1];
1239 float nz = (1 - ratio) * no[2] + ratio * ne[2];
1240 float norm = static_cast<float>(
1241 sqrt(nx * nx + ny * ny + nz * nz) + 1e-5);
1242 normal_ptr[0] = nx / norm;
1243 normal_ptr[1] = ny / norm;
1244 normal_ptr[2] = nz / norm;
1245
1246 if (color_base_ptr) {
1247 float* color_ptr = color_indexer.GetDataPtr<float>(idx);
1248 const color_t* color_o_ptr =
1249 color_base_ptr + 3 * linear_idx;
1250 float r_o = color_o_ptr[0];
1251 float g_o = color_o_ptr[1];
1252 float b_o = color_o_ptr[2];
1253
1254 const color_t* color_i_ptr =
1255 color_base_ptr + 3 * linear_idx_i;
1256 float r_i = color_i_ptr[0];
1257 float g_i = color_i_ptr[1];
1258 float b_i = color_i_ptr[2];
1259
1260 color_ptr[0] = ((1 - ratio) * r_o + ratio * r_i) / 255.0f;
1261 color_ptr[1] = ((1 - ratio) * g_o + ratio * g_i) / 255.0f;
1262 color_ptr[2] = ((1 - ratio) * b_o + ratio * b_i) / 255.0f;
1263 }
1264 }
1265 }
1266 });
1267
1268#if defined(__CUDACC__)
1269 index_t total_count = count.Item<index_t>();
1270#else
1271 index_t total_count = (*count_ptr).load();
1272#endif
1273
1274 utility::LogDebug("{} vertices extracted", total_count);
1275 valid_size = total_count;
1276
1277#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
1278    core::cuda::Synchronize();
1279#endif
1280}
1281
1282template <typename tsdf_t, typename weight_t, typename color_t>
1283#if defined(__CUDACC__)
1284void ExtractTriangleMeshCUDA
1285#else
1286void ExtractTriangleMeshCPU
1287#endif
1288 (const core::Tensor& block_indices,
1289 const core::Tensor& inv_block_indices,
1290 const core::Tensor& nb_block_indices,
1291 const core::Tensor& nb_block_masks,
1292 const core::Tensor& block_keys,
1293 const TensorMap& block_value_map,
1294 core::Tensor& vertices,
1295 core::Tensor& triangles,
1296 core::Tensor& vertex_normals,
1297 core::Tensor& vertex_colors,
1298 index_t block_resolution,
1299 float voxel_size,
1300 float weight_threshold,
1301 index_t& vertex_count) {
1302 core::Device device = block_indices.GetDevice();
1303
1304 index_t resolution = block_resolution;
1305 index_t resolution3 = resolution * resolution * resolution;
1306
1307 // Shape / transform indexers, no data involved
1308 ArrayIndexer voxel_indexer({resolution, resolution, resolution});
1309 index_t n_blocks = static_cast<index_t>(block_indices.GetLength());
1310
1311 // TODO(wei): profile performance by replacing the table to a hashmap.
1312 // Voxel-wise mesh info. 4 channels correspond to:
1313 // 3 edges' corresponding vertex index + 1 table index.
1314 core::Tensor mesh_structure;
1315 try {
1316 mesh_structure = core::Tensor::Zeros(
1317 {n_blocks, resolution, resolution, resolution, 4}, core::Int32,
1318 device);
1319 } catch (const std::runtime_error&) {
1320        utility::LogError(
1321                "[MeshExtractionKernel] Unable to allocate assistance mesh "
1322 "structure for Marching "
1323 "Cubes with {} active voxel blocks. Please consider using a "
1324 "larger voxel size (currently {}) for TSDF "
1325 "integration, or using tsdf_volume.cpu() to perform mesh "
1326 "extraction on CPU.",
1327 n_blocks, voxel_size);
1328 }
1329
1330 // Real data indexer
1331 ArrayIndexer mesh_structure_indexer(mesh_structure, 4);
1332 ArrayIndexer nb_block_masks_indexer(nb_block_masks, 2);
1333 ArrayIndexer nb_block_indices_indexer(nb_block_indices, 2);
1334
1335    // Plain arrays that do not require indexers
1336 const index_t* indices_ptr = block_indices.GetDataPtr<index_t>();
1337 const index_t* inv_indices_ptr = inv_block_indices.GetDataPtr<index_t>();
1338
1339 if (!block_value_map.Contains("tsdf") ||
1340 !block_value_map.Contains("weight")) {
1341        utility::LogError(
1342                "TSDF and/or weight not allocated in blocks, please implement "
1343 "customized integration.");
1344 }
1345 const tsdf_t* tsdf_base_ptr =
1346 block_value_map.at("tsdf").GetDataPtr<tsdf_t>();
1347 const weight_t* weight_base_ptr =
1348 block_value_map.at("weight").GetDataPtr<weight_t>();
1349 const color_t* color_base_ptr = nullptr;
1350 if (block_value_map.Contains("color")) {
1351 color_base_ptr = block_value_map.at("color").GetDataPtr<color_t>();
1352 }
1353
1354 index_t n = n_blocks * resolution3;
1355    // Pass 0: analyze mesh structure, set up one-to-one correspondences
1356 // from edges to vertices.
1357
1358 core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t widx) {
1359 auto GetLinearIdx = [&] OPEN3D_DEVICE(
1360 index_t xo, index_t yo, index_t zo,
1361 index_t curr_block_idx) -> index_t {
1362 return DeviceGetLinearIdx(xo, yo, zo, curr_block_idx,
1363 static_cast<index_t>(resolution),
1364 nb_block_masks_indexer,
1365 nb_block_indices_indexer);
1366 };
1367
1368 // Natural index (0, N) -> (block_idx, voxel_idx)
1369 index_t workload_block_idx = widx / resolution3;
1370 index_t voxel_idx = widx % resolution3;
1371
1372 // voxel_idx -> (x_voxel, y_voxel, z_voxel)
1373 index_t xv, yv, zv;
1374 voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
1375
1376 // Check per-vertex sign in the cube to determine cube
1377 // type
1378 index_t table_idx = 0;
1379 for (index_t i = 0; i < 8; ++i) {
1380 index_t linear_idx_i =
1381 GetLinearIdx(xv + vtx_shifts[i][0], yv + vtx_shifts[i][1],
1382 zv + vtx_shifts[i][2], workload_block_idx);
1383 if (linear_idx_i < 0) return;
1384
1385 float tsdf_i = tsdf_base_ptr[linear_idx_i];
1386 float weight_i = weight_base_ptr[linear_idx_i];
1387 if (weight_i <= weight_threshold) return;
1388
1389 table_idx |= ((tsdf_i < 0) ? (1 << i) : 0);
1390 }
1391
1392 index_t* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<index_t>(
1393 xv, yv, zv, workload_block_idx);
1394 mesh_struct_ptr[3] = table_idx;
1395
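        // Table index 0 (all corners outside) or 255 (all corners inside)
        // means this cube contains no surface; skip it.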
1396 if (table_idx == 0 || table_idx == 255) return;
1397
1398        // Mark the edges of this cube type that will carry a vertex
1399 index_t edges_with_vertices = edge_table[table_idx];
1400 for (index_t i = 0; i < 12; ++i) {
1401 if (edges_with_vertices & (1 << i)) {
1402 index_t xv_i = xv + edge_shifts[i][0];
1403 index_t yv_i = yv + edge_shifts[i][1];
1404 index_t zv_i = zv + edge_shifts[i][2];
1405 index_t edge_i = edge_shifts[i][3];
1406
1407 index_t dxb = xv_i / resolution;
1408 index_t dyb = yv_i / resolution;
1409 index_t dzb = zv_i / resolution;
1410
1411 index_t nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;
1412
1413 index_t block_idx_i =
1414 *nb_block_indices_indexer.GetDataPtr<index_t>(
1415 workload_block_idx, nb_idx);
1416 index_t* mesh_ptr_i =
1417 mesh_structure_indexer.GetDataPtr<index_t>(
1418 xv_i - dxb * resolution,
1419 yv_i - dyb * resolution,
1420 zv_i - dzb * resolution,
1421 inv_indices_ptr[block_idx_i]);
1422
1423 // Non-atomic write, but we are safe
1424 mesh_ptr_i[edge_i] = -1;
1425 }
1426 }
1427 });
1428
1429 // Pass 1: determine valid number of vertices (if not preset)
1430#if defined(__CUDACC__)
1431 core::Tensor count(std::vector<index_t>{0}, {}, core::Int32, device);
1432
1433 index_t* count_ptr = count.GetDataPtr<index_t>();
1434#else
1435 std::atomic<index_t> count_atomic(0);
1436 std::atomic<index_t>* count_ptr = &count_atomic;
1437#endif
1438
1439 if (vertex_count < 0) {
1440 core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t widx) {
1441 // Natural index (0, N) -> (block_idx, voxel_idx)
1442 index_t workload_block_idx = widx / resolution3;
1443 index_t voxel_idx = widx % resolution3;
1444
1445 // voxel_idx -> (x_voxel, y_voxel, z_voxel)
1446 index_t xv, yv, zv;
1447 voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
1448
1449 // Obtain voxel's mesh struct ptr
1450 index_t* mesh_struct_ptr =
1451 mesh_structure_indexer.GetDataPtr<index_t>(
1452 xv, yv, zv, workload_block_idx);
1453
1454 // Early quit -- no allocated vertex to compute
1455 if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
1456 mesh_struct_ptr[2] != -1) {
1457 return;
1458 }
1459
1460 // Enumerate 3 edges in the voxel
1461 for (index_t e = 0; e < 3; ++e) {
1462 index_t vertex_idx = mesh_struct_ptr[e];
1463 if (vertex_idx != -1) continue;
1464
1465 OPEN3D_ATOMIC_ADD(count_ptr, 1);
1466 }
1467 });
1468
1469#if defined(__CUDACC__)
1470 vertex_count = count.Item<index_t>();
1471#else
1472 vertex_count = (*count_ptr).load();
1473#endif
1474 }
1475
1476 utility::LogDebug("Total vertex count = {}", vertex_count);
1477 vertices = core::Tensor({vertex_count, 3}, core::Float32, device);
1478
1479 vertex_normals = core::Tensor({vertex_count, 3}, core::Float32, device);
1480 ArrayIndexer normal_indexer = ArrayIndexer(vertex_normals, 1);
1481
1482 ArrayIndexer color_indexer;
1483 if (color_base_ptr) {
1484 vertex_colors = core::Tensor({vertex_count, 3}, core::Float32, device);
1485 color_indexer = ArrayIndexer(vertex_colors, 1);
1486 }
1487
1488 ArrayIndexer block_keys_indexer(block_keys, 1);
1489 ArrayIndexer vertex_indexer(vertices, 1);
1490
1491#if defined(__CUDACC__)
1492 count = core::Tensor(std::vector<index_t>{0}, {}, core::Int32, device);
1493 count_ptr = count.GetDataPtr<index_t>();
1494#else
1495 (*count_ptr) = 0;
1496#endif
1497
1498 // Pass 2: extract vertices.
1499
1500 core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t widx) {
1501 auto GetLinearIdx = [&] OPEN3D_DEVICE(
1502 index_t xo, index_t yo, index_t zo,
1503 index_t curr_block_idx) -> index_t {
1504 return DeviceGetLinearIdx(xo, yo, zo, curr_block_idx, resolution,
1505 nb_block_masks_indexer,
1506 nb_block_indices_indexer);
1507 };
1508
1509 auto GetNormal = [&] OPEN3D_DEVICE(index_t xo, index_t yo, index_t zo,
1510 index_t curr_block_idx, float* n) {
1511 return DeviceGetNormal<tsdf_t>(
1512 tsdf_base_ptr, xo, yo, zo, curr_block_idx, n, resolution,
1513 nb_block_masks_indexer, nb_block_indices_indexer);
1514 };
1515
1516 // Natural index (0, N) -> (block_idx, voxel_idx)
1517 index_t workload_block_idx = widx / resolution3;
1518 index_t block_idx = indices_ptr[workload_block_idx];
1519 index_t voxel_idx = widx % resolution3;
1520
1521 // block_idx -> (x_block, y_block, z_block)
1522 index_t* block_key_ptr =
1523 block_keys_indexer.GetDataPtr<index_t>(block_idx);
1524 index_t xb = block_key_ptr[0];
1525 index_t yb = block_key_ptr[1];
1526 index_t zb = block_key_ptr[2];
1527
1528 // voxel_idx -> (x_voxel, y_voxel, z_voxel)
1529 index_t xv, yv, zv;
1530 voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
1531
1532 // global coordinate (in voxels)
1533 index_t x = xb * resolution + xv;
1534 index_t y = yb * resolution + yv;
1535 index_t z = zb * resolution + zv;
1536
1537 // Obtain voxel's mesh struct ptr
1538 index_t* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<index_t>(
1539 xv, yv, zv, workload_block_idx);
1540
1541 // Early quit -- no allocated vertex to compute
1542 if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
1543 mesh_struct_ptr[2] != -1) {
1544 return;
1545 }
1546
1547 // Obtain voxel ptr
1548 index_t linear_idx = resolution3 * block_idx + voxel_idx;
1549 float tsdf_o = tsdf_base_ptr[linear_idx];
1550
1551 float no[3] = {0}, ne[3] = {0};
1552
1553 // Get normal at origin
1554 GetNormal(xv, yv, zv, workload_block_idx, no);
1555
1556 // Enumerate 3 edges in the voxel
1557 for (index_t e = 0; e < 3; ++e) {
1558 index_t vertex_idx = mesh_struct_ptr[e];
1559 if (vertex_idx != -1) continue;
1560
1561 index_t linear_idx_e =
1562 GetLinearIdx(xv + (e == 0), yv + (e == 1), zv + (e == 2),
1563 workload_block_idx);
1564 OPEN3D_ASSERT(linear_idx_e > 0 &&
1565 "Internal error: GetVoxelAt returns nullptr.");
1566 float tsdf_e = tsdf_base_ptr[linear_idx_e];
1567 float ratio = (0 - tsdf_o) / (tsdf_e - tsdf_o);
1568
1569 index_t idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
1570 mesh_struct_ptr[e] = idx;
1571
1572 float ratio_x = ratio * index_t(e == 0);
1573 float ratio_y = ratio * index_t(e == 1);
1574 float ratio_z = ratio * index_t(e == 2);
1575
1576 float* vertex_ptr = vertex_indexer.GetDataPtr<float>(idx);
1577 vertex_ptr[0] = voxel_size * (x + ratio_x);
1578 vertex_ptr[1] = voxel_size * (y + ratio_y);
1579 vertex_ptr[2] = voxel_size * (z + ratio_z);
1580
1581 // Get normal at edge and interpolate
1582 float* normal_ptr = normal_indexer.GetDataPtr<float>(idx);
1583 GetNormal(xv + (e == 0), yv + (e == 1), zv + (e == 2),
1584 workload_block_idx, ne);
1585 float nx = (1 - ratio) * no[0] + ratio * ne[0];
1586 float ny = (1 - ratio) * no[1] + ratio * ne[1];
1587 float nz = (1 - ratio) * no[2] + ratio * ne[2];
1588 float norm = static_cast<float>(sqrt(nx * nx + ny * ny + nz * nz) +
1589 1e-5);
1590 normal_ptr[0] = nx / norm;
1591 normal_ptr[1] = ny / norm;
1592 normal_ptr[2] = nz / norm;
1593
1594 if (color_base_ptr) {
1595 float* color_ptr = color_indexer.GetDataPtr<float>(idx);
1596 float r_o = color_base_ptr[linear_idx * 3 + 0];
1597 float g_o = color_base_ptr[linear_idx * 3 + 1];
1598 float b_o = color_base_ptr[linear_idx * 3 + 2];
1599
1600 float r_e = color_base_ptr[linear_idx_e * 3 + 0];
1601 float g_e = color_base_ptr[linear_idx_e * 3 + 1];
1602 float b_e = color_base_ptr[linear_idx_e * 3 + 2];
1603
1604 color_ptr[0] = ((1 - ratio) * r_o + ratio * r_e) / 255.0f;
1605 color_ptr[1] = ((1 - ratio) * g_o + ratio * g_e) / 255.0f;
1606 color_ptr[2] = ((1 - ratio) * b_o + ratio * b_e) / 255.0f;
1607 }
1608 }
1609 });
1610
1611 // Pass 3: connect vertices and form triangles.
1612 index_t triangle_count = vertex_count * 3;
1613 triangles = core::Tensor({triangle_count, 3}, core::Int32, device);
1614 ArrayIndexer triangle_indexer(triangles, 1);
1615
1616#if defined(__CUDACC__)
1617 count = core::Tensor(std::vector<index_t>{0}, {}, core::Int32, device);
1618 count_ptr = count.GetDataPtr<index_t>();
1619#else
1620 (*count_ptr) = 0;
1621#endif
1622 core::ParallelFor(device, n, [=] OPEN3D_DEVICE(index_t widx) {
1623 // Natural index (0, N) -> (block_idx, voxel_idx)
1624 index_t workload_block_idx = widx / resolution3;
1625 index_t voxel_idx = widx % resolution3;
1626
1627 // voxel_idx -> (x_voxel, y_voxel, z_voxel)
1628 index_t xv, yv, zv;
1629 voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);
1630
1631 // Obtain voxel's mesh struct ptr
1632 index_t* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<index_t>(
1633 xv, yv, zv, workload_block_idx);
1634
1635 index_t table_idx = mesh_struct_ptr[3];
1636 if (tri_count[table_idx] == 0) return;
1637
1638 for (index_t tri = 0; tri < 16; tri += 3) {
1639 if (tri_table[table_idx][tri] == -1) return;
1640
1641 index_t tri_idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
1642
1643 for (index_t vertex = 0; vertex < 3; ++vertex) {
1644 index_t edge = tri_table[table_idx][tri + vertex];
1645
1646 index_t xv_i = xv + edge_shifts[edge][0];
1647 index_t yv_i = yv + edge_shifts[edge][1];
1648 index_t zv_i = zv + edge_shifts[edge][2];
1649 index_t edge_i = edge_shifts[edge][3];
1650
1651 index_t dxb = xv_i / resolution;
1652 index_t dyb = yv_i / resolution;
1653 index_t dzb = zv_i / resolution;
1654
1655 index_t nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;
1656
1657 index_t block_idx_i =
1658 *nb_block_indices_indexer.GetDataPtr<index_t>(
1659 workload_block_idx, nb_idx);
1660 index_t* mesh_struct_ptr_i =
1661 mesh_structure_indexer.GetDataPtr<index_t>(
1662 xv_i - dxb * resolution,
1663 yv_i - dyb * resolution,
1664 zv_i - dzb * resolution,
1665 inv_indices_ptr[block_idx_i]);
1666
1667 index_t* triangle_ptr =
1668 triangle_indexer.GetDataPtr<index_t>(tri_idx);
1669 triangle_ptr[2 - vertex] = mesh_struct_ptr_i[edge_i];
1670 }
1671 }
1672 });
1673
1674#if defined(__CUDACC__)
1675 triangle_count = count.Item<index_t>();
1676#else
1677 triangle_count = (*count_ptr).load();
1678#endif
1679 utility::LogDebug("Total triangle count = {}", triangle_count);
1680 triangles = triangles.Slice(0, 0, triangle_count);
1681}
1682
1683} // namespace voxel_grid
1684} // namespace kernel
1685} // namespace geometry
1686} // namespace t
1687} // namespace open3d