Skip to content

Commit 0330c91

Browse files
authored
Merge pull request #138 from eEcoLiDAR/development
Development
2 parents acd9266 + 52ae499 commit 0330c91

File tree

70 files changed

+262541
-550
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

70 files changed

+262541
-550
lines changed

.gitignore

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -105,3 +105,5 @@ ENV/
105105
.idea/tasks.xml
106106
.idea/eEcoLiDAR.iml
107107
.idea/misc.xml
108+
.idea/dictionaries/
109+
.idea/inspectionProfiles/

.travis.yml

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@ dist: trusty
33
sudo: false
44

55
python:
6-
- "2.7"
76
- "3.5"
87
- "3.6"
98

CHANGELOG.md

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,14 +4,23 @@ All notable changes to this project will be documented in this file.
44
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
55
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
66

7-
## [Unreleased]
7+
## 0.3.0 - 2019-02-27
88
### Added
9+
- Normalization module
910
- General tests that all current and future feature extractors will be checked against.
11+
- Possibility to have a randomly subsampled (fixed) number of neighbors (eg for faster feature calculation)
12+
- Added feature extractors based on normalized height
1013

1114
## Changed
15+
- Many feature calculations are done in a vectorized way
16+
- z_entropy feature renamed to entropy_z for consistency
17+
- density_absolute_mean feature renamed to density_absolute_mean_z for consistency
18+
- perc_10 features renamed to perc_10_z for consistency
1219

1320
## Fixed
14-
- Fixed many feature extractors for corner cases (e.g. zero points)
21+
- Corner cases for many feature extractors (e.g. zero points)
22+
- Bug in slope feature calculation
23+
- Bug in density absolute mean feature calculation
1524

1625
## Removed
1726

laserchicken/_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = '0.2.0'
1+
__version__ = '0.3.0'

laserchicken/compute_neighbors.py

Lines changed: 30 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -80,10 +80,12 @@ def compute_sphere_neighborhood(environment_pc, target_pc, radius):
8080
neighborhoods = compute_cylinder_neighborhood(
8181
environment_pc, target_pc, radius)
8282

83+
counter = 0 # Target and neighborhood indices are going to be out of sync in loop below.
8384
for neighborhood_indices in neighborhoods:
8485
result = []
8586
for i, _ in enumerate(neighborhood_indices):
86-
target_x, target_y, target_z = utils.get_point(target_pc, i)
87+
target_x, target_y, target_z = utils.get_point(target_pc, counter)
88+
counter += 1
8789
result_indices = []
8890
for j in neighborhood_indices[i]:
8991
env_x, env_y, env_z = utils.get_point(environment_pc, j)
@@ -106,20 +108,22 @@ def compute_cell_neighborhood(environment_pc, target_pc, side_length):
106108
:return: indices of neighboring points from the environment point cloud for each target point
107109
"""
108110

109-
max_radius = 0.5*math.sqrt((side_length ** 2) + (side_length ** 2))
111+
max_radius = 0.5 * math.sqrt((side_length ** 2) + (side_length ** 2))
110112

111113
neighbors = compute_cylinder_neighborhood(
112114
environment_pc, target_pc, max_radius)
113115

116+
counter = 0 # Target and neighborhood indices are going to be out of sync in loop below.
114117
for neighborhood_indices in neighbors:
115118
result = []
116119
for i, _ in enumerate(neighborhood_indices):
117-
target_x, target_y, _ = utils.get_point(target_pc, i)
120+
target_x, target_y, _ = utils.get_point(target_pc, counter)
121+
counter += 1
118122
neighbor_indices = neighborhood_indices[i]
119123
result_indices = []
120124
for j in neighbor_indices:
121125
env_x, env_y, _ = utils.get_point(environment_pc, j)
122-
if ((abs(target_x - env_x)) > 0.5*side_length) or ((abs(target_y - env_y)) > 0.5*side_length):
126+
if ((abs(target_x - env_x)) > 0.5 * side_length) or ((abs(target_y - env_y)) > 0.5 * side_length):
123127
continue
124128
else:
125129
result_indices.append(j)
@@ -141,10 +145,12 @@ def compute_cube_neighborhood(environment_pc, target_pc, side_length):
141145
neighbors = compute_cell_neighborhood(
142146
environment_pc, target_pc, side_length)
143147

148+
counter = 0
144149
for neighborhood_indices in neighbors:
145150
result = []
146151
for i, _ in enumerate(neighborhood_indices):
147-
_, _, target_z = utils.get_point(target_pc, i)
152+
_, _, target_z = utils.get_point(target_pc, counter)
153+
counter += 1
148154
neighbor_indices = neighborhood_indices[i]
149155
result_indices = []
150156
for j in neighbor_indices:
@@ -157,33 +163,41 @@ def compute_cube_neighborhood(environment_pc, target_pc, side_length):
157163
yield result
158164

159165

160-
def compute_neighborhoods(env_pc, target_pc, volume_description):
166+
def compute_neighborhoods(env_pc, target_pc, volume_description, sample_size=None):
161167
"""
162168
Find a subset of points in a neighbourhood in the environment point cloud for each point in a target point cloud.
163169
164170
:param env_pc: environment point cloud
165171
:param target_pc: point cloud that contains the points at which neighborhoods are to be calculated
166172
:param volume_description: volume object that describes the shape and size of the search volume
173+
:param sample_size: maximum number of neighbors returned per target point; if None (default), all are returned
167174
:return: indices of neighboring points from the environment point cloud for each target point
168175
"""
169176
volume_type = volume_description.get_type()
170-
177+
171178
if volume_type == Cell.TYPE:
172-
neighbors1 = compute_cell_neighborhood(env_pc, target_pc, volume_description.side_length)
173-
for x in neighbors1:
174-
yield x
179+
neighbors = compute_cell_neighborhood(env_pc, target_pc, volume_description.side_length)
175180
elif volume_type == Cube.TYPE:
176181
neighbors = compute_cube_neighborhood(env_pc, target_pc, volume_description.side_length)
177-
for x in neighbors:
178-
yield x
179182
elif volume_type == Sphere.TYPE:
180183
neighbors = compute_sphere_neighborhood(env_pc, target_pc, volume_description.radius)
181-
for x in neighbors:
182-
yield x
183184
elif volume_type == InfiniteCylinder.TYPE:
184185
neighbors = compute_cylinder_neighborhood(env_pc, target_pc, volume_description.radius)
185-
for x in neighbors:
186-
yield x
187186
else:
188187
raise ValueError(
189188
'Neighborhood computation error because volume type "{}" is unknown.'.format(volume_type))
189+
190+
for x in neighbors:
191+
yield _subsample_if_necessary(x, sample_size)
192+
193+
194+
def _subsample_if_necessary(x, sample_size):
195+
if sample_size:
196+
sampled = [_subsample(elements, sample_size) if len(elements) > sample_size else elements for elements in x]
197+
return sampled
198+
else:
199+
return x
200+
201+
202+
def _subsample(neighborhood, sample_size=None):
203+
return list(np.random.choice(neighborhood, sample_size, replace=False))

laserchicken/feature_extractor/__init__.py

Lines changed: 112 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -10,28 +10,49 @@
1010
from laserchicken import keys, utils
1111
from .density_feature_extractor import PointDensityFeatureExtractor
1212
from .echo_ratio_feature_extractor import EchoRatioFeatureExtractor
13-
from .eigenvals_feature_extractor import EigenValueFeatureExtractor
14-
from .entropy_feature_extractor import EntropyFeatureExtractor
15-
from .height_statistics_feature_extractor import HeightStatisticsFeatureExtractor
16-
from .normal_plane_feature_extractor import NormalPlaneFeatureExtractor
17-
from .percentile_feature_extractor import PercentileFeatureExtractor
13+
from .eigenvals_feature_extractor import EigenValueVectorizeFeatureExtractor
14+
from .entropy_z_feature_extractor import EntropyZFeatureExtractor
15+
from .percentile_z_feature_extractor import PercentileZFeatureExtractor
1816
from .pulse_penetration_feature_extractor import PulsePenetrationFeatureExtractor
1917
from .sigma_z_feature_extractor import SigmaZFeatureExtractor
18+
from .median_z_feature_extractor import MedianZFeatureExtractor
19+
from .range_z_feature_extractor import RangeZFeatureExtractor
20+
from .var_z_feature_extractor import VarianceZFeatureExtractor
21+
from .mean_std_coeff_z_feature_extractor import MeanStdCoeffZFeatureExtractor
22+
from .skew_z_feature_extractor import SkewZFeatureExtractor
23+
from .kurtosis_z_feature_extractor import KurtosisZFeatureExtractor
24+
from .skew_norm_z_feature_extractor import SkewNormZFeatureExtractor
25+
from .mean_std_coeff_norm_z_feature_extractor import MeanStdCoeffNormZFeatureExtractor
26+
from .var_norm_z_feature_extractor import VarianceNormZFeatureExtractor
27+
from .range_norm_z_feature_extractor import RangeNormZFeatureExtractor
28+
from .kurtosis_norm_z_feature_extractor import KurtosisNormZFeatureExtractor
29+
from .entropy_norm_z_feature_extractor import EntropyNormZFeatureExtractor
30+
from .median_norm_z_feature_extractor import MedianNormZFeatureExtractor
31+
from .percentile_norm_z_feature_extractor import PercentileNormZFeatureExtractor
32+
from .density_absolute_mean_z_feature_extractor import DensityAbsoluteMeanZFeatureExtractor
33+
from .density_absolute_mean_norm_z_feature_extractor import DensityAbsoluteMeanNormZFeatureExtractor
34+
35+
36+
def _create_feature_map(module_name=__name__):
37+
"""Construct a mapping from feature names to feature extractor classes."""
38+
name_extractor_pairs = _find_name_extractor_pairs(module_name)
39+
return {feature_name: extractor for feature_name, extractor in name_extractor_pairs}
2040

2141

22-
def _feature_map(module_name=__name__):
23-
"""Construct a mapping from feature names to feature extractor classes."""
42+
def _find_name_extractor_pairs(module_name):
2443
module = importlib.import_module(module_name)
25-
return {feature_name: extractor
26-
for name, extractor in vars(module).items() if re.match('^[A-Z][a-zA-Z0-9_]*FeatureExtractor$', name)
27-
for feature_name in extractor.provides()
28-
}
44+
name_extractor_pairs = [(feature_name, extractor)
45+
for name, extractor in vars(module).items() if
46+
re.match('^[A-Z][a-zA-Z0-9_]*FeatureExtractor$', name)
47+
for feature_name in extractor.provides()]
48+
return name_extractor_pairs
2949

3050

31-
FEATURES = _feature_map()
51+
FEATURES = _create_feature_map()
3252

3353

34-
def compute_features(env_point_cloud, neighborhoods, target_idx_base, target_point_cloud, feature_names, volume, overwrite=False,
54+
def compute_features(env_point_cloud, neighborhoods, target_idx_base, target_point_cloud, feature_names, volume,
55+
overwrite=False,
3556
verbose=True, **kwargs):
3657
"""
3758
Compute features for each target and store result as point attributes in target point cloud.
@@ -60,18 +81,23 @@ def compute_features(env_point_cloud, neighborhoods, target_idx_base, target_poi
6081
:return: None, results are stored in attributes of the target point cloud
6182
"""
6283
_verify_feature_names(feature_names)
63-
ordered_features = _make_feature_list(feature_names)
84+
wanted_feature_names = feature_names + [existing_feature for existing_feature in target_point_cloud[keys.point]]
85+
extended_features = _make_extended_feature_list(feature_names)
86+
features_to_do = extended_features
6487

65-
for feature in ordered_features:
66-
if (target_idx_base == 0) and (not overwrite) and (feature in target_point_cloud[keys.point]):
88+
while features_to_do:
89+
feature_name = features_to_do[0]
90+
91+
if (target_idx_base == 0) and (not overwrite) and (feature_name in target_point_cloud[keys.point]):
6792
continue # Skip feature calc if it is already there and we do not overwrite
6893

94+
extractor = FEATURES[feature_name]()
95+
6996
if verbose:
70-
sys.stdout.write('Feature "{}"\n'.format(feature))
97+
sys.stdout.write('Feature(s) "{}"'.format(extractor.provides()))
7198
sys.stdout.flush()
7299
start = time.time()
73100

74-
extractor = FEATURES[feature]()
75101
_add_or_update_feature(env_point_cloud, neighborhoods, target_idx_base,
76102
target_point_cloud, extractor, volume, overwrite, kwargs)
77103
utils.add_metadata(target_point_cloud, type(
@@ -82,6 +108,21 @@ def compute_features(env_point_cloud, neighborhoods, target_idx_base, target_poi
82108
sys.stdout.write(' took {:.2f} seconds\n'.format(elapsed))
83109
sys.stdout.flush()
84110

111+
for provided_feature in extractor.provides():
112+
if provided_feature in features_to_do:
113+
features_to_do.remove(provided_feature)
114+
115+
_keep_only_wanted_features(target_point_cloud, wanted_feature_names)
116+
117+
118+
def _keep_only_wanted_features(target_point_cloud, wanted_feature_names):
119+
redundant_features = [f for f in target_point_cloud[keys.point] if f not in wanted_feature_names]
120+
if redundant_features:
121+
print('The following unrequested features were calculated as a side effect, but will not be returned:',
122+
redundant_features)
123+
for f in redundant_features:
124+
target_point_cloud[keys.point].pop(f)
125+
85126

86127
def _verify_feature_names(feature_names):
87128
unknown_features = [f for f in feature_names if f not in FEATURES]
@@ -90,8 +131,8 @@ def _verify_feature_names(feature_names):
90131
.format(', '.join(unknown_features), ', '.join(FEATURES.keys())))
91132

92133

93-
def _add_or_update_feature(env_point_cloud, neighborhoods, target_idx_base, target_point_cloud, extractor, volume, overwrite, kwargs):
94-
#n_targets = len(target_point_cloud[keys.point]["x"]["data"])
134+
def _add_or_update_feature(env_point_cloud, neighborhoods, target_idx_base, target_point_cloud, extractor, volume,
135+
overwrite, kwargs):
95136
n_targets = len(neighborhoods)
96137

97138
for k in kwargs:
@@ -101,35 +142,73 @@ def _add_or_update_feature(env_point_cloud, neighborhoods, target_idx_base, targ
101142
feature_values = [np.empty(n_targets, dtype=np.float64)
102143
for i in range(n_features)]
103144

104-
print("Number of targets: %d, number of features: %d" % (n_targets, n_features))
145+
if hasattr(extractor, 'is_vectorized'):
146+
_add_or_update_feature_in_chunks(env_point_cloud, extractor, feature_values, n_features, n_targets,
147+
neighborhoods,
148+
target_idx_base, target_point_cloud, volume)
149+
else:
150+
_add_or_update_feature_one_by_one(env_point_cloud, extractor, feature_values, n_features, n_targets,
151+
neighborhoods, target_idx_base, target_point_cloud, volume)
152+
153+
for i in range(n_features):
154+
feature = provided_features[i]
155+
if target_idx_base != 0:
156+
if feature not in target_point_cloud[keys.point]:
157+
continue
158+
159+
target_point_cloud[keys.point][feature]["data"] = np.hstack(
160+
[target_point_cloud[keys.point][feature]["data"], feature_values[i]])
161+
162+
elif overwrite or (feature not in target_point_cloud[keys.point]):
163+
target_point_cloud[keys.point][feature] = {
164+
"type": 'float64', "data": feature_values[i]}
165+
166+
167+
def _add_or_update_feature_one_by_one(env_point_cloud, extractor, feature_values, n_features, n_targets, neighborhoods,
168+
target_idx_base, target_point_cloud, volume):
105169
for target_index in range(n_targets):
106170
point_values = extractor.extract(env_point_cloud, neighborhoods[target_index], target_point_cloud,
107-
target_index+target_idx_base, volume)
171+
target_index + target_idx_base, volume)
108172
if n_features > 1:
109173
for i in range(n_features):
110174
feature_values[i][target_index] = point_values[i]
111175
else:
112176
feature_values[0][target_index] = point_values
113-
for i in range(n_features):
114-
feature = provided_features[i]
115-
if (target_idx_base != 0):
116-
target_point_cloud[keys.point][feature]["data"] = np.append(target_point_cloud[keys.point][feature]["data"], feature_values[i])
117-
elif (overwrite or (feature not in target_point_cloud[keys.point])) and (target_idx_base == 0):
118-
target_point_cloud[keys.point][feature] = {
119-
"type": 'float64', "data": feature_values[i]}
120177

121178

122-
def _make_feature_list(feature_names):
123-
feature_list = reversed(_make_feature_list_helper(feature_names))
179+
def _add_or_update_feature_in_chunks(env_point_cloud, extractor, feature_values, n_features, n_targets, neighborhoods,
180+
target_idx_base, target_point_cloud, volume):
181+
chunk_size = 100000
182+
print('calculating {} in chunks'.format(extractor.provides()))
183+
for chunk_no in range(int(np.math.ceil(n_targets / chunk_size))):
184+
i_start = chunk_no * chunk_size
185+
i_end = min((chunk_no + 1) * chunk_size, n_targets)
186+
target_indices = np.arange(i_start, i_end)
187+
point_values = extractor.extract(env_point_cloud, neighborhoods[i_start:i_end], target_point_cloud,
188+
target_indices + target_idx_base, volume)
189+
190+
if n_features > 1:
191+
for i in range(n_features):
192+
feature_values[i][target_indices] = point_values[i]
193+
else:
194+
feature_values[0][target_indices] = point_values
195+
196+
197+
def _make_extended_feature_list(feature_names):
198+
feature_list = reversed(_make_extended_feature_list_helper(feature_names))
199+
return _remove_duplicates(feature_list)
200+
201+
202+
def _remove_duplicates(feature_list):
124203
seen = set()
125204
return [f for f in feature_list if not (f in seen or seen.add(f))]
126205

127206

128-
def _make_feature_list_helper(feature_names):
207+
def _make_extended_feature_list_helper(feature_names):
129208
feature_list = feature_names
130209
for feature_name in feature_names:
131210
extractor = FEATURES[feature_name]()
132211
dependencies = extractor.requires()
133212
feature_list.extend(dependencies)
134-
feature_list.extend(_make_feature_list_helper(dependencies))
213+
feature_list.extend(_make_extended_feature_list_helper(dependencies))
135214
return feature_list
Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
"""Pulse penetration ratio and density absolute mean calculations.
2+
3+
See https://github.com/eEcoLiDAR/eEcoLiDAR/issues/23.
4+
"""
5+
6+
import numpy as np
7+
8+
from laserchicken.feature_extractor.abc import AbstractFeatureExtractor
9+
from laserchicken.feature_extractor.density_absolute_mean_z_feature_extractor import \
10+
DensityAbsoluteMeanZFeatureExtractor
11+
from laserchicken.keys import point, normalized_height
12+
13+
# classification according to
14+
# http://www.asprs.org/wp-content/uploads/2010/12/LAS_1-4_R6.pdf
15+
GROUND_TAGS = [2]
16+
17+
18+
class DensityAbsoluteMeanNormZFeatureExtractor(DensityAbsoluteMeanZFeatureExtractor):
19+
"""Feature extractor for the point density."""
20+
DATA_KEY = normalized_height
21+
22+
@classmethod
23+
def provides(cls):
24+
"""
25+
Get a list of names of the feature values.
26+
27+
This will return as many names as the number of feature values that will be returned.
28+
For instance, if a feature extractor returns the first 3 eigen values, this method
29+
should return 3 names, for instance 'eigen_value_1', 'eigen_value_2' and 'eigen_value_3'.
30+
31+
:return: List of feature names
32+
"""
33+
return ['density_absolute_mean_norm_z']

0 commit comments

Comments
 (0)