Skip to content

Commit a5ba698

Browse files
authored
Merge pull request #188 from eEcoLiDAR/development
Development
2 parents 1656f26 + ed12126 commit a5ba698

File tree

7 files changed: +30 −30 lines changed

laserchicken/feature_extractor/eigenvals_feature_extractor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ def _reorder_vectors(eigen_vectors, new_vector_indices):
6060
# and reshape and even repeat some indices to get the behavior I'm looking for.
6161
vects_t = eigen_vectors.transpose([0, 2, 1]) # Eigen vectors used as column vectors. Making row vectors here.
6262
flattened_vects_t = vects_t.reshape(-1, 3 * 3) # Flatten the (eigen) vector dimension
63-
vect_indices = np.zeros_like(flattened_vects_t, dtype=np.int) + [0, 1, 2, 0, 1, 2, 0, 1, 2] # 0,1,2 for x,y,z
63+
vect_indices = np.zeros_like(flattened_vects_t, dtype=int) + [0, 1, 2, 0, 1, 2, 0, 1, 2] # 0,1,2 for x,y,z
6464
vect_indices[:, :3] += new_vector_indices[:, 0:1] * 3 # Because x,y,z, indices have to be increased by 3.
6565
vect_indices[:, 3:6] += new_vector_indices[:, 1:2] * 3
6666
vect_indices[:, 6:9] += new_vector_indices[:, 2:3] * 3

laserchicken/feature_extractor/feature_extraction.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ def compute_features(env_point_cloud, neighborhoods, target_point_cloud, feature
5656
for feature_name in extended_features:
5757
target_point_cloud[point][feature_name] = {"type": 'float64',
5858
"data": np.zeros_like(target_point_cloud[point]['x']['data'],
59-
dtype=np.float64)}
59+
dtype=float)}
6060

6161
if provenance in env_point_cloud:
6262
utils.add_metadata(target_point_cloud, sys.modules[__name__],
@@ -87,7 +87,7 @@ def _get_point_cloud_size(target_point_cloud):
8787

8888

8989
def _calculate_number_of_chunks(chunk_size, n_targets):
90-
return int(np.math.ceil(n_targets / chunk_size))
90+
return int(np.ceil(n_targets / chunk_size))
9191

9292

9393
def _compute_features_for_chunk(features_to_do, env_point_cloud, current_neighborhoods, target_point_cloud,
@@ -124,7 +124,7 @@ def _add_features_from_single_extractor(extractor, env_point_cloud, current_neig
124124
target_indices, volume)
125125

126126
n_targets = len(target_indices)
127-
feature_values = [np.empty(n_targets, dtype=np.float64) for _ in range(n_features)]
127+
feature_values = [np.empty(n_targets, dtype=float) for _ in range(n_features)]
128128
if n_features > 1:
129129
for i in range(n_features):
130130
feature_values[i] = point_values[i]

laserchicken/filter.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
import shapefile
66
import shapely
77
import sys
8-
from shapely.geometry import Point
8+
99
from shapely.errors import WKTReadingError
1010
from shapely.wkt import loads
1111
from shapely.geometry import box
@@ -97,11 +97,11 @@ def _check_valid_arguments(attribute, point_cloud):
9797

9898
def select_polygon(point_cloud, polygon_string, read_from_file=False, return_mask=False):
9999
"""
100-
Return the selection of the input point cloud that contains only points within a given polygon.
100+
Return the selection of the input point cloud that contains only points within the given polygon(s).
101101
102102
:param point_cloud: Input point cloud
103-
:param polygon_string: Polygon, either defined in a WKT string or in a file (WKT and ESRI formats supported)
104-
:param read_from_file: if true, polygon is expected to be the name of the file where the polygon is defined
103+
:param polygon_string: polygon(s), either defined in a WKT string or in a file (WKT and ESRI formats supported)
104+
:param read_from_file: if true, polygon is expected to be the name of the file where the geometry is defined
105105
:param return_mask: if true, return a mask of selected points, rather than point cloud
106106
:return:
107107
"""
@@ -121,12 +121,12 @@ def select_polygon(point_cloud, polygon_string, read_from_file=False, return_mas
121121
elif isinstance(polygon,shapely.geometry.multipolygon.MultiPolygon):
122122
points_in = []
123123
count=1
124-
for poly in polygon:
125-
if not(count%200) or count==len(polygon):
126-
print('Checking polygon {}/{}...'.format(count, len(polygon)))
124+
for poly in polygon.geoms:
125+
if not(count%200) or count==len(polygon.geoms):
126+
print('Checking polygon {}/{}...'.format(count, len(polygon.geoms)))
127127
points_in.extend(_contains(point_cloud, poly))
128128
count=count+1
129-
print('{} points found in {} polygons.'.format(len(points_in), len(polygon)))
129+
print('{} points found in {} polygons.'.format(len(points_in), len(polygon.geoms)))
130130
else:
131131
raise ValueError('It is not a Polygon or Multipolygon.')
132132

laserchicken/test_feature_extractor/test_extract_features.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ def test_extract_multiple_features_ends_up_in_pc():
3838
@staticmethod
3939
def test_extract_can_overwrite():
4040
target = test_tools.ComplexTestData().get_point_cloud()
41-
target[keys.point]['test1_a'] = {"type": np.float64, "data": [0.9, 0.99, 0.999, 0.9999]}
41+
target[keys.point]['test1_a'] = {"type": float, "data": [0.9, 0.99, 0.999, 0.9999]}
4242
feature_names = ['test3_a', 'test1_a']
4343
target = _compute_features(target, feature_names)
4444
assert all(target[keys.point]['test1_a']['data'] == 0.5 * target[keys.point]['z']['data'])

laserchicken/test_filter.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -429,8 +429,8 @@ def test_shp_polygons_contains():
429429
x = pc_out[point]['x']['data']
430430
y = pc_out[point]['y']['data']
431431
# Seemingly redundant 'astype' call: since pandas 0.24 Dataframe() doesn't enforce the given dtype as before
432-
df_out = pd.DataFrame({'x': x, 'y': y}, dtype=np.int32).astype(dtype=np.int32)
433-
df = pd.read_csv("testdata/ahn2_polygon.out", sep=',', header=0, index_col=0, dtype=np.int32)
432+
df_out = pd.DataFrame({'x': x, 'y': y}).astype(int)
433+
df = pd.read_csv("testdata/ahn2_polygon.out", sep=',', header=0, index_col=0, dtype=int)
434434
assert (pd.DataFrame.equals(df, df_out))
435435

436436
@staticmethod

laserchicken/test_tools.py

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -8,9 +8,9 @@
88

99
def generate_tiny_test_point_cloud():
1010
"""Generate a simple but valid point cloud with 3 points."""
11-
pc = {keys.point: {'x': {'type': 'double', 'data': np.array([1, 2, 3], dtype=np.float64)},
12-
'y': {'type': 'double', 'data': np.array([2, 3, 4], dtype=np.float64)},
13-
'z': {'type': 'double', 'data': np.array([3, 4, 5], dtype=np.float64)}}}
11+
pc = {keys.point: {'x': {'type': 'double', 'data': np.array([1, 2, 3], dtype=float)},
12+
'y': {'type': 'double', 'data': np.array([2, 3, 4], dtype=float)},
13+
'z': {'type': 'double', 'data': np.array([3, 4, 5], dtype=float)}}}
1414
return pc
1515

1616

@@ -21,9 +21,9 @@ class SimpleTestData(object):
2121
def get_point_cloud():
2222
"""Get the point cloud data."""
2323
# This simple_test_point cloud and the simple_test_header should be in sync. Some tests depend on it.
24-
pc = {keys.point: {'x': {'type': 'double', 'data': np.array([1, 2, 3], dtype=np.float64)},
25-
'y': {'type': 'double', 'data': np.array([20, 30, 40], dtype=np.float64)},
26-
'z': {'type': 'double', 'data': np.array([300, 400, 500], dtype=np.float64)}}}
24+
pc = {keys.point: {'x': {'type': 'double', 'data': np.array([1, 2, 3], dtype=float)},
25+
'y': {'type': 'double', 'data': np.array([20, 30, 40], dtype=float)},
26+
'z': {'type': 'double', 'data': np.array([300, 400, 500], dtype=float)}}}
2727
return pc
2828

2929
@staticmethod
@@ -64,10 +64,10 @@ class ComplexTestData(object):
6464
def get_point_cloud():
6565
"""Get the point cloud data."""
6666
# This complex_test_point cloud and the complex_test_header should be in sync. Some tests depend on it.
67-
pc = {keys.point: {'x': {'type': 'double', 'data': np.array([1, 2, 3, 4, 5], dtype=np.float)},
68-
'y': {'type': 'double', 'data': np.array([2, 3, 4, 5, 6], dtype=np.float)},
69-
'z': {'type': 'double', 'data': np.array([3, 4, 5, 6, 7], dtype=np.float)},
70-
'return': {'type': 'int', 'data': np.array([1, 1, 2, 2, 1], dtype=np.int32)}
67+
pc = {keys.point: {'x': {'type': 'double', 'data': np.array([1, 2, 3, 4, 5], dtype=float)},
68+
'y': {'type': 'double', 'data': np.array([2, 3, 4, 5, 6], dtype=float)},
69+
'z': {'type': 'double', 'data': np.array([3, 4, 5, 6, 7], dtype=float)},
70+
'return': {'type': 'int', 'data': np.array([1, 1, 2, 2, 1], dtype=int)}
7171
},
7272
keys.point_cloud: {'offset': {'type': 'double', 'data': 12.1}},
7373
keys.provenance: [{"module": "filter", "time": str(dt.datetime(2018, 1, 18, 16, 1, 0))},
@@ -131,14 +131,14 @@ def create_point_cloud(x, y, z, normalized_z=None):
131131
:param normalized_z: optional normalized z attribute values
132132
:return: point cloud object
133133
"""
134-
point_cloud = {keys.point: {'x': {'type': 'double', 'data': np.array(x, dtype=np.float)},
135-
'y': {'type': 'double', 'data': np.array(y, dtype=np.float)},
136-
'z': {'type': 'double', 'data': np.array(z, dtype=np.float)}},
134+
point_cloud = {keys.point: {'x': {'type': 'double', 'data': np.array(x, dtype=float)},
135+
'y': {'type': 'double', 'data': np.array(y, dtype=float)},
136+
'z': {'type': 'double', 'data': np.array(z, dtype=float)}},
137137
keys.point_cloud: {},
138138
keys.provenance: [{'time': (dt.datetime(2018, 1, 18, 16, 1, 0)), 'module': 'filter'}]}
139139
if normalized_z is not None:
140140
point_cloud[keys.point][keys.normalized_height] = {'type': 'double',
141-
'data': np.array(normalized_z, dtype=np.float)}
141+
'data': np.array(normalized_z, dtype=float)}
142142
return point_cloud
143143

144144

requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ pytest
44
mock
55
plyfile
66
python-dateutil
7-
shapely
7+
shapely>=2
88
PyShp
99
pandas
1010
click

0 commit comments

Comments (0)