Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
50 changes: 42 additions & 8 deletions eqcorrscan/core/lag_calc.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
import scipy
import logging
import os
import warnings

from collections import Counter, namedtuple

Expand Down Expand Up @@ -227,7 +228,7 @@ def xcorr_pick_family(family, stream, shift_len=0.2, min_cc=0.4,
horizontal_chans=['E', 'N', '1', '2'],
cores=1, interpolate=False,
plot=False, plotdir=None, export_cc=False, cc_dir=None,
**kwargs):
check_full_seed=False, **kwargs):
"""
Compute cross-correlation picks for detections in a family.

Expand Down Expand Up @@ -276,15 +277,27 @@ def xcorr_pick_family(family, stream, shift_len=0.2, min_cc=0.4,
:type cc_dir: str
:param cc_dir:
Path to saving folder, NumPy files will be output here.
:type check_full_seed: bool
:param check_full_seed:
If True, will check for duplicate traces against the full SEED id,
including Network, Station, Location and Channel. If False (default),
will check only against Station and Channel.


:return: Dictionary of picked events keyed by detection id.
"""
if not check_full_seed:
warnings.warn(
            "Deprecation warning: check_full_seed will default to "
"True in a future release. Check the docs page here "
"for how this will affect you: "
"https://eqcorrscan.readthedocs.io/en/latest/faq.html")
picked_dict = {}
delta = family.template.st[0].stats.delta
detect_streams_dict = _prepare_data(
family=family, detect_data=stream, shift_len=shift_len,
all_vert=all_vert, all_horiz=all_horiz, vertical_chans=vertical_chans,
horizontal_chans=horizontal_chans)
horizontal_chans=horizontal_chans, check_full_seed=check_full_seed)
detection_ids = list(detect_streams_dict.keys())
detect_streams = [detect_streams_dict[detection_id]
for detection_id in detection_ids]
Expand Down Expand Up @@ -401,8 +414,8 @@ def xcorr_pick_family(family, stream, shift_len=0.2, min_cc=0.4,
return picked_dict


def _prepare_data(family, detect_data, shift_len, all_vert=False,
all_horiz=False, vertical_chans=['Z'],
def _prepare_data(family, detect_data, shift_len, check_full_seed,
all_vert=False, all_horiz=False, vertical_chans=['Z'],
horizontal_chans=['E', 'N', '1', '2']):
"""
Prepare data for lag_calc - reduce memory here.
Expand All @@ -414,6 +427,11 @@ def _prepare_data(family, detect_data, shift_len, all_vert=False,
:param detect_data: Stream to extract detection streams from.
:type shift_len: float
:param shift_len: Shift length in seconds allowed for picking.
:type check_full_seed: bool
:param check_full_seed:
If True, will check for duplicate traces against the full SEED id,
including Network, Station, Location and Channel. If False (default),
will check only against Station and Channel.

:returns: Dictionary of detect_streams keyed by detection id
to be worked on
Expand Down Expand Up @@ -441,8 +459,11 @@ def _prepare_data(family, detect_data, shift_len, all_vert=False,
detect_stream.remove(trace)
Logger.warning("Masked array found for {0}, not supported, "
"removing.".format(trace.id))
stachans = [(tr.stats.station, tr.stats.channel)
for tr in detect_stream]
if check_full_seed:
stachans = [tr.id for tr in detect_stream]
else:
stachans = [(tr.stats.station, tr.stats.channel)
for tr in detect_stream]
c_stachans = Counter(stachans)
for key in c_stachans.keys():
if c_stachans[key] > 1:
Expand All @@ -463,7 +484,8 @@ def lag_calc(detections, detect_data, template_names, templates,
all_vert=False, all_horiz=False,
horizontal_chans=['E', 'N', '1', '2'],
vertical_chans=['Z'], cores=1, interpolate=False,
plot=False, plotdir=None, export_cc=False, cc_dir=None, **kwargs):
plot=False, plotdir=None, export_cc=False, cc_dir=None,
check_full_seed=False, **kwargs):
"""
Cross-correlation derived picking of seismic events.

Expand Down Expand Up @@ -525,6 +547,11 @@ def lag_calc(detections, detect_data, template_names, templates,
:type cc_dir: str
:param cc_dir:
Path to saving folder, NumPy files will be output here.
:type check_full_seed: bool
:param check_full_seed:
If True, will check for duplicate traces against the full SEED id,
including Network, Station, Location and Channel. If False (default),
will check only against Station and Channel.

:returns:
Catalog of events with picks. No origin information is included.
Expand Down Expand Up @@ -581,6 +608,12 @@ def lag_calc(detections, detect_data, template_names, templates,
The correlation data that are saved to the binary files can be useful
to select an appropriate threshold for your data.
"""
if not check_full_seed:
warnings.warn(
            "Deprecation warning: check_full_seed will default to "
"True in a future release. Check the docs page here "
"for how this will affect you: "
"https://eqcorrscan.readthedocs.io/en/latest/faq.html")
# First check that sample rates are equal for everything
for tr in detect_data:
if tr.stats.sampling_rate != detect_data[0].stats.sampling_rate:
Expand Down Expand Up @@ -611,7 +644,8 @@ def lag_calc(detections, detect_data, template_names, templates,
horizontal_chans=horizontal_chans,
vertical_chans=vertical_chans, interpolate=interpolate,
cores=cores, shift_len=shift_len, plot=plot, plotdir=plotdir,
export_cc=export_cc, cc_dir=cc_dir, **kwargs)
export_cc=export_cc, cc_dir=cc_dir,
check_full_seed=check_full_seed, **kwargs)
initial_cat.update(template_dict)
# Order the catalogue to match the input
output_cat = Catalog()
Expand Down
19 changes: 17 additions & 2 deletions eqcorrscan/core/match_filter/family.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
import os
import shutil
import logging
import warnings

from obspy import UTCDateTime, Stream, Catalog
from obspy.core.event import (
Expand Down Expand Up @@ -512,7 +513,7 @@ def lag_calc(self, stream, pre_processed, shift_len=0.2, min_cc=0.4,
cores=1, interpolate=False, plot=False, plotdir=None,
parallel=True, process_cores=None, ignore_length=False,
ignore_bad_data=False, export_cc=False, cc_dir=None,
**kwargs):
check_full_seed=False, **kwargs):
"""
Compute picks based on cross-correlation alignment.

Expand Down Expand Up @@ -585,6 +586,12 @@ def lag_calc(self, stream, pre_processed, shift_len=0.2, min_cc=0.4,
:type cc_dir: str
:param cc_dir:
Path to saving folder, NumPy files will be output here.
:type check_full_seed: bool
:param check_full_seed:
If True, will check for duplicate traces against the full SEED id,
including Network, Station, Location and Channel. If False
(default), will check only against Station and Channel.


:returns:
Catalog of events with picks. No origin information is included.
Expand All @@ -609,6 +616,13 @@ def lag_calc(self, stream, pre_processed, shift_len=0.2, min_cc=0.4,
"""
from eqcorrscan.core.lag_calc import xcorr_pick_family

if not check_full_seed:
warnings.warn(
                "Deprecation warning: check_full_seed will default to "
"True in a future release. Check the docs page here "
"for how this will affect you: "
"https://eqcorrscan.readthedocs.io/en/latest/faq.html")

processed_stream = self._process_streams(
stream=stream, pre_processed=pre_processed,
process_cores=process_cores, parallel=parallel,
Expand All @@ -619,7 +633,8 @@ def lag_calc(self, stream, pre_processed, shift_len=0.2, min_cc=0.4,
min_cc_from_mean_cc_factor=min_cc_from_mean_cc_factor,
vertical_chans=vertical_chans, cores=cores,
interpolate=interpolate, plot=plot, plotdir=plotdir,
export_cc=export_cc, cc_dir=cc_dir, **kwargs)
export_cc=export_cc, cc_dir=cc_dir,
check_full_seed=check_full_seed, **kwargs)
catalog_out = Catalog([ev for ev in picked_dict.values()])
for detection_id, event in picked_dict.items():
for pick in event.picks:
Expand Down
19 changes: 17 additions & 2 deletions eqcorrscan/core/match_filter/party.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
import tarfile
import tempfile
import logging
import warnings
from os.path import join

import numpy as np
Expand Down Expand Up @@ -838,7 +839,7 @@ def lag_calc(self, stream, pre_processed, shift_len=0.2, min_cc=0.4,
cores=1, interpolate=False, plot=False, plotdir=None,
parallel=True, process_cores=None, ignore_length=False,
ignore_bad_data=False, export_cc=False, cc_dir=None,
**kwargs):
check_full_seed=False, **kwargs):
"""
Compute picks based on cross-correlation alignment.

Expand Down Expand Up @@ -911,6 +912,13 @@ def lag_calc(self, stream, pre_processed, shift_len=0.2, min_cc=0.4,
If False (default), errors will be raised if data are excessively
gappy or are mostly zeros. If True then no error will be raised,
but an empty trace will be returned (and not used in detection).
:type check_full_seed: bool
:param check_full_seed:
If True, will check for duplicate traces against the full SEED id,
including Network, Station, Location and Channel. If False
(default), will check only against Station and Channel. This
behaviour was originally necessary to cope with some software
(i.e. SEISAN) not storing picks with full SEED info.

:returns:
Catalog of events with picks. No origin information is included.
Expand All @@ -936,6 +944,12 @@ def lag_calc(self, stream, pre_processed, shift_len=0.2, min_cc=0.4,
.. Note::
Picks are corrected for the template pre-pick time.
"""
if not check_full_seed:
warnings.warn(
                "Deprecation warning: check_full_seed will default to "
"True in a future release. Check the docs page here "
"for how this will affect you: "
"https://eqcorrscan.readthedocs.io/en/latest/faq.html")
process_cores = process_cores or cores
template_groups = group_templates(
[_f.template for _f in self.families
Expand Down Expand Up @@ -970,7 +984,8 @@ def lag_calc(self, stream, pre_processed, shift_len=0.2, min_cc=0.4,
export_cc=export_cc, cc_dir=cc_dir,
parallel=parallel, process_cores=process_cores,
ignore_bad_data=ignore_bad_data,
ignore_length=ignore_length, **kwargs)
ignore_length=ignore_length,
check_full_seed=check_full_seed, **kwargs)
return catalog

@staticmethod
Expand Down
17 changes: 16 additions & 1 deletion eqcorrscan/core/match_filter/template.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
import re
import shutil
import logging
import warnings

import numpy as np
from obspy import Stream
Expand Down Expand Up @@ -535,7 +536,8 @@ def construct(self, method, name, lowcut, highcut, samp_rate, filt_order,
length, prepick, swin="all", process_len=86400,
all_horiz=False, delayed=True, plot=False, plotdir=None,
min_snr=None, parallel=False, num_cores=False,
skip_short_chans=False, **kwargs):
skip_short_chans=False, check_full_seed=False,
**kwargs):
"""
Construct a template using a given method.

Expand Down Expand Up @@ -600,6 +602,13 @@ def construct(self, method, name, lowcut, highcut, samp_rate, filt_order,
Whether to ignore channels that have insufficient length data or
not. Useful when the quality of data is not known, e.g. when
downloading old, possibly triggered data from a datacentre
:type check_full_seed: bool
:param check_full_seed:
If True, will check for duplicate traces against the full SEED id,
including Network, Station, Location and Channel. If False
(default), will check only against Station and Channel. This
behaviour was originally necessary to cope with some software
(i.e. SEISAN) not storing picks with full SEED info.

.. note::

Expand Down Expand Up @@ -644,6 +653,12 @@ def construct(self, method, name, lowcut, highcut, samp_rate, filt_order,
Tribe.construct instead.

"""
if not check_full_seed:
warnings.warn(
                "Deprecation warning: check_full_seed will default to "
"True in a future release. Check the docs page here "
"for how this will affect you: "
"https://eqcorrscan.readthedocs.io/en/latest/faq.html")
if method in ['from_meta_file', 'from_client', 'multi_template_gen']:
raise NotImplementedError('Method is not supported, '
'use Tribe.construct instead.')
Expand Down
19 changes: 17 additions & 2 deletions eqcorrscan/core/match_filter/tribe.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
import tarfile
import tempfile
import logging
import warnings

import numpy as np
from obspy import Catalog, Stream, read, read_events
Expand Down Expand Up @@ -933,7 +934,8 @@ def construct(self, method, lowcut, highcut, samp_rate, filt_order,
length, prepick, swin="all", process_len=86400,
all_horiz=False, delayed=True, plot=False, plotdir=None,
min_snr=None, parallel=False, num_cores=False,
skip_short_chans=False, save_progress=False, **kwargs):
skip_short_chans=False, save_progress=False,
check_full_seed=False, **kwargs):
"""
Generate a Tribe of Templates.

Expand Down Expand Up @@ -1004,6 +1006,13 @@ def construct(self, method, lowcut, highcut, samp_rate, filt_order,
:param save_progress:
Whether to save the resulting party at every data step or not.
Useful for long-running processes.
:type check_full_seed: bool
:param check_full_seed:
If True, will check for duplicate traces against the full SEED id,
including Network, Station, Location and Channel. If False
(default), will check only against Station and Channel. This
behaviour was originally necessary to cope with some software
(i.e. SEISAN) not storing picks with full SEED info.

.. note::
*Method specific arguments:*
Expand Down Expand Up @@ -1032,14 +1041,20 @@ def construct(self, method, lowcut, highcut, samp_rate, filt_order,

.. Note:: Templates will be named according to their start-time.
"""
if not check_full_seed:
warnings.warn(
                "Deprecation warning: check_full_seed will default to "
"True in a future release. Check the docs page here "
"for how this will affect you: "
"https://eqcorrscan.readthedocs.io/en/latest/faq.html")
templates, catalog, process_lengths = template_gen.template_gen(
method=method, lowcut=lowcut, highcut=highcut, length=length,
filt_order=filt_order, samp_rate=samp_rate, prepick=prepick,
return_event=True, save_progress=save_progress, swin=swin,
process_len=process_len, all_horiz=all_horiz, plotdir=plotdir,
delayed=delayed, plot=plot, min_snr=min_snr, parallel=parallel,
num_cores=num_cores, skip_short_chans=skip_short_chans,
**kwargs)
check_full_seed=check_full_seed, **kwargs)
for template, event, process_len in zip(templates, catalog,
process_lengths):
t = Template()
Expand Down
Loading