Skip to content

Commit b55f6da

Browse files
committed
Bump version for 0.10.8
1 parent 849d324 commit b55f6da

95 files changed

Lines changed: 645 additions & 647 deletions

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

Stoner/Analysis.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -193,7 +193,7 @@ def integrate(
193193
bounds=lambda x, y: True,
194194
**kargs,
195195
):
196-
"""Inegrate a column of data, optionally returning the cumulative integral.
196+
"""Integrate a column of data, optionally returning the cumulative integral.
197197
198198
Args:
199199
xcol (index):
@@ -214,7 +214,7 @@ def integrate(
214214
bounds (callable):
215215
A function that evaluates for each row to determine if the data should be integrated over.
216216
**kargs:
217-
Other keyword arguements are fed direct to the scipy.integrate.cumtrapz method
217+
Other keyword arguments are fed direct to the scipy.integrate.cumtrapz method
218218
219219
Returns:
220220
(:py:class:`Stoner.Data`):
@@ -240,7 +240,7 @@ def integrate(
240240
resultdata = cumtrapz(yd, xdat, **kargs)
241241
resultdata = np.append(np.array([0]), resultdata)
242242
if result is not None:
243-
header = header if header is not None else f"Intergral of {self.column_headers[_.ycol]}"
243+
header = header if header is not None else f"Integral of {self.column_headers[_.ycol]}"
244244
if isinstance(result, bool) and result:
245245
self.add_column(resultdata, header=header, replace=False)
246246
else:
@@ -267,7 +267,7 @@ def normalise(self, target=None, base=None, replace=True, header=None, scale=Non
267267
268268
Keyword Arguments:
269269
base (index):
270-
The column to normalise to, can be an integer or string. **Depricated** can also be a tuple (low,
270+
The column to normalise to, can be an integer or string. **Deprecated** can also be a tuple (low,
271271
high) being the output range
272272
replace (bool):
273273
Set True(default) to overwrite the target data columns
@@ -461,7 +461,7 @@ def transform(set1, *p):
461461
perr = np.sqrt(np.diagonal(pcov))
462462
self.data[:, _.xcol], self.data[:, _.ycol] = func(self.data[:, _.xcol], self.data[:, _.ycol], *popt)
463463
self["Stitching Coefficients"] = list(popt)
464-
self["Stitching Coeffient Errors"] = list(perr)
464+
self["Stitching Coefficient Errors"] = list(perr)
465465
self["Stitching overlap"] = (lower, upper)
466466
self["Stitching Window"] = num_pts
467467

@@ -478,7 +478,7 @@ def threshold(self, threshold, **kargs):
478478
col (index):
479479
Column index to look for data in
480480
rising (bool):
481-
look for case where the data is increasing in value (defaukt True)
481+
look for case where the data is increasing in value (default True)
482482
falling (bool):
483483
look for case where data is fallinh in value (default False)
484484
xcol (index, bool or None):
@@ -496,7 +496,7 @@ def threshold(self, threshold, **kargs):
496496
Either a sing;le fractional row index, or an in terpolated x value
497497
498498
Note:
499-
If you don't sepcify a col value or set it to None, then the assigned columns via the
499+
If you don't specify a col value or set it to None, then the assigned columns via the
500500
:py:attr:`DataFile.setas` attribute will be used.
501501
502502
Warning:

Stoner/Core.py

Lines changed: 19 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ class DataFile(
8686
The possible mime-types of data files represented by each matching filename pattern in
8787
:py:attr:`Datafile.pattern`.
8888
patterns (list):
89-
A list of filename extenion glob patterns that matrches the expected filename patterns for a DataFile
89+
A list of filename extension glob patterns that match the expected filename patterns for a DataFile
9090
(*.txt and *.dat")
9191
priority (int):
9292
Used to indicathe order in which subclasses of :py:class:`DataFile` are tried when loading data. A higher
@@ -96,11 +96,11 @@ class DataFile(
9696
shape (tuple of integers):
9797
Returns the shape of the data (rows,columns) - equivalent to self.data.shape.
9898
records (numpy record array):
99-
Returns the data in the form of a list of yuples where each tuple maps to the columsn names.
99+
Returns the data in the form of a list of tuples where each tuple maps to the column names.
100100
clone (DataFile):
101101
Creates a deep copy of the :py:class`DataFile` object.
102102
dict_records (array of dictionaries):
103-
View the data as an array or dictionaries where each dictionary represnets one row with keys dervied
103+
View the data as an array or dictionaries where each dictionary represents one row with keys derived
104104
from column headers.
105105
dims (int):
106106
When data columns are set as x,y,z etc. returns the number of dimensions implied in the data set
@@ -257,7 +257,7 @@ def __init__(self, *args, **kargs):
257257
# ============================================================================================
258258

259259
def _init_single(self, *args, **kargs):
260-
"""Handle constructor with 1 arguement - called from __init__."""
260+
"""Handle constructor with 1 argument - called from __init__."""
261261
arg = args[0]
262262
inits = {
263263
path_types + (bool, bytes, io.IOBase): self._init_load,
@@ -396,7 +396,7 @@ def _init_list(self, arg, **kargs):
396396
raise TypeError(f"Unable to construct DataFile from a {type(arg)}")
397397

398398
# ============================================================================================
399-
############################ Speical Methods ###############################################
399+
############################ Special Methods ###############################################
400400
# ============================================================================================
401401

402402
def __call__(self, *args, **kargs):
@@ -448,7 +448,7 @@ def __deepcopy__(self, memo):
448448
return result
449449

450450
def __dir__(self):
451-
"""Reeturns the attributes of the current object.
451+
"""Returns the attributes of the current object.
452452
453453
Augmenting the keys of self.__dict__ with the attributes that __getattr__ will handle."""
454454
attr = dir(type(self))
@@ -638,7 +638,7 @@ def _load(self, filename, *args, **kargs):
638638
Raised if the first row does not start with 'TDI Format 1.5' or 'TDI Format=1.0'.
639639
640640
Note:
641-
The *_load* methods shouldbe overidden in each child class to handle the process of loading data from
641+
The *_load* methods should be overridden in each child class to handle the process of loading data from
642642
disc. If they encounter unexpected data, then they should raise StonerLoadError to signal this, so that
643643
the loading class can try a different sub-class instead.
644644
"""
@@ -916,7 +916,7 @@ def add_column(self, column_data, header=None, index=None, func_args=None, repla
916916
917917
Returns:
918918
self:
919-
The :py:class:`DataFile` instance with the additonal column inserted.
919+
The :py:class:`DataFile` instance with the additional column inserted.
920920
921921
Note:
922922
Like most :py:class:`DataFile` methods, this method operates in-place in that it also modifies
@@ -991,7 +991,7 @@ def add_column(self, column_data, header=None, index=None, func_args=None, repla
991991

992992
# If not replacing, then add extra columns to existing data.
993993
if not replace:
994-
colums = copy.copy(self.column_headers)
994+
columns = copy.copy(self.column_headers)
995995
old_setas = self.setas.clone
996996
if index == self.data.shape[1]: # appending column
997997
self.data = DataArray(np.append(self.data, np_data, axis=1), setas=self.setas.clone)
@@ -1003,10 +1003,10 @@ def add_column(self, column_data, header=None, index=None, func_args=None, repla
10031003
setas=self.setas.clone,
10041004
)
10051005
for ix in range(0, index):
1006-
self.column_headers[ix] = colums[ix]
1006+
self.column_headers[ix] = columns[ix]
10071007
self.setas[ix] = old_setas[ix]
10081008
for ix in range(index, dc):
1009-
self.column_headers[ix + cw] = colums[ix]
1009+
self.column_headers[ix + cw] = columns[ix]
10101010
self.setas[ix + cw] = old_setas[ix]
10111011
# Check that we don't need to expand to overwrite with the new data
10121012
if index + cw > self.shape[1]:
@@ -1069,7 +1069,7 @@ def del_column(self, col=None, duplicates=False):
10691069
- If duplicates is True and col is None then all duplicate columns are removed,
10701070
- if col is not None and duplicates is True then all duplicates of the specified column are removed.
10711071
- If duplicates is False and *col* is either None or False then all masked coplumns are deleeted. If
1072-
*col* is True, then all columns that are not set i the :py:attr:`setas` attrobute are delted.
1072+
*col* is True, then all columns that are not set in the :py:attr:`setas` attribute are deleted.
10731073
- If col is a list (duplicates should not be None) then the all the matching columns are found.
10741074
- If col is an iterable of booleans, then all columns whose elements are False are deleted.
10751075
- If col is None and duplicates is None, then all columns with at least one elelemtn masked
@@ -1142,7 +1142,7 @@ def del_nan(self, col=None, clone=False):
11421142
else: # Not cloning so ret is self
11431143
ret = self
11441144

1145-
if col is None: # If col is still None, use all columsn that are set to any value in self.setas
1145+
if col is None: # If col is still None, use all columns that are set to any value in self.setas
11461146
col = [ix for ix, col in enumerate(self.setas) if col != "."]
11471147
if not isLikeList(col): # If col isn't a list, make it one now
11481148
col = [col]
@@ -1163,13 +1163,13 @@ def del_rows(self, col=None, val=None, invert=False):
11631163
11641164
Args:
11651165
col (list,slice,int,string, re, callable or None):
1166-
Column containg values to search for.
1166+
Column containing values to search for.
11671167
val (float or callable):
11681168
Specifies rows to delete. Maybe:
11691169
- None - in which case the *col* argument is used to identify rows to be deleted,
11701170
- a float in which case rows whose columncol = val are deleted
11711171
- or a function - in which case rows where the function evaluates to be true are deleted.
1172-
- a tuple, in which case rows where column col takes value between the minium and maximum of
1172+
- a tuple, in which case rows where column col takes value between the minimum and maximum of
11731173
the tuple are deleted.
11741174
11751175
Keyword Arguments:
@@ -1247,7 +1247,7 @@ def del_rows(self, col=None, val=None, invert=False):
12471247
return self
12481248

12491249
def dir(self, pattern=None):
1250-
"""Return a list of keys in the metadata, filtering wiht a regular expression if necessary.
1250+
"""Return a list of keys in the metadata, filtering with a regular expression if necessary.
12511251
12521252
Keyword Arguments:
12531253
pattern (string or re):
@@ -1271,7 +1271,7 @@ def filter(self, func=None, cols=None, reset=True):
12711271
12721272
Args:
12731273
func (callable):
1274-
is a callable object that should take a single list as a p[arameter representing one row.
1274+
is a callable object that should take a single list as a parameter representing one row.
12751275
cols (list):
12761276
a list of column indices that are used to form the list of values passed to func.
12771277
reset (bool):
@@ -1357,7 +1357,7 @@ def load(cls, *args, **kargs):
13571357
13581358
Each subclass is scanned in turn for a class attribute priority which governs the order in which they
13591359
are tried. Subclasses which can make an early positive determination that a file has the correct format
1360-
can have higher priority levels. Classes should return a suitable expcetion if they fail to load the file.
1360+
can have higher priority levels. Classes should return a suitable exception if they fail to load the file.
13611361
13621362
If no class can load a file successfully then a StonerUnrecognisedFormat exception is raised.
13631363
"""
@@ -1569,7 +1569,7 @@ def to_pandas(self):
15691569
Notes:
15701570
In addition to transferring the numerical data, the DataFrame's columns are set to
15711571
a multi-level index of the :py:attr:`Stoner.Data.column_headers` and :py:attr:`Stoner.Data.setas`
1572-
calues. A pandas DataFrame extension attribute, *metadata* is registered and is used to store
1572+
values. A pandas DataFrame extension attribute, *metadata* is registered and is used to store
15731573
the metadata from the :py:class:`Stoner.Data` object. This pandas extension attribute is in fact a trivial
15741574
subclass of the :py:class:`Stoner.core.typeHintedDict`.
15751575

Stoner/FileFormats.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,6 @@
5858

5959
warn(
6060
"*" * 80
61-
+ "\nStoner.FileFormats is a depricated module - use Stoner.formats and it's sub-modules now!\n"
61+
+ "\nStoner.FileFormats is a deprecated module - use Stoner.formats and it's sub-modules now!\n"
6262
+ "*" * 80
6363
)

Stoner/Folders.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ class DataFolder(DataMethodsMixin, DiskBasedFolderMixin, baseFolder):
1414

1515
"""Provide an interface to manipulating lots of data files stored within a directory structure on disc.
1616
17-
By default, the members of the DataFolder are isntances of :class:`Stoner.Data`. The DataFolder emplys a lazy
17+
By default, the members of the DataFolder are instances of :class:`Stoner.Data`. The DataFolder employs a lazy
1818
open strategy, so that files are only read in from disc when actually needed.
1919
2020
.. inheritance-diagram:: DataFolder

Stoner/HDF5.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ def get_hdf_loader(f, default_loader=lambda *args, **kargs: None):
4141
typ = bytes2str(f.attrs.get("type", ""))
4242
if (typ not in globals() or not isinstance(globals()[typ], type)) and "module" not in f.attrs:
4343
raise StonerLoadError(
44-
"HDF5 Group does not speicify a recongized type and does not specify a module to use to load."
44+
"HDF5 Group does not specify a recognized type and does not specify a module to use to load."
4545
)
4646

4747
if "module" in f.attrs:
@@ -121,13 +121,13 @@ class HDF5File(DataFile):
121121
kargs (dict):
122122
Dictionary of keyword arguments
123123
124-
If the first non-keyword arguement is not an h5py File or Group then
124+
If the first non-keyword argument is not an h5py File or Group then
125125
initialises with a blank parent constructor and then loads data, otherwise,
126126
calls parent constructor.
127127
128128
Datalayout is dead simple, the numerical data is in a dataset called *data*,
129-
metadata are attribtutes of a group called *metadata* with the keynames being the
130-
full name + typehint of the stanard DataFile metadata dictionary
129+
metadata are attributes of a group called *metadata* with the keynames being the
130+
full name + typehint of the standard DataFile metadata dictionary
131131
*column_headers* are an attribute of the root file/group
132132
*filename* is set from either an attribute called filename, or from the
133133
group name or from the hdf5 filename.
@@ -392,7 +392,7 @@ def __getter__(self, name, instantiate=True):
392392
the baseFolder class uses a :py:class:`regexpDict` to store objects in.
393393
394394
Keyword Arguments:
395-
instatiate (bool):
395+
instantiate (bool):
396396
If True (default) then always return a :py:class:`Stoner.Core.Data` object. If False,
397397
the __getter__ method may return a key that can be used by it later to actually get the
398398
:py:class:`Stoner.Core.Data` object.
@@ -649,7 +649,7 @@ def _load(self, filename, *args, **kargs):
649649
return self
650650

651651
def scan_meta(self, group):
652-
"""Scan the HDF5 Group for atributes and datasets and sub groups and recursively add them to the metadata."""
652+
"""Scan the HDF5 Group for attributes and datasets and sub groups and recursively add them to the metadata."""
653653
root = ".".join(group.name.split("/")[2:])
654654
for name, thing in group.items():
655655
parts = thing.name.split("/")
@@ -684,7 +684,7 @@ def __init__(self, *args, **kargs):
684684
685685
Keyword Args:
686686
regrid (bool):
687-
If set True, the gridimage() method is automatically called to re-grid the image to known co-ordinates.
687+
If set True, the gridimage() method is automatically called to re-grid the image to known coordinates.
688688
"""
689689
regrid = kargs.pop("regrid", False)
690690
bcn = kargs.pop("bcn", False)

0 commit comments

Comments
 (0)