This commit is contained in:
Sven Riwoldt
2024-04-01 20:30:24 +02:00
parent fd333f3514
commit c7bc862c6f
6804 changed files with 1065135 additions and 0 deletions

View File

@@ -0,0 +1,11 @@
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Kernels Contributors
#
# Licensed under the terms of the MIT License
# (see spyder_kernels/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Utilities
"""

View File

@@ -0,0 +1,376 @@
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Kernels Contributors
#
# Licensed under the terms of the MIT License
# (see spyder_kernels/__init__.py for details)
# -----------------------------------------------------------------------------
"""Utilities and wrappers around inspect module"""
from __future__ import print_function
import inspect
import re
# Local imports:
from spyder_kernels.py3compat import (is_text_string, builtins, get_meth_func,
get_meth_class_inst, get_meth_class,
get_func_defaults, to_text_string, PY2)
# Characters that cannot appear inside a Python dotted name.
SYMBOLS = r"[^\'\"a-zA-Z0-9_.]"


def getobj(txt, last=False):
    """Return the last valid object name in string *txt*.

    If *txt* ends with an indexing/call suffix (``[...]`` or ``(...)``),
    that suffix is stripped before tokenizing and appended back to the
    returned name.  Return None when no valid name can be extracted.
    """
    txt_end = ""
    for startchar, endchar in ["[]", "()"]:
        if txt.endswith(endchar):
            pos = txt.rfind(startchar)
            # rfind returns -1 when startchar is absent; the previous
            # `if pos:` treated -1 as a valid position and wrongly moved
            # the last character of txt into txt_end.  Only split on a
            # real match (and, as before, not when the bracket is the
            # very first character).
            if pos > 0:
                txt_end = txt[pos:]
                txt = txt[:pos]
    tokens = re.split(SYMBOLS, txt)
    token = None
    try:
        # Walk backwards until we find a token that is a candidate name.
        while token is None or re.match(SYMBOLS, token):
            token = tokens.pop()
        if token.endswith('.'):
            token = token[:-1]
        if token.startswith('.'):
            # Invalid object name
            return None
        if last:
            #XXX: remove this statement as well as the "last" argument
            token += txt[ txt.rfind(token) + len(token) ]
        token += txt_end
        if token:
            return token
    except IndexError:
        return None
def getobjdir(obj):
    """Return dir(obj) restricted to text-string entries.

    Equivalent to plain dir(obj) for standard objects; some bindings
    (e.g. the WrapITK package) report non-string entries, which are
    filtered out here.
    """
    result = []
    for entry in dir(obj):
        if is_text_string(entry):
            result.append(entry)
    return result
def getdoc(obj):
    """
    Return text documentation from an object. This comes in a form of
    dictionary with four keys:

    name:
        The name of the inspected object
    argspec:
        Its argspec
    note:
        A phrase describing the type of object (function or method) we are
        inspecting, and the module it belongs to.
    docstring:
        Its docstring
    """
    docstring = inspect.getdoc(obj) or inspect.getcomments(obj) or ''

    # Most of the time doc will only contain ascii characters, but there are
    # some docstrings that contain non-ascii characters. Not all source files
    # declare their encoding in the first line, so querying for that might not
    # yield anything, either. So assume the most commonly used
    # multi-byte file encoding (which also covers ascii).
    try:
        docstring = to_text_string(docstring)
    except:
        pass

    # Doc dict keys
    doc = {'name': '',
           'argspec': '',
           'note': '',
           'docstring': docstring}

    if callable(obj):
        try:
            name = obj.__name__
        except AttributeError:
            # Callable without a __name__: only its docstring can be given.
            doc['docstring'] = docstring
            return doc
        if inspect.ismethod(obj):
            imclass = get_meth_class(obj)
            if get_meth_class_inst(obj) is not None:
                doc['note'] = 'Method of %s instance' \
                              % get_meth_class_inst(obj).__class__.__name__
            else:
                doc['note'] = 'Unbound %s method' % imclass.__name__
            # Inspect the underlying function from here on.
            obj = get_meth_func(obj)
        elif hasattr(obj, '__module__'):
            doc['note'] = 'Function of %s module' % obj.__module__
        else:
            doc['note'] = 'Function'
        doc['name'] = obj.__name__
        if inspect.isfunction(obj):
            if PY2:
                args, varargs, varkw, defaults = inspect.getargspec(obj)
                doc['argspec'] = inspect.formatargspec(
                    args, varargs, varkw, defaults,
                    formatvalue=lambda o:'='+repr(o))
            else:
                # This is necessary to catch errors for objects without a
                # signature, like numpy.where.
                # Fixes spyder-ide/spyder#21148
                try:
                    sig = inspect.signature(obj)
                except ValueError:
                    sig = getargspecfromtext(doc['docstring'])
                    if not sig:
                        sig = '(...)'
                doc['argspec'] = str(sig)
            if name == '<lambda>':
                doc['name'] = name + ' lambda '
                doc['argspec'] = doc['argspec'][1:-1]  # remove parentheses
        else:
            # Not a plain Python function (e.g. a builtin): try to recover
            # an argspec from the docstring itself.
            argspec = getargspecfromtext(doc['docstring'])
            if argspec:
                doc['argspec'] = argspec
                # Many scipy and numpy docstrings begin with a function
                # signature on the first line. This ends up being redundant
                # when we are using title and argspec to create the
                # rich text "Definition:" field. We'll carefully remove this
                # redundancy but only under a strict set of conditions:
                # Remove the starting characters of the 'doc' portion *iff*
                # the non-whitespace characters on the first line
                # match *exactly* the combined function title
                # and argspec we determined above.
                signature = doc['name'] + doc['argspec']
                docstring_blocks = doc['docstring'].split("\n\n")
                first_block = docstring_blocks[0].strip()
                if first_block == signature:
                    doc['docstring'] = doc['docstring'].replace(
                        signature, '', 1).lstrip()
            else:
                doc['argspec'] = '(...)'

        # Remove self from argspec
        argspec = doc['argspec']
        doc['argspec'] = argspec.replace('(self)', '()').replace('(self, ', '(')

    return doc
def getsource(obj):
    """Wrapper around inspect.getsource that never raises.

    Falls back to the source of obj's class, then to its documentation
    (bindings like VTK or ITK need this); returns None on failure.
    """
    try:
        try:
            return to_text_string(inspect.getsource(obj))
        except TypeError:
            if hasattr(obj, '__class__'):
                return to_text_string(inspect.getsource(obj.__class__))
            # Bindings like VTK or ITK require this case
            return getdoc(obj)
    except (TypeError, IOError):
        return
def getsignaturefromtext(text, objname):
    """Get object signature from text (i.e. object documentation).

    Returns the first plausible '(...)' signature found in `text` (or '').
    With an empty `objname`, any identifier followed by parentheses is
    accepted and only the parenthesized part is returned.
    """
    if isinstance(text, dict):
        text = text.get('docstring', '')

    # Regexps
    # NOTE(review): objname is interpolated unescaped into the pattern;
    # regex metacharacters in it would change the match -- confirm callers
    # only pass plain identifiers.
    args_re = r'(\(.+?\))'
    if objname:
        signature_re = objname + args_re
    else:
        identifier_re = r'(\w+)'
        signature_re = identifier_re + args_re

    # Grabbing signatures
    if not text:
        text = ''
    sigs = re.findall(signature_re, text)

    # The most relevant signature is usually the first one. There could be
    # others in doctests or other places, but those are not so important.
    sig = ''
    if sigs:
        if PY2:
            # We don't have an easy way to check if the identifier detected by
            # signature_re is a valid one in Python 2. So, we simply select the
            # first match.
            sig = sigs[0] if objname else sigs[0][1]
        else:
            # Default signatures returned by IPython.
            # Notes:
            # * These are not real signatures but only used to provide a
            #   placeholder.
            # * We skip them if we can find other signatures in `text`.
            # * This is necessary because we also use this function in Spyder
            #   to parse the content of inspect replies that come from the
            #   kernel, which can include these signatures.
            default_ipy_sigs = [
                '(*args, **kwargs)',
                '(self, /, *args, **kwargs)'
            ]

            if objname:
                real_sigs = [s for s in sigs if s not in default_ipy_sigs]

                if real_sigs:
                    sig = real_sigs[0]
                else:
                    sig = sigs[0]
            else:
                # Without an objname, each match is an (identifier, args)
                # pair; keep only those whose identifier is valid.
                valid_sigs = [s for s in sigs if s[0].isidentifier()]

                if valid_sigs:
                    real_sigs = [
                        s for s in valid_sigs if s[1] not in default_ipy_sigs
                    ]

                    if real_sigs:
                        sig = real_sigs[0][1]
                    else:
                        sig = valid_sigs[0][1]

    return sig
def getargspecfromtext(text):
    """
    Try to get the formatted argspec of a callable from the first block of its
    docstring.

    This will return something like `(x, y, k=1)`.
    """
    first_block = text.split("\n\n")[0]
    normalized = first_block.strip().replace('\n', '')
    return getsignaturefromtext(normalized, '')
def getargsfromtext(text, objname):
    """Get arguments from text (object documentation).

    Returns the comma-split argument strings, or None when no signature
    can be found in `text`.
    """
    signature = getsignaturefromtext(text, objname)
    if not signature:
        return None
    inner = signature[signature.find('(') + 1:-1]
    return inner.split(',')
def getargsfromdoc(obj):
    """Get arguments from the object's docstring (None if it has none)."""
    doc = obj.__doc__
    if doc is None:
        return None
    return getargsfromtext(doc, obj.__name__)
def getargs(obj):
    """Get the names and default values of a function's arguments.

    Returns a list of strings such as ['x', 'y=1'] (tuple parameters are
    rendered as '(a, b)'), an empty list for unsupported objects, or None
    for a class/method whose only argument is self.
    """
    if inspect.isfunction(obj) or inspect.isbuiltin(obj):
        func_obj = obj
    elif inspect.ismethod(obj):
        func_obj = get_meth_func(obj)
    elif inspect.isclass(obj) and hasattr(obj, '__init__'):
        func_obj = getattr(obj, '__init__')
    else:
        return []
    if not hasattr(func_obj, '__code__'):
        # Builtin: try to extract info from doc
        args = getargsfromdoc(func_obj)
        if args is not None:
            return args
        else:
            # Example: PyQt5
            return getargsfromdoc(obj)
    args, _, _ = inspect.getargs(func_obj.__code__)
    if not args:
        return getargsfromdoc(obj)

    # Supporting tuple arguments in def statement:
    for i_arg, arg in enumerate(args):
        if isinstance(arg, list):
            args[i_arg] = "(%s)" % ", ".join(arg)

    # Append '=<default>' to the trailing arguments that have defaults.
    defaults = get_func_defaults(func_obj)
    if defaults is not None:
        for index, default in enumerate(defaults):
            args[index + len(args) - len(defaults)] += '=' + repr(default)

    if inspect.isclass(obj) or inspect.ismethod(obj):
        if len(args) == 1:
            # Only 'self': report no arguments at all.
            return None
        # Remove 'self' from args
        if 'self' in args:
            args.remove('self')
    return args
def getargtxt(obj, one_arg_per_line=True):
    """
    Get the names and default values of a function's arguments
    Return list with separators (', ') formatted for calltips

    Returns None when getargs() found nothing, or for a class/method
    whose only argument was self.
    """
    args = getargs(obj)
    if args:
        sep = ', '
        textlist = None
        for i_arg, arg in enumerate(args):
            if textlist is None:
                textlist = ['']
            textlist[-1] += arg
            if i_arg < len(args)-1:
                # Not the last argument: append the separator, and start a
                # new line once the current one is long enough (or always,
                # in one-arg-per-line mode).
                textlist[-1] += sep
                if len(textlist[-1]) >= 32 or one_arg_per_line:
                    textlist.append('')
        if inspect.isclass(obj) or inspect.ismethod(obj):
            if len(textlist) == 1:
                return None
            if 'self'+sep in textlist:
                textlist.remove('self'+sep)
        return textlist
def isdefined(obj, force_import=False, namespace=None):
    """Return True if object is defined in namespace

    If namespace is None --> namespace = locals()

    `obj` is a dotted name ('a.b.c'); with force_import=True, missing
    modules/submodules are imported on the fly before deciding.
    """
    if namespace is None:
        namespace = locals()
    attr_list = obj.split('.')
    base = attr_list.pop(0)
    if len(base) == 0:
        return False
    if base not in builtins.__dict__ and base not in namespace:
        if force_import:
            try:
                module = __import__(base, globals(), namespace)
                if base not in globals():
                    globals()[base] = module
                namespace[base] = module
            except Exception:
                return False
        else:
            return False
    for attr in attr_list:
        try:
            # NOTE(review): evaluates the dotted name with eval; the
            # pieces come from splitting `obj` on '.', but they are not
            # otherwise validated as identifiers here.
            attr_not_found = not hasattr(eval(base, namespace), attr)
        except (AttributeError, SyntaxError, TypeError):
            return False
        if attr_not_found:
            if force_import:
                try:
                    # Try importing 'base.attr' as a submodule.
                    __import__(base+'.'+attr, globals(), namespace)
                except (ImportError, SyntaxError):
                    return False
            else:
                return False
        base += '.'+attr
    return True

View File

@@ -0,0 +1,557 @@
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Kernels Contributors
#
# Licensed under the terms of the MIT License
# (see spyder_kernels/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Input/Output Utilities
Note: 'load' functions has to return a dictionary from which a globals()
namespace may be updated
"""
from __future__ import print_function
# Standard library imports
import sys
import os
import os.path as osp
import tarfile
import tempfile
import shutil
import types
import json
import inspect
import dis
import copy
import glob
# Local imports
from spyder_kernels.py3compat import getcwd, pickle, PY2, to_text_string
from spyder_kernels.utils.lazymodules import (
FakeObject, numpy as np, pandas as pd, PIL, scipy as sp)
class MatlabStruct(dict):
    """
    Matlab style struct, enhanced.

    Supports dictionary and attribute style access.  Can be pickled,
    and supports code completion in a REPL.

    Examples
    ========
    >>> from spyder.utils.iofuncs import MatlabStruct
    >>> a = MatlabStruct()
    >>> a.b = 'spam'  # a["b"] == 'spam'
    >>> a.c["d"] = 'eggs'  # a.c.d == 'eggs'
    >>> print(a)
    {'c': {'d': 'eggs'}, 'b': 'spam'}
    """
    def __getattr__(self, attr):
        """Access the dictionary keys for unknown attributes."""
        try:
            return self[attr]
        except KeyError:
            msg = "'MatlabStruct' object has no attribute %s" % attr
            raise AttributeError(msg)

    def __getitem__(self, attr):
        """
        Get a dict value; create a MatlabStruct if requesting a submember.

        Do not create a key if the attribute starts with an underscore.
        """
        if attr in self.keys() or attr.startswith('_'):
            return dict.__getitem__(self, attr)
        frame = inspect.currentframe()
        # step into the function that called us
        if frame.f_back.f_back and self._is_allowed(frame.f_back.f_back):
            dict.__setitem__(self, attr, MatlabStruct())
        elif self._is_allowed(frame.f_back):
            dict.__setitem__(self, attr, MatlabStruct())
        return dict.__getitem__(self, attr)

    def _is_allowed(self, frame):
        """Check for allowed op code in the calling frame"""
        allowed = [dis.opmap['STORE_ATTR'], dis.opmap['LOAD_CONST'],
                   dis.opmap.get('STOP_CODE', 0)]
        bytecode = frame.f_code.co_code
        # Peek at what the caller will do with the value we return.
        # NOTE(review): the fixed +3 offset assumes a particular bytecode
        # layout -- confirm it still holds on newer CPython versions.
        instruction = bytecode[frame.f_lasti + 3]
        instruction = ord(instruction) if PY2 else instruction
        return instruction in allowed

    # Attribute writes/deletes go straight to the underlying dict.
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    @property
    def __dict__(self):
        """Allow for code completion in a REPL"""
        return self.copy()
def get_matlab_value(val):
    """
    Extract a value from a Matlab file

    From the oct2py project, see
    https://pythonhosted.org/oct2py/conversions.html

    Recursively converts numpy structures produced by scipy.io.loadmat
    into plain Python values / MatlabStruct instances.
    """
    # Extract each item of a list.
    if isinstance(val, list):
        return [get_matlab_value(v) for v in val]

    # Ignore leaf objects.
    if not isinstance(val, np.ndarray):
        return val

    # Convert user defined classes.
    if hasattr(val, 'classname'):
        out = dict()
        for name in val.dtype.names:
            out[name] = get_matlab_value(val[name].squeeze().tolist())
        # Build a new type named after the Matlab class, with the fields
        # as class attributes, and return an instance of it.
        cls = type(val.classname, (object,), out)
        return cls()

    # Extract struct data.
    elif val.dtype.names:
        out = MatlabStruct()
        for name in val.dtype.names:
            out[name] = get_matlab_value(val[name].squeeze().tolist())
        val = out

    # Extract cells.
    elif val.dtype.kind == 'O':
        val = val.squeeze().tolist()
        if not isinstance(val, list):
            val = [val]
        val = get_matlab_value(val)

    # Compress singleton values.
    elif val.size == 1:
        val = val.item()

    # Compress empty values.
    elif val.size == 0:
        if val.dtype.kind in 'US':
            val = ''
        else:
            val = []

    return val
def load_matlab(filename):
    """Load a .mat file as a dict; return a (data, error_message) pair."""
    if sp.io is FakeObject:
        # scipy.io is unavailable.
        return None, ''
    try:
        raw = sp.io.loadmat(filename, struct_as_record=True)
        data = {key: get_matlab_value(value) for key, value in raw.items()}
        return data, None
    except Exception as error:
        return None, str(error)
def save_matlab(data, filename):
    """Save dict `data` as a .mat file; return an error string on failure."""
    if sp.io is FakeObject:
        # scipy.io is unavailable.
        return None
    try:
        sp.io.savemat(filename, data, oned_as='row')
    except Exception as error:
        return str(error)
def load_array(filename):
    """Load a NumPy .npy/.npz file; return a (data, error_message) pair."""
    if np.load is FakeObject:
        # numpy is unavailable.
        return None, ''
    try:
        name = osp.splitext(osp.basename(filename))[0]
        data = np.load(filename)
        if isinstance(data, np.lib.npyio.NpzFile):
            # .npz archive: expose it as a plain dict of arrays.
            return dict(data), None
        if hasattr(data, 'keys'):
            return data, None
        # Single array: key it by the file's base name.
        return {name: data}, None
    except Exception as error:
        return None, str(error)
def __save_array(data, basename, index):
    """Save a numpy array to '<basename>_<index:04d>.npy'; return the path."""
    fname = '%s_%04d.npy' % (basename, index)
    np.save(fname, data)
    return fname
# Byte-order character for multi-byte dtypes, matching the host platform.
if sys.byteorder == 'little':
    _ENDIAN = '<'
else:
    _ENDIAN = '>'

# Map of PIL image modes to (numpy dtype string, extra axis length); the
# second item is the size of a trailing channel axis (None = 2-D image).
DTYPES = {
    "1": ('|b1', None),
    "L": ('|u1', None),
    "I": ('%si4' % _ENDIAN, None),
    "F": ('%sf4' % _ENDIAN, None),
    "I;16": ('|u2', None),
    "I;16S": ('%si2' % _ENDIAN, None),
    "P": ('|u1', None),
    "RGB": ('|u1', 3),
    "RGBX": ('|u1', 4),
    "RGBA": ('|u1', 4),
    "CMYK": ('|u1', 4),
    "YCbCr": ('|u1', 4),
}
def __image_to_array(filename):
    """Read an image file into a numpy array shaped (height, width[, bands]).

    Raises RuntimeError when the image mode is not listed in DTYPES.
    """
    img = PIL.Image.open(filename)
    try:
        dtype, extra = DTYPES[img.mode]
    except KeyError:
        raise RuntimeError("%s mode is not supported" % img.mode)
    shape = (img.size[1], img.size[0])
    if extra is not None:
        shape = shape + (extra,)
    flat = np.array(img.getdata(), dtype=np.dtype(dtype))
    return flat.reshape(shape)
def load_image(filename):
    """Load an image as a numpy array; return a (data, error_message) pair."""
    if PIL.Image is FakeObject or np.array is FakeObject:
        # PIL or numpy is unavailable.
        return None, ''
    try:
        name = osp.splitext(osp.basename(filename))[0]
        arr = __image_to_array(filename)
        return {name: arr}, None
    except Exception as error:
        return None, str(error)
def load_pickle(filename):
    """Load a pickle file; return a (data, error_message) pair."""
    try:
        if pd.read_pickle is not FakeObject:
            # Prefer pandas, which also handles pickled pandas objects.
            return pd.read_pickle(filename), None
        with open(filename, 'rb') as fid:
            return pickle.load(fid), None
    except Exception as err:
        return None, str(err)
def load_json(filename):
    """Load a json file; return a (data, error_message) pair."""
    # Python 2's json module wants a binary file object.
    mode = 'rb' if PY2 else 'r'
    try:
        with open(filename, mode) as fid:
            return json.load(fid), None
    except Exception as err:
        return None, str(err)
def save_dictionary(data, filename):
    """Save dictionary in a single .spydata file.

    The file is a tar archive containing a pickle of `data`, plus separate
    .npy files for numpy arrays found at the root or nested one level deep
    inside lists/dicts.  Returns an error message string, or None.
    """
    filename = osp.abspath(filename)
    old_cwd = getcwd()
    # Work from the target directory so archive members get bare names.
    os.chdir(osp.dirname(filename))
    error_message = None
    skipped_keys = []
    data_copy = {}

    try:
        # Copy dictionary before modifying it to fix #6689
        for obj_name, obj_value in data.items():
            # Skip modules, since they can't be pickled, users virtually never
            # would want them to be and so they don't show up in the skip list.
            # Skip callables, since they are only pickled by reference and thus
            # must already be present in the user's environment anyway.
            if not (callable(obj_value) or isinstance(obj_value,
                                                      types.ModuleType)):
                # If an object cannot be deepcopied, then it cannot be pickled.
                # Ergo, we skip it and list it later.
                try:
                    data_copy[obj_name] = copy.deepcopy(obj_value)
                except Exception:
                    skipped_keys.append(obj_name)
        data = data_copy
        if not data:
            raise RuntimeError('No supported objects to save')

        saved_arrays = {}
        if np.ndarray is not FakeObject:
            # Saving numpy arrays with np.save
            arr_fname = osp.splitext(filename)[0]
            for name in list(data.keys()):
                try:
                    if (isinstance(data[name], np.ndarray) and
                            data[name].size > 0):
                        # Save arrays at data root
                        fname = __save_array(data[name], arr_fname,
                                             len(saved_arrays))
                        saved_arrays[(name, None)] = osp.basename(fname)
                        data.pop(name)
                    elif isinstance(data[name], (list, dict)):
                        # Save arrays nested in lists or dictionaries
                        if isinstance(data[name], list):
                            iterator = enumerate(data[name])
                        else:
                            iterator = iter(list(data[name].items()))
                        to_remove = []
                        for index, value in iterator:
                            if (isinstance(value, np.ndarray) and
                                    value.size > 0):
                                fname = __save_array(value, arr_fname,
                                                     len(saved_arrays))
                                saved_arrays[(name, index)] = (
                                    osp.basename(fname))
                                to_remove.append(index)
                        # Pop from the end so list indices stay valid.
                        for index in sorted(to_remove, reverse=True):
                            data[name].pop(index)
                except (RuntimeError, pickle.PicklingError, TypeError,
                        AttributeError, IndexError):
                    # If an array can't be saved with numpy for some reason,
                    # leave the object intact and try to save it normally.
                    pass
        if saved_arrays:
            data['__saved_arrays__'] = saved_arrays

        pickle_filename = osp.splitext(filename)[0] + '.pickle'
        # Attempt to pickle everything.
        # If pickling fails, iterate through to eliminate problem objs & retry.
        with open(pickle_filename, 'w+b') as fdesc:
            try:
                pickle.dump(data, fdesc, protocol=2)
            except (pickle.PicklingError, AttributeError, TypeError,
                    ImportError, IndexError, RuntimeError):
                data_filtered = {}
                for obj_name, obj_value in data.items():
                    try:
                        pickle.dumps(obj_value, protocol=2)
                    except Exception:
                        skipped_keys.append(obj_name)
                    else:
                        data_filtered[obj_name] = obj_value
                if not data_filtered:
                    raise RuntimeError('No supported objects to save')
                pickle.dump(data_filtered, fdesc, protocol=2)

        # Use PAX (POSIX.1-2001) format instead of default GNU.
        # This improves interoperability and UTF-8/long variable name support.
        with tarfile.open(filename, "w", format=tarfile.PAX_FORMAT) as tar:
            for fname in ([pickle_filename]
                          + [fn for fn in list(saved_arrays.values())]):
                tar.add(osp.basename(fname))
                os.remove(fname)
    except (RuntimeError, pickle.PicklingError, TypeError) as error:
        error_message = to_text_string(error)
    else:
        if skipped_keys:
            skipped_keys.sort()
            error_message = ('Some objects could not be saved: '
                            + ', '.join(skipped_keys))
    finally:
        # Always restore the original working directory.
        os.chdir(old_cwd)
    return error_message
def is_within_directory(directory, target):
    """Check if `target` lies inside `directory` (or is the directory itself).

    Uses os.path.commonpath, which compares whole path components.  The
    previous os.path.commonprefix implementation compared raw characters,
    so e.g. '/tmp/foobar' was wrongly reported as inside '/tmp/foo'.
    """
    abs_directory = os.path.abspath(directory)
    abs_target = os.path.abspath(target)
    try:
        return os.path.commonpath([abs_directory, abs_target]) == abs_directory
    except ValueError:
        # Paths on different drives (Windows) share no common path.
        return False
def safe_extract(tar, path=".", members=None, numeric_owner=False):
    """Safely extract a tar file.

    Refuses archives containing members that would be written outside
    `path` (path-traversal protection); otherwise delegates to
    tar.extractall with the same arguments.
    """
    for member in tar.getmembers():
        member_path = os.path.join(path, member.name)
        if not is_within_directory(path, member_path):
            raise Exception(
                "Attempted path traversal in tar file {}".format(
                    repr(tar.name)
                )
            )
    tar.extractall(path, members, numeric_owner=numeric_owner)
def load_dictionary(filename):
    """Load dictionary from .spydata file.

    Returns (data, error_message); `data` is None on failure.  The archive
    is unpacked into a temporary directory that is always removed.
    """
    filename = osp.abspath(filename)
    old_cwd = getcwd()
    tmp_folder = tempfile.mkdtemp()
    os.chdir(tmp_folder)
    data = None
    error_message = None
    try:
        with tarfile.open(filename, "r") as tar:
            if PY2:
                tar.extractall()
            else:
                # Guard against malicious archives (path traversal).
                safe_extract(tar)

        pickle_filename = glob.glob('*.pickle')[0]
        # 'New' format (Spyder >=2.2 for Python 2 and Python 3)
        with open(pickle_filename, 'rb') as fdesc:
            data = pickle.loads(fdesc.read())

        saved_arrays = {}
        if np.load is not FakeObject:
            # Loading numpy arrays saved with np.save
            try:
                saved_arrays = data.pop('__saved_arrays__')
                for (name, index), fname in list(saved_arrays.items()):
                    arr = np.load(osp.join(tmp_folder, fname),
                                  allow_pickle=True)
                    if index is None:
                        # Array stored at the root of the namespace.
                        data[name] = arr
                    elif isinstance(data[name], dict):
                        data[name][index] = arr
                    else:
                        # Array nested in a list: restore at its position.
                        data[name].insert(index, arr)
            except KeyError:
                pass
    # Except AttributeError from e.g. trying to load function no longer present
    except (AttributeError, EOFError, ValueError) as error:
        error_message = to_text_string(error)
    # To ensure working dir gets changed back and temp dir wiped no matter what
    finally:
        os.chdir(old_cwd)
        try:
            shutil.rmtree(tmp_folder)
        except OSError as error:
            error_message = to_text_string(error)
    return data, error_message
class IOFunctions(object):
    """Registry mapping file extensions to load/save handlers and the
    corresponding file-dialog filter strings."""

    def __init__(self):
        # All attributes are populated by setup().
        self.load_extensions = None
        self.save_extensions = None
        self.load_filters = None
        self.save_filters = None
        self.load_funcs = None
        self.save_funcs = None

    def setup(self):
        """Build extension/function/filter tables from the internal
        handlers plus any third-party plugin handlers."""
        iofuncs = self.get_internal_funcs()+self.get_3rd_party_funcs()
        load_extensions = {}
        save_extensions = {}
        load_funcs = {}
        save_funcs = {}
        load_filters = []
        save_filters = []
        load_ext = []
        for ext, name, loadfunc, savefunc in iofuncs:
            filter_str = to_text_string(name + " (*%s)" % ext)
            if loadfunc is not None:
                load_filters.append(filter_str)
                load_extensions[filter_str] = ext
                load_funcs[ext] = loadfunc
                load_ext.append(ext)
            if savefunc is not None:
                save_extensions[filter_str] = ext
                save_filters.append(filter_str)
                save_funcs[ext] = savefunc
        # Put a catch-all "Supported files" entry first in the load list.
        load_filters.insert(0, to_text_string("Supported files"+" (*"+\
                                              " *".join(load_ext)+")"))
        load_filters.append(to_text_string("All files (*.*)"))
        self.load_filters = "\n".join(load_filters)
        self.save_filters = "\n".join(save_filters)
        self.load_funcs = load_funcs
        self.save_funcs = save_funcs
        self.load_extensions = load_extensions
        self.save_extensions = save_extensions

    def get_internal_funcs(self):
        """Return the built-in (ext, name, load, save) handler tuples.

        NOTE(review): string entries like 'import_wizard' appear to be
        sentinels consumed elsewhere instead of callables -- confirm.
        """
        return [
            ('.spydata', "Spyder data files",
             load_dictionary, save_dictionary),
            ('.npy', "NumPy arrays", load_array, None),
            ('.npz', "NumPy zip arrays", load_array, None),
            ('.mat', "Matlab files", load_matlab, save_matlab),
            ('.csv', "CSV text files", 'import_wizard', None),
            ('.txt', "Text files", 'import_wizard', None),
            ('.jpg', "JPEG images", load_image, None),
            ('.png', "PNG images", load_image, None),
            ('.gif', "GIF images", load_image, None),
            ('.tif', "TIFF images", load_image, None),
            ('.pkl', "Pickle files", load_pickle, None),
            ('.pickle', "Pickle files", load_pickle, None),
            ('.json', "JSON files", load_json, None),
        ]

    def get_3rd_party_funcs(self):
        """Collect (ext, name, load, save) tuples from Spyder IO plugins."""
        other_funcs = []
        try:
            from spyder.otherplugins import get_spyderplugins_mods
            for mod in get_spyderplugins_mods(io=True):
                try:
                    other_funcs.append((mod.FORMAT_EXT, mod.FORMAT_NAME,
                                        mod.FORMAT_LOAD, mod.FORMAT_SAVE))
                except AttributeError as error:
                    # Report, but don't fail on, incomplete plugins.
                    print("%s: %s" % (mod, str(error)), file=sys.stderr)
        except ImportError:
            pass
        return other_funcs

    def save(self, data, filename):
        """Save `data` to `filename` via the handler for its extension."""
        ext = osp.splitext(filename)[1].lower()
        if ext in self.save_funcs:
            return self.save_funcs[ext](data, filename)
        else:
            return "<b>Unsupported file type '%s'</b>" % ext

    def load(self, filename):
        """Load `filename` via the handler registered for its extension."""
        ext = osp.splitext(filename)[1].lower()
        if ext in self.load_funcs:
            return self.load_funcs[ext](filename)
        else:
            return None, "<b>Unsupported file type '%s'</b>" % ext
# Module-level registry instance, configured once at import time.
iofunctions = IOFunctions()
iofunctions.setup()


def save_auto(data, filename):
    """Save data into filename, depending on file extension"""
    # NOTE(review): unimplemented placeholder -- no save is performed.
    pass
if __name__ == "__main__":
import datetime
testdict = {'d': 1, 'a': np.random.rand(10, 10), 'b': [1, 2]}
testdate = datetime.date(1945, 5, 8)
example = {'str': 'kjkj kj k j j kj k jkj',
'unicode': u'éù',
'list': [1, 3, [4, 5, 6], 'kjkj', None],
'tuple': ([1, testdate, testdict], 'kjkj', None),
'dict': testdict,
'float': 1.2233,
'array': np.random.rand(4000, 400),
'empty_array': np.array([]),
'date': testdate,
'datetime': datetime.datetime(1945, 5, 8),
}
import time
t0 = time.time()
save_dictionary(example, "test.spydata")
print(" Data saved in %.3f seconds" % (time.time()-t0)) # spyder: test-skip
t0 = time.time()
example2, ok = load_dictionary("test.spydata")
os.remove("test.spydata")
print("Data loaded in %.3f seconds" % (time.time()-t0)) # spyder: test-skip

View File

@@ -0,0 +1,69 @@
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Kernels Contributors
#
# Licensed under the terms of the MIT License
# (see spyder_kernels/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Lazy modules.
They are useful to not import big modules until it's really necessary.
"""
from spyder_kernels.utils.misc import is_module_installed
# =============================================================================
# Auxiliary classes
# =============================================================================
class FakeObject(object):
    """Placeholder class standing in for objects that are missing."""
class LazyModule(object):
    """Lazy module loader class."""

    def __init__(self, modname, second_level_attrs=None):
        """
        Lazy module loader class.

        Parameters
        ----------
        modname: str
            Module name to lazy load.
        second_level_attrs: list (optional)
            List of second level attributes to add to the FakeObject
            that stands for the module in case it's not found.
        """
        self.__spy_modname__ = modname
        self.__spy_mod__ = FakeObject

        # Set required second level attributes
        # NOTE: at this point __spy_mod__ is the shared FakeObject class,
        # so these attributes are set on FakeObject itself.
        if second_level_attrs is not None:
            for attr in second_level_attrs:
                setattr(self.__spy_mod__, attr, FakeObject)

    def __getattr__(self, name):
        # Import the real module on first attribute access; if it's not
        # installed, return the FakeObject placeholder instead.
        if is_module_installed(self.__spy_modname__):
            self.__spy_mod__ = __import__(self.__spy_modname__)
        else:
            return self.__spy_mod__
        # NOTE(review): for dotted names (e.g. 'PIL.Image') __import__
        # returns the top-level package; the getattr below then relies on
        # the submodule having been bound on it by the import -- confirm.
        return getattr(self.__spy_mod__, name)
# =============================================================================
# Lazy modules
# =============================================================================
# Lazy stand-ins for heavy optional dependencies: the real module is only
# imported on first attribute access (FakeObject is returned when missing).
numpy = LazyModule('numpy', ['MaskedArray'])
pandas = LazyModule('pandas')
PIL = LazyModule('PIL.Image', ['Image'])
bs4 = LazyModule('bs4', ['NavigableString'])
scipy = LazyModule('scipy.io')

View File

@@ -0,0 +1,50 @@
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Kernels Contributors
#
# Licensed under the terms of the MIT License
# (see spyder_kernels/__init__.py for details)
# -----------------------------------------------------------------------------
"""Miscellaneous utilities"""
import re
from spyder_kernels.py3compat import lru_cache
@lru_cache(maxsize=100)
def is_module_installed(module_name):
    """
    Simpler version of spyder.utils.programs.is_module_installed.

    True when `module_name` imports and has a real __file__ behind it.
    """
    try:
        mod = __import__(module_name)
        # A module whose only trace is a __pycache__ directory imports
        # without a __file__; treat that case as "not installed".
        return bool(getattr(mod, '__file__', None))
    except Exception:
        # Module is not installed (or failed to import)
        return False
def fix_reference_name(name, blacklist=None):
    """Return a syntax-valid Python reference name from an arbitrary name.

    Non-word characters are removed, leading non-letters are dropped, an
    empty result becomes "data", and names present in `blacklist` get a
    '_NNN' suffix to make them unique.
    """
    # Keep only [0-9a-zA-Z_] characters.
    name = "".join(re.split(r'[^0-9a-zA-Z_]', name))
    # Drop leading characters until the name starts with an ASCII letter
    # (only ASCII word characters remain after the filter above).
    while name and not name[0].isalpha():
        name = name[1:]
    if not name:
        name = "data"
    if blacklist is not None and name in blacklist:
        index = 0
        while '%s_%03d' % (name, index) in blacklist:
            index += 1
        name = '%s_%03d' % (name, index)
    return name

View File

@@ -0,0 +1,56 @@
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Kernels Contributors
#
# Licensed under the terms of the MIT License
# (see spyder_kernels/__init__.py for details)
# -----------------------------------------------------------------------------
"""Matplotlib utilities."""
from spyder_kernels.utils.misc import is_module_installed
# Mapping of inline figure formats (Spyder option value -> format name)
INLINE_FIGURE_FORMATS = {
    '0': 'png',
    '1': 'svg'
}

# Inline backend: prefer the standalone matplotlib_inline package and fall
# back to the copy bundled with ipykernel.
if is_module_installed('matplotlib_inline'):
    inline_backend = 'module://matplotlib_inline.backend_inline'
else:
    inline_backend = 'module://ipykernel.pylab.backend_inline'

# Mapping of matplotlib backends options to Spyder
MPL_BACKENDS_TO_SPYDER = {
    inline_backend: 0,
    'Qt5Agg': 2,
    'QtAgg': 2,  # For Matplotlib 3.5+
    'TkAgg': 3,
    'MacOSX': 4,
}
def automatic_backend():
    """Pick the Matplotlib backend option for the 'automatic' setting.

    Prefers Qt5, then Tk, falling back to the inline backend.
    """
    if is_module_installed('PyQt5'):
        return 'qt5'
    if is_module_installed('_tkinter'):
        return 'tk'
    return 'inline'
# Mapping of Spyder options to backends
MPL_BACKENDS_FROM_SPYDER = {
    '0': 'inline',
    '1': automatic_backend(),  # resolved once at import time
    '2': 'qt5',
    '3': 'tk',
    '4': 'osx'
}

View File

@@ -0,0 +1,716 @@
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Kernels Contributors
#
# Licensed under the terms of the MIT License
# (see spyder_kernels/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Utilities to build a namespace view.
"""
from __future__ import print_function
from itertools import islice
import inspect
import re
# Local imports
from spyder_kernels.py3compat import (NUMERIC_TYPES, INT_TYPES, TEXT_TYPES,
to_text_string, is_text_string,
is_type_text_string,
is_binary_string, PY2,
to_binary_string, iteritems)
from spyder_kernels.utils.lazymodules import (
bs4, FakeObject, numpy as np, pandas as pd, PIL)
#==============================================================================
# Numpy support
#==============================================================================
def get_numeric_numpy_types():
    """Return the tuple of concrete NumPy numeric (and bool) scalar types."""
    int_types = (np.int64, np.int32, np.int16, np.int8)
    uint_types = (np.uint64, np.uint32, np.uint16, np.uint8)
    float_types = (np.float64, np.float32, np.float16)
    complex_types = (np.complex64, np.complex128)
    return int_types + uint_types + float_types + complex_types + (np.bool_,)
def get_numpy_dtype(obj):
    """
    Return Numpy data type associated to `obj`.

    Return None if Numpy is not available, if we get errors or if `obj` is not
    a Numpy array or scalar.
    """
    # Check if NumPy is available
    if np.ndarray is not FakeObject:
        # All Numpy scalars inherit from np.generic and all Numpy arrays
        # inherit from np.ndarray. If we check that we are certain we have one
        # of these types then we are less likely to generate an exception
        # below.
        # Note: The try/except is necessary to fix spyder-ide/spyder#19516.
        try:
            scalar_or_array = (
                isinstance(obj, np.generic) or isinstance(obj, np.ndarray)
            )
        except Exception:
            return

        if scalar_or_array:
            try:
                return obj.dtype.type
            except (AttributeError, RuntimeError):
                # AttributeError: some NumPy objects have no dtype attribute
                # RuntimeError: happens with NetCDF objects (Issue 998)
                return
def get_numpy_type_string(value):
    """Classify a Numpy object as 'Scalar', 'Array' or 'Unknown'."""
    np_dtype = get_numpy_dtype(value)
    if np_dtype is None or not hasattr(value, 'size'):
        return 'Unknown'
    return 'Scalar' if value.size == 1 else 'Array'
#==============================================================================
# Misc.
#==============================================================================
def address(obj):
    """Return the object's address formatted as '<classname @ 0x...>'."""
    hex_id = hex(id(obj)).upper().replace('X', 'x')
    return "<{} @ {}>".format(obj.__class__.__name__, hex_id)
def try_to_eval(value):
    """Evaluate `value` as Python source; return it unchanged on failure."""
    try:
        result = eval(value)
    except (NameError, SyntaxError, ImportError):
        return value
    return result
def get_size(item):
    """
    Return shape/size/len of an item of arbitrary type.

    Returns a tuple (shape) when available, an integer otherwise, and falls
    back to 1 (a single item) whenever the size cannot be determined.
    """
    try:
        if (
            hasattr(item, 'size') and hasattr(item.size, 'compute') or
            hasattr(item, 'shape') and hasattr(item.shape, 'compute')
        ):
            # This is necessary to avoid an error when trying to
            # get the size/shape of dask objects. We don't compute the
            # size/shape since such operation could be expensive.
            # Fixes spyder-ide/spyder#16844
            return 1
        elif (
            hasattr(item, 'shape') and
            isinstance(item.shape, (tuple, np.integer))
        ):
            try:
                if item.shape:
                    # This is needed since values could return as
                    # `shape` an instance of a `tuple` subclass.
                    # See spyder-ide/spyder#16348
                    if isinstance(item.shape, tuple):
                        return tuple(item.shape)
                    return item.shape
                else:
                    # Scalar value
                    return 1
            except RecursionError:
                # This is necessary to avoid an error when trying to
                # get the shape of these objects.
                # Fixes spyder-ide/spyder-kernels#217
                return (-1, -1)
        elif (hasattr(item, 'size') and
              isinstance(item.size, (tuple, np.integer))):
            try:
                return item.size
            except RecursionError:
                return (-1, -1)
        elif hasattr(item, '__len__'):
            return len(item)
        else:
            return 1
    except Exception:
        # There is one item
        return 1
def get_object_attrs(obj):
    """
    Get the attributes of an object using dir.

    This filters protected attributes
    """
    public = [name for name in dir(obj) if not name.startswith('__')]
    # If everything is a dunder, fall back to the full dir() listing.
    return public if public else dir(obj)
#==============================================================================
# Date and datetime objects support
#==============================================================================
import datetime

try:
    from dateutil.parser import parse as dateparse
except ImportError:
    # Fallback when python-dateutil is unavailable. Only catch ImportError:
    # the original bare `except:` also swallowed SystemExit/KeyboardInterrupt.
    def dateparse(datestr):  # analysis:ignore
        """Just for 'year, month, day' strings"""
        return datetime.datetime( *list(map(int, datestr.split(','))) )
def datestr_to_datetime(value):
    """Parse a 'datetime.datetime(...)'-style repr string into a datetime."""
    # Keep only the text between the last '(' and the closing ')'.
    start = value.rfind('(') + 1
    parsed = dateparse(value[start:-1])
    print(value, "-->", parsed)  # spyder: test-skip
    return parsed
def str_to_timedelta(value):
    """Convert a string to a datetime.timedelta value.

    The following strings are accepted:

        - 'datetime.timedelta(1, 5, 12345)'
        - 'timedelta(1, 5, 12345)'
        - '(1, 5, 12345)'
        - '1, 5, 12345'
        - '1'

    if there are less then three parameters, the missing parameters are
    assumed to be 0. Variations in the spacing of the parameters are allowed.

    Raises:
        ValueError for strings not matching the above criterion.
    """
    # Optional 'datetime.timedelta'/'timedelta' prefix, optional parentheses,
    # capture the comma-separated arguments in between.
    pattern = (r'^(?:(?:datetime\.)?timedelta)?'
               r'\(?'
               r'([^)]*)'
               r'\)?$')
    match = re.match(pattern, value)
    if match is None:
        raise ValueError('Invalid string for datetime.timedelta')
    fields = match.group(1).split(',')
    return datetime.timedelta(*(int(field.strip()) for field in fields))
#==============================================================================
# Supported types
#==============================================================================
def is_editable_type(value):
    """
    Return True if data type is editable with a standard GUI-based editor,
    like CollectionsEditor, ArrayEditor, QDateEdit or a simple QLineEdit.
    """
    if not is_known_type(value):
        return False
    supported_types = [
        'bool', 'int', 'long', 'float', 'complex', 'list', 'set', 'dict',
        'tuple', 'str', 'unicode', 'NDArray', 'MaskedArray', 'Matrix',
        'DataFrame', 'Series', 'PIL.Image.Image', 'datetime.date',
        'datetime.timedelta'
    ]
    if get_type_string(value) in supported_types or isinstance(value, pd.Index):
        return True
    # Otherwise, only Numpy scalars/arrays (known dtype and a size) qualify.
    np_dtype = get_numpy_dtype(value)
    return np_dtype is not None and hasattr(value, 'size')
#==============================================================================
# Sorting
#==============================================================================
def sort_against(list1, list2, reverse=False, sort_key=None):
    """
    Arrange items of list1 in the same order as sorted(list2).

    In other words, apply to list1 the permutation which takes list2
    to sorted(list2, reverse). If the elements of list2 cannot be
    compared (e.g. mixed types on Python 3), list1 is returned unchanged.
    """
    if sort_key is None:
        key = lambda x: x[0]
    else:
        key = lambda x: sort_key(x[0])
    try:
        return [item for _, item in
                sorted(zip(list2, list1), key=key, reverse=reverse)]
    # `except Exception` instead of a bare `except`, so that SystemExit and
    # KeyboardInterrupt are not silently swallowed here.
    except Exception:
        return list1
def unsorted_unique(lista):
    """Remove duplicates from lista, neglecting its initial ordering."""
    # set() both deduplicates and discards the original order.
    unique_items = set(lista)
    return list(unique_items)
#==============================================================================
# Display <--> Value
#==============================================================================
def default_display(value, with_module=True):
    """Default display for unknown objects."""
    object_type = type(value)
    try:
        name = object_type.__name__
        module = object_type.__module__
        # Classes correspond to new types
        if name == 'type':
            name = 'class'
        if not with_module:
            return name
        if name == 'module':
            return value.__name__ + ' module'
        if module == 'builtins':
            return name + ' object'
        return name + ' object of ' + module + ' module'
    except Exception:
        # Strip the surrounding '<...>' from the type's repr.
        type_str = to_text_string(object_type)
        return type_str[1:-1]
def collections_display(value, level):
    """
    Display for collections (i.e. list, set, tuple and dict).

    `level` is the recursion depth: at level 1 up to 10 elements are shown,
    at level 2 up to 5, and deeper levels collapse to '...'.
    """
    is_dict = isinstance(value, dict)
    is_set = isinstance(value, set)
    # Get elements
    if is_dict:
        # NOTE(review): `iteritems` and `islice` come from imports earlier in
        # this module (py3compat/itertools, presumably) -- not visible here.
        elements = iteritems(value)
    else:
        elements = value
    # Truncate values
    truncate = False
    if level == 1 and len(value) > 10:
        elements = islice(elements, 10) if is_dict or is_set else value[:10]
        truncate = True
    elif level == 2 and len(value) > 5:
        elements = islice(elements, 5) if is_dict or is_set else value[:5]
        truncate = True
    # Get display of each element
    if level <= 2:
        if is_dict:
            displays = [value_to_display(k, level=level) + ':' +
                        value_to_display(v, level=level)
                        for (k, v) in list(elements)]
        else:
            displays = [value_to_display(e, level=level)
                        for e in elements]
        if truncate:
            displays.append('...')
        display = ', '.join(displays)
    else:
        display = '...'
    # Return display
    if is_dict:
        display = '{' + display + '}'
    elif isinstance(value, list):
        display = '[' + display + ']'
    elif isinstance(value, set):
        display = '{' + display + '}'
    else:
        display = '(' + display + ')'
    return display
def value_to_display(value, minmax=False, level=0):
    """
    Convert value for display purpose.

    `minmax`: for numeric Numpy arrays, show 'Min/Max' instead of the array
    text. `level` is the nesting depth when called recursively from
    `collections_display`. The result is truncated to ~70 chars.
    """
    # To save current Numpy printoptions
    np_printoptions = FakeObject
    numeric_numpy_types = get_numeric_numpy_types()
    try:
        if np.ndarray is not FakeObject:
            # Save printoptions
            np_printoptions = np.get_printoptions()
            # Set max number of elements to show for Numpy arrays
            # in our display
            np.set_printoptions(threshold=10)
        if isinstance(value, np.recarray):
            if level == 0:
                fields = value.names
                display = 'Field names: ' + ', '.join(fields)
            else:
                display = 'Recarray'
        elif isinstance(value, np.ma.MaskedArray):
            display = 'Masked array'
        elif isinstance(value, np.ndarray):
            if level == 0:
                if minmax:
                    try:
                        display = 'Min: %r\nMax: %r' % (value.min(), value.max())
                    except (TypeError, ValueError):
                        if value.dtype.type in numeric_numpy_types:
                            display = str(value)
                        else:
                            display = default_display(value)
                elif value.dtype.type in numeric_numpy_types:
                    display = str(value)
                else:
                    display = default_display(value)
            else:
                display = 'Numpy array'
        # Exact type match (not isinstance) so subclasses get default_display.
        elif any([type(value) == t for t in [list, set, tuple, dict]]):
            display = collections_display(value, level+1)
        elif isinstance(value, PIL.Image.Image):
            if level == 0:
                display = '%s Mode: %s' % (address(value), value.mode)
            else:
                display = 'Image'
        elif isinstance(value, pd.DataFrame):
            if level == 0:
                cols = value.columns
                if PY2 and len(cols) > 0:
                    # Get rid of possible BOM utf-8 data present at the
                    # beginning of a file, which gets attached to the first
                    # column header when headers are present in the first
                    # row.
                    # Fixes Issue 2514
                    try:
                        ini_col = to_text_string(cols[0], encoding='utf-8-sig')
                    except:
                        ini_col = to_text_string(cols[0])
                    cols = [ini_col] + [to_text_string(c) for c in cols[1:]]
                else:
                    cols = [to_text_string(c) for c in cols]
                display = 'Column names: ' + ', '.join(list(cols))
            else:
                display = 'Dataframe'
        elif isinstance(value, bs4.element.NavigableString):
            # Fixes Issue 2448
            display = to_text_string(value)
            if level > 0:
                display = u"'" + display + u"'"
        elif isinstance(value, pd.Index):
            if level == 0:
                try:
                    display = value._summary()
                except AttributeError:
                    display = value.summary()
            else:
                display = 'Index'
        elif is_binary_string(value):
            # We don't apply this to classes that extend string types
            # See issue 5636
            if is_type_text_string(value):
                try:
                    display = to_text_string(value, 'utf8')
                    if level > 0:
                        display = u"'" + display + u"'"
                except:
                    display = value
                    if level > 0:
                        display = b"'" + display + b"'"
            else:
                display = default_display(value)
        elif is_text_string(value):
            # We don't apply this to classes that extend string types
            # See issue 5636
            if is_type_text_string(value):
                display = value
                if level > 0:
                    display = u"'" + display + u"'"
            else:
                display = default_display(value)
        elif (isinstance(value, datetime.date) or
              isinstance(value, datetime.timedelta)):
            display = str(value)
        elif (isinstance(value, NUMERIC_TYPES) or
              isinstance(value, bool) or
              isinstance(value, numeric_numpy_types)):
            display = repr(value)
        else:
            if level == 0:
                display = default_display(value)
            else:
                display = default_display(value, with_module=False)
    except Exception:
        display = default_display(value)
    # Truncate display at 70 chars to avoid freezing Spyder
    # because of large displays
    if len(display) > 70:
        if is_binary_string(display):
            ellipses = b' ...'
        else:
            ellipses = u' ...'
        display = display[:70].rstrip() + ellipses
    # Restore Numpy printoptions
    if np_printoptions is not FakeObject:
        np.set_printoptions(**np_printoptions)
    return display
def display_to_value(value, default_value, ignore_errors=True):
    """
    Convert back to value.

    `value` is the (Qt) text typed by the user, and `default_value` supplies
    the target type. When `ignore_errors` is True, conversion failures fall
    back to `try_to_eval`; otherwise the original `default_value` is kept.
    """
    from qtpy.compat import from_qvariant
    value = from_qvariant(value, to_text_string)
    try:
        np_dtype = get_numpy_dtype(default_value)
        if isinstance(default_value, bool):
            # We must test for boolean before NumPy data types
            # because `bool` class derives from `int` class
            try:
                value = bool(float(value))
            except ValueError:
                value = value.lower() == "true"
        elif np_dtype is not None:
            if 'complex' in str(type(default_value)):
                value = np_dtype(complex(value))
            else:
                value = np_dtype(value)
        elif is_binary_string(default_value):
            value = to_binary_string(value, 'utf8')
        elif is_text_string(default_value):
            value = to_text_string(value)
        elif isinstance(default_value, complex):
            value = complex(value)
        elif isinstance(default_value, float):
            value = float(value)
        elif isinstance(default_value, int):
            try:
                value = int(value)
            except ValueError:
                value = float(value)
        elif isinstance(default_value, datetime.datetime):
            value = datestr_to_datetime(value)
        elif isinstance(default_value, datetime.date):
            value = datestr_to_datetime(value).date()
        elif isinstance(default_value, datetime.timedelta):
            value = str_to_timedelta(value)
        elif ignore_errors:
            value = try_to_eval(value)
        else:
            value = eval(value)
    except (ValueError, SyntaxError):
        if ignore_errors:
            value = try_to_eval(value)
        else:
            return default_value
    return value
# =============================================================================
# Types
# =============================================================================
def get_type_string(item):
    """Return type string of an object."""
    # The try/except is necessary to fix spyder-ide/spyder#19516.
    try:
        # Numpy checks go from most to least specific (don't change the order!)
        if isinstance(item, np.ma.MaskedArray):
            return "MaskedArray"
        if isinstance(item, np.matrix):
            return "Matrix"
        if isinstance(item, np.ndarray):
            return "NDArray"
        # Pandas objects
        if isinstance(item, pd.DataFrame):
            return "DataFrame"
        if isinstance(item, pd.Index):
            return type(item).__name__
        if isinstance(item, pd.Series):
            return "Series"
    except Exception:
        pass
    # Fall back to parsing the type's repr, e.g. "<class 'int'>".
    matches = re.findall(r"<(?:type|class) '(\S*)'>",
                         to_text_string(type(item)))
    if not matches:
        return 'Unknown'
    return 'class' if matches[0] == 'type' else matches[0]
def is_known_type(item):
    """Return True if object has a known type"""
    # Unfortunately, the masked array case is specific
    if isinstance(item, np.ma.MaskedArray):
        return True
    return get_type_string(item) != 'Unknown'
def get_human_readable_type(item):
    """Return human-readable type string of an item"""
    # The try/except is necessary to fix spyder-ide/spyder#19516.
    try:
        if isinstance(item, (np.ndarray, np.ma.MaskedArray)):
            return u'Array of ' + item.dtype.name
        if isinstance(item, PIL.Image.Image):
            return "Image"
        # Strip any leading module path, e.g. 'datetime.date' -> 'date'.
        text = get_type_string(item)
        return text[text.find('.') + 1:]
    except Exception:
        return 'Unknown'
#==============================================================================
# Globals filter: filter namespace dictionaries (to be edited in
# CollectionsEditor)
#==============================================================================
def is_supported(value, check_all=False, filters=None, iterate=False):
    """
    Return True if value is supported, False otherwise.

    `filters` is the tuple of accepted types; with `iterate` the contents of
    lists/tuples/sets/dicts are checked recursively (all of them only when
    `check_all` is set, otherwise just the first element/key).
    """
    assert filters is not None
    if value is None:
        return True
    if is_callable_or_module(value):
        return True
    elif not is_editable_type(value):
        return False
    elif not isinstance(value, filters):
        return False
    elif iterate:
        if isinstance(value, (list, tuple, set)):
            valid_count = 0
            for val in value:
                if is_supported(val, filters=filters, iterate=check_all):
                    valid_count += 1
                # Without check_all, only the first element is inspected.
                if not check_all:
                    break
            return valid_count > 0
        elif isinstance(value, dict):
            for key, val in list(value.items()):
                if not is_supported(key, filters=filters, iterate=check_all) \
                        or not is_supported(val, filters=filters,
                                            iterate=check_all):
                    return False
                if not check_all:
                    break
    return True
def is_callable_or_module(value):
    """Return True if value is a callable or module, False otherwise."""
    try:
        return callable(value) or inspect.ismodule(value)
    except Exception:
        # Some exotic proxy objects can raise even on these basic checks.
        return False
def globalsfilter(input_dict, check_all=False, filters=None,
                  exclude_private=None, exclude_capitalized=None,
                  exclude_uppercase=None, exclude_unsupported=None,
                  excluded_names=None, exclude_callables_and_modules=None):
    """Keep objects in namespace view according to different criteria."""
    def _is_excluded(key, value):
        # Evaluate the cheap, name-based criteria first; the order matches
        # the original short-circuit chain.
        is_str = is_type_text_string(key)
        if exclude_private and is_str and key.startswith('_'):
            return True
        if exclude_capitalized and is_str and key[0].isupper():
            return True
        if (exclude_uppercase and is_str and key.isupper() and
                len(key) > 1 and not key[1:].isdigit()):
            return True
        if key in excluded_names:
            return True
        if exclude_callables_and_modules and is_callable_or_module(value):
            return True
        return (exclude_unsupported and
                not is_supported(value, check_all=check_all, filters=filters))
    return {key: value for key, value in list(input_dict.items())
            if not _is_excluded(key, value)}
#==============================================================================
# Create view to be displayed by NamespaceBrowser
#==============================================================================
# Keys of the variable-explorer settings dictionary; most of them are read
# by get_remote_data/make_remote_view below.
REMOTE_SETTINGS = ('check_all', 'exclude_private', 'exclude_uppercase',
                   'exclude_capitalized', 'exclude_unsupported',
                   'excluded_names', 'minmax', 'show_callable_attributes',
                   'show_special_attributes', 'exclude_callables_and_modules')
def get_supported_types():
    """
    Return a dictionary containing types lists supported by the
    namespace browser.

    Note:
        If you update this list, don't forget to update variablexplorer.rst
        in spyder-docs
    """
    from datetime import date, timedelta
    editable_types = [int, float, complex, list, set, dict, tuple, date,
                      timedelta] + list(TEXT_TYPES) + list(INT_TYPES)
    # Optional third-party types. Catch Exception (not a bare except) so a
    # broken optional dependency is skipped without hiding SystemExit or
    # KeyboardInterrupt.
    try:
        from numpy import ndarray, matrix, generic
        editable_types += [ndarray, matrix, generic]
    except Exception:
        pass
    try:
        from pandas import DataFrame, Series, Index
        editable_types += [DataFrame, Series, Index]
    except Exception:
        pass
    picklable_types = editable_types[:]
    try:
        from PIL import Image
        editable_types.append(Image.Image)
    except Exception:
        pass
    return dict(picklable=picklable_types, editable=editable_types)
def get_remote_data(data, settings, mode, more_excluded_names=None):
    """
    Return globals according to filter described in *settings*:
        * data: data to be filtered (dictionary)
        * settings: variable explorer settings (dictionary)
        * mode (string): 'editable' or 'picklable'
        * more_excluded_names: additional excluded names (list)
    """
    supported_types = get_supported_types()
    assert mode in list(supported_types.keys())
    excluded_names = list(settings['excluded_names'])
    # Merge caller-provided exclusions with those coming from the settings.
    if more_excluded_names is not None:
        excluded_names += more_excluded_names
    return globalsfilter(
        data,
        check_all=settings['check_all'],
        filters=tuple(supported_types[mode]),
        exclude_private=settings['exclude_private'],
        exclude_uppercase=settings['exclude_uppercase'],
        exclude_capitalized=settings['exclude_capitalized'],
        exclude_unsupported=settings['exclude_unsupported'],
        exclude_callables_and_modules=settings['exclude_callables_and_modules'],
        excluded_names=excluded_names)
def make_remote_view(data, settings, more_excluded_names=None):
    """
    Make a remote view of dictionary *data*
    -> globals explorer

    Returns a dict mapping each retained name to a summary dict with its
    human-readable type, size, display string, and Python/Numpy type names.
    """
    data = get_remote_data(data, settings, mode='editable',
                           more_excluded_names=more_excluded_names)
    remote = {}
    for key, value in list(data.items()):
        view = value_to_display(value, minmax=settings['minmax'])
        remote[key] = {
            'type': get_human_readable_type(value),
            'size': get_size(value),
            'view': view,
            'python_type': get_type_string(value),
            'numpy_type': get_numpy_type_string(value)
        }
    return remote

View File

@@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2018- Spyder Kernels Contributors
# Taken from the tests utils in the Metakernel package
# See utils.py at https://github.com/Calysto/metakernel/metakernel/tests
# Licensed under the terms of the BSD License
# (see spyder_kernels/__init__.py for details)
# -----------------------------------------------------------------------------
try:
from jupyter_client import session as ss
except ImportError:
from IPython.kernel.zmq import session as ss
import zmq
import logging
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from spyder_kernels.console.kernel import SpyderKernel
def get_kernel(kernel_class=SpyderKernel):
    """Get an instance of a kernel with the kernel class given."""
    log = logging.getLogger('test')
    log.setLevel(logging.DEBUG)
    # Drop handlers left over from previous calls so get_log_text() only
    # ever sees this kernel's output in handlers[0].
    for hdlr in log.handlers:
        log.removeHandler(hdlr)
    hdlr = logging.StreamHandler(StringIO())
    hdlr.setLevel(logging.DEBUG)
    log.addHandler(hdlr)
    context = zmq.Context.instance()
    iopub_socket = context.socket(zmq.PUB)
    kernel = kernel_class(session=ss.Session(), iopub_socket=iopub_socket,
                          log=log)
    return kernel
def get_log_text(kernel):
    """Get the log of the given kernel."""
    # get_kernel() installs a single StreamHandler backed by a StringIO.
    handler = kernel.log.handlers[0]
    return handler.stream.getvalue()

View File

@@ -0,0 +1,9 @@
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Kernels Contributors
#
# Licensed under the terms of the MIT License
# (see spyder_kernels/__init__.py for details)
# -----------------------------------------------------------------------------
"""Tests."""

View File

@@ -0,0 +1,160 @@
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Kernels Contributors
#
# Licensed under the terms of the MIT License
# (see spyder_kernels/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tests for dochelpers.py
"""
# Standard library imports
import os
import sys
# Test library imports
import pytest
# Local imports
from spyder_kernels.utils.dochelpers import (
getargtxt, getargspecfromtext, getdoc, getobj, getsignaturefromtext,
isdefined)
from spyder_kernels.py3compat import PY2
class Test(object):
    # Minimal fixture class: `method` gives the dochelpers one positional
    # and one keyword argument to introspect.
    def method(self, x, y=2):
        pass
@pytest.mark.skipif(
    PY2 or os.name == 'nt', reason="Only works on Linux and Mac")
@pytest.mark.skipif(
    sys.platform == 'darwin' and sys.version_info[:2] == (3, 8),
    reason="Fails on Mac with Python 3.8")
def test_dochelpers():
    """Test getargtxt, getdoc, isdefined and getobj on known objects."""
    assert getargtxt(Test.method) == ['x, ', 'y=2']
    assert not getargtxt(Test.__init__)
    # The exact docstring below is CPython-version dependent.
    assert getdoc(sorted) == {
        'note': 'Function of builtins module',
        'argspec': '(...)',
        'docstring': 'Return a new list containing all items from the '
                     'iterable in ascending order.\n\nA custom key function '
                     'can be supplied to customize the sort order, and the\n'
                     'reverse flag can be set to request the result in '
                     'descending order.',
        'name': 'sorted'
    }
    assert not getargtxt(sorted)
    assert isdefined('numpy.take', force_import=True)
    assert isdefined('__import__')
    assert not isdefined('zzz', force_import=True)
    assert getobj('globals') == 'globals'
    assert not getobj('globals().keys')
    assert getobj('+scipy.signal.') == 'scipy.signal'
    assert getobj('4.') == '4'
@pytest.mark.skipif(PY2, reason="Fails in Python 2")
def test_no_signature():
    """
    Test that we can get documentation for objects for which Python can't get a
    signature directly because it gives an error.

    This is a regression test for issue spyder-ide/spyder#21148
    """
    import numpy as np
    doc = getdoc(np.where)
    signature = doc['argspec']
    # The signature must be recovered (e.g. from the docstring text), not
    # left at the '(...)' placeholder.
    assert signature and signature != "(...)" and signature.startswith("(")
    assert doc['docstring']
@pytest.mark.parametrize(
    'text, name, expected',
    [
        ('foo(x, y)', 'foo', '(x, y)'),
        ('foo(x, y)', '', '(x, y)'),
    ]
)
def test_getsignaturefromtext_py2(text, name, expected):
    # NOTE(review): despite the _py2 suffix there is no PY2 skip marker here;
    # these simple cases are expected to pass on both Python versions.
    assert getsignaturefromtext(text, name) == expected
@pytest.mark.skipif(PY2, reason="Don't work in Python 2")
@pytest.mark.parametrize(
    'text, name, expected',
    [
        # Simple text with and without name
        ('foo(x, y)', 'foo', '(x, y)'),
        ('foo(x, y)', '', '(x, y)'),
        # Single arg
        ('foo(x)', '', '(x)'),
        ('foo(x = {})', '', '(x = {})'),
        # Not a valid identifier
        ('1a(x, y)', '', ''),
        # Valid identifier
        ('a1(x, y=2)', '', '(x, y=2)'),
        # Unicode identifier with and without name
        ('ΣΔ(x, y)', 'ΣΔ', '(x, y)'),
        ('ΣΔ(x, y)', '', '(x, y)'),
        # Multiple signatures in a single line
        ('ΣΔ(x, y) foo(a, b)', '', '(x, y)'),
        ('1a(x, y) foo(a, b)', '', '(a, b)'),
        # Multiple signatures in multiple lines
        ('foo(a, b = 1)\n\nΣΔ(x, y=2)', '', '(a, b = 1)'),
        ('1a(a, b = 1)\n\nΣΔ(x, y=2)', '', '(x, y=2)'),
        # Signature after math operations
        ('2(3 + 5) 3*(99) ΣΔ(x, y)', '', '(x, y)'),
        # No identifier
        ('(x, y)', '', ''),
        ('foo (a=1, b = 2)', '', ''),
        # Empty signature
        ('foo()', '', ''),
        ('foo()', 'foo', ''),
    ]
)
def test_getsignaturefromtext(text, name, expected):
    """Check signature extraction from docstring-like text."""
    assert getsignaturefromtext(text, name) == expected
def test_multisignature():
    """
    Test that we can get at least one signature from an object with multiple
    ones declared in its docstring.
    """
    def foo():
        """
        foo(x, y) foo(a, b)
        foo(c, d)
        """
    # Only the first declared signature is expected to be returned.
    signature = getargspecfromtext(foo.__doc__)
    assert signature == "(x, y)"
def test_multiline_signature():
    """
    Test that we can get signatures split into multiple lines in a
    docstring.
    """
    def foo():
        """
        foo(x,
            y)

        This is a docstring.
        """
    signature = getargspecfromtext(foo.__doc__)
    assert signature.startswith("(x, ")
if __name__ == "__main__":
    pytest.main()

View File

@@ -0,0 +1,344 @@
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Kernels Contributors
#
# Licensed under the terms of the MIT License
# (see spyder_kernels/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tests for iofuncs.py.
"""
# Standard library imports
import io
import os
import copy
# Third party imports
import pytest
import numpy as np
# Local imports
import spyder_kernels.utils.iofuncs as iofuncs
from spyder_kernels.py3compat import is_text_string, PY2
# Full path to this file's parent directory for loading data
LOCATION = os.path.realpath(os.path.join(os.getcwd(),
os.path.dirname(__file__)))
# =============================================================================
# ---- Helper functions and classes
# =============================================================================
def are_namespaces_equal(actual, expected):
    """Compare two namespace dicts variable by variable (Numpy-aware)."""
    # Two missing namespaces count as equal.
    if actual is None and expected is None:
        return True
    are_equal = True
    for var in sorted(expected.keys()):
        try:
            are_equal = are_equal and bool(np.mean(
                expected[var] == actual[var]))
        except ValueError:
            # Ragged comparison: fall back to element-wise equality.
            are_equal = are_equal and all(
                np.all(obj1 == obj2)
                for obj1, obj2 in zip(expected[var], actual[var]))
        print(str(var) + ": " + str(are_equal))
    return are_equal
class CustomObj(object):
    """A custom class of objects for testing."""
    def __init__(self, data):
        # Falsy data is normalized to None.
        self.data = data if data else None
    def __eq__(self, other):
        return self.__dict__ == other.__dict__
class UnDeepCopyableObj(CustomObj):
    """A class of objects that cannot be deepcopied."""
    # Raising from __getstate__ breaks both copy.deepcopy and pickling.
    def __getstate__(self):
        raise RuntimeError()
class UnPickleableObj(UnDeepCopyableObj):
    """A class of objects that can be deepcopied, but not pickled."""
    # Providing __deepcopy__ bypasses the inherited failing __getstate__
    # for copies, while pickling still fails.
    def __deepcopy__(self, memo):
        new_one = self.__class__.__new__(self.__class__)
        new_one.__dict__.update(self.__dict__)
        return new_one
# =============================================================================
# ---- Fixtures
# =============================================================================
@pytest.fixture
def spydata_values():
    """
    Define spydata file ground truth values.

    The file export_data.spydata contains five variables to be loaded.
    This fixture declares those variables in a static way.
    """
    A = 1
    B = 'ham'
    C = np.eye(3)
    D = {'a': True, 'b': np.eye(4, dtype=np.complex128)}
    E = [np.eye(2, dtype=np.int64), 42.0, np.eye(3, dtype=np.bool_), np.eye(4, dtype=object)]
    return {'A': A, 'B': B, 'C': C, 'D': D, 'E': E}
@pytest.fixture
def real_values():
    """
    Load a Numpy pickled file.

    The file numpy_data.npz contains six variables, each one represents the
    expected test values after a manual conversion of the same variables
    defined and evaluated in MATLAB. The manual type conversion was done
    over several variable types, such as: Matrices/Vectors, Scalar and
    Complex numbers, Structs, Strings and Cell Arrays. The set of variables
    was defined to allow and test the deep conversion of a compound type,
    i.e., a struct that contains other types that need to be converted,
    like other structs, matrices and Cell Arrays.
    """
    path = os.path.join(LOCATION, 'numpy_data.npz')
    # allow_pickle is required because the archive stores Python objects.
    file_s = np.load(path, allow_pickle=True)
    A = file_s['A'].item()
    B = file_s['B']
    C = file_s['C']
    D = file_s['D'].item()
    E = file_s['E']
    return {'A': A, 'B': B, 'C': C, 'D': D, 'E': E}
@pytest.fixture
def namespace_objects_full(spydata_values):
    """
    Define a dictionary of objects of a variety of different types to be saved.

    This fixture represents the state of the namespace before saving and
    filtering out un-deep-copyable, un-pickleable, and uninteresting objects.
    """
    namespace_dict = copy.deepcopy(spydata_values)
    # The error message the save routine is expected to report.
    namespace_dict['expected_error_string'] = (
        'Some objects could not be saved: '
        'undeepcopyable_instance, unpickleable_instance')
    namespace_dict['module_obj'] = io
    namespace_dict['class_obj'] = Exception
    namespace_dict['function_obj'] = os.path.join
    namespace_dict['unpickleable_instance'] = UnPickleableObj("spam")
    namespace_dict['undeepcopyable_instance'] = UnDeepCopyableObj("ham")
    namespace_dict['custom_instance'] = CustomObj("eggs")
    return namespace_dict
@pytest.fixture
def namespace_objects_filtered(spydata_values):
    """
    Define a dictionary of the objects from the namespace that can be saved.

    This fixture represents the state of the namespace after saving and
    filtering out un-deep-copyable, un-pickleable, and uninteresting objects.
    """
    namespace_dict = copy.deepcopy(spydata_values)
    namespace_dict['custom_instance'] = CustomObj("eggs")
    return namespace_dict
@pytest.fixture
def namespace_objects_nocopyable():
    """
    Define a dictionary of objects that cannot be deepcopied.
    """
    namespace_dict = {}
    # The save routine should reject every entry and report this error.
    namespace_dict['expected_error_string'] = 'No supported objects to save'
    namespace_dict['class_obj'] = Exception
    namespace_dict['undeepcopyable_instance'] = UnDeepCopyableObj("ham")
    return namespace_dict
@pytest.fixture
def namespace_objects_nopickleable():
    """
    Define a dictionary of objects that cannot be pickled.
    """
    namespace_dict = {}
    # The save routine should reject every entry and report this error.
    namespace_dict['expected_error_string'] = 'No supported objects to save'
    namespace_dict['function_obj'] = os.path.join
    namespace_dict['unpickleable_instance'] = UnPickleableObj("spam")
    return namespace_dict
@pytest.fixture
def input_namespace(request):
    # Indirect parametrization: the parameter is either None or the name of
    # another fixture whose value should be used.
    if request.param is None:
        return None
    else:
        return request.getfixturevalue(request.param)
@pytest.fixture
def expected_namespace(request):
    # Indirect parametrization: the parameter is either None or the name of
    # another fixture whose value should be used.
    if request.param is None:
        return None
    else:
        return request.getfixturevalue(request.param)
# =============================================================================
# ---- Tests
# =============================================================================
def test_npz_import():
    """
    Test the load of .npz files as dictionaries.
    """
    filename = os.path.join(LOCATION, 'import_data.npz')
    data = iofuncs.load_array(filename)
    # load_array returns a (variables, error) pair.
    assert isinstance(data, tuple)
    variables, error = data
    assert variables['val1'] == np.array(1) and not error
@pytest.mark.skipif(iofuncs.load_matlab is None, reason="SciPy required")
def test_matlab_import(real_values):
    """
    Test the automatic conversion and import of variables from MATLAB.

    This test loads a file stored in MATLAB, the variables defined are
    equivalent to the manually converted values done over Numpy. This test
    allows to evaluate the function which processes the conversion
    automatically. i.e., The automatic conversion results should be equal to
    the manual conversion of the variables.
    """
    path = os.path.join(LOCATION, 'data.mat')
    inf, _ = iofuncs.load_matlab(path)
    valid = True
    for var in sorted(real_values.keys()):
        valid = valid and bool(np.mean(real_values[var] == inf[var]))
    assert valid
@pytest.mark.skipif(PY2, reason="Fails on Python 2")
@pytest.mark.parametrize('spydata_file_name', ['export_data.spydata',
                                               'export_data_renamed.spydata'])
def test_spydata_import(spydata_file_name, spydata_values):
    """
    Test spydata handling and variable importing.

    This test loads all the variables contained inside a spydata tar
    container and compares them against their static values.
    It tests both a file with the original name, and one that has been renamed
    in order to catch Issue #9 .
    """
    path = os.path.join(LOCATION, spydata_file_name)
    data, error = iofuncs.load_dictionary(path)
    assert error is None
    assert are_namespaces_equal(data, spydata_values)
def test_spydata_import_witherror():
    """
    Test that import fails gracefully with a function not in the namespace.

    Checks that the error is caught, the message is passed back,
    and the current working directory is restored afterwards.
    """
    original_cwd = os.getcwd()
    path = os.path.join(LOCATION, 'export_data_withfunction.spydata')
    data, error = iofuncs.load_dictionary(path)
    assert error and is_text_string(error)
    assert data is None
    # load_dictionary changes cwd while unpacking; it must restore it.
    assert os.getcwd() == original_cwd
def test_spydata_import_missing_file():
    """
    Test that import fails properly when file is missing, and resets the cwd.
    """
    original_cwd = os.getcwd()
    path = os.path.join(LOCATION, 'non_existant_path_2019-01-23.spydata')
    try:
        iofuncs.load_dictionary(path)
    except IOError:
        pass
    else:
        # Fail if exception did not occur when it should
        assert False
    # The cwd must be restored even on the error path.
    assert os.getcwd() == original_cwd
@pytest.mark.skipif(iofuncs.load_matlab is None, reason="SciPy required")
def test_matlabstruct():
    """Test support for matlab style struct."""
    a = iofuncs.MatlabStruct()
    a.b = 'spam'
    assert a["b"] == 'spam'
    a.c["d"] = 'eggs'
    assert a.c.d == 'eggs'
    assert a == {'c': {'d': 'eggs'}, 'b': 'spam'}
    a['d'] = [1, 2, 3]
    # Round-trip the struct through an in-memory .mat file.
    buf = io.BytesIO()
    iofuncs.save_matlab(a, buf)
    buf.seek(0)
    data, error = iofuncs.load_matlab(buf)
    assert error is None
    assert data['b'] == 'spam'
    assert data['c'].d == 'eggs'
    assert data['d'].tolist() == [[1, 2, 3]]
# The fixture names are resolved indirectly: each string selects a prepared
# namespace fixture; ``None`` for expected_namespace means export must fail.
@pytest.mark.parametrize('input_namespace,expected_namespace,filename', [
    ('spydata_values', 'spydata_values', 'export_data_copy'),
    ('namespace_objects_full', 'namespace_objects_filtered', 'export_data_2'),
    ('namespace_objects_nocopyable', None, 'export_data_none_1'),
    ('namespace_objects_nopickleable', None, 'export_data_none_2'),
    ], indirect=['input_namespace', 'expected_namespace'])
def test_spydata_export(input_namespace, expected_namespace,
                        filename):
    """
    Test spydata export and re-import.

    This test saves the variables in ``spydata`` format and then
    reloads and checks them to make sure they save/restore properly
    and no errors occur during the process.
    """
    path = os.path.join(LOCATION, filename + '.spydata')
    # The fixture may carry the error string save_dictionary should return;
    # pop it so it is not treated as a variable to be exported.
    expected_error = None
    if 'expected_error_string' in input_namespace:
        expected_error = input_namespace['expected_error_string']
        del input_namespace['expected_error_string']
    cwd_original = os.getcwd()
    try:
        export_error = iofuncs.save_dictionary(input_namespace, path)
        assert export_error == expected_error
        if expected_namespace is None:
            # Export was expected to fail: no file must be left behind.
            assert not os.path.isfile(path)
        else:
            data_actual, import_error = iofuncs.load_dictionary(path)
            assert import_error is None
            print(data_actual.keys())
            print(expected_namespace.keys())
            assert are_namespaces_equal(data_actual, expected_namespace)
        # Save/load must not permanently change the working directory.
        assert cwd_original == os.getcwd()
    finally:
        # Best-effort cleanup of the generated file.
        if os.path.isfile(path):
            try:
                os.remove(path)
            except (IOError, OSError, PermissionError):
                pass
# Allow running this test file directly.
if __name__ == "__main__":
    pytest.main()

View File

@@ -0,0 +1,40 @@
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Kernels Contributors
#
# Licensed under the terms of the MIT License
# (see spyder_kernels/__init__.py for details)
# -----------------------------------------------------------------------------
import pytest
from spyder_kernels.utils.lazymodules import LazyModule, FakeObject
def test_non_existent_module():
    """Test that we return FakeObject's for non-existing modules."""
    mod = LazyModule('no_module', second_level_attrs=['a'])

    # First level attributes must return FakeObject
    assert mod.foo is FakeObject

    # Second level attributes in second_level_attrs should return
    # FakeObject too.
    assert mod.foo.a is FakeObject

    # Other second level attributes should raise an error.
    with pytest.raises(AttributeError):
        mod.foo.b
def test_existing_modules():
    """Test that lazy modules work for existing modules."""
    lazy_np = LazyModule('numpy')
    import numpy

    # The lazy wrapper must expose the same attributes as the real module.
    assert lazy_np.ndarray == numpy.ndarray

    # Spyder-specific bookkeeping attributes must be present and truthy.
    for extra_attr in ('__spy_mod__', '__spy_modname__'):
        assert getattr(lazy_np, extra_attr)

View File

@@ -0,0 +1,458 @@
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Kernels Contributors
#
# Licensed under the terms of the MIT License
# (see spyder_kernels/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tests for utils.py
"""
# Standard library imports
from collections import defaultdict
import datetime
import sys
# Third party imports
import numpy as np
import pandas as pd
import pytest
import xarray as xr
import PIL.Image
# Local imports
from spyder_kernels.py3compat import PY2
from spyder_kernels.utils.nsview import (
sort_against, is_supported, value_to_display, get_size,
get_supported_types, get_type_string, get_numpy_type_string,
is_editable_type)
def generate_complex_object(n_entries=50000):
    """
    Build a large nested object for display tests. Taken from issue #4221.

    Parameters
    ----------
    n_entries : int, optional
        Number of top-level keys to generate. Defaults to 50000 to keep
        the original stress-test size; smaller values make the builder
        reusable for quick checks.

    Returns
    -------
    collections.defaultdict
        Maps each int in ``range(n_entries)`` to a dict of 10 random
        10-element NumPy arrays.
    """
    bug = defaultdict(list)
    for i in range(n_entries):
        # One dict of 10 small random arrays per top-level key.
        bug[i] = {j: np.random.rand(10) for j in range(10)}
    return bug
# Module-level fixtures shared by several display tests below.
# Large nested defaultdict used to exercise summarized display.
COMPLEX_OBJECT = generate_complex_object()
# Simple one-column DataFrame.
DF = pd.DataFrame([1,2,3])
# xarray Dataset built from two DataFrames.
DATASET = xr.Dataset({0: pd.DataFrame([1,2]), 1:pd.DataFrame([3,4])})
# --- Tests
# -----------------------------------------------------------------------------
def test_get_size():
    """Test that the size of all values is returned correctly"""
    class RecursionClassNoLen():
        # Deliberately pathological object: it has no len(), and reading
        # its `size` attribute triggers recursive __getattr__ lookups that
        # end in AttributeError. get_size must not crash on it.
        def __getattr__(self, name):
            if name=='size': return self.name
            else:
                return super(object, self).__getattribute__(name)

    # Builtin containers and strings: size is their length.
    length = [list([1,2,3]), tuple([1,2,3]), set([1,2,3]), '123',
              {1:1, 2:2, 3:3}]
    for obj in length:
        assert get_size(obj) == 3

    # Pandas objects report their shape.
    df = pd.DataFrame([[1,2,3], [1,2,3]])
    assert get_size(df) == (2, 3)

    df = pd.Series([1,2,3])
    assert get_size(df) == (3,)

    df = pd.Index([1,2,3])
    assert get_size(df) == (3,)

    # NumPy arrays report their shape too.
    arr = np.array([[1,2,3], [1,2,3]], dtype=np.complex128)
    assert get_size(arr) == (2, 3)

    # PIL images report (width, height).
    img = PIL.Image.new('RGB', (256,256))
    assert get_size(img) == (256,256)

    # Objects without a usable len/size fall back to 1.
    obj = RecursionClassNoLen()
    assert get_size(obj) == 1
def test_sort_against():
    """Sorting one list by the ordering of another list's keys."""
    values = [5, 6, 7]
    keys = [2, 3, 1]
    assert sort_against(values, keys) == [7, 5, 6]
def test_sort_against_is_stable():
    """With all keys equal, the original order must be preserved."""
    original = [3, 0, 1]
    equal_keys = [1, 1, 1]
    assert sort_against(original, equal_keys) == original
def test_none_values_are_supported():
    """Tests that None values are displayed by default"""
    filters = tuple(get_supported_types()['editable'])
    samples = (
        None,                          # bare None
        [2, None, 3, None],            # list containing None
        {'a': None, 'b': 4},           # dict with a None value
        (None, [3, None, 4], 'eggs'),  # tuple with nested None
    )
    for value in samples:
        assert is_supported(value, filters=filters)
def test_str_subclass_display():
    """Test for value_to_display of subclasses of str/basestring."""
    class Test(str):
        # A custom repr must not leak into the display of str subclasses.
        def __repr__(self):
            return 'test'

    # The display names the subclass as a generic object instead of
    # using its repr.
    assert 'Test object' in value_to_display(Test())
def test_default_display():
    """Tests for default_display."""
    # A defaultdict collapses to the "<type> object of <module> module" form.
    summary = value_to_display(COMPLEX_OBJECT)
    assert summary == 'defaultdict object of collections module'

    # Same summary form for a numpy array wrapping the complex object.
    summary = value_to_display(np.array(COMPLEX_OBJECT))
    assert summary == 'ndarray object of numpy module'

    # And for an xarray Dataset.
    summary = value_to_display(DATASET)
    assert summary == 'Dataset object of xarray.core.dataset module'
@pytest.mark.skipif(
    sys.platform == 'darwin' and sys.version_info[:2] == (3, 8),
    reason="Fails on Mac with Python 3.8")
def test_list_display():
    """Tests for display of lists."""
    long_list = list(range(100))

    # Simple list
    assert value_to_display([1, 2, 3]) == '[1, 2, 3]'

    # Long list: elements past the tenth are elided.
    assert (value_to_display(long_list) ==
            '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ...]')

    # Short list of lists
    assert (value_to_display([long_list] * 3) ==
            '[[0, 1, 2, 3, 4, ...], [0, 1, 2, 3, 4, ...], [0, 1, 2, 3, 4, ...]]')

    # Long list of lists: the whole display is cut at 70 chars plus ' ...'.
    result = '[' + ''.join('[0, 1, 2, 3, 4, ...], '*10)[:-2] + ']'
    assert value_to_display([long_list] * 10) == result[:70] + ' ...'

    # Multiple level lists: deeper levels collapse to '[...]'.
    assert (value_to_display([[1, 2, 3, [4], 5]] + long_list) ==
            '[[1, 2, 3, [...], 5], 0, 1, 2, 3, 4, 5, 6, 7, 8, ...]')
    assert value_to_display([1, 2, [DF]]) == '[1, 2, [Dataframe]]'
    assert value_to_display([1, 2, [[DF], DATASET]]) == '[1, 2, [[...], Dataset]]'

    # List of complex object
    assert value_to_display([COMPLEX_OBJECT]) == '[defaultdict]'

    # List of composed objects
    li = [COMPLEX_OBJECT, DATASET, 1, {1:2, 3:4}, DF]
    result = '[defaultdict, Dataset, 1, {1:2, 3:4}, Dataframe]'
    assert value_to_display(li) == result

    # List starting with a non-supported object (#5313)
    supported_types = tuple(get_supported_types()['editable'])
    li = [len, 1]
    assert value_to_display(li) == '[builtin_function_or_method, 1]'
    assert is_supported(li, filters=supported_types)
@pytest.mark.skipif(
    sys.platform == 'darwin' and sys.version_info[:2] == (3, 8),
    reason="Fails on Mac with Python 3.8")
def test_dict_display():
    """Tests for display of dicts."""
    long_list = list(range(100))
    long_dict = dict(zip(list(range(100)), list(range(100))))

    # Simple dict
    assert value_to_display({0:0, 'a':'b'}) == "{0:0, 'a':'b'}"

    # Long dict: entries past the tenth are elided.
    assert (value_to_display(long_dict) ==
            '{0:0, 1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8, 9:9, ...}')

    # Short list of lists
    assert (value_to_display({1:long_dict, 2:long_dict}) ==
            '{1:{0:0, 1:1, 2:2, 3:3, 4:4, ...}, 2:{0:0, 1:1, 2:2, 3:3, 4:4, ...}}')

    # Long dict of dicts: the whole display is cut at 70 chars plus ' ...'.
    result = ('{(0, 0, 0, 0, 0, ...):[0, 1, 2, 3, 4, ...], '
              '(1, 1, 1, 1, 1, ...):[0, 1, 2, 3, 4, ...]}')
    assert value_to_display({(0,)*100:long_list, (1,)*100:long_list}) == result[:70] + ' ...'

    # Multiple level dicts: deeper levels collapse to '{...}'.
    assert (value_to_display({0: {1:1, 2:2, 3:3, 4:{0:0}, 5:5}, 1:1}) ==
            '{0:{1:1, 2:2, 3:3, 4:{...}, 5:5}, 1:1}')
    assert value_to_display({0:0, 1:1, 2:2, 3:DF}) == '{0:0, 1:1, 2:2, 3:Dataframe}'
    assert value_to_display({0:0, 1:1, 2:[[DF], DATASET]}) == '{0:0, 1:1, 2:[[...], Dataset]}'

    # Dict of complex object
    assert value_to_display({0:COMPLEX_OBJECT}) == '{0:defaultdict}'

    # Dict of composed objects
    li = {0:COMPLEX_OBJECT, 1:DATASET, 2:2, 3:{0:0, 1:1}, 4:DF}
    result = '{0:defaultdict, 1:Dataset, 2:2, 3:{0:0, 1:1}, 4:Dataframe}'
    assert value_to_display(li) == result

    # Dict starting with a non-supported object (#5313).
    # Both orderings are accepted since dict iteration order of the
    # {max: len, 1: 1} literal differed across interpreter versions.
    supported_types = tuple(get_supported_types()['editable'])
    di = {max: len, 1: 1}
    assert value_to_display(di) in (
        '{builtin_function_or_method:builtin_function_or_method, 1:1}',
        '{1:1, builtin_function_or_method:builtin_function_or_method}')
    assert is_supported(di, filters=supported_types)
def test_set_display():
    """Tests for display of sets."""
    long_set = {i for i in range(100)}

    # Simple set
    assert value_to_display({1, 2, 3}) == '{1, 2, 3}'

    # Long set: elements past the tenth are elided.
    disp = '{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ...}'
    assert value_to_display(long_set) == disp

    # Short list of sets
    disp = '[{0, 1, 2, 3, 4, ...}, {0, 1, 2, 3, 4, ...}, {0, 1, 2, 3, 4, ...}]'
    assert value_to_display([long_set] * 3) == disp

    # Long list of sets: the whole display is cut at 70 chars plus ' ...'.
    disp = '[' + ''.join('{0, 1, 2, 3, 4, ...}, '*10)[:-2] + ']'
    assert value_to_display([long_set] * 10) == disp[:70] + ' ...'
def test_datetime_display():
    """Simple tests that dates, datetimes and timedeltas display correctly."""
    test_date = datetime.date(2017, 12, 18)
    test_date_2 = datetime.date(2017, 2, 2)

    test_datetime = datetime.datetime(2017, 12, 18, 13, 43, 2)
    test_datetime_2 = datetime.datetime(2017, 8, 18, 0, 41, 27)

    # timedelta(-1, 2000) is -1 day + 2000 s, displayed by Python as
    # '-1 day, 0:33:20'.
    test_timedelta = datetime.timedelta(-1, 2000)
    test_timedelta_2 = datetime.timedelta(0, 3600)

    # Simple dates/datetimes/timedeltas
    assert value_to_display(test_date) == '2017-12-18'
    assert value_to_display(test_datetime) == '2017-12-18 13:43:02'
    assert value_to_display(test_timedelta) == '-1 day, 0:33:20'

    # Lists of dates/datetimes/timedeltas
    assert (value_to_display([test_date, test_date_2]) ==
            '[2017-12-18, 2017-02-02]')
    assert (value_to_display([test_datetime, test_datetime_2]) ==
            '[2017-12-18 13:43:02, 2017-08-18 00:41:27]')
    assert (value_to_display([test_timedelta, test_timedelta_2]) ==
            '[-1 day, 0:33:20, 1:00:00]')

    # Tuple of dates/datetimes/timedeltas
    assert (value_to_display((test_date, test_datetime, test_timedelta)) ==
            '(2017-12-18, 2017-12-18 13:43:02, -1 day, 0:33:20)')

    # Dict of dates/datetimes/timedeltas
    assert (value_to_display({0: test_date,
                              1: test_datetime,
                              2: test_timedelta_2}) ==
            ("{0:2017-12-18, 1:2017-12-18 13:43:02, 2:1:00:00}"))
def test_str_in_container_display():
    """Test that strings are displayed correctly inside lists or dicts."""
    # Assert that both bytes and unicode return the right display
    assert value_to_display([b'a', u'b']) == "['a', 'b']"

    # Encoded unicode gives bytes and it can't be transformed back to
    # unicode. So this tests the except branch of
    # is_binary_string(value) in value_to_display.
    if PY2:
        assert value_to_display([u'Э'.encode('cp1251')]) == "['\xdd']"
def test_ellipses(tmpdir):
    """
    Test that we're adding a binary ellipses when value_to_display of
    a collection is too long and binary.

    For issue 6942
    """
    # Create a binary file with all 256 possible byte values.
    # (Previously range(255) was used, which missed byte 0xFF, and
    # tmpdir.new(basename=...) placed the file as a *sibling* of the
    # per-test dir; tmpdir.join keeps it inside the test's own tmpdir.)
    file = tmpdir.join('bytes.txt')
    file.write_binary(bytearray(range(256)))

    # Read bytes back
    buffer = file.read(mode='rb')

    # Assert that there's a binary ellipses in the representation
    assert b' ...' in value_to_display(buffer)
def test_get_type_string():
    """Test for get_type_string."""
    # Bools
    assert get_type_string(True) == 'bool'

    # Numeric types (PY2 has long, which disappeared in PY3)
    if not PY2:
        expected = ['int', 'float', 'complex']
        numeric_types = [1, 1.5, 1 + 2j]
        assert [get_type_string(t) for t in numeric_types] == expected

    # Lists
    assert get_type_string([1, 2, 3]) == 'list'

    # Sets
    assert get_type_string({1, 2, 3}) == 'set'

    # Dictionaries
    assert get_type_string({'a': 1, 'b': 2}) == 'dict'

    # Tuples
    assert get_type_string((1, 2, 3)) == 'tuple'

    # Strings
    if not PY2:
        assert get_type_string('foo') == 'str'

    # Numpy objects get special display names, not the raw class name.
    assert get_type_string(np.array([1, 2, 3])) == 'NDArray'

    masked_array = np.ma.MaskedArray([1, 2, 3], mask=[True, False, True])
    assert get_type_string(masked_array) == 'MaskedArray'

    matrix = np.matrix([[1, 2], [3, 4]])
    assert get_type_string(matrix) == 'Matrix'

    # Pandas objects
    df = pd.DataFrame([1, 2, 3])
    assert get_type_string(df) == 'DataFrame'

    series = pd.Series([1, 2, 3])
    assert get_type_string(series) == 'Series'

    # Both accepted: pandas renamed Int64Index to plain Index.
    index = pd.Index([1, 2, 3])
    assert get_type_string(index) in ['Int64Index', 'Index']

    # PIL images
    img = PIL.Image.new('RGB', (256,256))
    assert get_type_string(img) == 'PIL.Image.Image'

    # Datetime objects
    date = datetime.date(2010, 10, 1)
    assert get_type_string(date) == 'datetime.date'

    date = datetime.timedelta(-1, 2000)
    assert get_type_string(date) == 'datetime.timedelta'
def test_is_editable_type():
    """Test for is_editable_type."""
    # Bools
    assert is_editable_type(True)

    # Numeric type
    numeric_types = [1, 1.5, 1 + 2j]
    assert all([is_editable_type(t) for t in numeric_types])

    # Lists
    assert is_editable_type([1, 2, 3])

    # Sets
    assert is_editable_type({1, 2, 3})

    # Dictionaries
    assert is_editable_type({'a': 1, 'b': 2})

    # Tuples
    assert is_editable_type((1, 2, 3))

    # Strings
    assert is_editable_type('foo')

    # Numpy objects
    assert is_editable_type(np.array([1, 2, 3]))

    masked_array = np.ma.MaskedArray([1, 2, 3], mask=[True, False, True])
    assert is_editable_type(masked_array)

    matrix = np.matrix([[1, 2], [3, 4]])
    assert is_editable_type(matrix)

    # Pandas objects
    df = pd.DataFrame([1, 2, 3])
    assert is_editable_type(df)

    series = pd.Series([1, 2, 3])
    assert is_editable_type(series)

    index = pd.Index([1, 2, 3])
    assert is_editable_type(index)

    # PIL images
    img = PIL.Image.new('RGB', (256,256))
    assert is_editable_type(img)

    # Datetime objects
    date = datetime.date(2010, 10, 1)
    assert is_editable_type(date)

    date = datetime.timedelta(-1, 2000)
    assert is_editable_type(date)

    # Arbitrary user classes and their instances are NOT editable.
    class MyClass:
        a = 1
    assert not is_editable_type(MyClass)

    my_instance = MyClass()
    assert not is_editable_type(my_instance)
def test_get_numpy_type():
    """Test for get_numpy_type_string."""
    # Numpy objects: arrays and matrices are both classified as 'Array'.
    assert get_numpy_type_string(np.array([1, 2, 3])) == 'Array'

    matrix = np.matrix([[1, 2], [3, 4]])
    assert get_numpy_type_string(matrix) == 'Array'

    # Numpy scalar types are classified as 'Scalar'.
    assert get_numpy_type_string(np.int32(1)) == 'Scalar'

    # Regular Python objects are not numpy types at all.
    assert get_numpy_type_string(1.5) == 'Unknown'
    assert get_numpy_type_string([1, 2, 3]) == 'Unknown'
    assert get_numpy_type_string({1: 2}) == 'Unknown'

    # PIL images
    img = PIL.Image.new('RGB', (256,256))
    assert get_numpy_type_string(img) == 'Unknown'

    # Pandas objects
    df = pd.DataFrame([1, 2, 3])
    assert get_numpy_type_string(df) == 'Unknown'
# Allow running this test file directly.
if __name__ == "__main__":
    pytest.main()