diff --git a/CHANGELOG.md b/CHANGELOG.md
index f7b98bc1..9bbf977f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,9 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
[#449](https://github.com/PyFilesystem/pyfilesystem2/pull/449).
- `PathError` now supports wrapping an exception using the `exc` argument.
Closes [#453](https://github.com/PyFilesystem/pyfilesystem2/issues/453).
+- Better documentation of the `writeable` parameter of `fs.open_fs`, and a
+  hint about using `fs.wrap.read_only` when a read-only filesystem is
+ required. Closes [#441](https://github.com/PyFilesystem/pyfilesystem2/issues/441).
### Changed
@@ -28,7 +31,13 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
- `FSTestCases` now builds the large data required for `upload` and `download` tests only
once in order to reduce the total testing time.
- `MemoryFS.move` and `MemoryFS.movedir` will now avoid copying data.
- Closes [#452](https://github.com/PyFilesystem/pyfilesystem2/issues/452).
+ Closes [#452](https://github.com/PyFilesystem/pyfilesystem2/issues/452).
+- `FS.removetree("/")` behaviour has been standardized in all filesystems, and
+ is expected to clear the contents of the root folder without deleting it.
+ Closes [#471](https://github.com/PyFilesystem/pyfilesystem2/issues/471).
+- `FS.getbasic` is now deprecated, as it is redundant with `FS.getinfo`,
+ and `FS.getinfo` is now explicitly expected to return the *basic* info
+ namespace unconditionally. Closes [#469](https://github.com/PyFilesystem/pyfilesystem2/issues/469).
### Fixed
@@ -40,8 +49,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
- `WrapCachedDir.isdir` and `WrapCachedDir.isfile` raising a `ResourceNotFound` error on non-existing path ([#470](https://github.com/PyFilesystem/pyfilesystem2/pull/470)).
- `FTPFS` not listing certain entries with sticky/SUID/SGID permissions set by Linux server ([#473](https://github.com/PyFilesystem/pyfilesystem2/pull/473)).
Closes [#451](https://github.com/PyFilesystem/pyfilesystem2/issues/451).
-- `scandir` iterator not being closed explicitly in `OSFS.scandir`, occasionally causing a `ResourceWarning`
+- `scandir` iterator not being closed explicitly in `OSFS.scandir`, occasionally causing a `ResourceWarning`
to be thrown. Closes [#311](https://github.com/PyFilesystem/pyfilesystem2/issues/311).
+- Incomplete type annotations for the `temp_fs` parameter of `WriteTarFS` and `WriteZipFS`.
+ Closes [#410](https://github.com/PyFilesystem/pyfilesystem2/issues/410).
## [2.4.12] - 2021-01-14
diff --git a/fs/_ftp_parse.py b/fs/_ftp_parse.py
index a9088ab4..defc55ee 100644
--- a/fs/_ftp_parse.py
+++ b/fs/_ftp_parse.py
@@ -56,9 +56,7 @@
def get_decoders():
- """
- Returns all available FTP LIST line decoders with their matching regexes.
- """
+ """Return all available FTP LIST line decoders with their matching regexes."""
decoders = [
(RE_LINUX, decode_linux),
(RE_WINDOWSNT, decode_windowsnt),
@@ -149,13 +147,34 @@ def _decode_windowsnt_time(mtime):
def decode_windowsnt(line, match):
- """
- Decodes a Windows NT FTP LIST line like one of these:
+ """Decode a Windows NT FTP LIST line.
+
+ Examples:
+ Decode a directory line::
+
+    >>> line = "11-02-18  02:12PM       <DIR>          images"
+    >>> match = RE_WINDOWSNT.match(line)
+    >>> pprint(decode_windowsnt(line, match))
+    {'basic': {'is_dir': True, 'name': 'images'},
+     'details': {'modified': 1518358320.0, 'type': 1},
+     'ftp': {'ls': '11-02-18  02:12PM       <DIR>          images'}}
+
+ Decode a file line::
+
+    >>> line = "11-02-18  03:33PM                 9276 logo.gif"
+    >>> match = RE_WINDOWSNT.match(line)
+    >>> pprint(decode_windowsnt(line, match))
+    {'basic': {'is_dir': False, 'name': 'logo.gif'},
+     'details': {'modified': 1518363180.0, 'size': 9276, 'type': 2},
+     'ftp': {'ls': '11-02-18  03:33PM                 9276 logo.gif'}}
+
+ Alternatively, the time might also be present in 24-hour format::
- `11-02-18 02:12PM images`
- `11-02-18 03:33PM 9276 logo.gif`
+ >>> line = "11-02-18 15:33 9276 logo.gif"
+ >>> match = RE_WINDOWSNT.match(line)
+ >>> decode_windowsnt(line, match)["details"]["modified"]
+ 1518363180.0
- Alternatively, the time (02:12PM) might also be present in 24-hour format (14:12).
"""
is_dir = match.group("size") == ""
diff --git a/fs/_repr.py b/fs/_repr.py
index 0a207207..d313b0a8 100644
--- a/fs/_repr.py
+++ b/fs/_repr.py
@@ -27,7 +27,7 @@ def make_repr(class_name, *args, **kwargs):
>>> MyClass('Will')
MyClass('foo', name='Will')
>>> MyClass(None)
- MyClass()
+ MyClass('foo')
"""
arguments = [repr(arg) for arg in args]
diff --git a/fs/_url_tools.py b/fs/_url_tools.py
index 64c58bd6..af55ff74 100644
--- a/fs/_url_tools.py
+++ b/fs/_url_tools.py
@@ -11,13 +11,15 @@
def url_quote(path_snippet):
# type: (Text) -> Text
- """
- On Windows, it will separate drive letter and quote windows
- path alone. No magic on Unix-alie path, just pythonic
- `pathname2url`
+ """Quote a URL without quoting the Windows drive letter, if any.
+
+    On Windows, it will separate the drive letter and quote only the
+    Windows path. No magic on Unix-like paths, just a pythonic
+    `~urllib.request.pathname2url`.
Arguments:
- path_snippet: a file path, relative or absolute.
+ path_snippet (str): a file path, relative or absolute.
+
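+    Example:
+        For instance, on a non-Windows platform the whole snippet is
+        simply passed to `~urllib.request.pathname2url`, so spaces are
+        percent-encoded (illustrative paths only)::
+
+            >>> url_quote("foo bar/egg baz")
+            'foo%20bar/egg%20baz'
+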
"""
if _WINDOWS_PLATFORM and _has_drive_letter(path_snippet):
drive_letter, path = path_snippet.split(":", 1)
@@ -34,17 +36,19 @@ def url_quote(path_snippet):
def _has_drive_letter(path_snippet):
# type: (Text) -> bool
- """
- The following path will get True
- D:/Data
- C:\\My Dcouments\\ test
+ """Check whether a path contains a drive letter.
- And will get False
+ Arguments:
+ path_snippet (str): a file path, relative or absolute.
- /tmp/abc:test
+ Example:
+ >>> _has_drive_letter("D:/Data")
+ True
+ >>> _has_drive_letter(r"C:\\System32\\ test")
+ True
+ >>> _has_drive_letter("/tmp/abc:test")
+ False
- Arguments:
- path_snippet: a file path, relative or absolute.
"""
windows_drive_pattern = ".:[/\\\\].*$"
return re.match(windows_drive_pattern, path_snippet) is not None
diff --git a/fs/base.py b/fs/base.py
index 7a81c5cb..4966d0ca 100644
--- a/fs/base.py
+++ b/fs/base.py
@@ -152,12 +152,16 @@ def getinfo(self, path, namespaces=None):
Arguments:
path (str): A path to a resource on the filesystem.
- namespaces (list, optional): Info namespaces to query
- (defaults to *[basic]*).
+ namespaces (list, optional): Info namespaces to query. The
+                `"basic"` namespace is always included in the returned
+ info, whatever the value of `namespaces` may be.
Returns:
~fs.info.Info: resource information object.
+ Raises:
+ fs.errors.ResourceNotFound: If ``path`` does not exist.
+
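+        Example:
+            Even if only another namespace is requested, the *basic*
+            namespace is included in the result (``file.txt`` is just
+            an illustrative resource here)::
+
+                >>> info = my_fs.getinfo("file.txt", namespaces=["details"])
+                >>> "basic" in info.namespaces
+                True
+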
For more information regarding resource information, see :ref:`info`.
"""
@@ -235,10 +239,12 @@ def openbin(
io.IOBase: a *file-like* object.
Raises:
- fs.errors.FileExpected: If the path is not a file.
- fs.errors.FileExists: If the file exists, and *exclusive mode*
- is specified (``x`` in the mode).
- fs.errors.ResourceNotFound: If the path does not exist.
+ fs.errors.FileExpected: If ``path`` exists and is not a file.
+            fs.errors.FileExists: If ``path`` exists and *exclusive mode*
+                is specified (``x`` in the mode).
+ fs.errors.ResourceNotFound: If ``path`` does not exist and
+ ``mode`` does not imply creating the file, or if any
+ ancestor of ``path`` does not exist.
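+
+        Example:
+            For instance, opening a resource that does not exist in
+            read mode raises `~fs.errors.ResourceNotFound` (assuming
+            ``missing.txt`` is absent)::
+
+                >>> my_fs.openbin("missing.txt")
+                Traceback (most recent call last):
+                    ...
+                fs.errors.ResourceNotFound: resource 'missing.txt' not found
+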
"""
@@ -267,7 +273,7 @@ def removedir(self, path):
Raises:
fs.errors.DirectoryNotEmpty: If the directory is not empty (
see `~fs.base.FS.removetree` for a way to remove the
- directory contents.).
+ directory contents).
fs.errors.DirectoryExpected: If the path does not refer to
a directory.
fs.errors.ResourceNotFound: If no resource exists at the
@@ -402,6 +408,7 @@ def copy(self, src_path, dst_path, overwrite=False):
and ``overwrite`` is `False`.
fs.errors.ResourceNotFound: If a parent directory of
``dst_path`` does not exist.
+ fs.errors.FileExpected: If ``src_path`` is not a file.
"""
with self._lock:
@@ -424,6 +431,8 @@ def copydir(self, src_path, dst_path, create=False):
Raises:
fs.errors.ResourceNotFound: If the ``dst_path``
does not exist, and ``create`` is not `True`.
+ fs.errors.DirectoryExpected: If ``src_path`` is not a
+ directory.
"""
with self._lock:
@@ -467,6 +476,9 @@ def desc(self, path):
Returns:
str: a short description of the path.
+ Raises:
+ fs.errors.ResourceNotFound: If ``path`` does not exist.
+
"""
if not self.exists(path):
raise errors.ResourceNotFound(path)
@@ -587,6 +599,7 @@ def readbytes(self, path):
bytes: the file contents.
Raises:
+ fs.errors.FileExpected: if ``path`` exists but is not a file.
fs.errors.ResourceNotFound: if ``path`` does not exist.
"""
@@ -603,6 +616,10 @@ def download(self, path, file, chunk_size=None, **options):
This may be more efficient that opening and copying files
manually if the filesystem supplies an optimized method.
+ Note that the file object ``file`` will *not* be closed by this
+ method. Take care to close it after this method completes
+ (ideally with a context manager).
+
Arguments:
path (str): Path to a resource.
file (file-like): A file-like object open for writing in
@@ -613,13 +630,12 @@ def download(self, path, file, chunk_size=None, **options):
**options: Implementation specific options required to open
the source file.
- Note that the file object ``file`` will *not* be closed by this
- method. Take care to close it after this method completes
- (ideally with a context manager).
-
Example:
>>> with open('starwars.mov', 'wb') as write_file:
- ... my_fs.download('/movies/starwars.mov', write_file)
+ ... my_fs.download('/Videos/starwars.mov', write_file)
+
+ Raises:
+ fs.errors.ResourceNotFound: if ``path`` does not exist.
"""
with self._lock:
@@ -726,6 +742,9 @@ def getsize(self, path):
Returns:
int: the *size* of the resource.
+ Raises:
+ fs.errors.ResourceNotFound: if ``path`` does not exist.
+
The *size* of a file is the total number of readable bytes,
which may not reflect the exact number of bytes of reserved
disk space (or other storage medium).
@@ -814,6 +833,9 @@ def gettype(self, path):
Returns:
~fs.enums.ResourceType: the type of the resource.
+ Raises:
+ fs.errors.ResourceNotFound: if ``path`` does not exist.
+
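+        Example:
+            For instance, a regular file is reported as
+            `~fs.enums.ResourceType.file` (``file.txt`` is just an
+            illustrative, existing file here)::
+
+                >>> my_fs.gettype("file.txt")
+                <ResourceType.file: 2>
+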
A type of a resource is an integer that identifies the what
the resource references. The standard type integers may be one
of the values in the `~fs.enums.ResourceType` enumerations.
@@ -986,6 +1008,7 @@ def lock(self):
Example:
>>> with my_fs.lock(): # May block
... # code here has exclusive access to the filesystem
+ ... pass
It is a good idea to put a lock around any operations that you
would like to be *atomic*. For instance if you are copying
@@ -1017,6 +1040,8 @@ def movedir(self, src_path, dst_path, create=False):
Raises:
fs.errors.ResourceNotFound: if ``dst_path`` does not exist,
and ``create`` is `False`.
+ fs.errors.DirectoryExpected: if ``src_path`` or one of its
+ ancestors is not a directory.
"""
with self._lock:
@@ -1184,28 +1209,56 @@ def opendir(
~fs.subfs.SubFS: A filesystem representing a sub-directory.
Raises:
- fs.errors.DirectoryExpected: If ``dst_path`` does not
- exist or is not a directory.
+ fs.errors.ResourceNotFound: If ``path`` does not exist.
+ fs.errors.DirectoryExpected: If ``path`` is not a directory.
"""
from .subfs import SubFS
_factory = factory or self.subfs_class or SubFS
- if not self.getbasic(path).is_dir:
+ if not self.getinfo(path).is_dir:
raise errors.DirectoryExpected(path=path)
return _factory(self, path)
def removetree(self, dir_path):
# type: (Text) -> None
- """Recursively remove the contents of a directory.
+ """Recursively remove a directory and all its contents.
- This method is similar to `~fs.base.removedir`, but will
+ This method is similar to `~fs.base.FS.removedir`, but will
remove the contents of the directory if it is not empty.
Arguments:
dir_path (str): Path to a directory on the filesystem.
+ Raises:
+ fs.errors.ResourceNotFound: If ``dir_path`` does not exist.
+ fs.errors.DirectoryExpected: If ``dir_path`` is not a directory.
+
+ Caution:
+ A filesystem should never delete its root folder, so
+ ``FS.removetree("/")`` has different semantics: the
+ contents of the root folder will be deleted, but the
+ root will be untouched::
+
+ >>> home_fs = fs.open_fs("~")
+ >>> home_fs.removetree("/")
+ >>> home_fs.exists("/")
+ True
+ >>> home_fs.isempty("/")
+ True
+
+ Combined with `~fs.base.FS.opendir`, this can be used
+ to clear a directory without removing the directory
+ itself::
+
+ >>> home_fs = fs.open_fs("~")
+ >>> home_fs.opendir("/Videos").removetree("/")
+ >>> home_fs.exists("/Videos")
+ True
+ >>> home_fs.isempty("/Videos")
+ True
+
"""
_dir_path = abspath(normpath(dir_path))
with self._lock:
@@ -1456,11 +1509,10 @@ def validatepath(self, path):
str: A normalized, absolute path.
Raises:
+ fs.errors.InvalidPath: If the path is invalid.
+            fs.errors.FilesystemClosed: If the filesystem is closed.
fs.errors.InvalidCharsInPath: If the path contains
invalid characters.
- fs.errors.InvalidPath: If the path is invalid.
- fs.errors.FilesystemClosed: if the filesystem
- is closed.
"""
self.check()
@@ -1512,7 +1564,16 @@ def getbasic(self, path):
Returns:
~fs.info.Info: Resource information object for ``path``.
+ Note:
+ .. deprecated:: 2.4.13
+ Please use `~FS.getinfo` directly, which is
+ required to always return the *basic* namespace.
+
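+        Example:
+            `~FS.getinfo` already returns the *basic* namespace, so it
+            can be used as a drop-in replacement (``file.txt`` is just
+            an illustrative path)::
+
+                >>> my_fs.getinfo("file.txt").name
+                'file.txt'
+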
"""
+ warnings.warn(
+ "method 'getbasic' has been deprecated, please use 'getinfo'",
+ DeprecationWarning,
+ )
return self.getinfo(path, namespaces=["basic"])
def getdetails(self, path):
@@ -1547,23 +1608,28 @@ def match(self, patterns, name):
# type: (Optional[Iterable[Text]], Text) -> bool
"""Check if a name matches any of a list of wildcards.
+ If a filesystem is case *insensitive* (such as Windows) then
+ this method will perform a case insensitive match (i.e. ``*.py``
+ will match the same names as ``*.PY``). Otherwise the match will
+ be case sensitive (``*.py`` and ``*.PY`` will match different
+ names).
+
Arguments:
- patterns (list): A list of patterns, e.g. ``['*.py']``
+ patterns (list, optional): A list of patterns, e.g.
+ ``['*.py']``, or `None` to match everything.
name (str): A file or directory name (not a path)
Returns:
bool: `True` if ``name`` matches any of the patterns.
- If a filesystem is case *insensitive* (such as Windows) then
- this method will perform a case insensitive match (i.e. ``*.py``
- will match the same names as ``*.PY``). Otherwise the match will
- be case sensitive (``*.py`` and ``*.PY`` will match different
- names).
+ Raises:
+ TypeError: If ``patterns`` is a single string instead of
+ a list (or `None`).
Example:
- >>> home_fs.match(['*.py'], '__init__.py')
+ >>> my_fs.match(['*.py'], '__init__.py')
True
- >>> home_fs.match(['*.jpg', '*.png'], 'foo.gif')
+ >>> my_fs.match(['*.jpg', '*.png'], 'foo.gif')
False
Note:
@@ -1616,13 +1682,16 @@ def hash(self, path, name):
Arguments:
path(str): A path on the filesystem.
name(str):
- One of the algorithms supported by the hashlib module, e.g. `"md5"`
+ One of the algorithms supported by the `hashlib` module,
+ e.g. `"md5"` or `"sha256"`.
Returns:
str: The hex digest of the hash.
Raises:
fs.errors.UnsupportedHash: If the requested hash is not supported.
+ fs.errors.ResourceNotFound: If ``path`` does not exist.
+ fs.errors.FileExpected: If ``path`` exists but is not a file.
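+
+        Example:
+            Hashing an empty file gives the well-known digest of empty
+            input (here ``file.txt`` is assumed to be an empty file)::
+
+                >>> my_fs.hash("file.txt", "md5")
+                'd41d8cd98f00b204e9800998ecf8427e'
+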
"""
self.validatepath(path)
diff --git a/fs/filesize.py b/fs/filesize.py
index a80fd9e1..fafcc61d 100644
--- a/fs/filesize.py
+++ b/fs/filesize.py
@@ -61,7 +61,7 @@ def traditional(size):
`str`: A string containing an abbreviated file size and units.
Example:
- >>> filesize.traditional(30000)
+ >>> fs.filesize.traditional(30000)
'29.3 KB'
"""
@@ -87,7 +87,7 @@ def binary(size):
`str`: A string containing a abbreviated file size and units.
Example:
- >>> filesize.binary(30000)
+ >>> fs.filesize.binary(30000)
'29.3 KiB'
"""
@@ -112,7 +112,7 @@ def decimal(size):
`str`: A string containing a abbreviated file size and units.
Example:
- >>> filesize.decimal(30000)
+ >>> fs.filesize.decimal(30000)
'30.0 kB'
"""
diff --git a/fs/ftpfs.py b/fs/ftpfs.py
index c5929ee1..515709df 100644
--- a/fs/ftpfs.py
+++ b/fs/ftpfs.py
@@ -358,22 +358,31 @@ class FTPFS(FS):
FTPS, or FTP Secure. TLS will be enabled when using the ftps:// protocol,
or when setting the `tls` argument to True in the constructor.
-
Examples:
Create with the constructor::
>>> from fs.ftpfs import FTPFS
- >>> ftp_fs = FTPFS()
+ >>> ftp_fs = FTPFS("demo.wftpserver.com")
Or via an FS URL::
- >>> import fs
- >>> ftp_fs = fs.open_fs('ftp://')
+ >>> ftp_fs = fs.open_fs('ftp://test.rebex.net')
Or via an FS URL, using TLS::
- >>> import fs
- >>> ftp_fs = fs.open_fs('ftps://')
+ >>> ftp_fs = fs.open_fs('ftps://demo.wftpserver.com')
+
+ You can also use a non-anonymous username, and optionally a
+ password, even within a FS URL::
+
+ >>> ftp_fs = FTPFS("test.rebex.net", user="demo", passwd="password")
+ >>> ftp_fs = fs.open_fs('ftp://demo:password@test.rebex.net')
+
+    Connecting via a proxy is supported. If using an FS URL, the proxy
+ URL will need to be added as a URL parameter::
+
+ >>> ftp_fs = FTPFS("ftp.ebi.ac.uk", proxy="test.rebex.net")
+ >>> ftp_fs = fs.open_fs('ftp://ftp.ebi.ac.uk/?proxy=test.rebex.net')
"""
diff --git a/fs/glob.py b/fs/glob.py
index c21bb9c6..0dfa9f1c 100644
--- a/fs/glob.py
+++ b/fs/glob.py
@@ -172,9 +172,8 @@ def count(self):
"""Count files / directories / data in matched paths.
Example:
- >>> import fs
- >>> fs.open_fs('~/projects').glob('**/*.py').count()
- Counts(files=18519, directories=0, data=206690458)
+ >>> my_fs.glob('**/*.py').count()
+ Counts(files=2, directories=0, data=55)
Returns:
`~Counts`: A named tuple containing results.
@@ -199,9 +198,8 @@ def count_lines(self):
`~LineCounts`: A named tuple containing line counts.
Example:
- >>> import fs
- >>> fs.open_fs('~/projects').glob('**/*.py').count_lines()
- LineCounts(lines=5767102, non_blank=4915110)
+ >>> my_fs.glob('**/*.py').count_lines()
+ LineCounts(lines=4, non_blank=3)
"""
lines = 0
@@ -222,9 +220,8 @@ def remove(self):
int: Number of file and directories removed.
Example:
- >>> import fs
- >>> fs.open_fs('~/projects/my_project').glob('**/*.pyc').remove()
- 29
+ >>> my_fs.glob('**/*.pyc').remove()
+ 2
"""
removes = 0
diff --git a/fs/info.py b/fs/info.py
index 60a659e6..03bf27cd 100644
--- a/fs/info.py
+++ b/fs/info.py
@@ -41,7 +41,7 @@ class Info(object):
raw_info (dict): A dict containing resource info.
to_datetime (callable): A callable that converts an
epoch time to a datetime object. The default uses
- :func:`~fs.time.epoch_to_datetime`.
+ `~fs.time.epoch_to_datetime`.
"""
@@ -106,8 +106,9 @@ def get(self, namespace, key, default=None): # noqa: F811
is not found.
Example:
- >>> info.get('access', 'permissions')
- ['u_r', 'u_w', '_wx']
+ >>> info = my_fs.getinfo("foo.py", namespaces=["details"])
+ >>> info.get('details', 'type')
+ 2
"""
try:
@@ -131,7 +132,8 @@ def is_writeable(self, namespace, key):
# type: (Text, Text) -> bool
"""Check if a given key in a namespace is writable.
- Uses `~fs.base.FS.setinfo`.
+ When creating an `Info` object, you can add a ``_write`` key to
+ each raw namespace that lists which keys are writable or not.
Arguments:
namespace (str): A namespace identifier.
@@ -140,6 +142,24 @@ def is_writeable(self, namespace, key):
Returns:
bool: `True` if the key can be modified, `False` otherwise.
+ Example:
+ Create an `Info` object that marks only the ``modified`` key
+ as writable in the ``details`` namespace::
+
+ >>> now = time.time()
+ >>> info = Info({
+ ... "basic": {"name": "foo", "is_dir": False},
+ ... "details": {
+ ... "modified": now,
+ ... "created": now,
+ ... "_write": ["modified"],
+ ... }
+ ... })
+ >>> info.is_writeable("details", "created")
+ False
+ >>> info.is_writeable("details", "modified")
+ True
+
"""
_writeable = self.get(namespace, "_write", ())
return key in _writeable
@@ -189,12 +209,10 @@ def suffix(self):
In case there is no suffix, an empty string is returned.
Example:
- >>> info
-
+ >>> info = my_fs.getinfo("foo.py")
>>> info.suffix
'.py'
- >>> info2
-
+ >>> info2 = my_fs.getinfo("bar")
>>> info2.suffix
''
@@ -211,8 +229,7 @@ def suffixes(self):
"""`List`: a list of any suffixes in the name.
Example:
- >>> info
-
+ >>> info = my_fs.getinfo("foo.tar.gz")
>>> info.suffixes
['.tar', '.gz']
@@ -228,8 +245,7 @@ def stem(self):
"""`str`: the name minus any suffixes.
Example:
- >>> info
-
+ >>> info = my_fs.getinfo("foo.tar.gz")
>>> info.stem
'foo'
diff --git a/fs/opener/registry.py b/fs/opener/registry.py
index 4c1c2d3e..54e2dda1 100644
--- a/fs/opener/registry.py
+++ b/fs/opener/registry.py
@@ -198,7 +198,8 @@ def open_fs(
"""Open a filesystem from a FS URL (ignoring the path component).
Arguments:
- fs_url (str): A filesystem URL.
+ fs_url (str): A filesystem URL. If a filesystem instance is
+ given instead, it will be returned transparently.
writeable (bool, optional): `True` if the filesystem must
be writeable.
create (bool, optional): `True` if the filesystem should be
@@ -211,6 +212,14 @@ def open_fs(
Returns:
~fs.base.FS: A filesystem instance.
+ Caution:
+ The ``writeable`` parameter only controls whether the
+ filesystem *needs* to be writable, which is relevant for
+ some archive filesystems. Passing ``writeable=False`` will
+            **not** make the returned filesystem read-only. For this,
+ consider using `fs.wrap.read_only` to wrap the returned
+ instance.
+
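+        Example:
+            A sketch of obtaining a filesystem that actually rejects
+            modifications, by wrapping it with `fs.wrap.read_only`::
+
+                >>> import fs.wrap
+                >>> ro_fs = fs.wrap.read_only(fs.open_fs("mem://"))
+                >>> ro_fs.makedir("foo")
+                Traceback (most recent call last):
+                    ...
+                fs.errors.ResourceReadOnly: resource 'foo' is read only
+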
"""
from ..base import FS
@@ -251,10 +260,13 @@ def manage_fs(
required logic for that.
Example:
- >>> def print_ls(list_fs):
- ... '''List a directory.'''
- ... with manage_fs(list_fs) as fs:
- ... print(' '.join(fs.listdir()))
+ The `~Registry.manage_fs` method can be used to define a small
+ utility function::
+
+ >>> def print_ls(list_fs):
+ ... '''List a directory.'''
+ ... with manage_fs(list_fs) as fs:
+ ... print(' '.join(fs.listdir()))
This function may be used in two ways. You may either pass
a ``str``, as follows::
diff --git a/fs/path.py b/fs/path.py
index 9d6496b5..13641be1 100644
--- a/fs/path.py
+++ b/fs/path.py
@@ -64,9 +64,9 @@ def normpath(path):
>>> normpath("/foo//bar/frob/../baz")
'/foo/bar/baz'
>>> normpath("foo/../../bar")
- Traceback (most recent call last)
+ Traceback (most recent call last):
...
- IllegalBackReference: path 'foo/../../bar' contains back-references outside of filesystem"
+ fs.errors.IllegalBackReference: path 'foo/../../bar' contains back-references outside of filesystem
""" # noqa: E501
if path in "/":
@@ -86,6 +86,7 @@ def normpath(path):
else:
components.append(component)
except IndexError:
+ # FIXME (@althonos): should be raised from the IndexError
raise IllegalBackReference(path)
return prefix + "/".join(components)
diff --git a/fs/permissions.py b/fs/permissions.py
index 032c3be0..3aaa6eff 100644
--- a/fs/permissions.py
+++ b/fs/permissions.py
@@ -58,7 +58,7 @@ class Permissions(object):
>>> p.mode
500
>>> oct(p.mode)
- '0764'
+ '0o764'
"""
diff --git a/fs/tarfs.py b/fs/tarfs.py
index 85e74840..0e808fe1 100644
--- a/fs/tarfs.py
+++ b/fs/tarfs.py
@@ -66,10 +66,10 @@ def _get_member_info(member, encoding):
class TarFS(WrapFS):
"""Read and write tar files.
- There are two ways to open a TarFS for the use cases of reading
+ There are two ways to open a `TarFS` for the use cases of reading
a tar file, and creating a new one.
- If you open the TarFS with ``write`` set to `False` (the
+ If you open the `TarFS` with ``write`` set to `False` (the
default), then the filesystem will be a read only filesystem which
maps to the files and directories within the tar file. Files are
decompressed on the fly when you open them.
@@ -79,9 +79,9 @@ class TarFS(WrapFS):
with TarFS('foo.tar.gz') as tar_fs:
readme = tar_fs.readtext('readme.txt')
- If you open the TarFS with ``write`` set to `True`, then the TarFS
+    If you open the `TarFS` with ``write`` set to `True`, then the `TarFS`
will be a empty temporary filesystem. Any files / directories you
- create in the TarFS will be written in to a tar file when the TarFS
+ create in the `TarFS` will be written in to a tar file when the `TarFS`
is closed. The compression is set from the new file name but may be
set manually with the ``compression`` argument.
@@ -100,8 +100,9 @@ class TarFS(WrapFS):
use default (`False`) to read an existing tar file.
compression (str, optional): Compression to use (one of the formats
supported by `tarfile`: ``xz``, ``gz``, ``bz2``, or `None`).
- temp_fs (str): An FS URL for the temporary filesystem
- used to store data prior to tarring.
+        temp_fs (str or FS): An FS URL or an FS instance to use to store
+ data prior to tarring. Defaults to creating a new
+ `~fs.tempfs.TempFS`.
"""
@@ -118,7 +119,7 @@ def __new__( # type: ignore
write=False, # type: bool
compression=None, # type: Optional[Text]
encoding="utf-8", # type: Text
- temp_fs="temp://__tartemp__", # type: Text
+ temp_fs="temp://__tartemp__", # type: Union[Text, FS]
):
# type: (...) -> FS
if isinstance(file, (six.text_type, six.binary_type)):
@@ -164,7 +165,7 @@ def __init__(
file, # type: Union[Text, BinaryIO]
compression=None, # type: Optional[Text]
encoding="utf-8", # type: Text
- temp_fs="temp://__tartemp__", # type: Text
+ temp_fs="temp://__tartemp__", # type: Union[Text, FS]
): # noqa: D107
# type: (...) -> None
self._file = file # type: Union[Text, BinaryIO]
diff --git a/fs/tempfs.py b/fs/tempfs.py
index a1e5a3d2..5fdc2f61 100644
--- a/fs/tempfs.py
+++ b/fs/tempfs.py
@@ -27,7 +27,31 @@
@six.python_2_unicode_compatible
class TempFS(OSFS):
- """A temporary filesystem on the OS."""
+ """A temporary filesystem on the OS.
+
+ Temporary filesystems are created using the `tempfile.mkdtemp`
+ function to obtain a temporary folder in an OS-specific location.
+ You can provide an alternative location with the ``temp_dir``
+ argument of the constructor.
+
+ Examples:
+ Create with the constructor::
+
+ >>> from fs.tempfs import TempFS
+ >>> tmp_fs = TempFS()
+
+ Or via an FS URL::
+
+ >>> import fs
+ >>> tmp_fs = fs.open_fs("temp://")
+
+ Use a specific identifier for the temporary folder to better
+ illustrate its purpose::
+
+ >>> named_tmp_fs = fs.open_fs("temp://local_copy")
+ >>> named_tmp_fs = TempFS(identifier="local_copy")
+
+ """
def __init__(
self,
@@ -43,7 +67,7 @@ def __init__(
identifier (str): A string to distinguish the directory within
the OS temp location, used as part of the directory name.
temp_dir (str, optional): An OS path to your temp directory
- (leave as `None` to auto-detect)
+ (leave as `None` to auto-detect).
auto_clean (bool): If `True` (the default), the directory
contents will be wiped on close.
ignore_clean_errors (bool): If `True` (the default), any errors
@@ -71,6 +95,28 @@ def __str__(self):
def close(self):
# type: () -> None
+ """Close the filesystem and release any resources.
+
+ It is important to call this method when you have finished
+ working with the filesystem. Some filesystems may not finalize
+ changes until they are closed (archives for example). You may
+ call this method explicitly (it is safe to call close multiple
+ times), or you can use the filesystem as a context manager to
+ automatically close.
+
+ Hint:
+            Depending on the value of ``auto_clean`` passed when creating
+            the `TempFS`, the underlying temporary folder may or may not
+            be removed when the filesystem is closed.
+
+ Example:
+ >>> tmp_fs = TempFS(auto_clean=False)
+ >>> syspath = tmp_fs.getsyspath("/")
+ >>> tmp_fs.close()
+ >>> os.path.exists(syspath)
+ True
+
+ """
if self._auto_clean:
self.clean()
super(TempFS, self).close()
diff --git a/fs/test.py b/fs/test.py
index 4d4e4518..e40e43cf 100644
--- a/fs/test.py
+++ b/fs/test.py
@@ -292,6 +292,15 @@ def assert_not_exists(self, path):
"""
self.assertFalse(self.fs.exists(path))
+ def assert_isempty(self, path):
+ """Assert a path is an empty directory.
+
+ Arguments:
+ path (str): A path on the filesystem.
+
+ """
+ self.assertTrue(self.fs.isempty(path))
+
def assert_isfile(self, path):
"""Assert a path is a file.
@@ -457,6 +466,7 @@ def test_getinfo(self):
root_info = self.fs.getinfo("/")
self.assertEqual(root_info.name, "")
self.assertTrue(root_info.is_dir)
+ self.assertIn("basic", root_info.namespaces)
# Make a file of known size
self.fs.writebytes("foo", b"bar")
@@ -464,17 +474,20 @@ def test_getinfo(self):
# Check basic namespace
info = self.fs.getinfo("foo").raw
+ self.assertIn("basic", info)
self.assertIsInstance(info["basic"]["name"], text_type)
self.assertEqual(info["basic"]["name"], "foo")
self.assertFalse(info["basic"]["is_dir"])
# Check basic namespace dir
info = self.fs.getinfo("dir").raw
+ self.assertIn("basic", info)
self.assertEqual(info["basic"]["name"], "dir")
self.assertTrue(info["basic"]["is_dir"])
# Get the info
info = self.fs.getinfo("foo", namespaces=["details"]).raw
+ self.assertIn("basic", info)
self.assertIsInstance(info, dict)
self.assertEqual(info["details"]["size"], 3)
self.assertEqual(info["details"]["type"], int(ResourceType.file))
@@ -1101,6 +1114,7 @@ def test_removedir(self):
self.fs.removedir("foo/bar")
def test_removetree(self):
+ self.fs.makedirs("spam")
self.fs.makedirs("foo/bar/baz")
self.fs.makedirs("foo/egg")
self.fs.makedirs("foo/a/b/c/d/e")
@@ -1116,6 +1130,7 @@ def test_removetree(self):
self.fs.removetree("foo")
self.assert_not_exists("foo")
+ self.assert_exists("spam")
# Errors on files
self.fs.create("bar")
@@ -1126,6 +1141,34 @@ def test_removetree(self):
with self.assertRaises(errors.ResourceNotFound):
self.fs.removetree("foofoo")
+ def test_removetree_root(self):
+ self.fs.makedirs("foo/bar/baz")
+ self.fs.makedirs("foo/egg")
+ self.fs.makedirs("foo/a/b/c/d/e")
+ self.fs.create("foo/egg.txt")
+ self.fs.create("foo/bar/egg.bin")
+ self.fs.create("foo/a/b/c/1.txt")
+ self.fs.create("foo/a/b/c/2.txt")
+ self.fs.create("foo/a/b/c/3.txt")
+
+ self.assert_exists("foo/egg.txt")
+ self.assert_exists("foo/bar/egg.bin")
+
+ # removetree("/") removes the contents,
+ # but not the root folder itself
+ self.fs.removetree("/")
+ self.assert_exists("/")
+ self.assert_isempty("/")
+
+        # check that we can still create entries afterwards, to catch
+        # potential issues with the root folder being deleted by
+        # faulty implementations
+ self.fs.create("egg")
+ self.fs.makedir("yolk")
+ self.assert_exists("egg")
+ self.assert_exists("yolk")
+
def test_setinfo(self):
self.fs.create("birthday.txt")
now = math.floor(time.time())
diff --git a/fs/time.py b/fs/time.py
index f1638aa3..cdf06061 100644
--- a/fs/time.py
+++ b/fs/time.py
@@ -4,10 +4,14 @@
from __future__ import print_function
from __future__ import unicode_literals
+import typing
from calendar import timegm
from datetime import datetime
from pytz import UTC, timezone
+if typing.TYPE_CHECKING:
+ from typing import Optional
+
utcfromtimestamp = datetime.utcfromtimestamp
utclocalize = UTC.localize
@@ -20,7 +24,19 @@ def datetime_to_epoch(d):
return timegm(d.utctimetuple())
-def epoch_to_datetime(t):
+@typing.overload
+def epoch_to_datetime(t): # noqa: D103
+ # type: (None) -> None
+ pass
+
+
+@typing.overload
+def epoch_to_datetime(t): # noqa: D103
# type: (int) -> datetime
+ pass
+
+
+def epoch_to_datetime(t):
+ # type: (Optional[int]) -> Optional[datetime]
"""Convert epoch time to a UTC datetime."""
return utclocalize(utcfromtimestamp(t)) if t is not None else None
diff --git a/fs/walk.py b/fs/walk.py
index 0f4adb99..f539fa9d 100644
--- a/fs/walk.py
+++ b/fs/walk.py
@@ -146,24 +146,24 @@ def bind(cls, fs):
Returns:
~fs.walk.BoundWalker: a bound walker.
- Example:
- >>> from fs import open_fs
- >>> from fs.walk import Walker
- >>> home_fs = open_fs('~/')
- >>> walker = Walker.bind(home_fs)
- >>> for path in walker.files(filter=['*.py']):
- ... print(path)
-
- Unless you have written a customized walker class, you will be
- unlikely to need to call this explicitly, as filesystem objects
- already have a ``walk`` attribute which is a bound walker
- object.
+ Examples:
+ Use this method to explicitly bind a filesystem instance::
- Example:
- >>> from fs import open_fs
- >>> home_fs = open_fs('~/')
- >>> for path in home_fs.walk.files(filter=['*.py']):
- ... print(path)
+ >>> walker = Walker.bind(my_fs)
+ >>> for path in walker.files(filter=['*.py']):
+ ... print(path)
+ /foo.py
+ /bar.py
+
+ Unless you have written a customized walker class, you will
+ be unlikely to need to call this explicitly, as filesystem
+ objects already have a ``walk`` attribute which is a bound
+ walker object::
+
+ >>> for path in my_fs.walk.files(filter=['*.py']):
+ ... print(path)
+ /foo.py
+ /bar.py
"""
return BoundWalker(fs)
@@ -316,14 +316,16 @@ def walk(
            `~fs.info.Info` objects for directories and files in ``<path>``.
Example:
- >>> home_fs = open_fs('~/')
>>> walker = Walker(filter=['*.py'])
- >>> namespaces = ['details']
- >>> for path, dirs, files in walker.walk(home_fs, namespaces)
+ >>> for path, dirs, files in walker.walk(my_fs, namespaces=["details"]):
... print("[{}]".format(path))
... print("{} directories".format(len(dirs)))
... total = sum(info.size for info in files)
- ... print("{} bytes {}".format(total))
+ ... print("{} bytes".format(total))
+ [/]
+ 2 directories
+ 55 bytes
+ ...
"""
_path = abspath(normpath(path))
@@ -495,10 +497,9 @@ class BoundWalker(typing.Generic[_F]):
`BoundWalker` object.
Example:
- >>> import fs
- >>> home_fs = fs.open_fs('~/')
- >>> home_fs.walk
- BoundWalker(OSFS('/Users/will', encoding='utf-8'))
+ >>> tmp_fs = fs.tempfs.TempFS()
+ >>> tmp_fs.walk
+ BoundWalker(TempFS())
A `BoundWalker` is callable. Calling it is an alias for the
`~fs.walk.BoundWalker.walk` method.
@@ -575,13 +576,16 @@ def walk(
            `~fs.info.Info` objects for directories and files in ``<path>``.
Example:
- >>> home_fs = open_fs('~/')
>>> walker = Walker(filter=['*.py'])
- >>> for path, dirs, files in walker.walk(home_fs, namespaces=['details']):
+ >>> for path, dirs, files in walker.walk(my_fs, namespaces=['details']):
... print("[{}]".format(path))
... print("{} directories".format(len(dirs)))
... total = sum(info.size for info in files)
- ... print("{} bytes {}".format(total))
+ ... print("{} bytes".format(total))
+ [/]
+ 2 directories
+ 55 bytes
+ ...
This method invokes `Walker.walk` with bound `FS` object.
diff --git a/fs/wrap.py b/fs/wrap.py
index 3ae4aa9f..e03ef805 100644
--- a/fs/wrap.py
+++ b/fs/wrap.py
@@ -2,14 +2,12 @@
Here's an example that opens a filesystem then makes it *read only*::
- >>> from fs import open_fs
- >>> from fs.wrap import read_only
- >>> projects_fs = open_fs('~/projects')
- >>> read_only_projects_fs = read_only(projects_fs)
- >>> read_only_projects_fs.remove('__init__.py')
+ >>> home_fs = fs.open_fs('~')
+ >>> read_only_home_fs = fs.wrap.read_only(home_fs)
+ >>> read_only_home_fs.removedir('Desktop')
Traceback (most recent call last):
...
- fs.errors.ResourceReadOnly: resource '__init__.py' is read only
+ fs.errors.ResourceReadOnly: resource 'Desktop' is read only
"""
diff --git a/fs/wrapfs.py b/fs/wrapfs.py
index e40a7a83..5944b111 100644
--- a/fs/wrapfs.py
+++ b/fs/wrapfs.py
@@ -12,7 +12,7 @@
from .copy import copy_file, copy_dir
from .info import Info
from .move import move_file, move_dir
-from .path import abspath, normpath
+from .path import abspath, join, normpath
from .error_tools import unwrap_errors
if typing.TYPE_CHECKING:
@@ -217,11 +217,20 @@ def removetree(self, dir_path):
# type: (Text) -> None
self.check()
_path = abspath(normpath(dir_path))
- if _path == "/":
- raise errors.RemoveRootError()
- _fs, _path = self.delegate_path(dir_path)
+ _delegate_fs, _delegate_path = self.delegate_path(dir_path)
with unwrap_errors(dir_path):
- _fs.removetree(_path)
+ if _path == "/":
+ # with root path, we must remove the contents but
+ # not the directory itself, so we can't just directly
+ # delegate
+ for info in _delegate_fs.scandir(_delegate_path):
+ info_path = join(_delegate_path, info.name)
+ if info.is_dir:
+ _delegate_fs.removetree(info_path)
+ else:
+ _delegate_fs.remove(info_path)
+ else:
+ _delegate_fs.removetree(_delegate_path)
def scandir(
self,
diff --git a/fs/zipfs.py b/fs/zipfs.py
index d8300a26..12d8a668 100644
--- a/fs/zipfs.py
+++ b/fs/zipfs.py
@@ -124,11 +124,11 @@ def tell(self):
class ZipFS(WrapFS):
"""Read and write zip files.
- There are two ways to open a ZipFS for the use cases of reading
+ There are two ways to open a `ZipFS` for the use cases of reading
a zip file, and creating a new one.
- If you open the ZipFS with ``write`` set to `False` (the default)
- then the filesystem will be a read only filesystem which maps to
+ If you open the `ZipFS` with ``write`` set to `False` (the default)
+ then the filesystem will be a read-only filesystem which maps to
the files and directories within the zip file. Files are
decompressed on the fly when you open them.
@@ -137,12 +137,12 @@ class ZipFS(WrapFS):
with ZipFS('foo.zip') as zip_fs:
readme = zip_fs.readtext('readme.txt')
- If you open the ZipFS with ``write`` set to `True`, then the ZipFS
- will be a empty temporary filesystem. Any files / directories you
- create in the ZipFS will be written in to a zip file when the ZipFS
+ If you open the `ZipFS` with ``write`` set to `True`, then the `ZipFS`
+ will be an empty temporary filesystem. Any files / directories you
+ create in the `ZipFS` will be written in to a zip file when the `ZipFS`
is closed.
- Here's how you might write a new zip file containing a readme.txt
+ Here's how you might write a new zip file containing a ``readme.txt``
file::
with ZipFS('foo.zip', write=True) as new_zip:
@@ -158,8 +158,9 @@ class ZipFS(WrapFS):
(default) to read an existing zip file.
compression (int): Compression to use (one of the constants
defined in the `zipfile` module in the stdlib).
- temp_fs (str): An FS URL for the temporary filesystem used to
- store data prior to zipping.
+ temp_fs (str or FS): An FS URL or an FS instance to use to
+ store data prior to zipping. Defaults to creating a new
+ `~fs.tempfs.TempFS`.
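+
+    Example:
+        A sketch of building a zip file while keeping the intermediate
+        data in memory rather than in a temporary folder::
+
+            from fs.memoryfs import MemoryFS
+
+            with ZipFS("foo.zip", write=True, temp_fs=MemoryFS()) as zip_fs:
+                zip_fs.writetext("readme.txt", "Hello, World!")
+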
"""
@@ -170,7 +171,7 @@ def __new__( # type: ignore
write=False, # type: bool
compression=zipfile.ZIP_DEFLATED, # type: int
encoding="utf-8", # type: Text
- temp_fs="temp://__ziptemp__", # type: Text
+ temp_fs="temp://__ziptemp__", # type: Union[Text, FS]
):
# type: (...) -> FS
# This magic returns a different class instance based on the
@@ -205,7 +206,7 @@ def __init__(
file, # type: Union[Text, BinaryIO]
compression=zipfile.ZIP_DEFLATED, # type: int
encoding="utf-8", # type: Text
- temp_fs="temp://__ziptemp__", # type: Text
+ temp_fs="temp://__ziptemp__", # type: Union[Text, FS]
): # noqa: D107
# type: (...) -> None
self._file = file
diff --git a/setup.cfg b/setup.cfg
index a5c46904..fcc75584 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -119,6 +119,7 @@ skip_covered = true
exclude_lines =
pragma: no cover
if False:
+    if typing.TYPE_CHECKING:
@typing.overload
@overload
diff --git a/tests/test_doctest.py b/tests/test_doctest.py
new file mode 100644
index 00000000..22c02357
--- /dev/null
+++ b/tests/test_doctest.py
@@ -0,0 +1,194 @@
+# coding: utf-8
+"""Test the doctests contained in every module of the package."""
+import doctest
+import importlib
+import os
+import pkgutil
+import types
+import warnings
+import tempfile
+import time
+import unittest
+from pprint import pprint
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+import six
+
+import fs
+import fs.opener.parse
+from fs.memoryfs import MemoryFS
+from fs.subfs import ClosingSubFS
+
+# --- Mocks ------------------------------------------------------------------
+
+
+def _home_fs():
+ """Create a mock filesystem that matches the XDG user-dirs spec."""
+ home_fs = MemoryFS()
+ home_fs.makedir("Desktop")
+ home_fs.makedir("Documents")
+ home_fs.makedir("Downloads")
+ home_fs.makedir("Music")
+ home_fs.makedir("Pictures")
+ home_fs.makedir("Public")
+ home_fs.makedir("Templates")
+ home_fs.makedir("Videos")
+ return home_fs
+
+
+def _open_fs(path):
+ """A mock `open_fs` that avoids side effects when running doctests."""
+ if "://" not in path:
+ path = "osfs://{}".format(path)
+ parse_result = fs.opener.parse(path)
+ if parse_result.protocol == "osfs" and parse_result.resource == "~":
+ home_fs = _home_fs()
+ if parse_result.path is not None:
+ home_fs = home_fs.opendir(parse_result.path, factory=ClosingSubFS)
+ return home_fs
+ elif parse_result.protocol in {"ftp", "ftps", "mem", "temp"}:
+ return MemoryFS()
+ else:
+ raise RuntimeError("not allowed in doctests: {}".format(path))
+
+
+def _my_fs(module):
+ """Create a mock filesystem to be used in examples."""
+ my_fs = MemoryFS()
+ if module == "fs.base":
+ my_fs.makedir("Desktop")
+ my_fs.makedir("Videos")
+ my_fs.touch("Videos/starwars.mov")
+ my_fs.touch("file.txt")
+ elif module == "fs.info":
+ my_fs.touch("foo.tar.gz")
+ my_fs.settext("foo.py", "print('Hello, world!')")
+ my_fs.makedir("bar")
+ elif module in {"fs.walk", "fs.glob"}:
+ my_fs.makedir("dir1")
+ my_fs.makedir("dir2")
+ my_fs.settext("foo.py", "print('Hello, world!')")
+ my_fs.touch("foo.pyc")
+ my_fs.settext("bar.py", "print('ok')\n\n# this is a comment\n")
+ my_fs.touch("bar.pyc")
+ return my_fs
+
+
+def _open(filename, mode="r"):
+ """A mock `open` that actually opens a temporary file."""
+ return tempfile.NamedTemporaryFile(mode="r+" if mode == "r" else mode)
+
+
+# --- Loader protocol --------------------------------------------------------
+
+
+def _load_tests_from_module(tests, module, globs, setUp=None, tearDown=None):
+ """Load tests from module, iterating through submodules."""
+ for attr in (getattr(module, x) for x in dir(module) if not x.startswith("_")):
+ if isinstance(attr, types.ModuleType):
+ suite = doctest.DocTestSuite(
+ attr,
+ globs,
+ setUp=setUp,
+ tearDown=tearDown,
+ optionflags=+doctest.ELLIPSIS,
+ )
+ tests.addTests(suite)
+ return tests
+
+
+def _load_tests(loader, tests, ignore):
+    """`load_tests` function used by unittest to find the doctests."""
+
+ # NB (@althonos): we only test docstrings on Python 3 because it's
+ # extremely hard to maintain compatibility for both versions without
+ # extensively hacking `doctest` and `unittest`.
+ if six.PY2:
+ return tests
+
+ def setUp(self):
+ warnings.simplefilter("ignore")
+ self._open_fs_mock = mock.patch.object(fs, "open_fs", new=_open_fs)
+ self._open_fs_mock.__enter__()
+ self._ftpfs_mock = mock.patch.object(fs.ftpfs, "FTPFS")
+ self._ftpfs_mock.__enter__()
+
+ def tearDown(self):
+ self._open_fs_mock.__exit__(None, None, None)
+ self._ftpfs_mock.__exit__(None, None, None)
+ warnings.simplefilter(warnings.defaultaction)
+
+ # recursively traverse all library submodules and load tests from them
+ packages = [None, fs]
+ for pkg in iter(packages.pop, None):
+ for (_, subpkgname, subispkg) in pkgutil.walk_packages(pkg.__path__):
+ # import the submodule and add it to the tests
+ module = importlib.import_module(".".join([pkg.__name__, subpkgname]))
+
+ # load some useful modules / classes / mocks to the
+ # globals so that we don't need to explicitly import
+ # them in the doctests
+ globs = dict(**module.__dict__)
+ globs.update(
+ os=os,
+ fs=fs,
+ my_fs=_my_fs(module.__name__),
+ open=_open,
+ # NB (@althonos): This allows using OSFS in some examples,
+ # while not actually opening the real filesystem
+ OSFS=lambda path: MemoryFS(),
+ # NB (@althonos): This is for compatibility in `fs.registry`
+ print_list=lambda path: None,
+ pprint=pprint,
+ time=time,
+ )
+
+ # load the doctests into the unittest test suite
+ tests.addTests(
+ doctest.DocTestSuite(
+ module,
+ globs=globs,
+ setUp=setUp,
+ tearDown=tearDown,
+ optionflags=+doctest.ELLIPSIS,
+ )
+ )
+
+ # if the submodule is a package, we need to process its submodules
+ # as well, so we add it to the package queue
+ if subispkg:
+ packages.append(module)
+
+ return tests
+
+
+# --- Unit test wrapper ------------------------------------------------------
+#
+# NB (@althonos): Since pytest doesn't support the `load_tests` protocol
+# above, we manually build a `unittest.TestCase` using a dedicated test
+# method for each doctest. This should be safe to remove when pytest
+# supports it, or if we move away from pytest to run tests.
+
+
+class TestDoctest(unittest.TestCase):
+ pass
+
+
+def make_wrapper(x):
+ def _test_wrapper(self):
+ x.setUp()
+ try:
+ x.runTest()
+ finally:
+ x.tearDown()
+
+ return _test_wrapper
+
+
+for x in _load_tests(None, unittest.TestSuite(), False):
+ setattr(TestDoctest, "test_{}".format(x.id().replace(".", "_")), make_wrapper(x))