author | holger krekel <holger@merlinux.eu> | 2011-01-18 14:13:31 +0100
committer | holger krekel <holger@merlinux.eu> | 2011-01-18 14:13:31 +0100
commit | cd173a7f26ee3df1e038c131a3270036d7f561d0 (patch)
tree | 9c7db1502305a2419243f9a8e852c2323f2b1088 /py
parent | Fix test_compile_framework_vref on 64-bit. (diff)
download | pypy-cd173a7f26ee3df1e038c131a3270036d7f561d0.tar.gz pypy-cd173a7f26ee3df1e038c131a3270036d7f561d0.tar.bz2 pypy-cd173a7f26ee3df1e038c131a3270036d7f561d0.zip
remove old py copy, add current pytest and py lib snapshots (from pytest-2.0.1dev and py-1.4.1dev)
and some initial tweaks to conftest.py
Diffstat (limited to 'py')
99 files changed, 1128 insertions, 8316 deletions
diff --git a/py/__init__.py b/py/__init__.py index 31f88b2131..cf22e254de 100644 --- a/py/__init__.py +++ b/py/__init__.py @@ -4,68 +4,52 @@ py.test and pylib: rapid testing and development utils this module uses apipkg.py for lazy-loading sub modules and classes. The initpkg-dictionary below specifies name->value mappings where value can be another namespace -dictionary or an import path. +dictionary or an import path. (c) Holger Krekel and others, 2004-2010 """ -__version__ = version = "1.3.1" +__version__ = '1.4.1.dev2' -import py.apipkg +from py import _apipkg -py.apipkg.initpkg(__name__, dict( +# so that py.error.* instances are picklable +import sys +sys.modules['py.error'] = _apipkg.AliasModule("py.error", "py._error", 'error') + +_apipkg.initpkg(__name__, attr={'_apipkg': _apipkg}, exportdefs={ # access to all standard lib modules - std = '._std:std', + 'std': '._std:std', # access to all posix errno's as classes - error = '._error:error', - - _pydir = '.__metainfo:pydir', - version = 'py:__version__', # backward compatibility - - cmdline = { - 'pytest': '._cmdline.pytest:main', - 'pylookup': '._cmdline.pylookup:main', - 'pycountloc': '._cmdline.pycountlog:main', - 'pylookup': '._cmdline.pylookup:main', - 'pycountloc': '._cmdline.pycountloc:main', - 'pycleanup': '._cmdline.pycleanup:main', - 'pywhich' : '._cmdline.pywhich:main', - 'pysvnwcrevert' : '._cmdline.pysvnwcrevert:main', - 'pyconvert_unittest' : '._cmdline.pyconvert_unittest:main', - }, + 'error': '._error:error', - test = { - # helpers for use from test functions or collectors - '__onfirstaccess__' : '._test.config:onpytestaccess', - '__doc__' : '._test:__doc__', - # configuration/initialization related test api - 'config' : '._test.config:config_per_process', - 'ensuretemp' : '._test.config:ensuretemp', - 'collect': { - 'Collector' : '._test.collect:Collector', - 'Directory' : '._test.collect:Directory', - 'File' : '._test.collect:File', - 'Item' : '._test.collect:Item', - 'Module' : '._test.pycollect:Module', - 'Class' : '._test.pycollect:Class', - 'Instance' : '._test.pycollect:Instance', - 'Generator' : '._test.pycollect:Generator', - 'Function' : '._test.pycollect:Function', - '_fillfuncargs' : '._test.funcargs:fillfuncargs', - }, - 'cmdline': { - 'main' : '._test.cmdline:main', # backward compat - }, - }, + '_pydir' : '.__metainfo:pydir', + 'version': 'py:__version__', # backward compatibility + + # pytest-2.0 has a flat namespace, we use alias modules + # to keep old references compatible + 'test' : 'pytest', + 'test.collect' : 'pytest', + 'test.cmdline' : 'pytest', # hook into the top-level standard library - process = { + 'process' : { '__doc__' : '._process:__doc__', 'cmdexec' : '._process.cmdexec:cmdexec', 'kill' : '._process.killproc:kill', 'ForkedFunc' : '._process.forkedfunc:ForkedFunc', }, - path = { + 'apipkg' : { + 'initpkg' : '._apipkg:initpkg', + 'ApiModule' : '._apipkg:ApiModule', + }, + + 'iniconfig' : { + 'IniConfig' : '._iniconfig:IniConfig', + 'ParseError' : '._iniconfig:ParseError', + }, + + 'path' : { '__doc__' : '._path:__doc__', 'svnwc' : '._path.svnwc:SvnWCCommandPath', 'svnurl' : '._path.svnurl:SvnCommandPath', @@ -73,18 +57,8 @@ py.apipkg.initpkg(__name__, dict( 'SvnAuth' : '._path.svnwc:SvnAuth', }, - # some nice slightly magic APIs - magic = { - 'invoke' : '._code.oldmagic:invoke', - 'revoke' : '._code.oldmagic:revoke', - 'patch' : '._code.oldmagic:patch', - 'revert' : '._code.oldmagic:revert', - 'autopath' : '._path.local:autopath', - 'AssertionError' : '._code.oldmagic2:AssertionError', 
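The rewritten py/__init__.py above replaces the nested keyword-argument dictionaries with a flat exportdefs mapping from attribute names to 'modulepath:attrname' strings, handed to _apipkg.initpkg so that sub-modules are only imported on first access. Below is a minimal, simplified sketch of that lazy-export idea; LazyModule and the 'mypkg' registration are illustrative only, not the actual apipkg implementation:

```python
import sys
from types import ModuleType

class LazyModule(ModuleType):
    """Resolve attributes from 'modulepath:attrname' strings on first access."""
    def __init__(self, name, exportdefs):
        ModuleType.__init__(self, name)
        self._exportdefs = dict(exportdefs)

    def __getattr__(self, name):
        try:
            modpath, attrname = self._exportdefs[name].split(':')
        except KeyError:
            raise AttributeError(name)
        mod = __import__(modpath, None, None, ['__doc__'])
        value = getattr(mod, attrname)
        setattr(self, name, value)  # cache so later lookups skip __getattr__
        return value

# hypothetical usage: expose json.dumps lazily as mypkg.dumps
sys.modules['mypkg'] = LazyModule('mypkg', {'dumps': 'json:dumps'})
import mypkg
print(mypkg.dumps({'a': 1}))  # json is imported only at this point
```

The real ApiModule additionally handles nested namespaces, bare module aliases and the '__onfirstaccess__' hook, as the _apipkg.py hunk below shows.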
- }, - # python inspection/code-generation API - code = { + 'code' : { '__doc__' : '._code:__doc__', 'compile' : '._code.source:compile_', 'Source' : '._code.source:Source', @@ -99,18 +73,22 @@ py.apipkg.initpkg(__name__, dict( '_AssertionError' : '._code.assertion:AssertionError', '_reinterpret_old' : '._code.assertion:reinterpret_old', '_reinterpret' : '._code.assertion:reinterpret', + '_reprcompare' : '._code.assertion:_reprcompare', }, # backports and additions of builtins - builtin = { + 'builtin' : { '__doc__' : '._builtin:__doc__', 'enumerate' : '._builtin:enumerate', 'reversed' : '._builtin:reversed', 'sorted' : '._builtin:sorted', + 'any' : '._builtin:any', + 'all' : '._builtin:all', 'set' : '._builtin:set', 'frozenset' : '._builtin:frozenset', 'BaseException' : '._builtin:BaseException', 'GeneratorExit' : '._builtin:GeneratorExit', + '_sysex' : '._builtin:_sysex', 'print_' : '._builtin:print_', '_reraise' : '._builtin:_reraise', '_tryimport' : '._builtin:_tryimport', @@ -128,7 +106,7 @@ py.apipkg.initpkg(__name__, dict( }, # input-output helping - io = { + 'io' : { '__doc__' : '._io:__doc__', 'dupfile' : '._io.capture:dupfile', 'TextIO' : '._io.capture:TextIO', @@ -137,13 +115,13 @@ py.apipkg.initpkg(__name__, dict( 'StdCapture' : '._io.capture:StdCapture', 'StdCaptureFD' : '._io.capture:StdCaptureFD', 'TerminalWriter' : '._io.terminalwriter:TerminalWriter', - 'ansi_print' : '._io.terminalwriter:ansi_print', + 'ansi_print' : '._io.terminalwriter:ansi_print', 'get_terminal_width' : '._io.terminalwriter:get_terminal_width', 'saferepr' : '._io.saferepr:saferepr', }, # small and mean xml/html generation - xml = { + 'xml' : { '__doc__' : '._xmlgen:__doc__', 'html' : '._xmlgen:html', 'Tag' : '._xmlgen:Tag', @@ -152,7 +130,7 @@ py.apipkg.initpkg(__name__, dict( 'escape' : '._xmlgen:escape', }, - log = { + 'log' : { # logging API ('producers' and 'consumers' connected via keywords) '__doc__' : '._log:__doc__', '_apiwarn' : '._log.warning:_apiwarn', @@ -166,12 +144,5 @@ py.apipkg.initpkg(__name__, dict( 'Syslog' : '._log.log:Syslog', }, - # compatibility modules (deprecated) - compat = { - '__doc__' : '._compat:__doc__', - 'doctest' : '._compat.dep_doctest:doctest', - 'optparse' : '._compat.dep_optparse:optparse', - 'textwrap' : '._compat.dep_textwrap:textwrap', - 'subprocess' : '._compat.dep_subprocess:subprocess', - }, -)) +}) + diff --git a/py/apipkg.py b/py/_apipkg.py index 5571b04add..afd1e6d274 100644 --- a/py/apipkg.py +++ b/py/_apipkg.py @@ -5,32 +5,65 @@ see http://pypi.python.org/pypi/apipkg (c) holger krekel, 2009 - MIT license """ +import os import sys from types import ModuleType -__version__ = "1.0b6" +__version__ = '1.2.dev6' -def initpkg(pkgname, exportdefs): +def initpkg(pkgname, exportdefs, attr=dict()): """ initialize given package from the export definitions. 
""" - mod = ApiModule(pkgname, exportdefs, implprefix=pkgname) - oldmod = sys.modules[pkgname] - mod.__file__ = getattr(oldmod, '__file__', None) - mod.__version__ = getattr(oldmod, '__version__', '0') - for name in ('__path__', '__loader__'): - if hasattr(oldmod, name): - setattr(mod, name, getattr(oldmod, name)) + oldmod = sys.modules.get(pkgname) + d = {} + f = getattr(oldmod, '__file__', None) + if f: + f = os.path.abspath(f) + d['__file__'] = f + if hasattr(oldmod, '__version__'): + d['__version__'] = oldmod.__version__ + if hasattr(oldmod, '__loader__'): + d['__loader__'] = oldmod.__loader__ + if hasattr(oldmod, '__path__'): + d['__path__'] = [os.path.abspath(p) for p in oldmod.__path__] + if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None): + d['__doc__'] = oldmod.__doc__ + d.update(attr) + if hasattr(oldmod, "__dict__"): + oldmod.__dict__.update(d) + mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d) sys.modules[pkgname] = mod def importobj(modpath, attrname): module = __import__(modpath, None, None, ['__doc__']) - return getattr(module, attrname) + if not attrname: + return module + + retval = module + names = attrname.split(".") + for x in names: + retval = getattr(retval, x) + return retval class ApiModule(ModuleType): - def __init__(self, name, importspec, implprefix=None): + def __docget(self): + try: + return self.__doc + except AttributeError: + if '__doc__' in self.__map__: + return self.__makeattr('__doc__') + def __docset(self, value): + self.__doc = value + __doc__ = property(__docget, __docset) + + def __init__(self, name, importspec, implprefix=None, attr=None): self.__name__ = name self.__all__ = [x for x in importspec if x != '__onfirstaccess__'] self.__map__ = {} self.__implprefix__ = implprefix or name + if attr: + for name, val in attr.items(): + #print "setting", self.__name__, name, val + setattr(self, name, val) for name, importspec in importspec.items(): if isinstance(importspec, dict): subname = '%s.%s'%(self.__name__, name) @@ -38,11 +71,18 @@ class ApiModule(ModuleType): sys.modules[subname] = apimod setattr(self, name, apimod) else: - modpath, attrname = importspec.split(':') + parts = importspec.split(':') + modpath = parts.pop(0) + attrname = parts and parts[0] or "" if modpath[0] == '.': modpath = implprefix + modpath - if name == '__doc__': - self.__doc__ = importobj(modpath, attrname) + + if not attrname: + subname = '%s.%s'%(self.__name__, name) + apimod = AliasModule(subname, modpath) + sys.modules[subname] = apimod + if '.' not in name: + setattr(self, name, apimod) else: self.__map__[name] = (modpath, attrname) @@ -58,6 +98,7 @@ class ApiModule(ModuleType): def __makeattr(self, name): """lazily compute value for name or raise AttributeError if unknown.""" + #print "makeattr", self.__name__, name target = None if '__onfirstaccess__' in self.__map__: target = self.__map__.pop('__onfirstaccess__') @@ -93,3 +134,34 @@ class ApiModule(ModuleType): pass return dict __dict__ = property(__dict__) + + +def AliasModule(modname, modpath, attrname=None): + mod = [] + + def getmod(): + if not mod: + x = importobj(modpath, None) + if attrname is not None: + x = getattr(x, attrname) + mod.append(x) + return mod[0] + + class AliasModule(ModuleType): + + def __repr__(self): + x = modpath + if attrname: + x += "." 
+ attrname + return '<AliasModule %r for %r>' % (modname, x) + + def __getattribute__(self, name): + return getattr(getmod(), name) + + def __setattr__(self, name, value): + setattr(getmod(), name, value) + + def __delattr__(self, name): + delattr(getmod(), name) + + return AliasModule(modname) diff --git a/py/_builtin.py b/py/_builtin.py index b9395a8de3..e796862119 100644 --- a/py/_builtin.py +++ b/py/_builtin.py @@ -36,6 +36,24 @@ except NameError: return self.remaining try: + any = any +except NameError: + def any(iterable): + for x in iterable: + if x: + return True + return False + +try: + all = all +except NameError: + def all(iterable): + for x in iterable: + if not x: + return False + return True + +try: sorted = sorted except NameError: builtin_cmp = cmp # need to use cmp as keyword arg @@ -67,10 +85,10 @@ except NameError: try: set, frozenset = set, frozenset except NameError: - from sets import set, frozenset + from sets import set, frozenset # pass through -enumerate = enumerate +enumerate = enumerate try: BaseException = BaseException @@ -87,12 +105,14 @@ except NameError: pass GeneratorExit.__module__ = 'exceptions' +_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit) + if sys.version_info >= (3, 0): exec ("print_ = print ; exec_=exec") import builtins - # some backward compatibility helpers - _basestring = str + # some backward compatibility helpers + _basestring = str def _totext(obj, encoding=None): if isinstance(obj, bytes): obj = obj.decode(encoding) @@ -100,9 +120,9 @@ if sys.version_info >= (3, 0): obj = str(obj) return obj - def _isbytes(x): + def _isbytes(x): return isinstance(x, bytes) - def _istext(x): + def _istext(x): return isinstance(x, str) def _getimself(function): @@ -135,13 +155,13 @@ if sys.version_info >= (3, 0): else: import __builtin__ as builtins - _totext = unicode + _totext = unicode _basestring = basestring execfile = execfile callable = callable - def _isbytes(x): + def _isbytes(x): return isinstance(x, str) - def _istext(x): + def _istext(x): return isinstance(x, unicode) def _getimself(function): @@ -157,7 +177,7 @@ else: return getattr(function, "func_code", None) def print_(*args, **kwargs): - """ minimal backport of py3k print statement. """ + """ minimal backport of py3k print statement. """ sep = ' ' if 'sep' in kwargs: sep = kwargs.pop('sep') @@ -177,24 +197,22 @@ else: file.write(end) def exec_(obj, globals=None, locals=None): - """ minimal backport of py3k exec statement. """ + """ minimal backport of py3k exec statement. """ __tracebackhide__ = True - if globals is None: + if globals is None: frame = sys._getframe(1) - globals = frame.f_globals + globals = frame.f_globals if locals is None: locals = frame.f_locals elif locals is None: locals = globals - exec2(obj, globals, locals) + exec2(obj, globals, locals) if sys.version_info >= (3,0): - exec (""" -def _reraise(cls, val, tb): - __tracebackhide__ = True - assert hasattr(val, '__traceback__') - raise val -""") + def _reraise(cls, val, tb): + __tracebackhide__ = True + assert hasattr(val, '__traceback__') + raise val else: exec (""" def _reraise(cls, val, tb): @@ -202,11 +220,11 @@ def _reraise(cls, val, tb): raise cls, val, tb def exec2(obj, globals, locals): __tracebackhide__ = True - exec obj in globals, locals + exec obj in globals, locals """) def _tryimport(*names): - """ return the first successfully imported module. """ + """ return the first successfully imported module. 
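The AliasModule helper completed in the hunk above is what lets 'test', 'test.collect' and 'test.cmdline' in py/__init__.py simply point at the new top-level pytest module, and it is also registered as 'py.error' so that py.error.* exception instances stay picklable. It is a module object that forwards every attribute access to a target module imported only on first use. A simplified sketch of the same idea follows; make_alias and 'mymath' are illustrative names, not library API:

```python
import sys
from types import ModuleType

def make_alias(modname, modpath):
    holder = []  # late-bound target module, filled on first attribute access

    def target():
        if not holder:
            holder.append(__import__(modpath, None, None, ['__doc__']))
        return holder[0]

    class _Alias(ModuleType):
        def __getattribute__(self, name):
            return getattr(target(), name)

    return _Alias(modname)

# hypothetical usage: make "mymath" an importable alias for the math module
sys.modules['mymath'] = make_alias('mymath', 'math')
import mymath
print(mymath.pi)  # math itself is only imported when an attribute is read
```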
""" assert names for name in names: try: diff --git a/py/_cmdline/__init__.py b/py/_cmdline/__init__.py deleted file mode 100644 index 792d600548..0000000000 --- a/py/_cmdline/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# diff --git a/py/_cmdline/pycleanup.py b/py/_cmdline/pycleanup.py deleted file mode 100755 index 7d35c5c2bd..0000000000 --- a/py/_cmdline/pycleanup.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env python - -"""\ -py.cleanup [PATH] ... - -Delete typical python development related files recursively under the specified PATH (which defaults to the current working directory). Don't follow links and don't recurse into directories with a dot. Optionally remove setup.py related files and empty -directories. - -""" -import py -import sys, subprocess - -def main(): - parser = py.std.optparse.OptionParser(usage=__doc__) - parser.add_option("-e", metavar="ENDING", - dest="endings", default=[".pyc", "$py.class"], action="append", - help=("(multi) recursively remove files with the given ending." - " '.pyc' and '$py.class' are in the default list.")) - parser.add_option("-d", action="store_true", dest="removedir", - help="remove empty directories.") - parser.add_option("-s", action="store_true", dest="setup", - help="remove 'build' and 'dist' directories next to setup.py files") - parser.add_option("-a", action="store_true", dest="all", - help="synonym for '-S -d -e pip-log.txt'") - parser.add_option("-n", "--dryrun", dest="dryrun", default=False, - action="store_true", - help="don't actually delete but display would-be-removed filenames.") - (options, args) = parser.parse_args() - - Cleanup(options, args).main() - -class Cleanup: - def __init__(self, options, args): - if not args: - args = ["."] - self.options = options - self.args = [py.path.local(x) for x in args] - if options.all: - options.setup = True - options.removedir = True - options.endings.append("pip-log.txt") - - def main(self): - if self.options.setup: - for arg in self.args: - self.setupclean(arg) - - for path in self.args: - py.builtin.print_("cleaning path", path, - "of extensions", self.options.endings) - for x in path.visit(self.shouldremove, self.recursedir): - self.remove(x) - if self.options.removedir: - for x in path.visit(lambda x: x.check(dir=1), self.recursedir): - if not x.listdir(): - self.remove(x) - - def shouldremove(self, p): - for ending in self.options.endings: - if p.basename.endswith(ending): - return True - - def recursedir(self, path): - return path.check(dotfile=0, link=0) - - def remove(self, path): - if not path.check(): - return - if self.options.dryrun: - py.builtin.print_("would remove", path) - else: - py.builtin.print_("removing", path) - path.remove() - - def XXXcallsetup(self, setup, *args): - old = setup.dirpath().chdir() - try: - subprocess.call([sys.executable, str(setup)] + list(args)) - finally: - old.chdir() - - def setupclean(self, path): - for x in path.visit("setup.py", self.recursedir): - basepath = x.dirpath() - self.remove(basepath / "build") - self.remove(basepath / "dist") diff --git a/py/_cmdline/pyconvert_unittest.py b/py/_cmdline/pyconvert_unittest.py deleted file mode 100644 index a502336380..0000000000 --- a/py/_cmdline/pyconvert_unittest.py +++ /dev/null @@ -1,253 +0,0 @@ -import re -import sys - -try: - import parser -except ImportError: - parser = None - -d={} -# d is the dictionary of unittest changes, keyed to the old name -# used by unittest. -# d[old][0] is the new replacement function. -# d[old][1] is the operator you will substitute, or '' if there is none. 
-# d[old][2] is the possible number of arguments to the unittest -# function. - -# Old Unittest Name new name operator # of args -d['assertRaises'] = ('raises', '', ['Any']) -d['fail'] = ('raise AssertionError', '', [0,1]) -d['assert_'] = ('assert', '', [1,2]) -d['failIf'] = ('assert not', '', [1,2]) -d['assertEqual'] = ('assert', ' ==', [2,3]) -d['failIfEqual'] = ('assert not', ' ==', [2,3]) -d['assertIn'] = ('assert', ' in', [2,3]) -d['assertNotIn'] = ('assert', ' not in', [2,3]) -d['assertNotEqual'] = ('assert', ' !=', [2,3]) -d['failUnlessEqual'] = ('assert', ' ==', [2,3]) -d['assertAlmostEqual'] = ('assert round', ' ==', [2,3,4]) -d['failIfAlmostEqual'] = ('assert not round', ' ==', [2,3,4]) -d['assertNotAlmostEqual'] = ('assert round', ' !=', [2,3,4]) -d['failUnlessAlmostEquals'] = ('assert round', ' ==', [2,3,4]) - -# the list of synonyms -d['failUnlessRaises'] = d['assertRaises'] -d['failUnless'] = d['assert_'] -d['assertEquals'] = d['assertEqual'] -d['assertNotEquals'] = d['assertNotEqual'] -d['assertAlmostEquals'] = d['assertAlmostEqual'] -d['assertNotAlmostEquals'] = d['assertNotAlmostEqual'] - -# set up the regular expressions we will need -leading_spaces = re.compile(r'^(\s*)') # this never fails - -pat = '' -for k in d.keys(): # this complicated pattern to match all unittests - pat += '|' + r'^(\s*)' + 'self.' + k + r'\(' # \tself.whatever( - -old_names = re.compile(pat[1:]) -linesep='\n' # nobody will really try to convert files not read - # in text mode, will they? - - -def blocksplitter(fp): - '''split a file into blocks that are headed by functions to rename''' - - blocklist = [] - blockstring = '' - - for line in fp: - interesting = old_names.match(line) - if interesting : - if blockstring: - blocklist.append(blockstring) - blockstring = line # reset the block - else: - blockstring += line - - blocklist.append(blockstring) - return blocklist - -def rewrite_utest(block): - '''rewrite every block to use the new utest functions''' - - '''returns the rewritten unittest, unless it ran into problems, - in which case it just returns the block unchanged. - ''' - utest = old_names.match(block) - - if not utest: - return block - - old = utest.group(0).lstrip()[5:-1] # the name we want to replace - new = d[old][0] # the name of the replacement function - op = d[old][1] # the operator you will use , or '' if there is none. - possible_args = d[old][2] # a list of the number of arguments the - # unittest function could possibly take. - - if possible_args == ['Any']: # just rename assertRaises & friends - return re.sub('self.'+old, new, block) - - message_pos = possible_args[-1] - # the remaining unittests can have an optional message to print - # when they fail. It is always the last argument to the function. - - try: - indent, argl, trailer = decompose_unittest(old, block) - - except SyntaxError: # but we couldn't parse it! 
- return block - - argnum = len(argl) - if argnum not in possible_args: - # sanity check - this one isn't real either - return block - - elif argnum == message_pos: - message = argl[-1] - argl = argl[:-1] - else: - message = None - - if argnum is 0 or (argnum is 1 and argnum is message_pos): #unittest fail() - string = '' - if message: - message = ' ' + message - - elif message_pos is 4: # assertAlmostEqual & friends - try: - pos = argl[2].lstrip() - except IndexError: - pos = '7' # default if none is specified - string = '(%s -%s, %s)%s 0' % (argl[0], argl[1], pos, op ) - - else: # assert_, assertEquals and all the rest - string = ' ' + op.join(argl) - - if message: - string = string + ',' + message - - return indent + new + string + trailer - -def decompose_unittest(old, block): - '''decompose the block into its component parts''' - - ''' returns indent, arglist, trailer - indent -- the indentation - arglist -- the arguments to the unittest function - trailer -- any extra junk after the closing paren, such as #commment - ''' - - indent = re.match(r'(\s*)', block).group() - pat = re.search('self.' + old + r'\(', block) - - args, trailer = get_expr(block[pat.end():], ')') - arglist = break_args(args, []) - - if arglist == ['']: # there weren't any - return indent, [], trailer - - for i in range(len(arglist)): - try: - parser.expr(arglist[i].lstrip('\t ')) - except SyntaxError: - if i == 0: - arglist[i] = '(' + arglist[i] + ')' - else: - arglist[i] = ' (' + arglist[i] + ')' - - return indent, arglist, trailer - -def break_args(args, arglist): - '''recursively break a string into a list of arguments''' - try: - first, rest = get_expr(args, ',') - if not rest: - return arglist + [first] - else: - return [first] + break_args(rest, arglist) - except SyntaxError: - return arglist + [args] - -def get_expr(s, char): - '''split a string into an expression, and the rest of the string''' - - pos=[] - for i in range(len(s)): - if s[i] == char: - pos.append(i) - if pos == []: - raise SyntaxError # we didn't find the expected char. Ick. - - for p in pos: - # make the python parser do the hard work of deciding which comma - # splits the string into two expressions - try: - parser.expr('(' + s[:p] + ')') - return s[:p], s[p+1:] - except SyntaxError: # It's not an expression yet - pass - raise SyntaxError # We never found anything that worked. - - -def main(): - import sys - import py - - usage = "usage: %prog [-s [filename ...] | [-i | -c filename ...]]" - optparser = py.std.optparse.OptionParser(usage) - - def select_output (option, opt, value, optparser, **kw): - if hasattr(optparser, 'output'): - optparser.error( - 'Cannot combine -s -i and -c options. Use one only.') - else: - optparser.output = kw['output'] - - optparser.add_option("-s", "--stdout", action="callback", - callback=select_output, - callback_kwargs={'output':'stdout'}, - help="send your output to stdout") - - optparser.add_option("-i", "--inplace", action="callback", - callback=select_output, - callback_kwargs={'output':'inplace'}, - help="overwrite files in place") - - optparser.add_option("-c", "--copy", action="callback", - callback=select_output, - callback_kwargs={'output':'copy'}, - help="copy files ... 
fn.py --> fn_cp.py") - - options, args = optparser.parse_args() - - output = getattr(optparser, 'output', 'stdout') - - if output in ['inplace', 'copy'] and not args: - optparser.error( - '-i and -c option require at least one filename') - - if not args: - s = '' - for block in blocksplitter(sys.stdin): - s += rewrite_utest(block) - sys.stdout.write(s) - - else: - for infilename in args: # no error checking to see if we can open, etc. - infile = file(infilename) - s = '' - for block in blocksplitter(infile): - s += rewrite_utest(block) - if output == 'inplace': - outfile = file(infilename, 'w+') - elif output == 'copy': # yes, just go clobber any existing .cp - outfile = file (infilename[:-3]+ '_cp.py', 'w+') - else: - outfile = sys.stdout - - outfile.write(s) - - -if __name__ == '__main__': - main() diff --git a/py/_cmdline/pycountloc.py b/py/_cmdline/pycountloc.py deleted file mode 100755 index 28c0c172f6..0000000000 --- a/py/_cmdline/pycountloc.py +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env python - -# hands on script to compute the non-empty Lines of Code -# for tests and non-test code - -"""\ -py.countloc [PATHS] - -Count (non-empty) lines of python code and number of python files recursively -starting from a list of paths given on the command line (starting from the -current working directory). Distinguish between test files and normal ones and -report them separately. -""" -import py - -def main(): - parser = py.std.optparse.OptionParser(usage=__doc__) - (options, args) = parser.parse_args() - countloc(args) - -def nodot(p): - return p.check(dotfile=0) - -class FileCounter(object): - def __init__(self): - self.file2numlines = {} - self.numlines = 0 - self.numfiles = 0 - - def addrecursive(self, directory, fil="*.py", rec=nodot): - for x in directory.visit(fil, rec): - self.addfile(x) - - def addfile(self, fn, emptylines=False): - if emptylines: - s = len(p.readlines()) - else: - s = 0 - for i in fn.readlines(): - if i.strip(): - s += 1 - self.file2numlines[fn] = s - self.numfiles += 1 - self.numlines += s - - def getnumlines(self, fil): - numlines = 0 - for path, value in self.file2numlines.items(): - if fil(path): - numlines += value - return numlines - - def getnumfiles(self, fil): - numfiles = 0 - for path in self.file2numlines: - if fil(path): - numfiles += 1 - return numfiles - -def get_loccount(locations=None): - if locations is None: - localtions = [py.path.local()] - counter = FileCounter() - for loc in locations: - counter.addrecursive(loc, '*.py', rec=nodot) - - def istestfile(p): - return p.check(fnmatch='test_*.py') - isnottestfile = lambda x: not istestfile(x) - - numfiles = counter.getnumfiles(isnottestfile) - numlines = counter.getnumlines(isnottestfile) - numtestfiles = counter.getnumfiles(istestfile) - numtestlines = counter.getnumlines(istestfile) - - return counter, numfiles, numlines, numtestfiles, numtestlines - -def countloc(paths=None): - if not paths: - paths = ['.'] - locations = [py.path.local(x) for x in paths] - (counter, numfiles, numlines, numtestfiles, - numtestlines) = get_loccount(locations) - - items = counter.file2numlines.items() - items.sort(lambda x,y: cmp(x[1], y[1])) - for x, y in items: - print("%3d %30s" % (y,x)) - - print("%30s %3d" %("number of testfiles", numtestfiles)) - print("%30s %3d" %("number of non-empty testlines", numtestlines)) - print("%30s %3d" %("number of files", numfiles)) - print("%30s %3d" %("number of non-empty lines", numlines)) - diff --git a/py/_cmdline/pylookup.py b/py/_cmdline/pylookup.py deleted file mode 100755 
index 369ce90cda..0000000000 --- a/py/_cmdline/pylookup.py +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python - -"""\ -py.lookup [search_directory] SEARCH_STRING [options] - -Looks recursively at Python files for a SEARCH_STRING, starting from the -present working directory. Prints the line, with the filename and line-number -prepended.""" - -import sys, os -import py -from py.io import ansi_print, get_terminal_width -import re - -def rec(p): - return p.check(dotfile=0) - -parser = py.std.optparse.OptionParser(usage=__doc__) -parser.add_option("-i", "--ignore-case", action="store_true", dest="ignorecase", - help="ignore case distinctions") -parser.add_option("-C", "--context", action="store", type="int", dest="context", - default=0, help="How many lines of output to show") - -terminal_width = get_terminal_width() - -def find_indexes(search_line, string): - indexes = [] - before = 0 - while 1: - i = search_line.find(string, before) - if i == -1: - break - indexes.append(i) - before = i + len(string) - return indexes - -def main(): - (options, args) = parser.parse_args() - if len(args) == 2: - search_dir, string = args - search_dir = py.path.local(search_dir) - else: - search_dir = py.path.local() - string = args[0] - if options.ignorecase: - string = string.lower() - for x in search_dir.visit('*.py', rec): - # match filename directly - s = x.relto(search_dir) - if options.ignorecase: - s = s.lower() - if s.find(string) != -1: - sys.stdout.write("%s: filename matches %r" %(x, string) + "\n") - - try: - s = x.read() - except py.error.ENOENT: - pass # whatever, probably broken link (ie emacs lock) - searchs = s - if options.ignorecase: - searchs = s.lower() - if s.find(string) != -1: - lines = s.splitlines() - if options.ignorecase: - searchlines = s.lower().splitlines() - else: - searchlines = lines - for i, (line, searchline) in enumerate(zip(lines, searchlines)): - indexes = find_indexes(searchline, string) - if not indexes: - continue - if not options.context: - sys.stdout.write("%s:%d: " %(x.relto(search_dir), i+1)) - last_index = 0 - for index in indexes: - sys.stdout.write(line[last_index: index]) - ansi_print(line[index: index+len(string)], - file=sys.stdout, esc=31, newline=False) - last_index = index + len(string) - sys.stdout.write(line[last_index:] + "\n") - else: - context = (options.context)/2 - for count in range(max(0, i-context), min(len(lines) - 1, i+context+1)): - print("%s:%d: %s" %(x.relto(search_dir), count+1, lines[count].rstrip())) - print("-" * terminal_width) diff --git a/py/_cmdline/pysvnwcrevert.py b/py/_cmdline/pysvnwcrevert.py deleted file mode 100755 index 3e1bb47746..0000000000 --- a/py/_cmdline/pysvnwcrevert.py +++ /dev/null @@ -1,55 +0,0 @@ -#! /usr/bin/env python -"""\ -py.svnwcrevert [options] WCPATH - -Running this script and then 'svn up' puts the working copy WCPATH in a state -as clean as a fresh check-out. - -WARNING: you'll loose all local changes, obviously! - -This script deletes all files that have been modified -or that svn doesn't explicitly know about, including svn:ignored files -(like .pyc files, hint hint). - -The goal of this script is to leave the working copy with some files and -directories possibly missing, but - most importantly - in a state where -the following 'svn up' won't just crash. 
-""" - -import sys, py - -def kill(p, root): - print('< %s' % (p.relto(root),)) - p.remove(rec=1) - -def svnwcrevert(path, root=None, precious=[]): - if root is None: - root = path - wcpath = py.path.svnwc(path) - try: - st = wcpath.status() - except ValueError: # typically, "bad char in wcpath" - kill(path, root) - return - for p in path.listdir(): - if p.basename == '.svn' or p.basename in precious: - continue - wcp = py.path.svnwc(p) - if wcp not in st.unchanged and wcp not in st.external: - kill(p, root) - elif p.check(dir=1): - svnwcrevert(p, root) - -# XXX add a functional test - -parser = py.std.optparse.OptionParser(usage=__doc__) -parser.add_option("-p", "--precious", - action="append", dest="precious", default=[], - help="preserve files with this name") - -def main(): - opts, args = parser.parse_args() - if len(args) != 1: - parser.print_help() - sys.exit(2) - svnwcrevert(py.path.local(args[0]), precious=opts.precious) diff --git a/py/_cmdline/pytest.py b/py/_cmdline/pytest.py deleted file mode 100755 index 14e2bc81c9..0000000000 --- a/py/_cmdline/pytest.py +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env python -import py - -def main(args=None): - raise SystemExit(py.test.cmdline.main(args)) diff --git a/py/_cmdline/pywhich.py b/py/_cmdline/pywhich.py deleted file mode 100755 index b1b940437b..0000000000 --- a/py/_cmdline/pywhich.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python - -"""\ -py.which [name] - -print the location of the given python module or package name -""" - -import sys - -def main(): - name = sys.argv[1] - try: - mod = __import__(name) - except ImportError: - sys.stderr.write("could not import: " + name + "\n") - else: - try: - location = mod.__file__ - except AttributeError: - sys.stderr.write("module (has no __file__): " + str(mod)) - else: - print(location) diff --git a/py/_code/_assertionnew.py b/py/_code/_assertionnew.py index 4b5b181b69..77c06c8a0e 100644 --- a/py/_code/_assertionnew.py +++ b/py/_code/_assertionnew.py @@ -1,6 +1,6 @@ """ -Like _assertion.py but using builtin AST. It should replace _assertionold.py -eventually. +Find intermediate evalutation results in assert statements through builtin AST. +This should replace _assertionold.py eventually. """ import sys @@ -108,7 +108,7 @@ unary_map = { class DebugInterpreter(ast.NodeVisitor): - """Interpret AST nodes to gleam useful debugging information.""" + """Interpret AST nodes to gleam useful debugging information. 
""" def __init__(self, frame): self.frame = frame @@ -162,10 +162,7 @@ class DebugInterpreter(ast.NodeVisitor): def visit_Compare(self, comp): left = comp.left left_explanation, left_result = self.visit(left) - got_result = False for op, next_op in zip(comp.ops, comp.comparators): - if got_result and not result: - break next_explanation, next_result = self.visit(next_op) op_symbol = operator_map[op.__class__] explanation = "%s %s %s" % (left_explanation, op_symbol, @@ -177,9 +174,20 @@ class DebugInterpreter(ast.NodeVisitor): __exprinfo_right=next_result) except Exception: raise Failure(explanation) - else: - got_result = True + try: + if not result: + break + except KeyboardInterrupt: + raise + except: + break left_explanation, left_result = next_explanation, next_result + + rcomp = py.code._reprcompare + if rcomp: + res = rcomp(op_symbol, left_result, next_result) + if res: + explanation = res return explanation, result def visit_BoolOp(self, boolop): @@ -295,6 +303,9 @@ class DebugInterpreter(ast.NodeVisitor): result = self.frame.eval(co, __exprinfo_expr=source_result) except Exception: raise Failure(explanation) + explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result), + self.frame.repr(result), + source_explanation, attr.attr) # Check if the attr is from an instance. source = "%r in getattr(__exprinfo_expr, '__dict__', {})" source = source % (attr.attr,) @@ -325,10 +336,11 @@ class DebugInterpreter(ast.NodeVisitor): def visit_Assign(self, assign): value_explanation, value_result = self.visit(assign.value) explanation = "... = %s" % (value_explanation,) - name = ast.Name("__exprinfo_expr", ast.Load(), assign.value.lineno, - assign.value.col_offset) - new_assign = ast.Assign(assign.targets, name, assign.lineno, - assign.col_offset) + name = ast.Name("__exprinfo_expr", ast.Load(), + lineno=assign.value.lineno, + col_offset=assign.value.col_offset) + new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno, + col_offset=assign.col_offset) mod = ast.Module([new_assign]) co = self._compile(mod, "exec") try: diff --git a/py/_code/_assertionold.py b/py/_code/_assertionold.py index a472b54b4f..4e81fb3ef6 100644 --- a/py/_code/_assertionold.py +++ b/py/_code/_assertionold.py @@ -3,7 +3,7 @@ import sys, inspect from compiler import parse, ast, pycodegen from py._code.assertion import BuiltinAssertionError, _format_explanation -passthroughex = (KeyboardInterrupt, SystemExit, MemoryError) +passthroughex = py.builtin._sysex class Failure: def __init__(self, node): @@ -496,7 +496,7 @@ def getmsg(excinfo): #frame = py.code.Frame(frame) #return interpret(line, frame) - tb = excinfo.traceback[-1] + tb = excinfo.traceback[-1] source = str(tb.statement).strip() x = interpret(source, tb.frame, should_fail=True) if not isinstance(x, str): diff --git a/py/_code/assertion.py b/py/_code/assertion.py index a2396c5425..4ce80c75b1 100644 --- a/py/_code/assertion.py +++ b/py/_code/assertion.py @@ -3,14 +3,23 @@ import py BuiltinAssertionError = py.builtin.builtins.AssertionError +_reprcompare = None # if set, will be called by assert reinterp for comparison ops def _format_explanation(explanation): - # uck! See CallFunc for where \n{ and \n} escape sequences are used + """This formats an explanation + + Normally all embedded newlines are escaped, however there are + three exceptions: \n{, \n} and \n~. The first two are intended + cover nested explanations, see function and attribute explanations + for examples (.visit_Call(), visit_Attribute()). 
The last one is + for when one explanation needs to span multiple lines, e.g. when + displaying diffs. + """ raw_lines = (explanation or '').split('\n') - # escape newlines not followed by { and } + # escape newlines not followed by {, } and ~ lines = [raw_lines[0]] for l in raw_lines[1:]: - if l.startswith('{') or l.startswith('}'): + if l.startswith('{') or l.startswith('}') or l.startswith('~'): lines.append(l) else: lines[-1] += '\\n' + l @@ -28,23 +37,25 @@ def _format_explanation(explanation): stackcnt[-1] += 1 stackcnt.append(0) result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) - else: + elif line.startswith('}'): assert line.startswith('}') stack.pop() stackcnt.pop() result[stack[-1]] += line[1:] + else: + assert line.startswith('~') + result.append(' '*len(stack) + line[1:]) assert len(stack) == 1 return '\n'.join(result) class AssertionError(BuiltinAssertionError): - def __init__(self, *args): BuiltinAssertionError.__init__(self, *args) if args: try: self.msg = str(args[0]) - except (KeyboardInterrupt, SystemExit): + except py.builtin._sysex: raise except: self.msg = "<[broken __repr__] %s at %0xd>" %( @@ -52,18 +63,24 @@ class AssertionError(BuiltinAssertionError): else: f = py.code.Frame(sys._getframe(1)) try: - source = f.statement - source = str(source.deindent()).strip() + source = f.code.fullsource + if source is not None: + try: + source = source.getstatement(f.lineno, assertion=True) + except IndexError: + source = None + else: + source = str(source.deindent()).strip() except py.error.ENOENT: source = None # this can also occur during reinterpretation, when the # co_filename is set to "<run>". if source: self.msg = reinterpret(source, f, should_fail=True) - if not self.args: - self.args = (self.msg,) else: - self.msg = None + self.msg = "<could not determine information>" + if not self.args: + self.args = (self.msg,) if sys.version_info > (3, 0): AssertionError.__module__ = "builtins" @@ -74,4 +91,4 @@ if sys.version_info >= (2, 6) or (sys.platform.startswith("java")): from py._code._assertionnew import interpret as reinterpret else: reinterpret = reinterpret_old - + diff --git a/py/_code/code.py b/py/_code/code.py index dc765bb36d..9fbf207969 100644 --- a/py/_code/code.py +++ b/py/_code/code.py @@ -9,15 +9,15 @@ class Code(object): """ wrapper around Python code objects """ def __init__(self, rawcode): rawcode = py.code.getrawcode(rawcode) - self.raw = rawcode + self.raw = rawcode try: self.filename = rawcode.co_filename self.firstlineno = rawcode.co_firstlineno - 1 self.name = rawcode.co_name - except AttributeError: + except AttributeError: raise TypeError("not a code object: %r" %(rawcode,)) - - def __eq__(self, other): + + def __eq__(self, other): return self.raw == other.raw def __ne__(self, other): @@ -27,11 +27,11 @@ class Code(object): """ return a path object pointing to source code""" p = py.path.local(self.raw.co_filename) if not p.check(): - # XXX maybe try harder like the weird logic - # in the standard lib [linecache.updatecache] does? + # XXX maybe try harder like the weird logic + # in the standard lib [linecache.updatecache] does? 
p = self.raw.co_filename return p - + path = property(path, None, None, "path of this code object") def fullsource(self): @@ -42,7 +42,7 @@ class Code(object): return full fullsource = property(fullsource, None, None, "full source containing this code object") - + def source(self): """ return a py.code.Source object for the code object's source only """ @@ -81,7 +81,7 @@ class Frame(object): returns the result of the evaluation """ - f_locals = self.f_locals.copy() + f_locals = self.f_locals.copy() f_locals.update(vars) return eval(code, self.f_globals, f_locals) @@ -90,7 +90,7 @@ class Frame(object): 'vars' are optiona; additional local variables """ - f_locals = self.f_locals.copy() + f_locals = self.f_locals.copy() f_locals.update(vars) py.builtin.exec_(code, self.f_globals, f_locals ) @@ -115,8 +115,8 @@ class Frame(object): class TracebackEntry(object): """ a single entry in a traceback """ - - exprinfo = None + + exprinfo = None def __init__(self, rawentry): self._rawentry = rawentry @@ -153,13 +153,14 @@ class TracebackEntry(object): x = py.code._reinterpret(source, self.frame, should_fail=True) if not isinstance(x, str): raise TypeError("interpret returned non-string %r" % (x,)) - self.exprinfo = x + self.exprinfo = x return self.exprinfo def getfirstlinesource(self): - return self.frame.code.firstlineno + # on Jython this firstlineno can be -1 apparently + return max(self.frame.code.firstlineno, 0) - def getsource(self): + def getsource(self): """ return failing source code. """ source = self.frame.code.fullsource if source is None: @@ -167,64 +168,64 @@ class TracebackEntry(object): start = self.getfirstlinesource() end = self.lineno try: - _, end = source.getstatementrange(end) - except IndexError: - end = self.lineno + 1 - # heuristic to stop displaying source on e.g. + _, end = source.getstatementrange(end) + except IndexError: + end = self.lineno + 1 + # heuristic to stop displaying source on e.g. # if something: # assume this causes a NameError - # # _this_ lines and the one - # below we don't want from entry.getsource() - for i in range(self.lineno, end): - if source[i].rstrip().endswith(':'): + # # _this_ lines and the one + # below we don't want from entry.getsource() + for i in range(self.lineno, end): + if source[i].rstrip().endswith(':'): end = i + 1 - break + break return source[start:end] source = property(getsource) def ishidden(self): - """ return True if the current frame has a var __tracebackhide__ + """ return True if the current frame has a var __tracebackhide__ resolving to True - + mostly for internal use """ - try: - return self.frame.eval("__tracebackhide__") - except (SystemExit, KeyboardInterrupt): + try: + return self.frame.eval("__tracebackhide__") + except py.builtin._sysex: raise except: - return False + return False - def __str__(self): - try: - fn = str(self.path) - except py.error.Error: + def __str__(self): + try: + fn = str(self.path) + except py.error.Error: fn = '???' - name = self.frame.code.name - try: + name = self.frame.code.name + try: line = str(self.statement).lstrip() except KeyboardInterrupt: raise except: line = "???" - return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line) + return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line) def name(self): return self.frame.code.raw.co_name name = property(name, None, None, "co_name of underlaying code") class Traceback(list): - """ Traceback objects encapsulate and offer higher level - access to Traceback entries. 
+ """ Traceback objects encapsulate and offer higher level + access to Traceback entries. """ - Entry = TracebackEntry + Entry = TracebackEntry def __init__(self, tb): """ initialize from given python traceback object. """ if hasattr(tb, 'tb_next'): - def f(cur): - while cur is not None: + def f(cur): + while cur is not None: yield self.Entry(cur) - cur = cur.tb_next - list.__init__(self, f(tb)) + cur = cur.tb_next + list.__init__(self, f(tb)) else: list.__init__(self, tb) @@ -243,7 +244,7 @@ class Traceback(list): codepath = code.path if ((path is None or codepath == path) and (excludepath is None or not hasattr(codepath, 'relto') or - not codepath.relto(excludepath)) and + not codepath.relto(excludepath)) and (lineno is None or x.lineno == lineno) and (firstlineno is None or x.frame.code.firstlineno == firstlineno)): return Traceback(x._rawentry) @@ -269,7 +270,7 @@ class Traceback(list): def getcrashentry(self): """ return last non-hidden traceback entry that lead - to the exception of a traceback. + to the exception of a traceback. """ tb = self.filter() if not tb: @@ -282,17 +283,17 @@ class Traceback(list): """ cache = {} for i, entry in enumerate(self): - key = entry.frame.code.path, entry.lineno + key = entry.frame.code.path, entry.lineno #print "checking for recursion at", key l = cache.setdefault(key, []) - if l: + if l: f = entry.frame loc = f.f_locals - for otherloc in l: - if f.is_true(f.eval(co_equal, + for otherloc in l: + if f.is_true(f.eval(co_equal, __recursioncache_locals_1=loc, __recursioncache_locals_2=otherloc)): - return i + return i l.append(entry.frame.f_locals) return None @@ -303,7 +304,7 @@ class ExceptionInfo(object): """ wraps sys.exc_info() objects and offers help for navigating the traceback. """ - _striptext = '' + _striptext = '' def __init__(self, tup=None, exprinfo=None): # NB. all attributes are private! Subclasses or other # ExceptionInfo-like classes may have different attributes. @@ -318,14 +319,14 @@ class ExceptionInfo(object): self._excinfo = tup self.type, self.value, tb = self._excinfo self.typename = self.type.__name__ - self.traceback = py.code.Traceback(tb) + self.traceback = py.code.Traceback(tb) def __repr__(self): return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback)) - def exconly(self, tryshort=False): + def exconly(self, tryshort=False): """ return the exception as a string - + when 'tryshort' resolves to True, and the exception is a py.code._AssertionError, only the actual exception part of the exception representation is returned (so 'AssertionError: ' is @@ -334,14 +335,14 @@ class ExceptionInfo(object): lines = py.std.traceback.format_exception_only(self.type, self.value) text = ''.join(lines) text = text.rstrip() - if tryshort: - if text.startswith(self._striptext): + if tryshort: + if text.startswith(self._striptext): text = text[len(self._striptext):] return text - def errisinstance(self, exc): + def errisinstance(self, exc): """ return True if the exception is an instance of exc """ - return isinstance(self.value, exc) + return isinstance(self.value, exc) def _getreprcrash(self): exconly = self.exconly(tryshort=True) @@ -350,14 +351,22 @@ class ExceptionInfo(object): reprcrash = ReprFileLocation(path, lineno+1, exconly) return reprcrash - def getrepr(self, showlocals=False, style="long", + def getrepr(self, showlocals=False, style="long", abspath=False, tbfilter=True, funcargs=False): """ return str()able representation of this exception info. 
- showlocals: show locals per traceback entry - style: long|short|no traceback style + showlocals: show locals per traceback entry + style: long|short|no|native traceback style tbfilter: hide entries (where __tracebackhide__ is true) """ - fmt = FormattedExcinfo(showlocals=showlocals, style=style, + if style == 'native': + import traceback + return ''.join(traceback.format_exception( + self.type, + self.value, + self.traceback[0]._rawentry, + )) + + fmt = FormattedExcinfo(showlocals=showlocals, style=style, abspath=abspath, tbfilter=tbfilter, funcargs=funcargs) return fmt.repr_excinfo(self) @@ -370,27 +379,27 @@ class ExceptionInfo(object): entry = self.traceback[-1] loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) return unicode(loc) - + class FormattedExcinfo(object): - """ presenting information about failing Functions and Generators. """ - # for traceback entries - flow_marker = ">" + """ presenting information about failing Functions and Generators. """ + # for traceback entries + flow_marker = ">" fail_marker = "E" - + def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False): self.showlocals = showlocals self.style = style self.tbfilter = tbfilter self.funcargs = funcargs - self.abspath = abspath + self.abspath = abspath def _getindent(self, source): - # figure out indent for given source + # figure out indent for given source try: s = str(source.getstatement(len(source)-1)) - except KeyboardInterrupt: - raise + except KeyboardInterrupt: + raise except: try: s = str(source[-1]) @@ -405,7 +414,7 @@ class FormattedExcinfo(object): if source is not None: source = source.deindent() return source - + def _saferepr(self, obj): return py.io.saferepr(obj) @@ -421,7 +430,7 @@ class FormattedExcinfo(object): lines = [] if source is None: source = py.code.Source("???") - line_index = 0 + line_index = 0 if line_index < 0: line_index += len(source) for i in range(len(source)): @@ -440,24 +449,24 @@ class FormattedExcinfo(object): def get_exconly(self, excinfo, indent=4, markall=False): lines = [] - indent = " " * indent - # get the real exception information out + indent = " " * indent + # get the real exception information out exlines = excinfo.exconly(tryshort=True).split('\n') failindent = self.fail_marker + indent[1:] for line in exlines: lines.append(failindent + line) if not markall: - failindent = indent + failindent = indent return lines def repr_locals(self, locals): - if self.showlocals: + if self.showlocals: lines = [] keys = list(locals) keys.sort() for name in keys: value = locals[name] - if name == '__builtins__': + if name == '__builtins__': lines.append("__builtins__ = <builtins>") else: # This formatting could all be handled by the @@ -474,7 +483,7 @@ class FormattedExcinfo(object): return ReprLocals(lines) def repr_traceback_entry(self, entry, excinfo=None): - # excinfo is not None if this is the last tb entry + # excinfo is not None if this is the last tb entry source = self._getentrysource(entry) if source is None: source = py.code.Source("???") @@ -488,7 +497,7 @@ class FormattedExcinfo(object): short = self.style == "short" reprargs = None if not short: - reprargs = self.repr_args(entry) + reprargs = self.repr_args(entry) s = self.get_source(source, line_index, excinfo, short=short) lines.extend(s) if short: @@ -501,7 +510,7 @@ class FormattedExcinfo(object): if not short: localsrepr = self.repr_locals(entry.locals) return ReprEntry(lines, reprargs, localsrepr, filelocrepr, short) - if excinfo: + if excinfo: 
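ExceptionInfo.getrepr() gains a 'native' style above, which skips py's own traceback rendering and simply joins the output of the standard library's traceback.format_exception(). A small usage sketch, assuming the py snapshot added by this commit is importable:

```python
import py

def explode():
    raise ValueError('boom')

try:
    explode()
except ValueError:
    excinfo = py.code.ExceptionInfo()  # wraps sys.exc_info()

# 'native' returns the familiar "Traceback (most recent call last): ..." text
print(excinfo.getrepr(style='native'))
```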
lines.extend(self.get_exconly(excinfo, indent=4)) return ReprEntry(lines, None, None, None, False) @@ -512,8 +521,8 @@ class FormattedExcinfo(object): path = np return path - def repr_traceback(self, excinfo): - traceback = excinfo.traceback + def repr_traceback(self, excinfo): + traceback = excinfo.traceback if self.tbfilter: traceback = traceback.filter() recursionindex = None @@ -522,7 +531,7 @@ class FormattedExcinfo(object): last = traceback[-1] entries = [] extraline = None - for index, entry in enumerate(traceback): + for index, entry in enumerate(traceback): einfo = (last == entry) and excinfo or None reprentry = self.repr_traceback_entry(entry, einfo) entries.append(reprentry) @@ -564,7 +573,7 @@ def unicode_or_repr(obj): class ReprExceptionInfo(TerminalRepr): def __init__(self, reprtraceback, reprcrash): self.reprtraceback = reprtraceback - self.reprcrash = reprcrash + self.reprcrash = reprcrash self.sections = [] def addsection(self, name, content, sep="-"): @@ -575,7 +584,7 @@ class ReprExceptionInfo(TerminalRepr): for name, content, sep in self.sections: tw.sep(sep, name) tw.line(content) - + class ReprTraceback(TerminalRepr): entrysep = "_ " @@ -585,7 +594,7 @@ class ReprTraceback(TerminalRepr): self.style = style def toterminal(self, tw): - sepok = False + sepok = False for entry in self.reprentries: if self.style == "long": if sepok: @@ -602,7 +611,7 @@ class ReprEntry(TerminalRepr): def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, short): self.lines = lines self.reprfuncargs = reprfuncargs - self.reprlocals = reprlocals + self.reprlocals = reprlocals self.reprfileloc = filelocrepr self.short = short @@ -610,14 +619,14 @@ class ReprEntry(TerminalRepr): if self.short: self.reprfileloc.toterminal(tw) for line in self.lines: - red = line.startswith("E ") + red = line.startswith("E ") tw.line(line, bold=True, red=red) #tw.line("") return if self.reprfuncargs: self.reprfuncargs.toterminal(tw) for line in self.lines: - red = line.startswith("E ") + red = line.startswith("E ") tw.line(line, bold=True, red=red) if self.reprlocals: #tw.sep(self.localssep, "Locals") @@ -628,8 +637,8 @@ class ReprEntry(TerminalRepr): self.reprfileloc.toterminal(tw) def __str__(self): - return "%s\n%s\n%s" % ("\n".join(self.lines), - self.reprlocals, + return "%s\n%s\n%s" % ("\n".join(self.lines), + self.reprlocals, self.reprfileloc) class ReprFileLocation(TerminalRepr): @@ -641,15 +650,15 @@ class ReprFileLocation(TerminalRepr): def toterminal(self, tw): # filename and lineno output for each entry, # using an output format that most editors unterstand - msg = self.message + msg = self.message i = msg.find("\n") if i != -1: - msg = msg[:i] + msg = msg[:i] tw.line("%s:%s: %s" %(self.path, self.lineno, msg)) class ReprLocals(TerminalRepr): def __init__(self, lines): - self.lines = lines + self.lines = lines def toterminal(self, tw): for line in self.lines: @@ -667,7 +676,7 @@ class ReprFuncArgs(TerminalRepr): if len(ns) + len(linesofar) + 2 > tw.fullwidth: if linesofar: tw.line(linesofar) - linesofar = ns + linesofar = ns else: if linesofar: linesofar += ", " + ns @@ -688,7 +697,7 @@ def patch_builtins(assertion=True, compile=True): l = oldbuiltins.setdefault('AssertionError', []) l.append(py.builtin.builtins.AssertionError) py.builtin.builtins.AssertionError = assertion.AssertionError - if compile: + if compile: l = oldbuiltins.setdefault('compile', []) l.append(py.builtin.builtins.compile) py.builtin.builtins.compile = py.code.compile @@ -697,14 +706,17 @@ def 
unpatch_builtins(assertion=True, compile=True): """ remove compile and AssertionError builtins from Python builtins. """ if assertion: py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop() - if compile: + if compile: py.builtin.builtins.compile = oldbuiltins['compile'].pop() def getrawcode(obj): - """ return code object for given function. """ - obj = getattr(obj, 'im_func', obj) - obj = getattr(obj, 'func_code', obj) - obj = getattr(obj, 'f_code', obj) - obj = getattr(obj, '__code__', obj) - return obj - + """ return code object for given function. """ + try: + return obj.__code__ + except AttributeError: + obj = getattr(obj, 'im_func', obj) + obj = getattr(obj, 'func_code', obj) + obj = getattr(obj, 'f_code', obj) + obj = getattr(obj, '__code__', obj) + return obj + diff --git a/py/_code/oldmagic.py b/py/_code/oldmagic.py deleted file mode 100644 index 51db7f5722..0000000000 --- a/py/_code/oldmagic.py +++ /dev/null @@ -1,62 +0,0 @@ -""" deprecated module for turning on/off some features. """ - -import py - -from py.builtin import builtins as cpy_builtin - -def invoke(assertion=False, compile=False): - """ (deprecated) invoke magic, currently you can specify: - - assertion patches the builtin AssertionError to try to give - more meaningful AssertionErrors, which by means - of deploying a mini-interpreter constructs - a useful error message. - """ - py.log._apiwarn("1.1", - "py.magic.invoke() is deprecated, use py.code.patch_builtins()", - stacklevel=2, - ) - py.code.patch_builtins(assertion=assertion, compile=compile) - -def revoke(assertion=False, compile=False): - """ (deprecated) revoke previously invoked magic (see invoke()).""" - py.log._apiwarn("1.1", - "py.magic.revoke() is deprecated, use py.code.unpatch_builtins()", - stacklevel=2, - ) - py.code.unpatch_builtins(assertion=assertion, compile=compile) - -patched = {} - -def patch(namespace, name, value): - """ (deprecated) rebind the 'name' on the 'namespace' to the 'value', - possibly and remember the original value. Multiple - invocations to the same namespace/name pair will - remember a list of old values. - """ - py.log._apiwarn("1.1", - "py.magic.patch() is deprecated, in tests use monkeypatch funcarg.", - stacklevel=2, - ) - nref = (namespace, name) - orig = getattr(namespace, name) - patched.setdefault(nref, []).append(orig) - setattr(namespace, name, value) - return orig - -def revert(namespace, name): - """ (deprecated) revert to the orginal value the last patch modified. - Raise ValueError if no such original value exists. 
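py.code.getrawcode() above now tries the modern __code__ attribute first and keeps the older im_func/func_code/f_code fallbacks for Python 2 methods and for frame objects. A rough standalone equivalent of the lookup order:

```python
import sys

def getrawcode(obj):
    try:
        return obj.__code__                    # plain functions, Python 3 methods
    except AttributeError:
        obj = getattr(obj, 'im_func', obj)     # Python 2 bound/unbound methods
        obj = getattr(obj, 'func_code', obj)   # Python 2 function objects
        obj = getattr(obj, 'f_code', obj)      # frame objects
        return obj

def sample():
    pass

assert getrawcode(sample) is sample.__code__
assert getrawcode(sys._getframe()) is sys._getframe().f_code
```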
- """ - py.log._apiwarn("1.1", - "py.magic.revert() is deprecated, in tests use monkeypatch funcarg.", - stacklevel=2, - ) - nref = (namespace, name) - if nref not in patched or not patched[nref]: - raise ValueError("No original value stored for %s.%s" % nref) - current = getattr(namespace, name) - orig = patched[nref].pop() - setattr(namespace, name, orig) - return current - diff --git a/py/_code/oldmagic2.py b/py/_code/oldmagic2.py deleted file mode 100644 index d041a800fa..0000000000 --- a/py/_code/oldmagic2.py +++ /dev/null @@ -1,6 +0,0 @@ - -import py - -py.log._apiwarn("1.1", "py.magic.AssertionError is deprecated, use py.code._AssertionError", stacklevel=2) - -from py.code import _AssertionError as AssertionError diff --git a/py/_code/source.py b/py/_code/source.py index 87e61d4d12..15c8e023e3 100644 --- a/py/_code/source.py +++ b/py/_code/source.py @@ -3,7 +3,7 @@ import sys import inspect, tokenize import py from types import ModuleType -cpy_compile = compile +cpy_compile = compile try: import _ast @@ -21,9 +21,9 @@ class Source(object): def __init__(self, *parts, **kwargs): self.lines = lines = [] de = kwargs.get('deindent', True) - rstrip = kwargs.get('rstrip', True) + rstrip = kwargs.get('rstrip', True) for part in parts: - if not part: + if not part: partlines = [] if isinstance(part, Source): partlines = part.lines @@ -32,8 +32,8 @@ class Source(object): elif isinstance(part, py.builtin._basestring): partlines = part.split('\n') if rstrip: - while partlines: - if partlines[-1].strip(): + while partlines: + if partlines[-1].strip(): break partlines.pop() else: @@ -42,13 +42,13 @@ class Source(object): partlines = deindent(partlines) lines.extend(partlines) - def __eq__(self, other): + def __eq__(self, other): try: - return self.lines == other.lines - except AttributeError: - if isinstance(other, str): - return str(self) == other - return False + return self.lines == other.lines + except AttributeError: + if isinstance(other, str): + return str(self) == other + return False def __getitem__(self, key): if isinstance(key, int): @@ -58,8 +58,8 @@ class Source(object): raise IndexError("cannot slice a Source with a step") return self.__getslice__(key.start, key.stop) - def __len__(self): - return len(self.lines) + def __len__(self): + return len(self.lines) def __getslice__(self, start, end): newsource = Source() @@ -79,9 +79,9 @@ class Source(object): source.lines[:] = self.lines[start:end] return source - def putaround(self, before='', after='', indent=' ' * 4): - """ return a copy of the source object with - 'before' and 'after' wrapped around it. + def putaround(self, before='', after='', indent=' ' * 4): + """ return a copy of the source object with + 'before' and 'after' wrapped around it. """ before = Source(before) after = Source(after) @@ -90,23 +90,23 @@ class Source(object): newsource.lines = before.lines + lines + after.lines return newsource - def indent(self, indent=' ' * 4): - """ return a copy of the source object with - all lines indented by the given indent-string. + def indent(self, indent=' ' * 4): + """ return a copy of the source object with + all lines indented by the given indent-string. """ newsource = Source() newsource.lines = [(indent+line) for line in self.lines] return newsource - def getstatement(self, lineno): + def getstatement(self, lineno, assertion=False): """ return Source statement which contains the given linenumber (counted from 0). 
""" - start, end = self.getstatementrange(lineno) + start, end = self.getstatementrange(lineno, assertion) return self[start:end] - def getstatementrange(self, lineno): - """ return (start, end) tuple which spans the minimal + def getstatementrange(self, lineno, assertion=False): + """ return (start, end) tuple which spans the minimal statement region which containing the given lineno. """ # XXX there must be a better than these heuristic ways ... @@ -117,24 +117,28 @@ class Source(object): # 1. find the start of the statement from codeop import compile_command for start in range(lineno, -1, -1): + if assertion: + line = self.lines[start] + # the following lines are not fully tested, change with care + if 'super' in line and 'self' in line and '__init__' in line: + raise IndexError("likely a subclass") + if "assert" not in line and "raise" not in line: + continue trylines = self.lines[start:lineno+1] # quick hack to indent the source and get it as a string in one go - trylines.insert(0, 'def xxx():') + trylines.insert(0, 'if xxx:') trysource = '\n '.join(trylines) # ^ space here try: compile_command(trysource) except (SyntaxError, OverflowError, ValueError): - pass - else: - break # got a valid or incomplete statement - - # 2. find the end of the statement - for end in range(lineno+1, len(self)+1): - trysource = self[start:end] - if trysource.isparseable(): - break + continue + # 2. find the end of the statement + for end in range(lineno+1, len(self)+1): + trysource = self[start:end] + if trysource.isparseable(): + return start, end return start, end def getblockend(self, lineno): @@ -159,7 +163,7 @@ class Source(object): def isparseable(self, deindent=True): """ return True if source is parseable, heuristically - deindenting it by default. + deindenting it by default. """ try: import parser @@ -167,7 +171,7 @@ class Source(object): syntax_checker = lambda x: compile(x, 'asd', 'exec') else: syntax_checker = parser.suite - + if deindent: source = str(self.deindent()) else: @@ -185,14 +189,14 @@ class Source(object): def __str__(self): return "\n".join(self.lines) - def compile(self, filename=None, mode='exec', - flag=generators.compiler_flag, + def compile(self, filename=None, mode='exec', + flag=generators.compiler_flag, dont_inherit=0, _genframe=None): """ return compiled code object. if filename is None invent an artificial filename which displays the source/line position of the caller frame. """ - if not filename or py.path.local(filename).check(file=0): + if not filename or py.path.local(filename).check(file=0): if _genframe is None: _genframe = sys._getframe(1) # the caller fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno @@ -240,8 +244,8 @@ def compile_(source, filename=None, mode='exec', flags= generators.compiler_flag, dont_inherit=0): """ compile the given source to a raw code object, and maintain an internal cache which allows later - retrieval of the source code for the code object - and any recursively created code objects. + retrieval of the source code for the code object + and any recursively created code objects. """ if _ast is not None and isinstance(source, _ast.AST): # XXX should Source support having AST? 
@@ -256,7 +260,7 @@ def getfslineno(obj): try: code = py.code.Code(obj) except TypeError: - # fallback to + # fallback to fn = (py.std.inspect.getsourcefile(obj) or py.std.inspect.getfile(obj)) fspath = fn and py.path.local(fn) or None @@ -269,7 +273,7 @@ def getfslineno(obj): lineno = None else: fspath = code.path - lineno = code.firstlineno + lineno = code.firstlineno return fspath, lineno # @@ -279,7 +283,7 @@ def getfslineno(obj): def findsource(obj): try: sourcelines, lineno = py.std.inspect.findsource(obj) - except (KeyboardInterrupt, SystemExit): + except py.builtin._sysex: raise except: return None, None @@ -314,9 +318,9 @@ def deindent(lines, offset=None): yield line + '\n' while True: yield '' - + r = readline_generator(lines) - try: + try: readline = r.next except AttributeError: readline = r.__next__ diff --git a/py/_compat/__init__.py b/py/_compat/__init__.py deleted file mode 100644 index 727911bc90..0000000000 --- a/py/_compat/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -""" compatibility modules (taken from 2.4.4) """ - diff --git a/py/_compat/dep_doctest.py b/py/_compat/dep_doctest.py deleted file mode 100644 index 6da68a5006..0000000000 --- a/py/_compat/dep_doctest.py +++ /dev/null @@ -1,5 +0,0 @@ -import py - -py.log._apiwarn("1.1", "py.compat.doctest deprecated, use standard library version.", -stacklevel="apipkg") -doctest = py.std.doctest diff --git a/py/_compat/dep_optparse.py b/py/_compat/dep_optparse.py deleted file mode 100644 index 253d40fe47..0000000000 --- a/py/_compat/dep_optparse.py +++ /dev/null @@ -1,4 +0,0 @@ -import py -py.log._apiwarn("1.1", "py.compat.optparse deprecated, use standard library version.", stacklevel="apipkg") - -optparse = py.std.optparse diff --git a/py/_compat/dep_subprocess.py b/py/_compat/dep_subprocess.py deleted file mode 100644 index dac3d6916b..0000000000 --- a/py/_compat/dep_subprocess.py +++ /dev/null @@ -1,5 +0,0 @@ - -import py -py.log._apiwarn("1.1", "py.compat.subprocess deprecated, use standard library version.", -stacklevel="apipkg") -subprocess = py.std.subprocess diff --git a/py/_compat/dep_textwrap.py b/py/_compat/dep_textwrap.py deleted file mode 100644 index 3f378298e3..0000000000 --- a/py/_compat/dep_textwrap.py +++ /dev/null @@ -1,5 +0,0 @@ -import py - -py.log._apiwarn("1.1", "py.compat.textwrap deprecated, use standard library version.", - stacklevel="apipkg") -textwrap = py.std.textwrap diff --git a/py/_error.py b/py/_error.py index 2f1d9e3a0e..1062bbacf6 100644 --- a/py/_error.py +++ b/py/_error.py @@ -1,5 +1,5 @@ """ -create errno-specific classes for IO or os calls. +create errno-specific classes for IO or os calls. """ import sys, os, errno @@ -20,8 +20,8 @@ class Error(EnvironmentError): return s _winerrnomap = { - 2: errno.ENOENT, - 3: errno.ENOENT, + 2: errno.ENOENT, + 3: errno.ENOENT, 17: errno.EEXIST, 22: errno.ENOTDIR, 267: errno.ENOTDIR, @@ -29,9 +29,9 @@ _winerrnomap = { } class ErrorMaker(object): - """ lazily provides Exception classes for each possible POSIX errno - (as defined per the 'errno' module). All such instances - subclass EnvironmentError. + """ lazily provides Exception classes for each possible POSIX errno + (as defined per the 'errno' module). All such instances + subclass EnvironmentError. """ Error = Error _errno2class = {} @@ -53,11 +53,11 @@ class ErrorMaker(object): self._errno2class[eno] = errorcls return errorcls - def checked_call(self, func, *args): + def checked_call(self, func, *args, **kwargs): """ call a function and raise an errno-exception if applicable. 
""" __tracebackhide__ = True try: - return func(*args) + return func(*args, **kwargs) except self.Error: raise except EnvironmentError: @@ -65,18 +65,18 @@ class ErrorMaker(object): if not hasattr(value, 'errno'): raise __tracebackhide__ = False - errno = value.errno + errno = value.errno try: - if not isinstance(value, WindowsError): + if not isinstance(value, WindowsError): raise NameError - except NameError: + except NameError: # we are not on Windows, or we got a proper OSError cls = self._geterrnoclass(errno) - else: - try: - cls = self._geterrnoclass(_winerrnomap[errno]) - except KeyError: - raise value + else: + try: + cls = self._geterrnoclass(_winerrnomap[errno]) + except KeyError: + raise value raise cls("%s%r" % (func.__name__, args)) __tracebackhide__ = True diff --git a/py/_iniconfig.py b/py/_iniconfig.py new file mode 100644 index 0000000000..d1b86aae20 --- /dev/null +++ b/py/_iniconfig.py @@ -0,0 +1,149 @@ +""" brain-dead simple parser for ini-style files. +(C) Ronny Pfannschmidt, Holger Krekel -- MIT licensed +""" +__version__ = "0.2.dev2" + +__all__ = ['IniConfig', 'ParseError'] + +class ParseError(Exception): + def __init__(self, path, lineno, msg): + Exception.__init__(self, path, lineno, msg) + self.path = path + self.lineno = lineno + self.msg = msg + + def __str__(self): + return "%s:%s: %s" %(self.path, self.lineno+1, self.msg) + +class SectionWrapper(object): + def __init__(self, config, name): + self.config = config + self.name = name + + def lineof(self, name): + return self.config.lineof(self.name, name) + + def get(self, key, default=None, convert=str): + return self.config.get(self.name, key, convert=convert, default=default) + + def __getitem__(self, key): + return self.config.sections[self.name][key] + + def __iter__(self): + section = self.config.sections.get(self.name, []) + def lineof(key): + return self.config.lineof(self.name, key) + for name in sorted(section, key=lineof): + yield name + + def items(self): + for name in self: + yield name, self[name] + + +class IniConfig(object): + def __init__(self, path, data=None): + self.path = str(path) # convenience + if data is None: + f = open(self.path) + try: + tokens = self._parse(iter(f)) + finally: + f.close() + else: + tokens = self._parse(data.splitlines(True)) + + self._sources = {} + self.sections = {} + + for lineno, section, name, value in tokens: + if section is None: + self._raise(lineno, 'no section header defined') + self._sources[section, name] = lineno + if name is None: + if section in self.sections: + self._raise(lineno, 'duplicate section %r'%(section, )) + self.sections[section] = {} + else: + if name in self.sections[section]: + self._raise(lineno, 'duplicate name %r'%(name, )) + self.sections[section][name] = value + + def _raise(self, lineno, msg): + raise ParseError(self.path, lineno, msg) + + def _parse(self, line_iter): + result = [] + section = None + for lineno, line in enumerate(line_iter): + name, data = self._parseline(line, lineno) + # new value + if name is not None and data is not None: + result.append((lineno, section, name, data)) + # new section + elif name is not None and data is None: + if not name: + self._raise(lineno, 'empty section name') + section = name + result.append((lineno, section, None, None)) + # continuation + elif name is None and data is not None: + if not result: + self._raise(lineno, 'unexpected value continuation') + last = result.pop() + last_name, last_data = last[-2:] + if last_name is None: + self._raise(lineno, 'unexpected value continuation') + + 
if last_data: + data = '%s\n%s' % (last_data, data) + result.append(last[:-1] + (data,)) + return result + + def _parseline(self, line, lineno): + # comments + line = line.split('#')[0].rstrip() + # blank lines + if not line: + return None, None + # section + if line[0] == '[' and line[-1] == ']': + return line[1:-1], None + # value + elif not line[0].isspace(): + try: + name, value = line.split('=', 1) + if ": " in name: + raise ValueError() + except ValueError: + try: + name, value = line.split(": ", 1) + except ValueError: + self._raise(lineno, 'unexpected line: %r' % line) + return name.strip(), value.strip() + # continuation + else: + return None, line.strip() + + def lineof(self, section, name=None): + lineno = self._sources.get((section, name)) + if lineno is not None: + return lineno + 1 + + def get(self, section, name, default=None, convert=str): + try: + return convert(self.sections[section][name]) + except KeyError: + return default + + def __getitem__(self, name): + if name not in self.sections: + raise KeyError(name) + return SectionWrapper(self, name) + + def __iter__(self): + for name in sorted(self.sections, key=self.lineof): + yield SectionWrapper(self, name) + + def __contains__(self, arg): + return arg in self.sections diff --git a/py/_io/capture.py b/py/_io/capture.py index b2decd60fe..7ad5fa9ff1 100644 --- a/py/_io/capture.py +++ b/py/_io/capture.py @@ -3,9 +3,9 @@ import sys import py import tempfile -try: +try: from io import StringIO -except ImportError: +except ImportError: from StringIO import StringIO if sys.version_info < (3,0): @@ -28,21 +28,21 @@ except ImportError: patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} -class FDCapture: +class FDCapture: """ Capture IO to/from a given os-level filedescriptor. """ - + def __init__(self, targetfd, tmpfile=None, now=True, patchsys=False): - """ save targetfd descriptor, and open a new - temporary file there. If no tmpfile is + """ save targetfd descriptor, and open a new + temporary file there. If no tmpfile is specified a tempfile.Tempfile() will be opened - in text mode. + in text mode. 
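The new py/_iniconfig.py module added above can be exercised directly; a hedged usage sketch (the file name, section and key are made up, and the data keyword avoids touching the filesystem).

    from py._iniconfig import IniConfig

    config = IniConfig("example.ini", data=(
        "[pytest]\n"
        "addopts = -v\n"
        "    --tb=short\n"   # an indented line continues the previous value
    ))
    section = config["pytest"]                  # a SectionWrapper
    print(section.get("addopts"))               # '-v\n--tb=short', continuation folded in
    print(config.lineof("pytest", "addopts"))   # 2: 1-based line where the value starts
    print("pytest" in config)                   # True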
""" self.targetfd = targetfd if tmpfile is None and targetfd != 0: f = tempfile.TemporaryFile('wb+') - tmpfile = dupfile(f, encoding="UTF-8") + tmpfile = dupfile(f, encoding="UTF-8") f.close() - self.tmpfile = tmpfile + self.tmpfile = tmpfile self._savefd = os.dup(self.targetfd) if patchsys: self._oldsys = getattr(sys, patchsysdict[targetfd]) @@ -62,21 +62,20 @@ class FDCapture: if hasattr(self, '_oldsys'): setattr(sys, patchsysdict[self.targetfd], DontReadFromInput()) else: - fd = self.tmpfile.fileno() - os.dup2(self.tmpfile.fileno(), self.targetfd) + os.dup2(self.tmpfile.fileno(), self.targetfd) if hasattr(self, '_oldsys'): setattr(sys, patchsysdict[self.targetfd], self.tmpfile) - def done(self): + def done(self): """ unpatch and clean up, returns the self.tmpfile (file object) """ - os.dup2(self._savefd, self.targetfd) - os.close(self._savefd) + os.dup2(self._savefd, self.targetfd) + os.close(self._savefd) if self.targetfd != 0: self.tmpfile.seek(0) if hasattr(self, '_oldsys'): setattr(sys, patchsysdict[self.targetfd], self._oldsys) - return self.tmpfile + return self.tmpfile def writeorg(self, data): """ write a string to the original file descriptor @@ -89,22 +88,22 @@ class FDCapture: tempfp.close() -def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): +def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): """ return a new open file object that's a duplicate of f - mode is duplicated if not given, 'buffering' controls + mode is duplicated if not given, 'buffering' controls buffer size (defaulting to no buffering) and 'raising' defines whether an exception is raised when an incompatible file object is passed in (if raising is False, the file object itself will be returned) """ - try: - fd = f.fileno() - except AttributeError: - if raising: - raise + try: + fd = f.fileno() + except AttributeError: + if raising: + raise return f - newfd = os.dup(fd) + newfd = os.dup(fd) mode = mode and mode or f.mode if sys.version_info >= (3,0): if encoding is not None: @@ -112,7 +111,7 @@ def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): buffering = True return os.fdopen(newfd, mode, buffering, encoding, closefd=True) else: - f = os.fdopen(newfd, mode, buffering) + f = os.fdopen(newfd, mode, buffering) if encoding is not None: return EncodedFile(f, encoding) return f @@ -139,24 +138,27 @@ class EncodedFile(object): return getattr(self._stream, name) class Capture(object): - def call(cls, func, *args, **kwargs): + def call(cls, func, *args, **kwargs): """ return a (res, out, err) tuple where out and err represent the output/error output - during function execution. + during function execution. call the given function with args/kwargs - and capture output/error during its execution. - """ + and capture output/error during its execution. + """ so = cls() - try: + try: res = func(*args, **kwargs) - finally: + finally: out, err = so.reset() - return res, out, err - call = classmethod(call) + return res, out, err + call = classmethod(call) def reset(self): """ reset sys.stdout/stderr and return captured output as strings. 
""" - outfile, errfile = self.done() + if hasattr(self, '_reset'): + raise ValueError("was already reset") + self._reset = True + outfile, errfile = self.done(save=False) out, err = "", "" if outfile and not outfile.closed: out = outfile.read() @@ -173,13 +175,13 @@ class Capture(object): return outerr -class StdCaptureFD(Capture): - """ This class allows to capture writes to FD1 and FD2 +class StdCaptureFD(Capture): + """ This class allows to capture writes to FD1 and FD2 and may connect a NULL file to FD0 (and prevent - reads from sys.stdin). If any of the 0,1,2 file descriptors - is invalid it will not be captured. + reads from sys.stdin). If any of the 0,1,2 file descriptors + is invalid it will not be captured. """ - def __init__(self, out=True, err=True, mixed=False, + def __init__(self, out=True, err=True, mixed=False, in_=True, patchsys=True, now=True): self._options = locals() self._save() @@ -197,30 +199,30 @@ class StdCaptureFD(Capture): self.in_ = FDCapture(0, tmpfile=None, now=False, patchsys=patchsys) except OSError: - pass + pass if out: tmpfile = None if hasattr(out, 'write'): tmpfile = out try: - self.out = FDCapture(1, tmpfile=tmpfile, + self.out = FDCapture(1, tmpfile=tmpfile, now=False, patchsys=patchsys) self._options['out'] = self.out.tmpfile except OSError: - pass + pass if err: if out and mixed: - tmpfile = self.out.tmpfile + tmpfile = self.out.tmpfile elif hasattr(err, 'write'): tmpfile = err else: tmpfile = None try: - self.err = FDCapture(2, tmpfile=tmpfile, - now=False, patchsys=patchsys) + self.err = FDCapture(2, tmpfile=tmpfile, + now=False, patchsys=patchsys) self._options['err'] = self.err.tmpfile except OSError: - pass + pass def startall(self): if hasattr(self, 'in_'): @@ -234,17 +236,18 @@ class StdCaptureFD(Capture): """ resume capturing with original temp files. """ self.startall() - def done(self): + def done(self, save=True): """ return (outfile, errfile) and stop capturing. """ outfile = errfile = None if hasattr(self, 'out') and not self.out.tmpfile.closed: - outfile = self.out.done() + outfile = self.out.done() if hasattr(self, 'err') and not self.err.tmpfile.closed: - errfile = self.err.done() + errfile = self.err.done() if hasattr(self, 'in_'): tmpfile = self.in_.done() - self._save() - return outfile, errfile + if save: + self._save() + return outfile, errfile def readouterr(self): """ return snapshot value of stdout/stderr capturings. """ @@ -258,13 +261,13 @@ class StdCaptureFD(Capture): f.truncate(0) f.seek(0) l.append(res) - return l + return l class StdCapture(Capture): """ This class allows to capture writes to sys.stdout|stderr "in-memory" and will raise errors on tries to read from sys.stdin. It only - modifies sys.stdout|stderr|stdin attributes and does not - touch underlying File Descriptors (use StdCaptureFD for that). + modifies sys.stdout|stderr|stdin attributes and does not + touch underlying File Descriptors (use StdCaptureFD for that). """ def __init__(self, out=True, err=True, in_=True, mixed=False, now=True): self._oldout = sys.stdout @@ -284,26 +287,26 @@ class StdCapture(Capture): self.startall() def startall(self): - if self.out: + if self.out: sys.stdout = self.out - if self.err: + if self.err: sys.stderr = self.err if self.in_: sys.stdin = self.in_ = DontReadFromInput() - def done(self): + def done(self, save=True): """ return (outfile, errfile) and stop capturing. 
""" outfile = errfile = None if self.out and not self.out.closed: - sys.stdout = self._oldout + sys.stdout = self._oldout outfile = self.out outfile.seek(0) - if self.err and not self.err.closed: - sys.stderr = self._olderr - errfile = self.err + if self.err and not self.err.closed: + sys.stderr = self._olderr + errfile = self.err errfile.seek(0) if self.in_: - sys.stdin = self._oldin + sys.stdin = self._oldin return outfile, errfile def resume(self): @@ -321,7 +324,7 @@ class StdCapture(Capture): err = self.err.getvalue() self.err.truncate(0) self.err.seek(0) - return out, err + return out, err class DontReadFromInput: """Temporary stub class. Ideally when stdin is accessed, the @@ -335,9 +338,9 @@ class DontReadFromInput: readline = read readlines = read __iter__ = read - + def fileno(self): - raise ValueError("redirected Stdin is pseudofile, has no fileno()") + raise ValueError("redirected Stdin is pseudofile, has no fileno()") def isatty(self): return False def close(self): diff --git a/py/_io/saferepr.py b/py/_io/saferepr.py index 215bfc2869..afc968d3ab 100644 --- a/py/_io/saferepr.py +++ b/py/_io/saferepr.py @@ -5,23 +5,21 @@ builtin_repr = repr reprlib = py.builtin._tryimport('repr', 'reprlib') -sysex = (KeyboardInterrupt, MemoryError, SystemExit) - class SafeRepr(reprlib.Repr): - """ subclass of repr.Repr that limits the resulting size of repr() - and includes information on exceptions raised during the call. - """ + """ subclass of repr.Repr that limits the resulting size of repr() + and includes information on exceptions raised during the call. + """ def repr(self, x): return self._callhelper(reprlib.Repr.repr, self, x) def repr_instance(self, x, level): return self._callhelper(builtin_repr, x) - + def _callhelper(self, call, x, *args): try: # Try the vanilla repr and make sure that the result is a string s = call(x, *args) - except sysex: + except py.builtin._sysex: raise except: cls, e, tb = sys.exc_info() @@ -42,11 +40,11 @@ class SafeRepr(reprlib.Repr): return s def saferepr(obj, maxsize=240): - """ return a size-limited safe repr-string for the given object. + """ return a size-limited safe repr-string for the given object. Failing __repr__ functions of user instances will be represented with a short exception info and 'saferepr' generally takes care to never raise exceptions itself. This function is a wrapper - around the Repr/reprlib functionality of the standard 2.6 lib. + around the Repr/reprlib functionality of the standard 2.6 lib. """ # review exception handling srepr = SafeRepr() diff --git a/py/_io/terminalwriter.py b/py/_io/terminalwriter.py index 7b93b896f0..1d9b80f30a 100644 --- a/py/_io/terminalwriter.py +++ b/py/_io/terminalwriter.py @@ -1,6 +1,6 @@ """ -Helper functions for writing to terminals and files. +Helper functions for writing to terminals and files. """ @@ -8,75 +8,31 @@ Helper functions for writing to terminals and files. import sys, os import py +win32_and_ctypes = False +if sys.platform == "win32": + try: + import ctypes + win32_and_ctypes = True + except ImportError: + pass + def _getdimensions(): import termios,fcntl,struct - call = fcntl.ioctl(0,termios.TIOCGWINSZ,"\000"*8) + call = fcntl.ioctl(1,termios.TIOCGWINSZ,"\000"*8) height,width = struct.unpack( "hhhh", call ) [:2] - return height, width - -if sys.platform == 'win32': - # ctypes access to the Windows console - - STD_OUTPUT_HANDLE = -11 - STD_ERROR_HANDLE = -12 - FOREGROUND_BLUE = 0x0001 # text color contains blue. - FOREGROUND_GREEN = 0x0002 # text color contains green. 
- FOREGROUND_RED = 0x0004 # text color contains red. - FOREGROUND_WHITE = 0x0007 - FOREGROUND_INTENSITY = 0x0008 # text color is intensified. - BACKGROUND_BLUE = 0x0010 # background color contains blue. - BACKGROUND_GREEN = 0x0020 # background color contains green. - BACKGROUND_RED = 0x0040 # background color contains red. - BACKGROUND_WHITE = 0x0070 - BACKGROUND_INTENSITY = 0x0080 # background color is intensified. + return height, width - def GetStdHandle(kind): - import ctypes - return ctypes.windll.kernel32.GetStdHandle(kind) - - def SetConsoleTextAttribute(handle, attr): - import ctypes - ctypes.windll.kernel32.SetConsoleTextAttribute( - handle, attr) - - def _getdimensions(): - import ctypes - from ctypes import wintypes - - SHORT = ctypes.c_short - class COORD(ctypes.Structure): - _fields_ = [('X', SHORT), - ('Y', SHORT)] - class SMALL_RECT(ctypes.Structure): - _fields_ = [('Left', SHORT), - ('Top', SHORT), - ('Right', SHORT), - ('Bottom', SHORT)] - class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): - _fields_ = [('dwSize', COORD), - ('dwCursorPosition', COORD), - ('wAttributes', wintypes.WORD), - ('srWindow', SMALL_RECT), - ('dwMaximumWindowSize', COORD)] - STD_OUTPUT_HANDLE = -11 - handle = GetStdHandle(STD_OUTPUT_HANDLE) - info = CONSOLE_SCREEN_BUFFER_INFO() - ctypes.windll.kernel32.GetConsoleScreenBufferInfo( - handle, ctypes.byref(info)) - # Substract one from the width, otherwise the cursor wraps - # and the ending \n causes an empty line to display. - return info.dwSize.Y, info.dwSize.X - 1 def get_terminal_width(): try: height, width = _getdimensions() - except (SystemExit, KeyboardInterrupt): + except py.builtin._sysex: raise except: # FALLBACK width = int(os.environ.get('COLUMNS', 80)) else: - # XXX the windows getdimensions may be bogus, let's sanify a bit + # XXX the windows getdimensions may be bogus, let's sanify a bit if width < 40: width = 80 return width @@ -91,13 +47,13 @@ def ansi_print(text, esc, file=None, newline=True, flush=False): if esc and not isinstance(esc, tuple): esc = (esc,) if esc and sys.platform != "win32" and file.isatty(): - text = (''.join(['\x1b[%sm' % cod for cod in esc]) + + text = (''.join(['\x1b[%sm' % cod for cod in esc]) + text + '\x1b[0m') # ANSI color code "reset" if newline: text += '\n' - if esc and sys.platform == "win32" and file.isatty(): + if esc and win32_and_ctypes and file.isatty(): if 1 in esc: bold = True esc = tuple([x for x in esc if x != 1]) @@ -122,9 +78,11 @@ def ansi_print(text, esc, file=None, newline=True, flush=False): handle = GetStdHandle(STD_ERROR_HANDLE) else: handle = GetStdHandle(STD_OUTPUT_HANDLE) + oldcolors = GetConsoleInfo(handle).wAttributes + attr |= (oldcolors & 0x0f0) SetConsoleTextAttribute(handle, attr) file.write(text) - SetConsoleTextAttribute(handle, FOREGROUND_WHITE) + SetConsoleTextAttribute(handle, oldcolors) else: file.write(text) @@ -137,32 +95,31 @@ def should_do_markup(file): and not (sys.platform.startswith('java') and os._name == 'nt') class TerminalWriter(object): - _esctable = dict(black=30, red=31, green=32, yellow=33, + _esctable = dict(black=30, red=31, green=32, yellow=33, blue=34, purple=35, cyan=36, white=37, - Black=40, Red=41, Green=42, Yellow=43, + Black=40, Red=41, Green=42, Yellow=43, Blue=44, Purple=45, Cyan=46, White=47, bold=1, light=2, blink=5, invert=7) # XXX deprecate stringio argument def __init__(self, file=None, stringio=False, encoding=None): - if file is None: if stringio: self.stringio = file = py.io.TextIO() else: - file = py.std.sys.stdout + file = 
py.std.sys.stdout if hasattr(file, 'encoding'): encoding = file.encoding elif hasattr(file, '__call__'): file = WriteFile(file, encoding=encoding) - self.encoding = encoding + self.encoding = encoding self._file = file self.fullwidth = get_terminal_width() self.hasmarkup = should_do_markup(file) def _escaped(self, text, esc): if esc and self.hasmarkup: - text = (''.join(['\x1b[%sm' % cod for cod in esc]) + + text = (''.join(['\x1b[%sm' % cod for cod in esc]) + text +'\x1b[0m') return text @@ -227,12 +184,12 @@ class TerminalWriter(object): class Win32ConsoleWriter(TerminalWriter): def write(self, s, **kw): if s: - s = self._getbytestring(s) - if self.hasmarkup: - handle = GetStdHandle(STD_OUTPUT_HANDLE) - + oldcolors = None if self.hasmarkup and kw: - attr = 0 + handle = GetStdHandle(STD_OUTPUT_HANDLE) + oldcolors = GetConsoleInfo(handle).wAttributes + default_bg = oldcolors & 0x00F0 + attr = default_bg if kw.pop('bold', False): attr |= FOREGROUND_INTENSITY @@ -243,31 +200,86 @@ class Win32ConsoleWriter(TerminalWriter): elif kw.pop('green', False): attr |= FOREGROUND_GREEN else: - attr |= FOREGROUND_WHITE + attr |= FOREGROUND_BLACK # (oldcolors & 0x0007) SetConsoleTextAttribute(handle, attr) + if not isinstance(self._file, WriteFile): + s = self._getbytestring(s) self._file.write(s) self._file.flush() - if self.hasmarkup: - SetConsoleTextAttribute(handle, FOREGROUND_WHITE) + if oldcolors: + SetConsoleTextAttribute(handle, oldcolors) def line(self, s="", **kw): self.write(s+"\n", **kw) -if sys.platform == 'win32': - TerminalWriter = Win32ConsoleWriter - -class WriteFile(object): - def __init__(self, writemethod, encoding=None): - self.encoding = encoding - self._writemethod = writemethod +class WriteFile(object): + def __init__(self, writemethod, encoding=None): + self.encoding = encoding + self._writemethod = writemethod def write(self, data): if self.encoding: data = data.encode(self.encoding) self._writemethod(data) - def flush(self): - return + def flush(self): + return +if win32_and_ctypes: + TerminalWriter = Win32ConsoleWriter + import ctypes + from ctypes import wintypes + + # ctypes access to the Windows console + STD_OUTPUT_HANDLE = -11 + STD_ERROR_HANDLE = -12 + FOREGROUND_BLACK = 0x0000 # black text + FOREGROUND_BLUE = 0x0001 # text color contains blue. + FOREGROUND_GREEN = 0x0002 # text color contains green. + FOREGROUND_RED = 0x0004 # text color contains red. + FOREGROUND_WHITE = 0x0007 + FOREGROUND_INTENSITY = 0x0008 # text color is intensified. + BACKGROUND_BLACK = 0x0000 # background color black + BACKGROUND_BLUE = 0x0010 # background color contains blue. + BACKGROUND_GREEN = 0x0020 # background color contains green. + BACKGROUND_RED = 0x0040 # background color contains red. + BACKGROUND_WHITE = 0x0070 + BACKGROUND_INTENSITY = 0x0080 # background color is intensified. 
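A hedged sketch of the TerminalWriter usage that the refactoring above preserves; the markup keywords correspond to the _esctable colors, and sep() is part of the wider TerminalWriter API that is not visible in this hunk.

    from py._io.terminalwriter import TerminalWriter

    tw = TerminalWriter()                      # defaults to sys.stdout
    tw.line("collected 3 items", bold=True)
    tw.line("all tests passed", green=True)    # ANSI escapes only when markup is enabled
    tw.sep("=", "summary")                     # separator padded to the detected terminal width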
+ + SHORT = ctypes.c_short + class COORD(ctypes.Structure): + _fields_ = [('X', SHORT), + ('Y', SHORT)] + class SMALL_RECT(ctypes.Structure): + _fields_ = [('Left', SHORT), + ('Top', SHORT), + ('Right', SHORT), + ('Bottom', SHORT)] + class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): + _fields_ = [('dwSize', COORD), + ('dwCursorPosition', COORD), + ('wAttributes', wintypes.WORD), + ('srWindow', SMALL_RECT), + ('dwMaximumWindowSize', COORD)] + + def GetStdHandle(kind): + return ctypes.windll.kernel32.GetStdHandle(kind) + + SetConsoleTextAttribute = \ + ctypes.windll.kernel32.SetConsoleTextAttribute + + def GetConsoleInfo(handle): + info = CONSOLE_SCREEN_BUFFER_INFO() + ctypes.windll.kernel32.GetConsoleScreenBufferInfo(\ + handle, ctypes.byref(info)) + return info + + def _getdimensions(): + handle = GetStdHandle(STD_OUTPUT_HANDLE) + info = GetConsoleInfo(handle) + # Substract one from the width, otherwise the cursor wraps + # and the ending \n causes an empty line to display. + return info.dwSize.Y, info.dwSize.X - 1 + diff --git a/py/_log/log.py b/py/_log/log.py index a32e850c54..ce47e8c754 100644 --- a/py/_log/log.py +++ b/py/_log/log.py @@ -1,34 +1,34 @@ """ -basic logging functionality based on a producer/consumer scheme. +basic logging functionality based on a producer/consumer scheme. XXX implement this API: (maybe put it into slogger.py?) log = Logger( - info=py.log.STDOUT, - debug=py.log.STDOUT, + info=py.log.STDOUT, + debug=py.log.STDOUT, command=None) log.info("hello", "world") log.command("hello", "world") - log = Logger(info=Logger(something=...), - debug=py.log.STDOUT, + log = Logger(info=Logger(something=...), + debug=py.log.STDOUT, command=None) """ import py, sys -class Message(object): - def __init__(self, keywords, args): - self.keywords = keywords - self.args = args +class Message(object): + def __init__(self, keywords, args): + self.keywords = keywords + self.args = args - def content(self): + def content(self): return " ".join(map(str, self.args)) - def prefix(self): + def prefix(self): return "[%s] " % (":".join(self.keywords)) - def __str__(self): - return self.prefix() + self.content() + def __str__(self): + return self.prefix() + self.content() class Producer(object): @@ -36,11 +36,11 @@ class Producer(object): to a 'consumer' object, which then prints them to stdout, stderr, files, etc. Used extensively by PyPy-1.1. 
""" - - Message = Message # to allow later customization + + Message = Message # to allow later customization keywords2consumer = {} - def __init__(self, keywords, keywordmapper=None, **kw): + def __init__(self, keywords, keywordmapper=None, **kw): if hasattr(keywords, 'split'): keywords = tuple(keywords.split()) self._keywords = keywords @@ -49,22 +49,22 @@ class Producer(object): self._keywordmapper = keywordmapper def __repr__(self): - return "<py.log.Producer %s>" % ":".join(self._keywords) + return "<py.log.Producer %s>" % ":".join(self._keywords) def __getattr__(self, name): - if '_' in name: + if '_' in name: raise AttributeError(name) producer = self.__class__(self._keywords + (name,)) setattr(self, name, producer) - return producer - + return producer + def __call__(self, *args): """ write a message to the appropriate consumer(s) """ func = self._keywordmapper.getconsumer(self._keywords) - if func is not None: + if func is not None: func(self.Message(self._keywords, args)) -class KeywordMapper: +class KeywordMapper: def __init__(self): self.keywords2consumer = {} @@ -75,36 +75,36 @@ class KeywordMapper: self.keywords2consumer.update(state) def getconsumer(self, keywords): - """ return a consumer matching the given keywords. - + """ return a consumer matching the given keywords. + tries to find the most suitable consumer by walking, starting from the back, the list of keywords, the first consumer matching a keyword is returned (falling back to py.log.default) """ - for i in range(len(keywords), 0, -1): - try: + for i in range(len(keywords), 0, -1): + try: return self.keywords2consumer[keywords[:i]] - except KeyError: + except KeyError: continue return self.keywords2consumer.get('default', default_consumer) - def setconsumer(self, keywords, consumer): - """ set a consumer for a set of keywords. """ - # normalize to tuples - if isinstance(keywords, str): + def setconsumer(self, keywords, consumer): + """ set a consumer for a set of keywords. 
""" + # normalize to tuples + if isinstance(keywords, str): keywords = tuple(filter(None, keywords.split())) - elif hasattr(keywords, '_keywords'): - keywords = keywords._keywords - elif not isinstance(keywords, tuple): + elif hasattr(keywords, '_keywords'): + keywords = keywords._keywords + elif not isinstance(keywords, tuple): raise TypeError("key %r is not a string or tuple" % (keywords,)) - if consumer is not None and not py.builtin.callable(consumer): - if not hasattr(consumer, 'write'): + if consumer is not None and not py.builtin.callable(consumer): + if not hasattr(consumer, 'write'): raise TypeError( "%r should be None, callable or file-like" % (consumer,)) consumer = File(consumer) - self.keywords2consumer[keywords] = consumer + self.keywords2consumer[keywords] = consumer -def default_consumer(msg): +def default_consumer(msg): """ the default consumer, prints the message to stdout (using 'print') """ sys.stderr.write(str(msg)+"\n") @@ -122,22 +122,22 @@ def getstate(): # Consumers # -class File(object): +class File(object): """ log consumer wrapping a file(-like) object """ - def __init__(self, f): + def __init__(self, f): assert hasattr(f, 'write') - #assert isinstance(f, file) or not hasattr(f, 'open') - self._file = f + #assert isinstance(f, file) or not hasattr(f, 'open') + self._file = f - def __call__(self, msg): + def __call__(self, msg): """ write a message to the log """ self._file.write(str(msg) + "\n") if hasattr(self._file, 'flush'): self._file.flush() -class Path(object): +class Path(object): """ log consumer that opens and writes to a Path """ - def __init__(self, filename, append=False, + def __init__(self, filename, append=False, delayed_create=False, buffering=False): self._append = append self._filename = str(filename) @@ -158,11 +158,11 @@ class Path(object): if not self._buffering: self._file.flush() -def STDOUT(msg): +def STDOUT(msg): """ consumer that writes to sys.stdout """ sys.stdout.write(str(msg)+"\n") -def STDERR(msg): +def STDERR(msg): """ consumer that writes to sys.stderr """ sys.stderr.write(str(msg)+"\n") diff --git a/py/_log/warning.py b/py/_log/warning.py index 9f46feeaf5..722e31e910 100644 --- a/py/_log/warning.py +++ b/py/_log/warning.py @@ -4,11 +4,11 @@ class DeprecationWarning(DeprecationWarning): def __init__(self, msg, path, lineno): self.msg = msg self.path = path - self.lineno = lineno + self.lineno = lineno def __repr__(self): return "%s:%d: %s" %(self.path, self.lineno+1, self.msg) def __str__(self): - return self.msg + return self.msg def _apiwarn(startversion, msg, stacklevel=2, function=None): # below is mostly COPIED from python2.4/warnings.py's def warn() @@ -21,7 +21,7 @@ def _apiwarn(startversion, msg, stacklevel=2, function=None): co = frame.f_code if co.co_filename.find(stacklevel) == -1: if found: - stacklevel = level + stacklevel = level break else: found = True @@ -67,8 +67,8 @@ def warn(msg, stacklevel=1, function=None): filename = module path = py.path.local(filename) warning = DeprecationWarning(msg, path, lineno) - py.std.warnings.warn_explicit(warning, category=Warning, - filename=str(warning.path), + py.std.warnings.warn_explicit(warning, category=Warning, + filename=str(warning.path), lineno=warning.lineno, registry=py.std.warnings.__dict__.setdefault( "__warningsregistry__", {}) diff --git a/py/_path/cacheutil.py b/py/_path/cacheutil.py index bac07a9818..9922504750 100644 --- a/py/_path/cacheutil.py +++ b/py/_path/cacheutil.py @@ -1,12 +1,12 @@ """ This module contains multithread-safe cache implementations. 
-All Caches have +All Caches have - getorbuild(key, builder) - delentry(key) + getorbuild(key, builder) + delentry(key) -methods and allow configuration when instantiating the cache class. +methods and allow configuration when instantiating the cache class. """ from time import time as gettime @@ -24,7 +24,7 @@ class BasicCache(object): def _putentry(self, key, entry): self._prunelowestweight() - self._dict[key] = entry + self._dict[key] = entry def delentry(self, key, raising=False): try: @@ -46,14 +46,14 @@ class BasicCache(object): numentries = len(self._dict) if numentries >= self.maxentries: # evict according to entry's weight - items = [(entry.weight, key) + items = [(entry.weight, key) for key, entry in self._dict.items()] items.sort() index = numentries - self.prunenum if index > 0: for weight, key in items[:index]: # in MT situations the element might be gone - self.delentry(key, raising=False) + self.delentry(key, raising=False) class BuildcostAccessCache(BasicCache): """ A BuildTime/Access-counting cache implementation. @@ -78,7 +78,7 @@ class BuildcostAccessCache(BasicCache): class WeightedCountingEntry(object): def __init__(self, value, oneweight): self._value = value - self.weight = self._oneweight = oneweight + self.weight = self._oneweight = oneweight def value(self): self.weight += self._oneweight @@ -95,8 +95,8 @@ class AgingCache(BasicCache): def _getentry(self, key): entry = self._dict[key] if entry.isexpired(): - self.delentry(key) - raise KeyError(key) + self.delentry(key) + raise KeyError(key) return entry def _build(self, key, builder): @@ -111,4 +111,4 @@ class AgingEntry(object): def isexpired(self): t = gettime() - return t >= self.weight + return t >= self.weight diff --git a/py/_path/common.py b/py/_path/common.py index 15aa80f3cf..42f50295b3 100644 --- a/py/_path/common.py +++ b/py/_path/common.py @@ -36,7 +36,7 @@ class Checkers: return self.path.relto(arg) def fnmatch(self, arg): - return FNMatcher(arg)(self.path) + return self.path.fnmatch(arg) def endswith(self, arg): return str(self.path).endswith(arg) @@ -75,7 +75,7 @@ class Checkers: return False return True -class NeverRaised(Exception): +class NeverRaised(Exception): pass class PathBase(object): @@ -91,6 +91,11 @@ class PathBase(object): return self._getbyspec('basename')[0] basename = property(basename, None, None, basename.__doc__) + def dirname(self): + """ dirname part of path. """ + return self._getbyspec('dirname')[0] + dirname = property(dirname, None, None, dirname.__doc__) + def purebasename(self): """ pure base name of the path.""" return self._getbyspec('purebasename')[0] @@ -143,7 +148,7 @@ newline will be removed from the end of each line. """ def move(self, target): """ move this path to target. """ if target.relto(self): - raise py.error.EINVAL(target, + raise py.error.EINVAL(target, "cannot move path into a subdirectory of itself") try: self.rename(target) @@ -156,27 +161,50 @@ newline will be removed from the end of each line. """ return repr(str(self)) def check(self, **kw): - """ check a path for existence, or query its properties + """ check a path for existence and properties. + + Without arguments, return True if the path exists, otherwise False. 
- without arguments, this returns True if the path exists (on the - filesystem), False if not + valid checkers:: - with (keyword only) arguments, the object compares the value - of the argument with the value of a property with the same name - (if it has one, else it raises a TypeError) + file=1 # is a file + file=0 # is not a file (may not even exist) + dir=1 # is a dir + link=1 # is a link + exists=1 # exists - when for example the keyword argument 'ext' is '.py', this will - return True if self.ext == '.py', False otherwise + You can specify multiple checker definitions, for example:: + + path.check(file=1, link=1) # a link pointing to a file """ if not kw: kw = {'exists' : 1} return self.Checkers(self)._evaluate(kw) + def fnmatch(self, pattern): + """return true if the basename/fullname matches the glob-'pattern'. + + valid pattern characters:: + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + If the pattern contains a path-separator then the full path + is used for pattern matching and a '*' is prepended to the + pattern. + + if the pattern doesn't contain a path-separator the pattern + is only matched against the basename. + """ + return FNMatcher(pattern)(self) + def relto(self, relpath): """ return a string which is the relative part of the path - to the given 'relpath'. + to the given 'relpath'. """ - if not isinstance(relpath, (str, PathBase)): + if not isinstance(relpath, (str, PathBase)): raise TypeError("%r: not a string or path object" %(relpath,)) strrelpath = str(relpath) if strrelpath and strrelpath[-1] != self.sep: @@ -187,17 +215,20 @@ newline will be removed from the end of each line. """ if sys.platform == "win32" or getattr(os, '_name', None) == 'nt': if os.path.normcase(strself).startswith( os.path.normcase(strrelpath)): - return strself[len(strrelpath):] + return strself[len(strrelpath):] elif strself.startswith(strrelpath): return strself[len(strrelpath):] return "" - def bestrelpath(self, dest): - """ return a string which is a relative path from self - to dest such that self.join(bestrelpath) == dest and - if not such path can be determined return dest. - """ + def bestrelpath(self, dest): + """ return a string which is a relative path from self + (assumed to be a directory) to dest such that + self.join(bestrelpath) == dest and if not such + path can be determined return dest. + """ try: + if self == dest: + return os.curdir base = self.common(dest) if not base: # can be the case on windows return str(dest) @@ -207,11 +238,11 @@ newline will be removed from the end of each line. """ n = self2base.count(self.sep) + 1 else: n = 0 - l = ['..'] * n + l = [os.pardir] * n if reldest: - l.append(reldest) + l.append(reldest) target = dest.sep.join(l) - return target + return target except AttributeError: return str(dest) @@ -256,11 +287,11 @@ newline will be removed from the end of each line. """ def __lt__(self, other): try: - return self.strpath < other.strpath + return self.strpath < other.strpath except AttributeError: return str(self) < str(other) - def visit(self, fil=None, rec=None, ignore=NeverRaised): + def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False): """ yields all paths below the current one fil is a filter (glob pattern or callable), if not matching the @@ -272,26 +303,14 @@ newline will be removed from the end of each line. 
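The reworked check() and the new fnmatch() documented above can be summarised with a small sketch; the path is hypothetical and the existence checkers only return True if it actually exists on the filesystem.

    import py

    p = py.path.local("/tmp/example.py")
    print(p.check(file=1, ext=".py"))   # True only if it exists and is a .py file
    print(p.check(dir=1))               # False unless it is an existing directory
    print(p.fnmatch("*.py"))            # no separator: matched against the basename
    print(p.fnmatch("tmp/*.py"))        # separator present: '*' prepended, full path matched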
""" ignore is an Exception class that is ignoredwhen calling dirlist() on any of the paths (by default, all exceptions are reported) + + bf if True will cause a breadthfirst search instead of the + default depthfirst. Default: False + + sort if True will sort entries within each directory level. """ - if isinstance(fil, str): - fil = FNMatcher(fil) - if rec: - if isinstance(rec, str): - rec = fnmatch(fil) - elif not hasattr(rec, '__call__'): - rec = None - try: - entries = self.listdir() - except ignore: - return - dirs = [p for p in entries - if p.check(dir=1) and (rec is None or rec(p))] - for subdir in dirs: - for p in subdir.visit(fil=fil, rec=rec, ignore=ignore): - yield p - for p in entries: - if fil is None or fil(p): - yield p + for x in Visitor(fil, rec, ignore, bf, sort).gen(self): + yield x def _sortlist(self, res, sort): if sort: @@ -304,24 +323,45 @@ newline will be removed from the end of each line. """ """ return True if other refers to the same stat object as self. """ return self.strpath == str(other) +class Visitor: + def __init__(self, fil, rec, ignore, bf, sort): + if isinstance(fil, str): + fil = FNMatcher(fil) + if isinstance(rec, str): + self.rec = fnmatch(fil) + elif not hasattr(rec, '__call__') and rec: + self.rec = lambda path: True + else: + self.rec = rec + self.fil = fil + self.ignore = ignore + self.breadthfirst = bf + self.optsort = sort and sorted or (lambda x: x) + + def gen(self, path): + try: + entries = path.listdir() + except self.ignore: + return + rec = self.rec + dirs = self.optsort([p for p in entries + if p.check(dir=1) and (rec is None or rec(p))]) + if not self.breadthfirst: + for subdir in dirs: + for p in self.gen(subdir): + yield p + for p in self.optsort(entries): + if self.fil is None or self.fil(p): + yield p + if self.breadthfirst: + for subdir in dirs: + for p in self.gen(subdir): + yield p + class FNMatcher: def __init__(self, pattern): self.pattern = pattern def __call__(self, path): - """return true if the basename/fullname matches the glob-'pattern'. - - * matches everything - ? matches any single character - [seq] matches any character in seq - [!seq] matches any char not in seq - - if the pattern contains a path-separator then the full path - is used for pattern matching and a '*' is prepended to the - pattern. - - if the pattern doesn't contain a path-separator the pattern - is only matched against the basename. 
- """ pattern = self.pattern if pattern.find(path.sep) == -1: name = path.basename diff --git a/py/_path/gateway/__init__.py b/py/_path/gateway/__init__.py deleted file mode 100644 index 792d600548..0000000000 --- a/py/_path/gateway/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# diff --git a/py/_path/gateway/channeltest.py b/py/_path/gateway/channeltest.py deleted file mode 100644 index ac821aeb92..0000000000 --- a/py/_path/gateway/channeltest.py +++ /dev/null @@ -1,65 +0,0 @@ -import threading - - -class PathServer: - - def __init__(self, channel): - self.channel = channel - self.C2P = {} - self.next_id = 0 - threading.Thread(target=self.serve).start() - - def p2c(self, path): - id = self.next_id - self.next_id += 1 - self.C2P[id] = path - return id - - def command_LIST(self, id, *args): - path = self.C2P[id] - answer = [(self.p2c(p), p.basename) for p in path.listdir(*args)] - self.channel.send(answer) - - def command_DEL(self, id): - del self.C2P[id] - - def command_GET(self, id, spec): - path = self.C2P[id] - self.channel.send(path._getbyspec(spec)) - - def command_READ(self, id): - path = self.C2P[id] - self.channel.send(path.read()) - - def command_JOIN(self, id, resultid, *args): - path = self.C2P[id] - assert resultid not in self.C2P - self.C2P[resultid] = path.join(*args) - - def command_DIRPATH(self, id, resultid): - path = self.C2P[id] - assert resultid not in self.C2P - self.C2P[resultid] = path.dirpath() - - def serve(self): - try: - while 1: - msg = self.channel.receive() - meth = getattr(self, 'command_' + msg[0]) - meth(*msg[1:]) - except EOFError: - pass - -if __name__ == '__main__': - import py - gw = execnet.PopenGateway() - channel = gw._channelfactory.new() - srv = PathServer(channel) - c = gw.remote_exec(""" - import remotepath - p = remotepath.RemotePath(channel.receive(), channel.receive()) - channel.send(len(p.listdir())) - """) - c.send(channel) - c.send(srv.p2c(py.path.local('/tmp'))) - print(c.receive()) diff --git a/py/_path/gateway/channeltest2.py b/py/_path/gateway/channeltest2.py deleted file mode 100644 index 827abb7d32..0000000000 --- a/py/_path/gateway/channeltest2.py +++ /dev/null @@ -1,21 +0,0 @@ -import py -from remotepath import RemotePath - - -SRC = open('channeltest.py', 'r').read() - -SRC += ''' -import py -srv = PathServer(channel.receive()) -channel.send(srv.p2c(py.path.local("/tmp"))) -''' - - -#gw = execnet.SshGateway('codespeak.net') -gw = execnet.PopenGateway() -gw.remote_init_threads(5) -c = gw.remote_exec(SRC, stdout=py.std.sys.stdout, stderr=py.std.sys.stderr) -subchannel = gw._channelfactory.new() -c.send(subchannel) - -p = RemotePath(subchannel, c.receive()) diff --git a/py/_path/gateway/remotepath.py b/py/_path/gateway/remotepath.py deleted file mode 100644 index 149baa4354..0000000000 --- a/py/_path/gateway/remotepath.py +++ /dev/null @@ -1,47 +0,0 @@ -import py, itertools -from py._path import common - -COUNTER = itertools.count() - -class RemotePath(common.PathBase): - sep = '/' - - def __init__(self, channel, id, basename=None): - self._channel = channel - self._id = id - self._basename = basename - self._specs = {} - - def __del__(self): - self._channel.send(('DEL', self._id)) - - def __repr__(self): - return 'RemotePath(%s)' % self.basename - - def listdir(self, *args): - self._channel.send(('LIST', self._id) + args) - return [RemotePath(self._channel, id, basename) - for (id, basename) in self._channel.receive()] - - def dirpath(self): - id = ~COUNTER.next() - self._channel.send(('DIRPATH', self._id, id)) - return 
RemotePath(self._channel, id) - - def join(self, *args): - id = ~COUNTER.next() - self._channel.send(('JOIN', self._id, id) + args) - return RemotePath(self._channel, id) - - def _getbyspec(self, spec): - parts = spec.split(',') - ask = [x for x in parts if x not in self._specs] - if ask: - self._channel.send(('GET', self._id, ",".join(ask))) - for part, value in zip(ask, self._channel.receive()): - self._specs[part] = value - return [self._specs[x] for x in parts] - - def read(self): - self._channel.send(('READ', self._id)) - return self._channel.receive() diff --git a/py/_path/local.py b/py/_path/local.py index 8878fed3cd..fbe3ae5eb5 100644 --- a/py/_path/local.py +++ b/py/_path/local.py @@ -11,17 +11,17 @@ class Stat(object): def __getattr__(self, name): return getattr(self._osstatresult, "st_" + name) - def __init__(self, path, osstatresult): - self.path = path + def __init__(self, path, osstatresult): + self.path = path self._osstatresult = osstatresult def owner(self): if iswin32: raise NotImplementedError("XXX win32") - import pwd + import pwd entry = py.error.checked_call(pwd.getpwuid, self.uid) return entry[0] - owner = property(owner, None, None, "owner of path") + owner = property(owner, None, None, "owner of path") def group(self): """ return group name of file. """ @@ -30,7 +30,7 @@ class Stat(object): import grp entry = py.error.checked_call(grp.getgrgid, self.gid) return entry[0] - group = property(group) + group = property(group) class PosixPath(common.PathBase): def chown(self, user, group, rec=0): @@ -42,7 +42,7 @@ class PosixPath(common.PathBase): uid = getuserid(user) gid = getgroupid(group) if rec: - for x in self.visit(rec=lambda x: x.check(link=0)): + for x in self.visit(rec=lambda x: x.check(link=0)): if x.check(link=0): py.error.checked_call(os.chown, str(x), uid, gid) py.error.checked_call(os.chown, str(self), uid, gid) @@ -68,10 +68,6 @@ class PosixPath(common.PathBase): target = self.sep.join(('..', )*n + (relsource, )) py.error.checked_call(os.symlink, target, self.strpath) - def samefile(self, other): - """ return True if other refers to the same stat object as self. """ - return py.error.checked_call(os.path.samefile, str(self), str(other)) - def getuserid(user): import pwd if not isinstance(user, int): @@ -87,9 +83,12 @@ def getgroupid(group): FSBase = not iswin32 and PosixPath or common.PathBase class LocalPath(FSBase): - """ object oriented interface to os.path and other local filesystem - related information. + """ object oriented interface to os.path and other local filesystem + related information. """ + class ImportMismatchError(ImportError): + """ raised on pyimport() if there is a mismatch of __file__'s""" + sep = os.sep class Checkers(common.Checkers): def _stat(self): @@ -146,7 +145,7 @@ class LocalPath(FSBase): def __eq__(self, other): s1 = str(self) s2 = str(other) - if iswin32: + if iswin32: s1 = s1.lower() s2 = s2.lower() return s1 == s2 @@ -157,18 +156,30 @@ class LocalPath(FSBase): def __lt__(self, other): return str(self) < str(other) - def remove(self, rec=1): - """ remove a file or directory (or a directory tree if rec=1). """ + def samefile(self, other): + """ return True if 'other' references the same file as 'self'. """ + if self == other: + return True + if not iswin32: + return py.error.checked_call(os.path.samefile, str(self), str(other)) + return False + + def remove(self, rec=1, ignore_errors=False): + """ remove a file or directory (or a directory tree if rec=1). 
+ if ignore_errors is True, errors while removing directories will + be ignored. + """ if self.check(dir=1, link=0): if rec: - # force remove of readonly files on windows - if iswin32: + # force remove of readonly files on windows + if iswin32: self.chmod(448, rec=1) # octcal 0700 - py.error.checked_call(py.std.shutil.rmtree, self.strpath) + py.error.checked_call(py.std.shutil.rmtree, self.strpath, + ignore_errors=ignore_errors) else: py.error.checked_call(os.rmdir, self.strpath) else: - if iswin32: + if iswin32: self.chmod(448) # octcal 0700 py.error.checked_call(os.remove, self.strpath) @@ -190,20 +201,20 @@ class LocalPath(FSBase): buf = f.read(chunksize) if not buf: return hash.hexdigest() - hash.update(buf) + hash.update(buf) finally: f.close() def new(self, **kw): """ create a modified version of this path. - the following keyword arguments modify various path parts: + the following keyword arguments modify various path parts:: a:/some/path/to/a/file.ext - || drive - |-------------| dirname - |------| basename - |--| purebasename - |--| ext + xx drive + xxxxxxxxxxxxxxxxx dirname + xxxxxxxx basename + xxxx purebasename + xxx ext """ obj = object.__new__(self.__class__) drive, dirname, basename, purebasename,ext = self._getbyspec( @@ -222,24 +233,17 @@ class LocalPath(FSBase): ext = '.' + ext kw['basename'] = pb + ext - kw.setdefault('drive', drive) - kw.setdefault('dirname', dirname) + if ('dirname' in kw and not kw['dirname']): + kw['dirname'] = drive + else: + kw.setdefault('dirname', dirname) kw.setdefault('sep', self.sep) obj.strpath = os.path.normpath( - "%(drive)s%(dirname)s%(sep)s%(basename)s" % kw) + "%(dirname)s%(sep)s%(basename)s" % kw) return obj - + def _getbyspec(self, spec): - """ return a sequence of specified path parts. 'spec' is - a comma separated string containing path part names. - according to the following convention: - a:/some/path/to/a/file.ext - || drive - |-------------| dirname - |------| basename - |--| purebasename - |--| ext - """ + """ see new for what 'spec' can be. """ res = [] parts = self.strpath.split(self.sep) @@ -249,7 +253,7 @@ class LocalPath(FSBase): if name == 'drive': append(parts[0]) elif name == 'dirname': - append(self.sep.join(['']+parts[1:-1])) + append(self.sep.join(parts[:-1])) else: basename = parts[-1] if name == 'basename': @@ -314,7 +318,7 @@ class LocalPath(FSBase): if fil is None or fil(childurl): res.append(childurl) self._sortlist(res, sort) - return res + return res def size(self): """ return size of the underlying file object """ @@ -454,8 +458,8 @@ class LocalPath(FSBase): return self.strpath def pypkgpath(self, pkgname=None): - """ return the path's package path by looking for the given - pkgname. If pkgname is None then look for the last + """ return the Python package path by looking for a + pkgname. If pkgname is None look for the last directory upwards which still contains an __init__.py and whose basename is python-importable. Return None if a pkgpath can not be determined. 
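A hedged sketch of pypkgpath() and pyimport() as revised above, assuming a package layout of mypkg/__init__.py plus mypkg/sub.py somewhere on the filesystem.

    import py

    p = py.path.local("mypkg/sub.py")
    print(p.pypkgpath())    # the 'mypkg' directory, or None if no package is found
    mod = p.pyimport()      # imports 'mypkg.sub'; raises the new ImportMismatchError
                            # when the imported module's __file__ does not match
    print(mod.__name__)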
@@ -512,6 +516,8 @@ class LocalPath(FSBase): pkg = __import__(pkgpath.basename, None, None, []) names = self.new(ext='').relto(pkgpath.dirpath()) names = names.split(self.sep) + if names and names[-1] == "__init__": + names.pop() modname = ".".join(names) else: # no package scope, still make it possible @@ -519,16 +525,20 @@ class LocalPath(FSBase): self._prependsyspath(self.dirpath()) modname = self.purebasename mod = __import__(modname, None, None, ['__doc__']) + if self.basename == "__init__.py": + return mod # we don't check anything as we might + # we in a namespace package ... too icky to check modfile = mod.__file__ if modfile[-4:] in ('.pyc', '.pyo'): modfile = modfile[:-1] elif modfile.endswith('$py.class'): modfile = modfile[:-9] + '.py' + if modfile.endswith(os.path.sep + "__init__.py"): + if self.basename != "__init__.py": + modfile = modfile[:-12] + if not self.samefile(modfile): - raise EnvironmentError("mismatch:\n" - "imported module %r\n" - "does not stem from %r\n" - "maybe __init__.py files are missing?" % (mod, str(self))) + raise self.ImportMismatchError(modname, modfile, self) return mod else: try: @@ -547,8 +557,8 @@ class LocalPath(FSBase): def sysexec(self, *argv, **popen_opts): """ return stdout text from executing a system child process, - where the 'self' path points to executable. - The process is directly invoked and not through a system shell. + where the 'self' path points to executable. + The process is directly invoked and not through a system shell. """ from subprocess import Popen, PIPE argv = map(str, argv) @@ -589,7 +599,7 @@ class LocalPath(FSBase): else: paths = [re.sub('%SystemRoot%', systemroot, path) for path in paths] - tryadd = '', '.exe', '.com', '.bat', '.cmd' # XXX add more? + tryadd = [''] + os.environ['PATHEXT'].split(os.pathsep) else: paths = py.std.os.environ['PATH'].split(':') tryadd = ('',) @@ -612,7 +622,7 @@ class LocalPath(FSBase): try: x = os.environ['HOME'] except KeyError: - x = os.environ['HOMEPATH'] + x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH'] return cls(x) _gethomedir = classmethod(_gethomedir) @@ -626,21 +636,14 @@ class LocalPath(FSBase): return py.path.local(py.std.tempfile.gettempdir()) get_temproot = classmethod(get_temproot) - def mkdtemp(cls): + def mkdtemp(cls, rootdir=None): """ return a Path object pointing to a fresh new temporary directory (which we created ourself). """ import tempfile - tries = 10 - for i in range(tries): - dname = tempfile.mktemp() - dpath = cls(tempfile.mktemp()) - try: - dpath.mkdir() - except (py.error.EEXIST, py.error.EPERM, py.error.EACCES): - continue - return dpath - raise py.error.ENOENT(dpath, "could not create tempdir, %d tries" % tries) + if rootdir is None: + rootdir = cls.get_temproot() + return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir))) mkdtemp = classmethod(mkdtemp) def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3, @@ -726,7 +729,7 @@ class LocalPath(FSBase): raise except: # this might be py.error.Error, WindowsError ... pass - + # make link... try: username = os.environ['USER'] #linux, et al @@ -766,41 +769,6 @@ def copychunked(src, dest): finally: fsrc.close() -def autopath(globs=None): - """ (deprecated) return the (local) path of the "current" file pointed to by globals or - if it is none - alternatively the callers frame globals. - - the path will always point to a .py file or to None. 
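The mkdtemp() rewrite above now delegates to tempfile.mkdtemp and accepts an optional rootdir; a small sketch.

    import py

    root = py.path.local.get_temproot()          # the platform temp directory
    tmp = py.path.local.mkdtemp(rootdir=root)    # a fresh directory underneath it
    try:
        tmp.join("data.txt").write("hello")
    finally:
        tmp.remove()                             # recursive removal (rec=1 is the default)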
- the path will have the following payload: - pkgdir is the last parent directory path containing __init__.py - """ - py.log._apiwarn("1.1", "py.magic.autopath deprecated, " - "use py.path.local(__file__) and maybe pypkgpath/pyimport().") - if globs is None: - globs = sys._getframe(1).f_globals - try: - __file__ = globs['__file__'] - except KeyError: - if not sys.argv[0]: - raise ValueError("cannot compute autopath in interactive mode") - __file__ = os.path.abspath(sys.argv[0]) - - ret = py.path.local(__file__) - if ret.ext in ('.pyc', '.pyo'): - ret = ret.new(ext='.py') - current = pkgdir = ret.dirpath() - while 1: - if current.join('__init__.py').check(): - pkgdir = current - current = current.dirpath() - if pkgdir != current: - continue - elif str(current) not in sys.path: - sys.path.insert(0, str(current)) - break - ret.pkgdir = pkgdir - return ret - - def isimportable(name): if name: if not (name[0].isalpha() or name[0] == '_'): diff --git a/py/_path/svnurl.py b/py/_path/svnurl.py index 383b01ed4e..f55b6c65da 100644 --- a/py/_path/svnurl.py +++ b/py/_path/svnurl.py @@ -1,7 +1,7 @@ """ module defining a subversion path object based on the external command 'svn'. This modules aims to work with svn 1.3 and higher -but might also interact well with earlier versions. +but might also interact well with earlier versions. """ import os, sys, time, re @@ -11,7 +11,7 @@ from py._path import common from py._path import svnwc as svncommon from py._path.cacheutil import BuildcostAccessCache, AgingCache -DEBUG=False +DEBUG=False class SvnCommandPath(svncommon.SvnPathBase): """ path implementation that offers access to (possibly remote) subversion @@ -22,10 +22,10 @@ class SvnCommandPath(svncommon.SvnPathBase): def __new__(cls, path, rev=None, auth=None): self = object.__new__(cls) - if isinstance(path, cls): - rev = path.rev + if isinstance(path, cls): + rev = path.rev auth = path.auth - path = path.strpath + path = path.strpath svncommon.checkbadchars(path) path = path.rstrip('/') self.strpath = path @@ -97,7 +97,7 @@ class SvnCommandPath(svncommon.SvnPathBase): def open(self, mode='r'): """ return an opened file with the given mode. """ - if mode not in ("r", "rU",): + if mode not in ("r", "rU",): raise ValueError("mode %r not supported" % (mode,)) assert self.check(file=1) # svn cat returns an empty file otherwise if self.rev is None: @@ -111,17 +111,17 @@ class SvnCommandPath(svncommon.SvnPathBase): """ return the directory path of the current path joined with any given path arguments. """ - l = self.strpath.split(self.sep) - if len(l) < 4: - raise py.error.EINVAL(self, "base is not valid") - elif len(l) == 4: - return self.join(*args, **kwargs) - else: + l = self.strpath.split(self.sep) + if len(l) < 4: + raise py.error.EINVAL(self, "base is not valid") + elif len(l) == 4: + return self.join(*args, **kwargs) + else: return self.new(basename='').join(*args, **kwargs) # modifying methods (cache must be invalidated) def mkdir(self, *args, **kwargs): - """ create & return the directory joined with args. + """ create & return the directory joined with args. pass a 'msg' keyword argument to set the commit message. 
""" commit_msg = kwargs.get('msg', "mkdir by py lib invocation") @@ -177,29 +177,29 @@ checkin message msg.""" if getattr(self, 'rev', None) is not None: raise py.error.EINVAL(self, "revisions are immutable") target = self.join(*args) - dir = kwargs.get('dir', 0) - for x in target.parts(reverse=True): - if x.check(): - break - else: - raise py.error.ENOENT(target, "has not any valid base!") - if x == target: - if not x.check(dir=dir): - raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x) - return x - tocreate = target.relto(x) + dir = kwargs.get('dir', 0) + for x in target.parts(reverse=True): + if x.check(): + break + else: + raise py.error.ENOENT(target, "has not any valid base!") + if x == target: + if not x.check(dir=dir): + raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x) + return x + tocreate = target.relto(x) basename = tocreate.split(self.sep, 1)[0] tempdir = py.path.local.mkdtemp() - try: - tempdir.ensure(tocreate, dir=dir) + try: + tempdir.ensure(tocreate, dir=dir) cmd = 'svn import -m "%s" "%s" "%s"' % ( - "ensure %s" % self._escape(tocreate), - self._escape(tempdir.join(basename)), + "ensure %s" % self._escape(tocreate), + self._escape(tempdir.join(basename)), x.join(basename)._encodedurl()) - self._svncmdexecauth(cmd) + self._svncmdexecauth(cmd) self._norev_delentry(x) - finally: - tempdir.remove() + finally: + tempdir.remove() return target # end of modifying methods @@ -247,7 +247,7 @@ checkin message msg.""" for lsline in lines: if lsline: info = InfoSvnCommand(lsline) - if info._name != '.': # svn 1.5 produces '.' dirs, + if info._name != '.': # svn 1.5 produces '.' dirs, nameinfo_seq.append((info._name, info)) nameinfo_seq.sort() return nameinfo_seq diff --git a/py/_path/svnwc.py b/py/_path/svnwc.py index 5a25d0f230..72769dbbb0 100644 --- a/py/_path/svnwc.py +++ b/py/_path/svnwc.py @@ -75,13 +75,13 @@ class RepoCache: repositories = RepoCache() -# svn support code +# svn support code ALLOWED_CHARS = "_ -/\\=$.~+%" #add characters as necessary when tested if sys.platform == "win32": ALLOWED_CHARS += ":" ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:' - + def _getsvnversion(ver=[]): try: return ver[0] @@ -108,7 +108,7 @@ def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS): return False def checkbadchars(url): - # (hpk) not quite sure about the exact purpose, guido w.? + # (hpk) not quite sure about the exact purpose, guido w.? proto, uri = url.split("://", 1) if proto != "file": host, uripath = uri.split('/', 1) @@ -116,7 +116,7 @@ def checkbadchars(url): if (_check_for_bad_chars(host, ALLOWED_CHARS_HOST) \ or _check_for_bad_chars(uripath, ALLOWED_CHARS)): raise ValueError("bad char in %r" % (url, )) - + #_______________________________________________________________ @@ -138,7 +138,7 @@ class SvnPathBase(common.PathBase): def new(self, **kw): """ create a modified version of this path. A 'rev' argument indicates a new revision. 
- the following keyword arguments modify various path parts: + the following keyword arguments modify various path parts:: http://host.com/repo/path/file.ext |-----------------------| dirname @@ -351,7 +351,7 @@ def path_to_fspath(path, addat=True): elif addat: sp = '%s@HEAD' % (sp,) return sp - + def url_from_path(path): fspath = path_to_fspath(path, False) quote = py.std.urllib.quote @@ -460,7 +460,7 @@ class SvnWCCommandPath(common.PathBase): args = args and list(args) or [] args.append(self._makeauthoptions()) return self._svn(cmd, *args) - + def _svn(self, cmd, *args): l = ['svn %s' % cmd] args = [self._escape(item) for item in args] @@ -482,9 +482,9 @@ class SvnWCCommandPath(common.PathBase): except py.process.cmdexec.Error: e = sys.exc_info()[1] strerr = e.err.lower() - if strerr.find('file not found') != -1: - raise py.error.ENOENT(self) - if (strerr.find('file exists') != -1 or + if strerr.find('file not found') != -1: + raise py.error.ENOENT(self) + if (strerr.find('file exists') != -1 or strerr.find('file already exists') != -1 or strerr.find("can't create directory") != -1): raise py.error.EEXIST(self) @@ -503,7 +503,7 @@ class SvnWCCommandPath(common.PathBase): if rev is None or rev == -1: if (py.std.sys.platform != 'win32' and _getsvnversion() == '1.3'): - url += "@HEAD" + url += "@HEAD" else: if _getsvnversion() == '1.3': url += "@%d" % rev @@ -544,7 +544,7 @@ class SvnWCCommandPath(common.PathBase): if p.check(): if p.check(versioned=False): p.add() - return p + return p if kwargs.get('dir', 0): return p._ensuredirs() parent = p.dirpath() @@ -594,7 +594,7 @@ class SvnWCCommandPath(common.PathBase): if not out: # warning or error, raise exception raise Exception(out[4:]) - + def unlock(self): """ unset a previously set lock """ out = self._authsvn('unlock').strip() @@ -627,8 +627,8 @@ class SvnWCCommandPath(common.PathBase): rec = '--non-recursive' # XXX does not work on all subversion versions - #if not externals: - # externals = '--ignore-externals' + #if not externals: + # externals = '--ignore-externals' if updates: updates = '-u' @@ -688,19 +688,19 @@ class SvnWCCommandPath(common.PathBase): del cache.info[self] except KeyError: pass - if out: + if out: m = self._rex_commit.match(out) return int(m.group(1)) def propset(self, name, value, *args): """ set property name to value on this path. """ - d = py.path.local.mkdtemp() - try: - p = d.join('value') - p.write(value) + d = py.path.local.mkdtemp() + try: + p = d.join('value') + p.write(value) self._svn('propset', name, '--file', str(p), *args) - finally: - d.remove() + finally: + d.remove() def propget(self, name): """ get property name on this path. """ @@ -776,16 +776,16 @@ recursively. """ # XXX SVN 1.3 has output on stderr instead of stdout (while it does # return 0!), so a bit nasty, but we assume no output is output # to stderr... - if (output.strip() == '' or + if (output.strip() == '' or output.lower().find('not a versioned resource') != -1): raise py.error.ENOENT(self, output) info = InfoSvnWCCommand(output) # Can't reliably compare on Windows without access to win32api - if py.std.sys.platform != 'win32': - if info.path != self.localpath: - raise py.error.ENOENT(self, "not a versioned resource:" + - " %s != %s" % (info.path, self.localpath)) + if py.std.sys.platform != 'win32': + if info.path != self.localpath: + raise py.error.ENOENT(self, "not a versioned resource:" + + " %s != %s" % (info.path, self.localpath)) cache.info[self] = info return info @@ -799,7 +799,7 @@ recursively. 
""" fil = common.FNMatcher(fil) # XXX unify argument naming with LocalPath.listdir def notsvn(path): - return path.basename != '.svn' + return path.basename != '.svn' paths = [] for localpath in self.localpath.listdir(notsvn): @@ -823,8 +823,8 @@ recursively. """ def versioned(self): try: s = self.svnwcpath.info() - except (py.error.ENOENT, py.error.EEXIST): - return False + except (py.error.ENOENT, py.error.EEXIST): + return False except py.process.cmdexec.Error: e = sys.exc_info()[1] if e.err.find('is not a working copy')!=-1: @@ -833,7 +833,7 @@ recursively. """ return False raise else: - return True + return True def log(self, rev_start=None, rev_end=1, verbose=False): """ return a list of LogEntry instances for this path. @@ -859,9 +859,9 @@ if verbose is True, then the LogEntry instances also know which files changed. cmd = locale_env + 'svn log --xml %s %s %s "%s"' % ( rev_opt, verbose_opt, auth_opt, self.strpath) - popen = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + popen = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, shell=True, ) stdout, stderr = popen.communicate() diff --git a/py/_plugin/__init__.py b/py/_plugin/__init__.py deleted file mode 100644 index 792d600548..0000000000 --- a/py/_plugin/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# diff --git a/py/_plugin/hookspec.py b/py/_plugin/hookspec.py deleted file mode 100644 index 37230671bf..0000000000 --- a/py/_plugin/hookspec.py +++ /dev/null @@ -1,172 +0,0 @@ -""" -hook specifications for py.test plugins -""" - -# ------------------------------------------------------------------------- -# Command line and configuration -# ------------------------------------------------------------------------- - -def pytest_namespace(): - "return dict of name->object which will get stored at py.test. namespace" - -def pytest_addoption(parser): - "add optparse-style options via parser.addoption." - -def pytest_addhooks(pluginmanager): - "add hooks via pluginmanager.registerhooks(module)" - -def pytest_configure(config): - """ called after command line options have been parsed. - and all plugins and initial conftest files been loaded. - """ - -def pytest_unconfigure(config): - """ called before test process is exited. """ - -# ------------------------------------------------------------------------- -# collection hooks -# ------------------------------------------------------------------------- - -def pytest_ignore_collect(path, config): - """ return true value to prevent considering this path for collection. - This hook is consulted for all files and directories prior to considering - collection hooks. - """ -pytest_ignore_collect.firstresult = True - -def pytest_collect_directory(path, parent): - """ return Collection node or None for the given path. """ -pytest_collect_directory.firstresult = True - -def pytest_collect_file(path, parent): - """ return Collection node or None for the given path. """ - -def pytest_collectstart(collector): - """ collector starts collecting. """ - -def pytest_collectreport(report): - """ collector finished collecting. """ - -def pytest_deselected(items): - """ called for test items deselected by keyword. """ - -def pytest_make_collect_report(collector): - """ perform a collection and return a collection. """ -pytest_make_collect_report.firstresult = True - -# XXX rename to item_collected()? meaning in distribution context? -def pytest_itemstart(item, node=None): - """ test item gets collected. 
""" - -# ------------------------------------------------------------------------- -# Python test function related hooks -# ------------------------------------------------------------------------- - -def pytest_pycollect_makemodule(path, parent): - """ return a Module collector or None for the given path. - This hook will be called for each matching test module path. - The pytest_collect_file hook needs to be used if you want to - create test modules for files that do not match as a test module. - """ -pytest_pycollect_makemodule.firstresult = True - -def pytest_pycollect_makeitem(collector, name, obj): - """ return custom item/collector for a python object in a module, or None. """ -pytest_pycollect_makeitem.firstresult = True - -def pytest_pyfunc_call(pyfuncitem): - """ call underlying test function. """ -pytest_pyfunc_call.firstresult = True - -def pytest_generate_tests(metafunc): - """ generate (multiple) parametrized calls to a test function.""" - -# ------------------------------------------------------------------------- -# generic runtest related hooks -# ------------------------------------------------------------------------- - -def pytest_runtest_protocol(item): - """ implement fixture, run and report about the given test item. """ -pytest_runtest_protocol.firstresult = True - -def pytest_runtest_setup(item): - """ called before pytest_runtest_call(). """ - -def pytest_runtest_call(item): - """ execute test item. """ - -def pytest_runtest_teardown(item): - """ called after pytest_runtest_call(). """ - -def pytest_runtest_makereport(item, call): - """ make a test report for the given item and call outcome. """ -pytest_runtest_makereport.firstresult = True - -def pytest_runtest_logreport(report): - """ process item test report. """ - -# special handling for final teardown - somewhat internal for now -def pytest__teardown_final(session): - """ called before test session finishes. """ -pytest__teardown_final.firstresult = True - -def pytest__teardown_final_logerror(report): - """ called if runtest_teardown_final failed. """ - -# ------------------------------------------------------------------------- -# test session related hooks -# ------------------------------------------------------------------------- - -def pytest_sessionstart(session): - """ before session.main() is called. """ - -def pytest_sessionfinish(session, exitstatus): - """ whole test run finishes. """ - -# ------------------------------------------------------------------------- -# hooks for influencing reporting (invoked from pytest_terminal) -# ------------------------------------------------------------------------- - -def pytest_report_header(config): - """ return a string to be displayed as header info for terminal reporting.""" - -def pytest_report_teststatus(report): - """ return result-category, shortletter and verbose word for reporting.""" -pytest_report_teststatus.firstresult = True - -def pytest_terminal_summary(terminalreporter): - """ add additional section in terminal summary reporting. """ - -def pytest_report_iteminfo(item): - """ return (fspath, lineno, name) for the item. 
- the information is used for result display and to sort tests - """ -pytest_report_iteminfo.firstresult = True - -# ------------------------------------------------------------------------- -# doctest hooks -# ------------------------------------------------------------------------- - -def pytest_doctest_prepare_content(content): - """ return processed content for a given doctest""" -pytest_doctest_prepare_content.firstresult = True - - -# ------------------------------------------------------------------------- -# error handling and internal debugging hooks -# ------------------------------------------------------------------------- - -def pytest_plugin_registered(plugin, manager): - """ a new py lib plugin got registered. """ - -def pytest_plugin_unregistered(plugin): - """ a py lib plugin got unregistered. """ - -def pytest_internalerror(excrepr): - """ called for internal errors. """ - -def pytest_keyboard_interrupt(excinfo): - """ called for keyboard interrupt. """ - -def pytest_trace(category, msg): - """ called for debug info. """ diff --git a/py/_plugin/pytest__pytest.py b/py/_plugin/pytest__pytest.py deleted file mode 100644 index f84a0d67b8..0000000000 --- a/py/_plugin/pytest__pytest.py +++ /dev/null @@ -1,101 +0,0 @@ -import py - -from py._test.pluginmanager import HookRelay - -def pytest_funcarg___pytest(request): - return PytestArg(request) - -class PytestArg: - def __init__(self, request): - self.request = request - - def gethookrecorder(self, hook): - hookrecorder = HookRecorder(hook._registry) - hookrecorder.start_recording(hook._hookspecs) - self.request.addfinalizer(hookrecorder.finish_recording) - return hookrecorder - -class ParsedCall: - def __init__(self, name, locals): - assert '_name' not in locals - self.__dict__.update(locals) - self.__dict__.pop('self') - self._name = name - - def __repr__(self): - d = self.__dict__.copy() - del d['_name'] - return "<ParsedCall %r(**%r)>" %(self._name, d) - -class HookRecorder: - def __init__(self, registry): - self._registry = registry - self.calls = [] - self._recorders = {} - - def start_recording(self, hookspecs): - if not isinstance(hookspecs, (list, tuple)): - hookspecs = [hookspecs] - for hookspec in hookspecs: - assert hookspec not in self._recorders - class RecordCalls: - _recorder = self - for name, method in vars(hookspec).items(): - if name[0] != "_": - setattr(RecordCalls, name, self._makecallparser(method)) - recorder = RecordCalls() - self._recorders[hookspec] = recorder - self._registry.register(recorder) - self.hook = HookRelay(hookspecs, registry=self._registry, - prefix="pytest_") - - def finish_recording(self): - for recorder in self._recorders.values(): - self._registry.unregister(recorder) - self._recorders.clear() - - def _makecallparser(self, method): - name = method.__name__ - args, varargs, varkw, default = py.std.inspect.getargspec(method) - if not args or args[0] != "self": - args.insert(0, 'self') - fspec = py.std.inspect.formatargspec(args, varargs, varkw, default) - # we use exec because we want to have early type - # errors on wrong input arguments, using - # *args/**kwargs delays this and gives errors - # elsewhere - exec (py.code.compile(""" - def %(name)s%(fspec)s: - self._recorder.calls.append( - ParsedCall(%(name)r, locals())) - """ % locals())) - return locals()[name] - - def getcalls(self, names): - if isinstance(names, str): - names = names.split() - for name in names: - for cls in self._recorders: - if name in vars(cls): - break - else: - raise ValueError("callname %r not found in %r" %( 
- name, self._recorders.keys())) - l = [] - for call in self.calls: - if call._name in names: - l.append(call) - return l - - def popcall(self, name): - for i, call in enumerate(self.calls): - if call._name == name: - del self.calls[i] - return call - raise ValueError("could not find call %r" %(name, )) - - def getcall(self, name): - l = self.getcalls(name) - assert len(l) == 1, (name, l) - return l[0] - diff --git a/py/_plugin/pytest_assertion.py b/py/_plugin/pytest_assertion.py deleted file mode 100644 index c4e221319b..0000000000 --- a/py/_plugin/pytest_assertion.py +++ /dev/null @@ -1,28 +0,0 @@ -import py -import sys - -def pytest_addoption(parser): - group = parser.getgroup("debugconfig") - group._addoption('--no-assert', action="store_true", default=False, - dest="noassert", - help="disable python assert expression reinterpretation."), - -def pytest_configure(config): - if not config.getvalue("noassert") and not config.getvalue("nomagic"): - warn_about_missing_assertion() - config._oldassertion = py.builtin.builtins.AssertionError - py.builtin.builtins.AssertionError = py.code._AssertionError - -def pytest_unconfigure(config): - if hasattr(config, '_oldassertion'): - py.builtin.builtins.AssertionError = config._oldassertion - del config._oldassertion - -def warn_about_missing_assertion(): - try: - assert False - except AssertionError: - pass - else: - py.std.warnings.warn("Assertions are turned off!" - " (are you using python -O?)") diff --git a/py/_plugin/pytest_capture.py b/py/_plugin/pytest_capture.py deleted file mode 100644 index eb3c603420..0000000000 --- a/py/_plugin/pytest_capture.py +++ /dev/null @@ -1,288 +0,0 @@ -""" -configurable per-test stdout/stderr capturing mechanisms. - -This plugin captures stdout/stderr output for each test separately. -In case of test failures this captured output is shown grouped -togtther with the test. - -The plugin also provides test function arguments that help to -assert stdout/stderr output from within your tests, see the -`funcarg example`_. - - -Capturing of input/output streams during tests ---------------------------------------------------- - -By default ``sys.stdout`` and ``sys.stderr`` are substituted with -temporary streams during the execution of tests and setup/teardown code. -During the whole testing process it will re-use the same temporary -streams allowing to play well with the logging module which easily -takes ownership on these streams. - -Also, 'sys.stdin' is substituted with a file-like "null" object that -does not return any values. This is to immediately error out -on tests that wait on reading something from stdin. - -You can influence output capturing mechanisms from the command line:: - - py.test -s # disable all capturing - py.test --capture=sys # replace sys.stdout/stderr with in-mem files - py.test --capture=fd # point filedescriptors 1 and 2 to temp file - -If you set capturing values in a conftest file like this:: - - # conftest.py - option_capture = 'fd' - -then all tests in that directory will execute with "fd" style capturing. - -sys-level capturing ------------------------------------------- - -Capturing on 'sys' level means that ``sys.stdout`` and ``sys.stderr`` -will be replaced with in-memory files (``py.io.TextIO`` to be precise) -that capture writes and decode non-unicode strings to a unicode object -(using a default, usually, UTF-8, encoding). 
- -FD-level capturing and subprocesses ------------------------------------------- - -The ``fd`` based method means that writes going to system level files -based on the standard file descriptors will be captured, for example -writes such as ``os.write(1, 'hello')`` will be captured properly. -Capturing on fd-level will include output generated from -any subprocesses created during a test. - -.. _`funcarg example`: - -Example Usage of the capturing Function arguments ---------------------------------------------------- - -You can use the `capsys funcarg`_ and `capfd funcarg`_ to -capture writes to stdout and stderr streams. Using the -funcargs frees your test from having to care about setting/resetting -the old streams and also interacts well with py.test's own -per-test capturing. Here is an example test function: - -.. sourcecode:: python - - def test_myoutput(capsys): - print ("hello") - sys.stderr.write("world\\n") - out, err = capsys.readouterr() - assert out == "hello\\n" - assert err == "world\\n" - print "next" - out, err = capsys.readouterr() - assert out == "next\\n" - -The ``readouterr()`` call snapshots the output so far - -and capturing will be continued. After the test -function finishes the original streams will -be restored. If you want to capture on -the filedescriptor level you can use the ``capfd`` function -argument which offers the same interface. -""" - -import py -import os - -def pytest_addoption(parser): - group = parser.getgroup("general") - group._addoption('--capture', action="store", default=None, - metavar="method", type="choice", choices=['fd', 'sys', 'no'], - help="per-test capturing method: one of fd (default)|sys|no.") - group._addoption('-s', action="store_const", const="no", dest="capture", - help="shortcut for --capture=no.") - -def addouterr(rep, outerr): - repr = getattr(rep, 'longrepr', None) - if not hasattr(repr, 'addsection'): - return - for secname, content in zip(["out", "err"], outerr): - if content: - repr.addsection("Captured std%s" % secname, content.rstrip()) - -def pytest_configure(config): - config.pluginmanager.register(CaptureManager(), 'capturemanager') - -class NoCapture: - def startall(self): - pass - def resume(self): - pass - def suspend(self): - return "", "" - -class CaptureManager: - def __init__(self): - self._method2capture = {} - - def _maketempfile(self): - f = py.std.tempfile.TemporaryFile() - newf = py.io.dupfile(f, encoding="UTF-8") - return newf - - def _makestringio(self): - return py.io.TextIO() - - def _getcapture(self, method): - if method == "fd": - return py.io.StdCaptureFD(now=False, - out=self._maketempfile(), err=self._maketempfile() - ) - elif method == "sys": - return py.io.StdCapture(now=False, - out=self._makestringio(), err=self._makestringio() - ) - elif method == "no": - return NoCapture() - else: - raise ValueError("unknown capturing method: %r" % method) - - def _getmethod(self, config, fspath): - if config.option.capture: - method = config.option.capture - else: - try: - method = config._conftest.rget("option_capture", path=fspath) - except KeyError: - method = "fd" - if method == "fd" and not hasattr(os, 'dup'): # e.g. 
jython - method = "sys" - return method - - def resumecapture_item(self, item): - method = self._getmethod(item.config, item.fspath) - if not hasattr(item, 'outerr'): - item.outerr = ('', '') # we accumulate outerr on the item - return self.resumecapture(method) - - def resumecapture(self, method): - if hasattr(self, '_capturing'): - raise ValueError("cannot resume, already capturing with %r" % - (self._capturing,)) - cap = self._method2capture.get(method) - self._capturing = method - if cap is None: - self._method2capture[method] = cap = self._getcapture(method) - cap.startall() - else: - cap.resume() - - def suspendcapture(self, item=None): - self.deactivate_funcargs() - if hasattr(self, '_capturing'): - method = self._capturing - cap = self._method2capture.get(method) - if cap is not None: - outerr = cap.suspend() - del self._capturing - if item: - outerr = (item.outerr[0] + outerr[0], - item.outerr[1] + outerr[1]) - return outerr - return "", "" - - def activate_funcargs(self, pyfuncitem): - if not hasattr(pyfuncitem, 'funcargs'): - return - assert not hasattr(self, '_capturing_funcargs') - self._capturing_funcargs = capturing_funcargs = [] - for name, capfuncarg in pyfuncitem.funcargs.items(): - if name in ('capsys', 'capfd'): - capturing_funcargs.append(capfuncarg) - capfuncarg._start() - - def deactivate_funcargs(self): - capturing_funcargs = getattr(self, '_capturing_funcargs', None) - if capturing_funcargs is not None: - while capturing_funcargs: - capfuncarg = capturing_funcargs.pop() - capfuncarg._finalize() - del self._capturing_funcargs - - def pytest_make_collect_report(self, __multicall__, collector): - method = self._getmethod(collector.config, collector.fspath) - self.resumecapture(method) - try: - rep = __multicall__.execute() - finally: - outerr = self.suspendcapture() - addouterr(rep, outerr) - return rep - - def pytest_runtest_setup(self, item): - self.resumecapture_item(item) - - def pytest_runtest_call(self, item): - self.resumecapture_item(item) - self.activate_funcargs(item) - - def pytest_runtest_teardown(self, item): - self.resumecapture_item(item) - - def pytest__teardown_final(self, __multicall__, session): - method = self._getmethod(session.config, None) - self.resumecapture(method) - try: - rep = __multicall__.execute() - finally: - outerr = self.suspendcapture() - if rep: - addouterr(rep, outerr) - return rep - - def pytest_keyboard_interrupt(self, excinfo): - if hasattr(self, '_capturing'): - self.suspendcapture() - - def pytest_runtest_makereport(self, __multicall__, item, call): - self.deactivate_funcargs() - rep = __multicall__.execute() - outerr = self.suspendcapture(item) - if not rep.passed: - addouterr(rep, outerr) - if not rep.passed or rep.when == "teardown": - outerr = ('', '') - item.outerr = outerr - return rep - -def pytest_funcarg__capsys(request): - """captures writes to sys.stdout/sys.stderr and makes - them available successively via a ``capsys.readouterr()`` method - which returns a ``(out, err)`` tuple of captured snapshot strings. - """ - return CaptureFuncarg(request, py.io.StdCapture) - -def pytest_funcarg__capfd(request): - """captures writes to file descriptors 1 and 2 and makes - snapshotted ``(out, err)`` string tuples available - via the ``capsys.readouterr()`` method. If the underlying - platform does not have ``os.dup`` (e.g. Jython) tests using - this funcarg will automatically skip. 
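A quick usage sketch for the fd-level funcarg just described (the command and its output are only illustrative)::

    import os
    def test_system_echo(capfd):
        os.system('echo hello')            # writes to fd 1, bypassing sys.stdout
        out, err = capfd.readouterr()
        assert out == 'hello\n'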
- """ - if not hasattr(os, 'dup'): - py.test.skip("capfd funcarg needs os.dup") - return CaptureFuncarg(request, py.io.StdCaptureFD) - - -class CaptureFuncarg: - def __init__(self, request, captureclass): - self._cclass = captureclass - self.capture = self._cclass(now=False) - #request.addfinalizer(self._finalize) - - def _start(self): - self.capture.startall() - - def _finalize(self): - if hasattr(self, 'capture'): - self.capture.reset() - del self.capture - - def readouterr(self): - return self.capture.readouterr() - - def close(self): - self._finalize() diff --git a/py/_plugin/pytest_default.py b/py/_plugin/pytest_default.py deleted file mode 100644 index 287331736b..0000000000 --- a/py/_plugin/pytest_default.py +++ /dev/null @@ -1,131 +0,0 @@ -""" default hooks and general py.test options. """ - -import sys -import py - -def pytest_pyfunc_call(__multicall__, pyfuncitem): - if not __multicall__.execute(): - testfunction = pyfuncitem.obj - if pyfuncitem._isyieldedfunction(): - testfunction(*pyfuncitem._args) - else: - funcargs = pyfuncitem.funcargs - testfunction(**funcargs) - -def pytest_collect_file(path, parent): - ext = path.ext - pb = path.purebasename - if pb.startswith("test_") or pb.endswith("_test") or \ - path in parent.config._argfspaths: - if ext == ".py": - return parent.ihook.pytest_pycollect_makemodule( - path=path, parent=parent) - -def pytest_pycollect_makemodule(path, parent): - return parent.Module(path, parent) - -def pytest_funcarg__pytestconfig(request): - """ the pytest config object with access to command line opts.""" - return request.config - -def pytest_ignore_collect(path, config): - ignore_paths = config.getconftest_pathlist("collect_ignore", path=path) - ignore_paths = ignore_paths or [] - excludeopt = config.getvalue("ignore") - if excludeopt: - ignore_paths.extend([py.path.local(x) for x in excludeopt]) - return path in ignore_paths - # XXX more refined would be: - if ignore_paths: - for p in ignore_paths: - if path == p or path.relto(p): - return True - - -def pytest_collect_directory(path, parent): - # XXX reconsider the following comment - # not use parent.Directory here as we generally - # want dir/conftest.py to be able to - # define Directory(dir) already - if not parent.recfilter(path): # by default special ".cvs", ... - # check if cmdline specified this dir or a subdir directly - for arg in parent.config._argfspaths: - if path == arg or arg.relto(path): - break - else: - return - Directory = parent.config._getcollectclass('Directory', path) - return Directory(path, parent=parent) - -def pytest_report_iteminfo(item): - return item.reportinfo() - -def pytest_addoption(parser): - group = parser.getgroup("general", "running and selection options") - group._addoption('-x', '--exitfirst', action="store_true", default=False, - dest="exitfirst", - help="exit instantly on first error or failed test."), - group._addoption('--maxfail', metavar="num", - action="store", type="int", dest="maxfail", default=0, - help="exit after first num failures or errors.") - group._addoption('-k', - action="store", dest="keyword", default='', - help="only run test items matching the given " - "space separated keywords. precede a keyword with '-' to negate. " - "Terminate the expression with ':' to treat a match as a signal " - "to run all subsequent tests. 
") - - group = parser.getgroup("collect", "collection") - group.addoption('--collectonly', - action="store_true", dest="collectonly", - help="only collect tests, don't execute them."), - group.addoption("--ignore", action="append", metavar="path", - help="ignore path during collection (multi-allowed).") - group.addoption('--confcutdir', dest="confcutdir", default=None, - metavar="dir", - help="only load conftest.py's relative to specified dir.") - - group = parser.getgroup("debugconfig", - "test process debugging and configuration") - group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir", - help="base temporary directory for this test run.") - -def pytest_configure(config): - setsession(config) - # compat - if config.getvalue("exitfirst"): - config.option.maxfail = 1 - -def setsession(config): - val = config.getvalue - if val("collectonly"): - from py._test.session import Session - config.setsessionclass(Session) - -# pycollect related hooks and code, should move to pytest_pycollect.py - -def pytest_pycollect_makeitem(__multicall__, collector, name, obj): - res = __multicall__.execute() - if res is not None: - return res - if collector._istestclasscandidate(name, obj): - res = collector._deprecated_join(name) - if res is not None: - return res - return collector.Class(name, parent=collector) - elif collector.funcnamefilter(name) and hasattr(obj, '__call__'): - res = collector._deprecated_join(name) - if res is not None: - return res - if is_generator(obj): - # XXX deprecation warning - return collector.Generator(name, parent=collector) - else: - return collector._genfunctions(name, obj) - -def is_generator(func): - try: - return py.code.getrawcode(func).co_flags & 32 # generator function - except AttributeError: # builtin functions have no bytecode - # assume them to not be generators - return False diff --git a/py/_plugin/pytest_doctest.py b/py/_plugin/pytest_doctest.py deleted file mode 100644 index 1f30d217bc..0000000000 --- a/py/_plugin/pytest_doctest.py +++ /dev/null @@ -1,100 +0,0 @@ -""" -collect and execute doctests from modules and test files. - -Usage -------------- - -By default all files matching the ``test*.txt`` pattern will -be run through the python standard ``doctest`` module. Issue:: - - py.test --doctest-glob='*.rst' - -to change the pattern. 
Additionally you can trigger running of -tests in all python modules (including regular python test modules):: - - py.test --doctest-modules - -You can also make these changes permanent in your project by -putting them into a conftest.py file like this:: - - # content of conftest.py - option_doctestmodules = True - option_doctestglob = "*.rst" -""" - -import py -from py._code.code import TerminalRepr, ReprFileLocation -import doctest - -def pytest_addoption(parser): - group = parser.getgroup("collect") - group.addoption("--doctest-modules", - action="store_true", default=False, - help="run doctests in all .py modules", - dest="doctestmodules") - group.addoption("--doctest-glob", - action="store", default="test*.txt", metavar="pat", - help="doctests file matching pattern, default: test*.txt", - dest="doctestglob") - -def pytest_collect_file(path, parent): - config = parent.config - if path.ext == ".py": - if config.getvalue("doctestmodules"): - return DoctestModule(path, parent) - elif path.check(fnmatch=config.getvalue("doctestglob")): - return DoctestTextfile(path, parent) - -class ReprFailDoctest(TerminalRepr): - def __init__(self, reprlocation, lines): - self.reprlocation = reprlocation - self.lines = lines - def toterminal(self, tw): - for line in self.lines: - tw.line(line) - self.reprlocation.toterminal(tw) - -class DoctestItem(py.test.collect.Item): - def __init__(self, path, parent): - name = self.__class__.__name__ + ":" + path.basename - super(DoctestItem, self).__init__(name=name, parent=parent) - self.fspath = path - - def repr_failure(self, excinfo): - if excinfo.errisinstance(doctest.DocTestFailure): - doctestfailure = excinfo.value - example = doctestfailure.example - test = doctestfailure.test - filename = test.filename - lineno = test.lineno + example.lineno + 1 - message = excinfo.type.__name__ - reprlocation = ReprFileLocation(filename, lineno, message) - checker = doctest.OutputChecker() - REPORT_UDIFF = doctest.REPORT_UDIFF - filelines = py.path.local(filename).readlines(cr=0) - i = max(test.lineno, max(0, lineno - 10)) # XXX? - lines = [] - for line in filelines[i:lineno]: - lines.append("%03d %s" % (i+1, line)) - i += 1 - lines += checker.output_difference(example, - doctestfailure.got, REPORT_UDIFF).split("\n") - return ReprFailDoctest(reprlocation, lines) - elif excinfo.errisinstance(doctest.UnexpectedException): - excinfo = py.code.ExceptionInfo(excinfo.value.exc_info) - return super(DoctestItem, self).repr_failure(excinfo) - else: - return super(DoctestItem, self).repr_failure(excinfo) - -class DoctestTextfile(DoctestItem): - def runtest(self): - if not self._deprecated_testexecution(): - failed, tot = doctest.testfile( - str(self.fspath), module_relative=False, - raise_on_error=True, verbose=0) - -class DoctestModule(DoctestItem): - def runtest(self): - module = self.fspath.pyimport() - failed, tot = doctest.testmod( - module, raise_on_error=True, verbose=0) diff --git a/py/_plugin/pytest_genscript.py b/py/_plugin/pytest_genscript.py deleted file mode 100755 index 531c8795f9..0000000000 --- a/py/_plugin/pytest_genscript.py +++ /dev/null @@ -1,69 +0,0 @@ -#! /usr/bin/env python -""" -generate standalone test script to be distributed along with an application. 
-""" - -import os -import sys -def pytest_addoption(parser): - group = parser.getgroup("debugconfig") - group.addoption("--genscript", action="store", default=None, - dest="genscript", metavar="path", - help="create standalone py.test script at given target path.") - -def pytest_configure(config): - genscript = config.getvalue("genscript") - if genscript: - import py - mydir = py.path.local(__file__).dirpath() - infile = mydir.join("standalonetemplate.py") - pybasedir = py.path.local(py.__file__).dirpath().dirpath() - genscript = py.path.local(genscript) - main(pybasedir, outfile=genscript, infile=infile) - raise SystemExit(0) - -def main(pybasedir, outfile, infile): - import base64 - import zlib - try: - import pickle - except Importerror: - import cPickle as pickle - - outfile = str(outfile) - infile = str(infile) - assert os.path.isabs(outfile) - os.chdir(str(pybasedir)) - files = [] - for dirpath, dirnames, filenames in os.walk("py"): - for f in filenames: - if not f.endswith(".py"): - continue - - fn = os.path.join(dirpath, f) - files.append(fn) - - name2src = {} - for f in files: - k = f.replace(os.sep, ".")[:-3] - name2src[k] = open(f, "r").read() - - data = pickle.dumps(name2src, 2) - data = zlib.compress(data, 9) - data = base64.encodestring(data) - data = data.decode("ascii") - - exe = open(infile, "r").read() - exe = exe.replace("@SOURCES@", data) - - open(outfile, "w").write(exe) - os.chmod(outfile, 493) # 0755 - sys.stdout.write("generated standalone py.test at %r, have fun!\n" % outfile) - -if __name__=="__main__": - dn = os.path.dirname - here = os.path.abspath(dn(__file__)) # py/plugin/ - pybasedir = dn(dn(here)) - outfile = os.path.join(os.getcwd(), "py.test-standalone") - infile = os.path.join(here, 'standalonetemplate.py') - main(pybasedir, outfile, infile) diff --git a/py/_plugin/pytest_helpconfig.py b/py/_plugin/pytest_helpconfig.py deleted file mode 100644 index f5f5f7501d..0000000000 --- a/py/_plugin/pytest_helpconfig.py +++ /dev/null @@ -1,164 +0,0 @@ -""" provide version info, conftest/environment config names. -""" -import py -import inspect, sys - -def pytest_addoption(parser): - group = parser.getgroup('debugconfig') - group.addoption('--version', action="store_true", - help="display py lib version and import information.") - group._addoption('-p', action="append", dest="plugins", default = [], - metavar="name", - help="early-load given plugin (multi-allowed).") - group.addoption('--traceconfig', - action="store_true", dest="traceconfig", default=False, - help="trace considerations of conftest.py files."), - group._addoption('--nomagic', - action="store_true", dest="nomagic", default=False, - help="don't reinterpret asserts, no traceback cutting. 
") - group.addoption('--debug', - action="store_true", dest="debug", default=False, - help="generate and show internal debugging information.") - group.addoption("--help-config", action="store_true", dest="helpconfig", - help="show available conftest.py and ENV-variable names.") - - -def pytest_configure(__multicall__, config): - if config.option.version: - p = py.path.local(py.__file__).dirpath() - sys.stderr.write("This is py.test version %s, imported from %s\n" % - (py.__version__, p)) - sys.exit(0) - if not config.option.helpconfig: - return - __multicall__.execute() - options = [] - for group in config._parser._groups: - options.extend(group.options) - widths = [0] * 10 - tw = py.io.TerminalWriter() - tw.sep("-") - tw.line("%-13s | %-18s | %-25s | %s" %( - "cmdline name", "conftest.py name", "ENV-variable name", "help")) - tw.sep("-") - - options = [opt for opt in options if opt._long_opts] - options.sort(key=lambda x: x._long_opts) - for opt in options: - if not opt._long_opts or not opt.dest: - continue - optstrings = list(opt._long_opts) # + list(opt._short_opts) - optstrings = filter(None, optstrings) - optstring = "|".join(optstrings) - line = "%-13s | %-18s | %-25s | %s" %( - optstring, - "option_%s" % opt.dest, - "PYTEST_OPTION_%s" % opt.dest.upper(), - opt.help and opt.help or "", - ) - tw.line(line[:tw.fullwidth]) - for name, help in conftest_options: - line = "%-13s | %-18s | %-25s | %s" %( - "", - name, - "", - help, - ) - tw.line(line[:tw.fullwidth]) - - tw.sep("-") - sys.exit(0) - -conftest_options = ( - ('pytest_plugins', 'list of plugin names to load'), - ('collect_ignore', '(relative) paths ignored during collection'), - ('rsyncdirs', 'to-be-rsynced directories for dist-testing'), -) - -def pytest_report_header(config): - lines = [] - if config.option.debug or config.option.traceconfig: - lines.append("using py lib: %s" % (py.path.local(py.__file__).dirpath())) - if config.option.traceconfig: - lines.append("active plugins:") - plugins = [] - items = config.pluginmanager._name2plugin.items() - for name, plugin in items: - lines.append(" %-20s: %s" %(name, repr(plugin))) - return lines - - -# ===================================================== -# validate plugin syntax and hooks -# ===================================================== - -def pytest_plugin_registered(manager, plugin): - methods = collectattr(plugin) - hooks = {} - for hookspec in manager.hook._hookspecs: - hooks.update(collectattr(hookspec)) - - stringio = py.io.TextIO() - def Print(*args): - if args: - stringio.write(" ".join(map(str, args))) - stringio.write("\n") - - fail = False - while methods: - name, method = methods.popitem() - #print "checking", name - if isgenerichook(name): - continue - if name not in hooks: - if not getattr(method, 'optionalhook', False): - Print("found unknown hook:", name) - fail = True - else: - #print "checking", method - method_args = getargs(method) - #print "method_args", method_args - if '__multicall__' in method_args: - method_args.remove('__multicall__') - hook = hooks[name] - hookargs = getargs(hook) - for arg in method_args: - if arg not in hookargs: - Print("argument %r not available" %(arg, )) - Print("actual definition: %s" %(formatdef(method))) - Print("available hook arguments: %s" % - ", ".join(hookargs)) - fail = True - break - #if not fail: - # print "matching hook:", formatdef(method) - if fail: - name = getattr(plugin, '__name__', plugin) - raise PluginValidationError("%s:\n%s" %(name, stringio.getvalue())) - -class PluginValidationError(Exception): - 
""" plugin failed validation. """ - -def isgenerichook(name): - return name == "pytest_plugins" or \ - name.startswith("pytest_funcarg__") - -def getargs(func): - args = inspect.getargs(py.code.getrawcode(func))[0] - startindex = inspect.ismethod(func) and 1 or 0 - return args[startindex:] - -def collectattr(obj, prefixes=("pytest_",)): - methods = {} - for apiname in dir(obj): - for prefix in prefixes: - if apiname.startswith(prefix): - methods[apiname] = getattr(obj, apiname) - return methods - -def formatdef(func): - return "%s%s" %( - func.__name__, - inspect.formatargspec(*inspect.getargspec(func)) - ) - diff --git a/py/_plugin/pytest_hooklog.py b/py/_plugin/pytest_hooklog.py deleted file mode 100644 index 777ac94826..0000000000 --- a/py/_plugin/pytest_hooklog.py +++ /dev/null @@ -1,33 +0,0 @@ -""" log invocations of extension hooks to a file. """ -import py - -def pytest_addoption(parser): - parser.addoption("--hooklog", dest="hooklog", default=None, - help="write hook calls to the given file.") - -def pytest_configure(config): - hooklog = config.getvalue("hooklog") - if hooklog: - config._hooklogfile = open(hooklog, 'w') - config._hooklog_oldperformcall = config.hook._performcall - config.hook._performcall = (lambda name, multicall: - logged_call(name=name, multicall=multicall, config=config)) - -def logged_call(name, multicall, config): - f = config._hooklogfile - f.write("%s(**%s)\n" % (name, multicall.kwargs)) - try: - res = config._hooklog_oldperformcall(name=name, multicall=multicall) - except: - f.write("-> exception") - raise - f.write("-> %r" % (res,)) - return res - -def pytest_unconfigure(config): - try: - del config.hook.__dict__['_performcall'] - except KeyError: - pass - else: - config._hooklogfile.close() diff --git a/py/_plugin/pytest_junitxml.py b/py/_plugin/pytest_junitxml.py deleted file mode 100644 index 94001b74ed..0000000000 --- a/py/_plugin/pytest_junitxml.py +++ /dev/null @@ -1,171 +0,0 @@ -""" - logging of test results in JUnit-XML format, for use with Hudson - and build integration servers. Based on initial code from Ross Lawley. 
-""" - -import py -import time - -def pytest_addoption(parser): - group = parser.getgroup("terminal reporting") - group.addoption('--junitxml', action="store", dest="xmlpath", - metavar="path", default=None, - help="create junit-xml style report file at given path.") - -def pytest_configure(config): - xmlpath = config.option.xmlpath - if xmlpath: - config._xml = LogXML(xmlpath) - config.pluginmanager.register(config._xml) - -def pytest_unconfigure(config): - xml = getattr(config, '_xml', None) - if xml: - del config._xml - config.pluginmanager.unregister(xml) - -class LogXML(object): - def __init__(self, logfile): - self.logfile = logfile - self.test_logs = [] - self.passed = self.skipped = 0 - self.failed = self.errors = 0 - self._durations = {} - - def _opentestcase(self, report): - node = report.item - d = {'time': self._durations.pop(report.item, "0")} - names = [x.replace(".py", "") for x in node.listnames() if x != "()"] - d['classname'] = ".".join(names[:-1]) - d['name'] = names[-1] - attrs = ['%s="%s"' % item for item in sorted(d.items())] - self.test_logs.append("\n<testcase %s>" % " ".join(attrs)) - - def _closetestcase(self): - self.test_logs.append("</testcase>") - - def appendlog(self, fmt, *args): - args = tuple([py.xml.escape(arg) for arg in args]) - self.test_logs.append(fmt % args) - - def append_pass(self, report): - self.passed += 1 - self._opentestcase(report) - self._closetestcase() - - def append_failure(self, report): - self._opentestcase(report) - #msg = str(report.longrepr.reprtraceback.extraline) - if "xfail" in report.keywords: - self.appendlog( - '<skipped message="xfail-marked test passes unexpectedly"/>') - self.skipped += 1 - else: - self.appendlog('<failure message="test failure">%s</failure>', - report.longrepr) - self.failed += 1 - self._closetestcase() - - def _opentestcase_collectfailure(self, report): - node = report.collector - d = {'time': '???'} - names = [x.replace(".py", "") for x in node.listnames() if x != "()"] - d['classname'] = ".".join(names[:-1]) - d['name'] = names[-1] - attrs = ['%s="%s"' % item for item in sorted(d.items())] - self.test_logs.append("\n<testcase %s>" % " ".join(attrs)) - - def append_collect_failure(self, report): - self._opentestcase_collectfailure(report) - #msg = str(report.longrepr.reprtraceback.extraline) - self.appendlog('<failure message="collection failure">%s</failure>', - report.longrepr) - self._closetestcase() - self.errors += 1 - - def append_collect_skipped(self, report): - self._opentestcase_collectfailure(report) - #msg = str(report.longrepr.reprtraceback.extraline) - self.appendlog('<skipped message="collection skipped">%s</skipped>', - report.longrepr) - self._closetestcase() - self.skipped += 1 - - def append_error(self, report): - self._opentestcase(report) - self.appendlog('<error message="test setup failure">%s</error>', - report.longrepr) - self._closetestcase() - self.errors += 1 - - def append_skipped(self, report): - self._opentestcase(report) - if "xfail" in report.keywords: - self.appendlog( - '<skipped message="expected test failure">%s</skipped>', - report.keywords['xfail']) - else: - self.appendlog("<skipped/>") - self._closetestcase() - self.skipped += 1 - - def pytest_runtest_logreport(self, report): - if report.passed: - self.append_pass(report) - elif report.failed: - if report.when != "call": - self.append_error(report) - else: - self.append_failure(report) - elif report.skipped: - self.append_skipped(report) - - def pytest_runtest_call(self, item, __multicall__): - start = time.time() - 
try: - return __multicall__.execute() - finally: - self._durations[item] = time.time() - start - - def pytest_collectreport(self, report): - if not report.passed: - if report.failed: - self.append_collect_failure(report) - else: - self.append_collect_skipped(report) - - def pytest_internalerror(self, excrepr): - self.errors += 1 - data = py.xml.escape(excrepr) - self.test_logs.append( - '\n<testcase classname="pytest" name="internal">' - ' <error message="internal error">' - '%s</error></testcase>' % data) - - def pytest_sessionstart(self, session): - self.suite_start_time = time.time() - - def pytest_sessionfinish(self, session, exitstatus, __multicall__): - if py.std.sys.version_info[0] < 3: - logfile = py.std.codecs.open(self.logfile, 'w', encoding='utf-8') - else: - logfile = open(self.logfile, 'w', encoding='utf-8') - - suite_stop_time = time.time() - suite_time_delta = suite_stop_time - self.suite_start_time - numtests = self.passed + self.failed - logfile.write('<?xml version="1.0" encoding="utf-8"?>') - logfile.write('<testsuite ') - logfile.write('name="" ') - logfile.write('errors="%i" ' % self.errors) - logfile.write('failures="%i" ' % self.failed) - logfile.write('skips="%i" ' % self.skipped) - logfile.write('tests="%i" ' % numtests) - logfile.write('time="%.3f"' % suite_time_delta) - logfile.write(' >') - logfile.writelines(self.test_logs) - logfile.write('</testsuite>') - logfile.close() - tw = session.config.pluginmanager.getplugin("terminalreporter")._tw - tw.line() - tw.sep("-", "generated xml file: %s" %(self.logfile)) diff --git a/py/_plugin/pytest_mark.py b/py/_plugin/pytest_mark.py deleted file mode 100644 index 6cb942123b..0000000000 --- a/py/_plugin/pytest_mark.py +++ /dev/null @@ -1,174 +0,0 @@ -""" -generic mechanism for marking python functions. - -By using the ``py.test.mark`` helper you can instantiate -decorators that will set named meta data on test functions. - -Marking a single function ----------------------------------------------------- - -You can "mark" a test function with meta data like this:: - - @py.test.mark.webtest - def test_send_http(): - ... - -This will set a "Marker" instance as a function attribute named "webtest". -You can also specify parametrized meta data like this:: - - @py.test.mark.webtest(firefox=30) - def test_receive(): - ... - -The named marker can be accessed like this later:: - - test_receive.webtest.kwargs['firefox'] == 30 - -In addition to set key-value pairs you can also use positional arguments:: - - @py.test.mark.webtest("triangular") - def test_receive(): - ... - -and later access it with ``test_receive.webtest.args[0] == 'triangular``. - -.. _`scoped-marking`: - -Marking whole classes or modules ----------------------------------------------------- - -If you are programming with Python2.6 you may use ``py.test.mark`` decorators -with classes to apply markers to all its test methods:: - - @py.test.mark.webtest - class TestClass: - def test_startup(self): - ... - def test_startup_and_more(self): - ... - -This is equivalent to directly applying the decorator to the -two test functions. 
- -To remain compatible with Python2.5 you can also set a -``pytestmark`` attribute on a TestClass like this:: - - import py - - class TestClass: - pytestmark = py.test.mark.webtest - -or if you need to use multiple markers you can use a list:: - - import py - - class TestClass: - pytestmark = [py.test.mark.webtest, pytest.mark.slowtest] - -You can also set a module level marker:: - - import py - pytestmark = py.test.mark.webtest - -in which case it will be applied to all functions and -methods defined in the module. - -Using "-k MARKNAME" to select tests ----------------------------------------------------- - -You can use the ``-k`` command line option to select -tests:: - - py.test -k webtest # will only run tests marked as webtest - -""" -import py - -def pytest_namespace(): - return {'mark': MarkGenerator()} - -class MarkGenerator: - """ non-underscore attributes of this object can be used as decorators for - marking test functions. Example: @py.test.mark.slowtest in front of a - function will set the 'slowtest' marker object on it. """ - def __getattr__(self, name): - if name[0] == "_": - raise AttributeError(name) - return MarkDecorator(name) - -class MarkDecorator: - """ decorator for setting function attributes. """ - def __init__(self, name): - self.markname = name - self.kwargs = {} - self.args = [] - - def __repr__(self): - d = self.__dict__.copy() - name = d.pop('markname') - return "<MarkDecorator %r %r>" %(name, d) - - def __call__(self, *args, **kwargs): - """ if passed a single callable argument: decorate it with mark info. - otherwise add *args/**kwargs in-place to mark information. """ - if args: - func = args[0] - if len(args) == 1 and hasattr(func, '__call__') or \ - hasattr(func, '__bases__'): - if hasattr(func, '__bases__'): - if hasattr(func, 'pytestmark'): - l = func.pytestmark - if not isinstance(l, list): - func.pytestmark = [l, self] - else: - l.append(self) - else: - func.pytestmark = [self] - else: - holder = getattr(func, self.markname, None) - if holder is None: - holder = MarkInfo(self.markname, self.args, self.kwargs) - setattr(func, self.markname, holder) - else: - holder.kwargs.update(self.kwargs) - holder.args.extend(self.args) - return func - else: - self.args.extend(args) - self.kwargs.update(kwargs) - return self - -class MarkInfo: - def __init__(self, name, args, kwargs): - self._name = name - self.args = args - self.kwargs = kwargs - - def __getattr__(self, name): - if name[0] != '_' and name in self.kwargs: - py.log._apiwarn("1.1", "use .kwargs attribute to access key-values") - return self.kwargs[name] - raise AttributeError(name) - - def __repr__(self): - return "<MarkInfo %r args=%r kwargs=%r>" % ( - self._name, self.args, self.kwargs) - - -def pytest_pycollect_makeitem(__multicall__, collector, name, obj): - item = __multicall__.execute() - if isinstance(item, py.test.collect.Function): - cls = collector.getparent(py.test.collect.Class) - mod = collector.getparent(py.test.collect.Module) - func = item.obj - func = getattr(func, '__func__', func) # py3 - func = getattr(func, 'im_func', func) # py2 - for parent in [x for x in (mod, cls) if x]: - marker = getattr(parent.obj, 'pytestmark', None) - if marker is not None: - if not isinstance(marker, list): - marker = [marker] - for mark in marker: - if isinstance(mark, MarkDecorator): - mark(func) - return item diff --git a/py/_plugin/pytest_monkeypatch.py b/py/_plugin/pytest_monkeypatch.py deleted file mode 100644 index 13c6a6d9af..0000000000 --- a/py/_plugin/pytest_monkeypatch.py +++ /dev/null @@ 
-1,141 +0,0 @@ -""" -safely patch object attributes, dicts and environment variables. - -Usage ----------------- - -Use the `monkeypatch funcarg`_ to tweak your global test environment -for running a particular test. You can safely set/del an attribute, -dictionary item or environment variable by respective methods -on the monkeypatch funcarg. If you want e.g. to set an ENV1 variable -and have os.path.expanduser return a particular directory, you can -write it down like this: - -.. sourcecode:: python - - def test_mytest(monkeypatch): - monkeypatch.setenv('ENV1', 'myval') - monkeypatch.setattr(os.path, 'expanduser', lambda x: '/tmp/xyz') - ... # your test code that uses those patched values implicitely - -After the test function finished all modifications will be undone, -because the ``monkeypatch.undo()`` method is registered as a finalizer. - -``monkeypatch.setattr/delattr/delitem/delenv()`` all -by default raise an Exception if the target does not exist. -Pass ``raising=False`` if you want to skip this check. - -prepending to PATH or other environment variables ---------------------------------------------------------- - -To prepend a value to an already existing environment parameter: - -.. sourcecode:: python - - def test_mypath_finding(monkeypatch): - monkeypatch.setenv('PATH', 'x/y', prepend=":") - # in bash language: export PATH=x/y:$PATH - -calling "undo" finalization explicitely ------------------------------------------ - -At the end of function execution py.test invokes -a teardown hook which undoes all monkeypatch changes. -If you do not want to wait that long you can call -finalization explicitely:: - - monkeypatch.undo() - -This will undo previous changes. This call consumes the -undo stack. Calling it a second time has no effect unless -you start monkeypatching after the undo call. - -.. _`monkeypatch blog post`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/ -""" - -import py, os, sys - -def pytest_funcarg__monkeypatch(request): - """The returned ``monkeypatch`` funcarg provides these - helper methods to modify objects, dictionaries or os.environ:: - - monkeypatch.setattr(obj, name, value, raising=True) - monkeypatch.delattr(obj, name, raising=True) - monkeypatch.setitem(mapping, name, value) - monkeypatch.delitem(obj, name, raising=True) - monkeypatch.setenv(name, value, prepend=False) - monkeypatch.delenv(name, value, raising=True) - monkeypatch.syspath_prepend(path) - - All modifications will be undone when the requesting - test function finished its execution. The ``raising`` - parameter determines if a KeyError or AttributeError - will be raised if the set/deletion operation has no target. 
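An illustrative test (the paths and keys are made up) combining the dictionary, environment and sys.path helpers; every change is reverted once the test finishes::

    import os, sys

    def test_config_lookup(monkeypatch):
        settings = {'timeout': 10}
        monkeypatch.setitem(settings, 'timeout', 1)    # back to 10 after the test
        monkeypatch.delenv('HOME', raising=False)      # removed only for this test
        monkeypatch.syspath_prepend('/opt/fake-libs')  # sys.path copied and restored
        assert settings['timeout'] == 1
        assert 'HOME' not in os.environ
        assert sys.path[0] == '/opt/fake-libs'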
- """ - monkeypatch = MonkeyPatch() - request.addfinalizer(monkeypatch.undo) - return monkeypatch - -notset = object() - -class MonkeyPatch: - def __init__(self): - self._setattr = [] - self._setitem = [] - - def setattr(self, obj, name, value, raising=True): - oldval = getattr(obj, name, notset) - if raising and oldval is notset: - raise AttributeError("%r has no attribute %r" %(obj, name)) - self._setattr.insert(0, (obj, name, oldval)) - setattr(obj, name, value) - - def delattr(self, obj, name, raising=True): - if not hasattr(obj, name): - if raising: - raise AttributeError(name) - else: - self._setattr.insert(0, (obj, name, getattr(obj, name, notset))) - delattr(obj, name) - - def setitem(self, dic, name, value): - self._setitem.insert(0, (dic, name, dic.get(name, notset))) - dic[name] = value - - def delitem(self, dic, name, raising=True): - if name not in dic: - if raising: - raise KeyError(name) - else: - self._setitem.insert(0, (dic, name, dic.get(name, notset))) - del dic[name] - - def setenv(self, name, value, prepend=None): - value = str(value) - if prepend and name in os.environ: - value = value + prepend + os.environ[name] - self.setitem(os.environ, name, value) - - def delenv(self, name, raising=True): - self.delitem(os.environ, name, raising=raising) - - def syspath_prepend(self, path): - if not hasattr(self, '_savesyspath'): - self._savesyspath = sys.path[:] - sys.path.insert(0, str(path)) - - def undo(self): - for obj, name, value in self._setattr: - if value is not notset: - setattr(obj, name, value) - else: - delattr(obj, name) - self._setattr[:] = [] - for dictionary, name, value in self._setitem: - if value is notset: - del dictionary[name] - else: - dictionary[name] = value - self._setitem[:] = [] - if hasattr(self, '_savesyspath'): - sys.path[:] = self._savesyspath diff --git a/py/_plugin/pytest_nose.py b/py/_plugin/pytest_nose.py deleted file mode 100644 index 244f5b61d5..0000000000 --- a/py/_plugin/pytest_nose.py +++ /dev/null @@ -1,98 +0,0 @@ -"""nose-compatibility plugin: allow to run nose test suites natively. - -This is an experimental plugin for allowing to run tests written -in 'nosetests style with py.test. - -Usage -------------- - -type:: - - py.test # instead of 'nosetests' - -and you should be able to run nose style tests and at the same -time can make full use of py.test's capabilities. - -Supported nose Idioms ----------------------- - -* setup and teardown at module/class/method level -* SkipTest exceptions and markers -* setup/teardown decorators -* yield-based tests and their setup -* general usage of nose utilities - -Unsupported idioms / issues ----------------------------------- - -- nose-style doctests are not collected and executed correctly, - also fixtures don't work. - -- no nose-configuration is recognized - -If you find other issues or have suggestions please run:: - - py.test --pastebin=all - -and send the resulting URL to a py.test contact channel, -at best to the mailing list. -""" -import py -import inspect -import sys - -def pytest_runtest_makereport(__multicall__, item, call): - SkipTest = getattr(sys.modules.get('nose', None), 'SkipTest', None) - if SkipTest: - if call.excinfo and call.excinfo.errisinstance(SkipTest): - # let's substitute the excinfo with a py.test.skip one - call2 = call.__class__(lambda: py.test.skip(str(call.excinfo.value)), call.when) - call.excinfo = call2.excinfo - -def pytest_report_iteminfo(item): - # nose 0.11.1 uses decorators for "raises" and other helpers. 
- # for reporting progress by filename we fish for the filename - if isinstance(item, py.test.collect.Function): - obj = item.obj - if hasattr(obj, 'compat_co_firstlineno'): - fn = sys.modules[obj.__module__].__file__ - if fn.endswith(".pyc"): - fn = fn[:-1] - #assert 0 - #fn = inspect.getsourcefile(obj) or inspect.getfile(obj) - lineno = obj.compat_co_firstlineno - return py.path.local(fn), lineno, obj.__module__ - -def pytest_runtest_setup(item): - if isinstance(item, (py.test.collect.Function)): - if isinstance(item.parent, py.test.collect.Generator): - gen = item.parent - if not hasattr(gen, '_nosegensetup'): - call_optional(gen.obj, 'setup') - if isinstance(gen.parent, py.test.collect.Instance): - call_optional(gen.parent.obj, 'setup') - gen._nosegensetup = True - if not call_optional(item.obj, 'setup'): - # call module level setup if there is no object level one - call_optional(item.parent.obj, 'setup') - -def pytest_runtest_teardown(item): - if isinstance(item, py.test.collect.Function): - if not call_optional(item.obj, 'teardown'): - call_optional(item.parent.obj, 'teardown') - #if hasattr(item.parent, '_nosegensetup'): - # #call_optional(item._nosegensetup, 'teardown') - # del item.parent._nosegensetup - -def pytest_make_collect_report(collector): - if isinstance(collector, py.test.collect.Generator): - call_optional(collector.obj, 'setup') - -def call_optional(obj, name): - method = getattr(obj, name, None) - if method: - ismethod = inspect.ismethod(method) - rawcode = py.code.getrawcode(method) - if not rawcode.co_varnames[ismethod:]: - method() - return True diff --git a/py/_plugin/pytest_pastebin.py b/py/_plugin/pytest_pastebin.py deleted file mode 100644 index e6a9ba8ab5..0000000000 --- a/py/_plugin/pytest_pastebin.py +++ /dev/null @@ -1,83 +0,0 @@ -""" -submit failure or test session information to a pastebin service. - -Usage ----------- - -**Creating a URL for each test failure**:: - - py.test --pastebin=failed - -This will submit test run information to a remote Paste service and -provide a URL for each failure. You may select tests as usual or add -for example ``-x`` if you only want to send one particular failure. - -**Creating a URL for a whole test session log**:: - - py.test --pastebin=all - -Currently only pasting to the http://paste.pocoo.org service is implemented. 
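Internally the ``--pastebin=all`` mode tees the terminal writer into a temporary file; a minimal standalone sketch of that pattern (not the plugin code itself)::

    import tempfile

    class Writer:                          # stand-in for the terminal reporter's writer
        def write(self, s, **kwargs):
            pass                           # would normally print to the terminal

    tw = Writer()
    logfile = tempfile.TemporaryFile('w+')

    oldwrite = tw.write
    def tee_write(s, **kwargs):
        oldwrite(s, **kwargs)              # still reach the terminal
        logfile.write(str(s))              # and keep a copy for pasting later
    tw.write = tee_write

    tw.write('collected 3 items\n')
    logfile.seek(0)
    assert logfile.read() == 'collected 3 items\n'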
- -""" -import py, sys - -class url: - base = "http://paste.pocoo.org" - xmlrpc = base + "/xmlrpc/" - show = base + "/show/" - -def pytest_addoption(parser): - group = parser.getgroup("terminal reporting") - group._addoption('--pastebin', metavar="mode", - action='store', dest="pastebin", default=None, - type="choice", choices=['failed', 'all'], - help="send failed|all info to Pocoo pastebin service.") - -def pytest_configure(__multicall__, config): - import tempfile - __multicall__.execute() - if config.option.pastebin == "all": - config._pastebinfile = tempfile.TemporaryFile('w+') - tr = config.pluginmanager.getplugin('terminalreporter') - oldwrite = tr._tw.write - def tee_write(s, **kwargs): - oldwrite(s, **kwargs) - config._pastebinfile.write(str(s)) - tr._tw.write = tee_write - -def pytest_unconfigure(config): - if hasattr(config, '_pastebinfile'): - config._pastebinfile.seek(0) - sessionlog = config._pastebinfile.read() - config._pastebinfile.close() - del config._pastebinfile - proxyid = getproxy().newPaste("python", sessionlog) - pastebinurl = "%s%s" % (url.show, proxyid) - sys.stderr.write("pastebin session-log: %s\n" % pastebinurl) - tr = config.pluginmanager.getplugin('terminalreporter') - del tr._tw.__dict__['write'] - -def getproxy(): - return py.std.xmlrpclib.ServerProxy(url.xmlrpc).pastes - -def pytest_terminal_summary(terminalreporter): - if terminalreporter.config.option.pastebin != "failed": - return - tr = terminalreporter - if 'failed' in tr.stats: - terminalreporter.write_sep("=", "Sending information to Paste Service") - if tr.config.option.debug: - terminalreporter.write_line("xmlrpcurl: %s" %(url.xmlrpc,)) - serverproxy = getproxy() - for rep in terminalreporter.stats.get('failed'): - try: - msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc - except AttributeError: - msg = tr._getfailureheadline(rep) - tw = py.io.TerminalWriter(stringio=True) - rep.toterminal(tw) - s = tw.stringio.getvalue() - assert len(s) - proxyid = serverproxy.newPaste("python", s) - pastebinurl = "%s%s" % (url.show, proxyid) - tr.write_line("%s --> %s" %(msg, pastebinurl)) diff --git a/py/_plugin/pytest_pdb.py b/py/_plugin/pytest_pdb.py deleted file mode 100644 index f8b69f7992..0000000000 --- a/py/_plugin/pytest_pdb.py +++ /dev/null @@ -1,105 +0,0 @@ -""" -interactive debugging with the Python Debugger. 
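The ``--pdb`` option arranges for a post-mortem session on the failing test's traceback; a rough standalone analogue (not the plugin's customized Pdb class) looks like this::

    import pdb, sys

    def call_with_post_mortem(func, *args, **kwargs):
        # run func; on any exception drop into pdb on that traceback, then re-raise
        try:
            return func(*args, **kwargs)
        except Exception:
            pdb.post_mortem(sys.exc_info()[2])
            raise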
-""" -import py -import pdb, sys, linecache - -def pytest_addoption(parser): - group = parser.getgroup("general") - group._addoption('--pdb', - action="store_true", dest="usepdb", default=False, - help="start the interactive Python debugger on errors.") - -def pytest_configure(config): - if config.getvalue("usepdb"): - config.pluginmanager.register(PdbInvoke(), 'pdb') - -class PdbInvoke: - def pytest_runtest_makereport(self, item, call): - if call.excinfo and not \ - call.excinfo.errisinstance(py.test.skip.Exception): - # play well with capturing, slightly hackish - capman = item.config.pluginmanager.getplugin('capturemanager') - capman.suspendcapture() - - tw = py.io.TerminalWriter() - repr = call.excinfo.getrepr() - repr.toterminal(tw) - post_mortem(call.excinfo._excinfo[2]) - - capman.resumecapture_item(item) - -class Pdb(py.std.pdb.Pdb): - def do_list(self, arg): - self.lastcmd = 'list' - last = None - if arg: - try: - x = eval(arg, {}, {}) - if type(x) == type(()): - first, last = x - first = int(first) - last = int(last) - if last < first: - # Assume it's a count - last = first + last - else: - first = max(1, int(x) - 5) - except: - print ('*** Error in argument: %s' % repr(arg)) - return - elif self.lineno is None: - first = max(1, self.curframe.f_lineno - 5) - else: - first = self.lineno + 1 - if last is None: - last = first + 10 - filename = self.curframe.f_code.co_filename - breaklist = self.get_file_breaks(filename) - try: - for lineno in range(first, last+1): - # start difference from normal do_line - line = self._getline(filename, lineno) - # end difference from normal do_line - if not line: - print ('[EOF]') - break - else: - s = repr(lineno).rjust(3) - if len(s) < 4: s = s + ' ' - if lineno in breaklist: s = s + 'B' - else: s = s + ' ' - if lineno == self.curframe.f_lineno: - s = s + '->' - sys.stdout.write(s + '\t' + line) - self.lineno = lineno - except KeyboardInterrupt: - pass - do_l = do_list - - def _getline(self, filename, lineno): - if hasattr(filename, "__source__"): - try: - return filename.__source__.lines[lineno - 1] + "\n" - except IndexError: - return None - return linecache.getline(filename, lineno) - - def get_stack(self, f, t): - # Modified from bdb.py to be able to walk the stack beyond generators, - # which does not work in the normal pdb :-( - stack, i = pdb.Pdb.get_stack(self, f, t) - if f is None: - i = max(0, len(stack) - 1) - while i and stack[i][0].f_locals.get("__tracebackhide__", False): - i-=1 - return stack, i - -def post_mortem(t): - p = Pdb() - p.reset() - p.interaction(None, t) - -def set_trace(): - # again, a copy of the version in pdb.py - Pdb().set_trace(sys._getframe().f_back) diff --git a/py/_plugin/pytest_pylint.py b/py/_plugin/pytest_pylint.py deleted file mode 100644 index b2656ff7e0..0000000000 --- a/py/_plugin/pytest_pylint.py +++ /dev/null @@ -1,36 +0,0 @@ -"""pylint plugin - -XXX: Currently in progress, NOT IN WORKING STATE. 
-""" -import py - -pylint = py.test.importorskip("pylint.lint") - -def pytest_addoption(parser): - group = parser.getgroup('pylint options') - group.addoption('--pylint', action='store_true', - default=False, dest='pylint', - help='run pylint on python files.') - -def pytest_collect_file(path, parent): - if path.ext == ".py": - if parent.config.getvalue('pylint'): - return PylintItem(path, parent) - -#def pytest_terminal_summary(terminalreporter): -# print 'placeholder for pylint output' - -class PylintItem(py.test.collect.Item): - def runtest(self): - capture = py.io.StdCaptureFD() - try: - linter = pylint.lint.PyLinter() - linter.check(str(self.fspath)) - finally: - out, err = capture.reset() - rating = out.strip().split('\n')[-1] - sys.stdout.write(">>>") - print(rating) - assert 0 - - diff --git a/py/_plugin/pytest_pytester.py b/py/_plugin/pytest_pytester.py deleted file mode 100644 index bb6790d75c..0000000000 --- a/py/_plugin/pytest_pytester.py +++ /dev/null @@ -1,500 +0,0 @@ -""" -funcargs and support code for testing py.test's own functionality. -""" - -import py -import sys, os -import re -import inspect -import time -from py._test.config import Config as pytestConfig -from py.builtin import print_ - -def pytest_addoption(parser): - group = parser.getgroup("pylib") - group.addoption('--tools-on-path', - action="store_true", dest="toolsonpath", default=False, - help=("discover tools on PATH instead of going through py.cmdline.") - ) - -pytest_plugins = '_pytest' - -def pytest_funcarg__linecomp(request): - return LineComp() - -def pytest_funcarg__LineMatcher(request): - return LineMatcher - -def pytest_funcarg__testdir(request): - tmptestdir = TmpTestdir(request) - return tmptestdir - -rex_outcome = re.compile("(\d+) (\w+)") -class RunResult: - def __init__(self, ret, outlines, errlines, duration): - self.ret = ret - self.outlines = outlines - self.errlines = errlines - self.stdout = LineMatcher(outlines) - self.stderr = LineMatcher(errlines) - self.duration = duration - - def parseoutcomes(self): - for line in reversed(self.outlines): - if 'seconds' in line: - outcomes = rex_outcome.findall(line) - if outcomes: - d = {} - for num, cat in outcomes: - d[cat] = int(num) - return d - -class TmpTestdir: - def __init__(self, request): - self.request = request - self._pytest = request.getfuncargvalue("_pytest") - # XXX remove duplication with tmpdir plugin - basetmp = request.config.ensuretemp("testdir") - name = request.function.__name__ - for i in range(100): - try: - tmpdir = basetmp.mkdir(name + str(i)) - except py.error.EEXIST: - continue - break - # we need to create another subdir - # because Directory.collect() currently loads - # conftest.py from sibling directories - self.tmpdir = tmpdir.mkdir(name) - self.plugins = [] - self._syspathremove = [] - self.chdir() # always chdir - self.request.addfinalizer(self.finalize) - - def __repr__(self): - return "<TmpTestdir %r>" % (self.tmpdir,) - - def Config(self, topdir=None): - if topdir is None: - topdir = self.tmpdir.dirpath() - return pytestConfig(topdir=topdir) - - def finalize(self): - for p in self._syspathremove: - py.std.sys.path.remove(p) - if hasattr(self, '_olddir'): - self._olddir.chdir() - # delete modules that have been loaded from tmpdir - for name, mod in list(sys.modules.items()): - if mod: - fn = getattr(mod, '__file__', None) - if fn and fn.startswith(str(self.tmpdir)): - del sys.modules[name] - - def getreportrecorder(self, obj): - if hasattr(obj, 'config'): - obj = obj.config - if hasattr(obj, 'hook'): - obj = 
obj.hook - assert hasattr(obj, '_hookspecs'), obj - reprec = ReportRecorder(obj) - reprec.hookrecorder = self._pytest.gethookrecorder(obj) - reprec.hook = reprec.hookrecorder.hook - return reprec - - def chdir(self): - old = self.tmpdir.chdir() - if not hasattr(self, '_olddir'): - self._olddir = old - - def _makefile(self, ext, args, kwargs): - items = list(kwargs.items()) - if args: - source = "\n".join(map(str, args)) + "\n" - basename = self.request.function.__name__ - items.insert(0, (basename, source)) - ret = None - for name, value in items: - p = self.tmpdir.join(name).new(ext=ext) - source = str(py.code.Source(value)).lstrip() - p.write(source.encode("utf-8"), "wb") - if ret is None: - ret = p - return ret - - - def makefile(self, ext, *args, **kwargs): - return self._makefile(ext, args, kwargs) - - def makeconftest(self, source): - return self.makepyfile(conftest=source) - - def makepyfile(self, *args, **kwargs): - return self._makefile('.py', args, kwargs) - - def maketxtfile(self, *args, **kwargs): - return self._makefile('.txt', args, kwargs) - - def syspathinsert(self, path=None): - if path is None: - path = self.tmpdir - py.std.sys.path.insert(0, str(path)) - self._syspathremove.append(str(path)) - - def mkdir(self, name): - return self.tmpdir.mkdir(name) - - def mkpydir(self, name): - p = self.mkdir(name) - p.ensure("__init__.py") - return p - - def genitems(self, colitems): - return list(self.session.genitems(colitems)) - - def inline_genitems(self, *args): - #config = self.parseconfig(*args) - config = self.parseconfig(*args) - session = config.initsession() - rec = self.getreportrecorder(config) - colitems = [config.getnode(arg) for arg in config.args] - items = list(session.genitems(colitems)) - return items, rec - - def runitem(self, source): - # used from runner functional tests - item = self.getitem(source) - # the test class where we are called from wants to provide the runner - testclassinstance = py.builtin._getimself(self.request.function) - runner = testclassinstance.getrunner() - return runner(item) - - def inline_runsource(self, source, *cmdlineargs): - p = self.makepyfile(source) - l = list(cmdlineargs) + [p] - return self.inline_run(*l) - - def inline_runsource1(self, *args): - args = list(args) - source = args.pop() - p = self.makepyfile(source) - l = list(args) + [p] - reprec = self.inline_run(*l) - reports = reprec.getreports("pytest_runtest_logreport") - assert len(reports) == 1, reports - return reports[0] - - def inline_run(self, *args): - args = ("-s", ) + args # otherwise FD leakage - config = self.parseconfig(*args) - config.pluginmanager.do_configure(config) - session = config.initsession() - reprec = self.getreportrecorder(config) - colitems = config.getinitialnodes() - session.main(colitems) - config.pluginmanager.do_unconfigure(config) - return reprec - - def config_preparse(self): - config = self.Config() - for plugin in self.plugins: - if isinstance(plugin, str): - config.pluginmanager.import_plugin(plugin) - else: - if isinstance(plugin, dict): - plugin = PseudoPlugin(plugin) - if not config.pluginmanager.isregistered(plugin): - config.pluginmanager.register(plugin) - return config - - def parseconfig(self, *args): - if not args: - args = (self.tmpdir,) - config = self.config_preparse() - args = list(args) + ["--basetemp=%s" % self.tmpdir.dirpath('basetemp')] - config.parse(args) - return config - - def reparseconfig(self, args=None): - """ this is used from tests that want to re-invoke parse(). 
""" - if not args: - args = [self.tmpdir] - from py._test import config - oldconfig = config.config_per_process # py.test.config - try: - c = config.config_per_process = py.test.config = pytestConfig() - c.basetemp = oldconfig.mktemp("reparse", numbered=True) - c.parse(args) - return c - finally: - config.config_per_process = py.test.config = oldconfig - - def parseconfigure(self, *args): - config = self.parseconfig(*args) - config.pluginmanager.do_configure(config) - return config - - def getitem(self, source, funcname="test_func"): - modcol = self.getmodulecol(source) - moditems = modcol.collect() - for item in modcol.collect(): - if item.name == funcname: - return item - else: - assert 0, "%r item not found in module:\n%s" %(funcname, source) - - def getitems(self, source): - modcol = self.getmodulecol(source) - return list(modcol.config.initsession().genitems([modcol])) - #assert item is not None, "%r item not found in module:\n%s" %(funcname, source) - #return item - - def getfscol(self, path, configargs=()): - self.config = self.parseconfig(path, *configargs) - self.session = self.config.initsession() - return self.config.getnode(path) - - def getmodulecol(self, source, configargs=(), withinit=False): - kw = {self.request.function.__name__: py.code.Source(source).strip()} - path = self.makepyfile(**kw) - if withinit: - self.makepyfile(__init__ = "#") - self.config = self.parseconfig(path, *configargs) - self.session = self.config.initsession() - #self.config.pluginmanager.do_configure(config=self.config) - # XXX - self.config.pluginmanager.import_plugin("runner") - plugin = self.config.pluginmanager.getplugin("runner") - plugin.pytest_configure(config=self.config) - - return self.config.getnode(path) - - def popen(self, cmdargs, stdout, stderr, **kw): - if not hasattr(py.std, 'subprocess'): - py.test.skip("no subprocess module") - env = os.environ.copy() - env['PYTHONPATH'] = ":".join(filter(None, [ - str(os.getcwd()), env.get('PYTHONPATH', '')])) - kw['env'] = env - #print "env", env - return py.std.subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw) - - def run(self, *cmdargs): - return self._run(*cmdargs) - - def _run(self, *cmdargs): - cmdargs = [str(x) for x in cmdargs] - p1 = self.tmpdir.join("stdout") - p2 = self.tmpdir.join("stderr") - print_("running", cmdargs, "curdir=", py.path.local()) - f1 = p1.open("wb") - f2 = p2.open("wb") - now = time.time() - popen = self.popen(cmdargs, stdout=f1, stderr=f2, - close_fds=(sys.platform != "win32")) - ret = popen.wait() - f1.close() - f2.close() - out = p1.read("rb") - out = getdecoded(out).splitlines() - err = p2.read("rb") - err = getdecoded(err).splitlines() - def dump_lines(lines, fp): - try: - for line in lines: - py.builtin.print_(line, file=fp) - except UnicodeEncodeError: - print("couldn't print to %s because of encoding" % (fp,)) - dump_lines(out, sys.stdout) - dump_lines(err, sys.stderr) - return RunResult(ret, out, err, time.time()-now) - - def runpybin(self, scriptname, *args): - fullargs = self._getpybinargs(scriptname) + args - return self.run(*fullargs) - - def _getpybinargs(self, scriptname): - if self.request.config.getvalue("toolsonpath"): - script = py.path.local.sysfind(scriptname) - assert script, "script %r not found" % scriptname - return (script,) - else: - cmdlinename = scriptname.replace(".", "") - assert hasattr(py.cmdline, cmdlinename), cmdlinename - source = ("import sys;sys.path.insert(0,%r);" - "import py;py.cmdline.%s()" % - (str(py._pydir.dirpath()), cmdlinename)) - return (sys.executable, 
"-c", source,) - - def runpython(self, script): - s = self._getsysprepend() - if s: - script.write(s + "\n" + script.read()) - return self.run(sys.executable, script) - - def _getsysprepend(self): - if not self.request.config.getvalue("toolsonpath"): - s = "import sys;sys.path.insert(0,%r);" % str(py._pydir.dirpath()) - else: - s = "" - return s - - def runpython_c(self, command): - command = self._getsysprepend() + command - return self.run(py.std.sys.executable, "-c", command) - - def runpytest(self, *args): - p = py.path.local.make_numbered_dir(prefix="runpytest-", - keep=None, rootdir=self.tmpdir) - args = ('--basetemp=%s' % p, ) + args - plugins = [x for x in self.plugins if isinstance(x, str)] - if plugins: - args = ('-p', plugins[0]) + args - return self.runpybin("py.test", *args) - - def spawn_pytest(self, string, expect_timeout=10.0): - pexpect = py.test.importorskip("pexpect", "2.4") - if not self.request.config.getvalue("toolsonpath"): - py.test.skip("need --tools-on-path to run py.test script") - basetemp = self.tmpdir.mkdir("pexpect") - invoke = self._getpybinargs("py.test")[0] - cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string) - child = pexpect.spawn(cmd, logfile=basetemp.join("spawn.out").open("w")) - child.timeout = expect_timeout - return child - -def getdecoded(out): - try: - return out.decode("utf-8") - except UnicodeDecodeError: - return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % ( - py.io.saferepr(out),) - -class PseudoPlugin: - def __init__(self, vars): - self.__dict__.update(vars) - -class ReportRecorder(object): - def __init__(self, hook): - self.hook = hook - self.registry = hook._registry - self.registry.register(self) - - def getcall(self, name): - return self.hookrecorder.getcall(name) - - def popcall(self, name): - return self.hookrecorder.popcall(name) - - def getcalls(self, names): - """ return list of ParsedCall instances matching the given eventname. """ - return self.hookrecorder.getcalls(names) - - # functionality for test reports - - def getreports(self, names="pytest_runtest_logreport pytest_collectreport"): - return [x.report for x in self.getcalls(names)] - - def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport"): - """ return a testreport whose dotted import path matches """ - l = [] - for rep in self.getreports(names=names): - colitem = rep.getnode() - if not inamepart or inamepart in colitem.listnames(): - l.append(rep) - if not l: - raise ValueError("could not find test report matching %r: no test reports at all!" 
% - (inamepart,)) - if len(l) > 1: - raise ValueError("found more than one testreport matching %r: %s" %( - inamepart, l)) - return l[0] - - def getfailures(self, names='pytest_runtest_logreport pytest_collectreport'): - return [rep for rep in self.getreports(names) if rep.failed] - - def getfailedcollections(self): - return self.getfailures('pytest_collectreport') - - def listoutcomes(self): - passed = [] - skipped = [] - failed = [] - for rep in self.getreports("pytest_runtest_logreport"): - if rep.passed: - if rep.when == "call": - passed.append(rep) - elif rep.skipped: - skipped.append(rep) - elif rep.failed: - failed.append(rep) - return passed, skipped, failed - - def countoutcomes(self): - return [len(x) for x in self.listoutcomes()] - - def assertoutcome(self, passed=0, skipped=0, failed=0): - realpassed, realskipped, realfailed = self.listoutcomes() - assert passed == len(realpassed) - assert skipped == len(realskipped) - assert failed == len(realfailed) - - def clear(self): - self.hookrecorder.calls[:] = [] - - def unregister(self): - self.registry.unregister(self) - self.hookrecorder.finish_recording() - -class LineComp: - def __init__(self): - self.stringio = py.io.TextIO() - - def assert_contains_lines(self, lines2): - """ assert that lines2 are contained (linearly) in lines1. - return a list of extralines found. - """ - __tracebackhide__ = True - val = self.stringio.getvalue() - self.stringio.truncate(0) - self.stringio.seek(0) - lines1 = val.split("\n") - return LineMatcher(lines1).fnmatch_lines(lines2) - -class LineMatcher: - def __init__(self, lines): - self.lines = lines - - def str(self): - return "\n".join(self.lines) - - def fnmatch_lines(self, lines2): - if isinstance(lines2, str): - lines2 = py.code.Source(lines2) - if isinstance(lines2, py.code.Source): - lines2 = lines2.strip().lines - - from fnmatch import fnmatch - lines1 = self.lines[:] - nextline = None - extralines = [] - __tracebackhide__ = True - for line in lines2: - nomatchprinted = False - while lines1: - nextline = lines1.pop(0) - if line == nextline: - print_("exact match:", repr(line)) - break - elif fnmatch(nextline, line): - print_("fnmatch:", repr(line)) - print_(" with:", repr(nextline)) - break - else: - if not nomatchprinted: - print_("nomatch:", repr(line)) - nomatchprinted = True - print_(" and:", repr(nextline)) - extralines.append(nextline) - else: - assert line == nextline diff --git a/py/_plugin/pytest_recwarn.py b/py/_plugin/pytest_recwarn.py deleted file mode 100644 index 4bfc57cdd1..0000000000 --- a/py/_plugin/pytest_recwarn.py +++ /dev/null @@ -1,121 +0,0 @@ -""" -helpers for asserting deprecation and other warnings. - -Example usage ---------------------- - -You can use the ``recwarn`` funcarg to track -warnings within a test function: - -.. sourcecode:: python - - def test_hello(recwarn): - from warnings import warn - warn("hello", DeprecationWarning) - w = recwarn.pop(DeprecationWarning) - assert issubclass(w.category, DeprecationWarning) - assert 'hello' in str(w.message) - assert w.filename - assert w.lineno - -You can also call a global helper for checking -taht a certain function call yields a Deprecation -warning: - -.. sourcecode:: python - - import py - - def test_global(): - py.test.deprecated_call(myfunction, 17) - - -""" - -import py -import os - -def pytest_funcarg__recwarn(request): - """Return a WarningsRecorder instance that provides these methods: - - * ``pop(category=None)``: return last warning matching the category. 
- * ``clear()``: clear list of warnings - """ - warnings = WarningsRecorder() - request.addfinalizer(warnings.finalize) - return warnings - -def pytest_namespace(): - return {'deprecated_call': deprecated_call} - -def deprecated_call(func, *args, **kwargs): - """ assert that calling func(*args, **kwargs) - triggers a DeprecationWarning. - """ - warningmodule = py.std.warnings - l = [] - oldwarn_explicit = getattr(warningmodule, 'warn_explicit') - def warn_explicit(*args, **kwargs): - l.append(args) - oldwarn_explicit(*args, **kwargs) - oldwarn = getattr(warningmodule, 'warn') - def warn(*args, **kwargs): - l.append(args) - oldwarn(*args, **kwargs) - - warningmodule.warn_explicit = warn_explicit - warningmodule.warn = warn - try: - ret = func(*args, **kwargs) - finally: - warningmodule.warn_explicit = warn_explicit - warningmodule.warn = warn - if not l: - #print warningmodule - __tracebackhide__ = True - raise AssertionError("%r did not produce DeprecationWarning" %(func,)) - return ret - - -class RecordedWarning: - def __init__(self, message, category, filename, lineno, line): - self.message = message - self.category = category - self.filename = filename - self.lineno = lineno - self.line = line - -class WarningsRecorder: - def __init__(self): - warningmodule = py.std.warnings - self.list = [] - def showwarning(message, category, filename, lineno, line=0): - self.list.append(RecordedWarning( - message, category, filename, lineno, line)) - try: - self.old_showwarning(message, category, - filename, lineno, line=line) - except TypeError: - # < python2.6 - self.old_showwarning(message, category, filename, lineno) - self.old_showwarning = warningmodule.showwarning - warningmodule.showwarning = showwarning - - def pop(self, cls=Warning): - """ pop the first recorded warning, raise exception if not exists.""" - for i, w in enumerate(self.list): - if issubclass(w.category, cls): - return self.list.pop(i) - __tracebackhide__ = True - assert 0, "%r not found in %r" %(cls, self.list) - - #def resetregistry(self): - # import warnings - # warnings.onceregistry.clear() - # warnings.__warningregistry__.clear() - - def clear(self): - self.list[:] = [] - - def finalize(self): - py.std.warnings.showwarning = self.old_showwarning diff --git a/py/_plugin/pytest_restdoc.py b/py/_plugin/pytest_restdoc.py deleted file mode 100644 index 6f815550d0..0000000000 --- a/py/_plugin/pytest_restdoc.py +++ /dev/null @@ -1,429 +0,0 @@ -""" -perform ReST syntax, local and remote reference tests on .rst/.txt files. 
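The syntax check boils down to running docutils over the text and treating a raised ``SystemMessage`` as a failure; a rough standalone sketch of the idea (the plugin itself goes through the project's ``confrest.py``)::

    from docutils.core import publish_string
    from docutils.utils import SystemMessage

    def rest_syntax_ok(text):
        # halt_level=2 turns warnings into SystemMessage; traceback=True
        # makes docutils re-raise it instead of calling sys.exit()
        try:
            publish_string(text, writer_name='html',
                           settings_overrides={'halt_level': 2, 'traceback': True})
            return True
        except SystemMessage:
            return False

    assert rest_syntax_ok('A fine paragraph.\n')
    # a malformed document (e.g. an unclosed inline markup start-string)
    # raises SystemMessage inside publish_string and returns False here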
-""" -import py -import sys, os, re - -def pytest_addoption(parser): - group = parser.getgroup("ReST", "ReST documentation check options") - group.addoption('-R', '--urlcheck', - action="store_true", dest="urlcheck", default=False, - help="urlopen() remote links found in ReST text files.") - group.addoption('--urltimeout', action="store", metavar="secs", - type="int", dest="urlcheck_timeout", default=5, - help="timeout in seconds for remote urlchecks") - group.addoption('--forcegen', - action="store_true", dest="forcegen", default=False, - help="force generation of html files.") - -def pytest_collect_file(path, parent): - if path.ext in (".txt", ".rst"): - project = getproject(path) - if project is not None: - return ReSTFile(path, parent=parent, project=project) - -def getproject(path): - for parent in path.parts(reverse=True): - confrest = parent.join("confrest.py") - if confrest.check(): - Project = confrest.pyimport().Project - return Project(parent) - -class ReSTFile(py.test.collect.File): - def __init__(self, fspath, parent, project): - super(ReSTFile, self).__init__(fspath=fspath, parent=parent) - self.project = project - - def collect(self): - return [ - ReSTSyntaxTest("ReSTSyntax", parent=self, project=self.project), - LinkCheckerMaker("checklinks", parent=self), - DoctestText("doctest", parent=self), - ] - -def deindent(s, sep='\n'): - leastspaces = -1 - lines = s.split(sep) - for line in lines: - if not line.strip(): - continue - spaces = len(line) - len(line.lstrip()) - if leastspaces == -1 or spaces < leastspaces: - leastspaces = spaces - if leastspaces == -1: - return s - for i, line in enumerate(lines): - if not line.strip(): - lines[i] = '' - else: - lines[i] = line[leastspaces:] - return sep.join(lines) - -class ReSTSyntaxTest(py.test.collect.Item): - def __init__(self, name, parent, project): - super(ReSTSyntaxTest, self).__init__(name=name, parent=parent) - self.project = project - - def reportinfo(self): - return self.fspath, None, "syntax check" - - def runtest(self): - self.restcheck(py.path.svnwc(self.fspath)) - - def restcheck(self, path): - py.test.importorskip("docutils") - self.register_linkrole() - from docutils.utils import SystemMessage - try: - self._checkskip(path, self.project.get_htmloutputpath(path)) - self.project.process(path) - except KeyboardInterrupt: - raise - except SystemMessage: - # we assume docutils printed info on stdout - py.test.fail("docutils processing failed, see captured stderr") - - def register_linkrole(self): - #directive.register_linkrole('api', self.resolve_linkrole) - #directive.register_linkrole('source', self.resolve_linkrole) -# -# # XXX fake sphinx' "toctree" and refs -# directive.register_linkrole('ref', self.resolve_linkrole) - - from docutils.parsers.rst import directives - def toctree_directive(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - return [] - toctree_directive.content = 1 - toctree_directive.options = {'maxdepth': int, 'glob': directives.flag, - 'hidden': directives.flag} - directives.register_directive('toctree', toctree_directive) - self.register_pygments() - - def register_pygments(self): - # taken from pygments-main/external/rst-directive.py - from docutils.parsers.rst import directives - try: - from pygments.formatters import HtmlFormatter - except ImportError: - def pygments_directive(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - return [] - pygments_directive.options = {} - else: - # The default formatter 
- DEFAULT = HtmlFormatter(noclasses=True) - # Add name -> formatter pairs for every variant you want to use - VARIANTS = { - # 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True), - } - - from docutils import nodes - - from pygments import highlight - from pygments.lexers import get_lexer_by_name, TextLexer - - def pygments_directive(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - try: - lexer = get_lexer_by_name(arguments[0]) - except ValueError: - # no lexer found - use the text one instead of an exception - lexer = TextLexer() - # take an arbitrary option if more than one is given - formatter = options and VARIANTS[options.keys()[0]] or DEFAULT - parsed = highlight('\n'.join(content), lexer, formatter) - return [nodes.raw('', parsed, format='html')] - - pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS]) - - pygments_directive.arguments = (1, 0, 1) - pygments_directive.content = 1 - directives.register_directive('sourcecode', pygments_directive) - - def resolve_linkrole(self, name, text, check=True): - apigen_relpath = self.project.apigen_relpath - - if name == 'api': - if text == 'py': - return ('py', apigen_relpath + 'api/index.html') - else: - assert text.startswith('py.'), ( - 'api link "%s" does not point to the py package') % (text,) - dotted_name = text - if dotted_name.find('(') > -1: - dotted_name = dotted_name[:text.find('(')] - # remove pkg root - path = dotted_name.split('.')[1:] - dotted_name = '.'.join(path) - obj = py - if check: - for chunk in path: - try: - obj = getattr(obj, chunk) - except AttributeError: - raise AssertionError( - 'problem with linkrole :api:`%s`: can not resolve ' - 'dotted name %s' % (text, dotted_name,)) - return (text, apigen_relpath + 'api/%s.html' % (dotted_name,)) - elif name == 'source': - assert text.startswith('py/'), ('source link "%s" does not point ' - 'to the py package') % (text,) - relpath = '/'.join(text.split('/')[1:]) - if check: - pkgroot = py._pydir - abspath = pkgroot.join(relpath) - assert pkgroot.join(relpath).check(), ( - 'problem with linkrole :source:`%s`: ' - 'path %s does not exist' % (text, relpath)) - if relpath.endswith('/') or not relpath: - relpath += 'index.html' - else: - relpath += '.html' - return (text, apigen_relpath + 'source/%s' % (relpath,)) - elif name == 'ref': - return ("", "") - - def _checkskip(self, lpath, htmlpath=None): - if not self.config.getvalue("forcegen"): - lpath = py.path.local(lpath) - if htmlpath is not None: - htmlpath = py.path.local(htmlpath) - if lpath.ext == '.txt': - htmlpath = htmlpath or lpath.new(ext='.html') - if htmlpath.check(file=1) and htmlpath.mtime() >= lpath.mtime(): - py.test.skip("html file is up to date, use --forcegen to regenerate") - #return [] # no need to rebuild - -class DoctestText(py.test.collect.Item): - def reportinfo(self): - return self.fspath, None, "doctest" - - def runtest(self): - content = self._normalize_linesep() - newcontent = self.config.hook.pytest_doctest_prepare_content(content=content) - if newcontent is not None: - content = newcontent - s = content - l = [] - prefix = '.. 
>>> ' - mod = py.std.types.ModuleType(self.fspath.purebasename) - skipchunk = False - for line in deindent(s).split('\n'): - stripped = line.strip() - if skipchunk and line.startswith(skipchunk): - py.builtin.print_("skipping", line) - continue - skipchunk = False - if stripped.startswith(prefix): - try: - py.builtin.exec_(py.code.Source( - stripped[len(prefix):]).compile(), mod.__dict__) - except ValueError: - e = sys.exc_info()[1] - if e.args and e.args[0] == "skipchunk": - skipchunk = " " * (len(line) - len(line.lstrip())) - else: - raise - else: - l.append(line) - docstring = "\n".join(l) - mod.__doc__ = docstring - failed, tot = py.std.doctest.testmod(mod, verbose=1) - if failed: - py.test.fail("doctest %s: %s failed out of %s" %( - self.fspath, failed, tot)) - - def _normalize_linesep(self): - # XXX quite nasty... but it works (fixes win32 issues) - s = self.fspath.read() - linesep = '\n' - if '\r' in s: - if '\n' not in s: - linesep = '\r' - else: - linesep = '\r\n' - s = s.replace(linesep, '\n') - return s - -class LinkCheckerMaker(py.test.collect.Collector): - def collect(self): - return list(self.genlinkchecks()) - - def genlinkchecks(self): - path = self.fspath - # generating functions + args as single tests - timeout = self.config.getvalue("urlcheck_timeout") - for lineno, line in enumerate(path.readlines()): - line = line.strip() - if line.startswith('.. _'): - if line.startswith('.. _`'): - delim = '`:' - else: - delim = ':' - l = line.split(delim, 1) - if len(l) != 2: - continue - tryfn = l[1].strip() - name = "%s:%d" %(tryfn, lineno) - if tryfn.startswith('http:') or tryfn.startswith('https'): - if self.config.getvalue("urlcheck"): - yield CheckLink(name, parent=self, - args=(tryfn, path, lineno, timeout), checkfunc=urlcheck) - elif tryfn.startswith('webcal:'): - continue - else: - i = tryfn.find('#') - if i != -1: - checkfn = tryfn[:i] - else: - checkfn = tryfn - if checkfn.strip() and (1 or checkfn.endswith('.html')): - yield CheckLink(name, parent=self, - args=(tryfn, path, lineno), checkfunc=localrefcheck) - -class CheckLink(py.test.collect.Item): - def __init__(self, name, parent, args, checkfunc): - super(CheckLink, self).__init__(name, parent) - self.args = args - self.checkfunc = checkfunc - - def runtest(self): - return self.checkfunc(*self.args) - - def reportinfo(self, basedir=None): - return (self.fspath, self.args[2], "checklink: %s" % self.args[0]) - -def urlcheck(tryfn, path, lineno, TIMEOUT_URLOPEN): - old = py.std.socket.getdefaulttimeout() - py.std.socket.setdefaulttimeout(TIMEOUT_URLOPEN) - try: - try: - py.builtin.print_("trying remote", tryfn) - py.std.urllib2.urlopen(tryfn) - finally: - py.std.socket.setdefaulttimeout(old) - except (py.std.urllib2.URLError, py.std.urllib2.HTTPError): - e = sys.exc_info()[1] - if getattr(e, 'code', None) in (401, 403): # authorization required, forbidden - py.test.skip("%s: %s" %(tryfn, str(e))) - else: - py.test.fail("remote reference error %r in %s:%d\n%s" %( - tryfn, path.basename, lineno+1, e)) - -def localrefcheck(tryfn, path, lineno): - # assume it should be a file - i = tryfn.find('#') - if tryfn.startswith('javascript:'): - return # don't check JS refs - if i != -1: - anchor = tryfn[i+1:] - tryfn = tryfn[:i] - else: - anchor = '' - fn = path.dirpath(tryfn) - ishtml = fn.ext == '.html' - fn = ishtml and fn.new(ext='.txt') or fn - py.builtin.print_("filename is", fn) - if not fn.check(): # not ishtml or not fn.check(): - if not py.path.local(tryfn).check(): # the html could be there - py.test.fail("reference error 
%r in %s:%d" %( - tryfn, path.basename, lineno+1)) - if anchor: - source = unicode(fn.read(), 'latin1') - source = source.lower().replace('-', ' ') # aehem - - anchor = anchor.replace('-', ' ') - match2 = ".. _`%s`:" % anchor - match3 = ".. _%s:" % anchor - candidates = (anchor, match2, match3) - py.builtin.print_("candidates", repr(candidates)) - for line in source.split('\n'): - line = line.strip() - if line in candidates: - break - else: - py.test.fail("anchor reference error %s#%s in %s:%d" %( - tryfn, anchor, path.basename, lineno+1)) - -if hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno()): - def log(msg): - print(msg) -else: - def log(msg): - pass - -def convert_rest_html(source, source_path, stylesheet=None, encoding='latin1'): - """ return html latin1-encoded document for the given input. - source a ReST-string - sourcepath where to look for includes (basically) - stylesheet path (to be used if any) - """ - from docutils.core import publish_string - kwargs = { - 'stylesheet' : stylesheet, - 'stylesheet_path': None, - 'traceback' : 1, - 'embed_stylesheet': 0, - 'output_encoding' : encoding, - #'halt' : 0, # 'info', - 'halt_level' : 2, - } - # docutils uses os.getcwd() :-( - source_path = os.path.abspath(str(source_path)) - prevdir = os.getcwd() - try: - #os.chdir(os.path.dirname(source_path)) - return publish_string(source, source_path, writer_name='html', - settings_overrides=kwargs) - finally: - os.chdir(prevdir) - -def process(txtpath, encoding='latin1'): - """ process a textfile """ - log("processing %s" % txtpath) - assert txtpath.check(ext='.txt') - if isinstance(txtpath, py.path.svnwc): - txtpath = txtpath.localpath - htmlpath = txtpath.new(ext='.html') - #svninfopath = txtpath.localpath.new(ext='.svninfo') - - style = txtpath.dirpath('style.css') - if style.check(): - stylesheet = style.basename - else: - stylesheet = None - content = unicode(txtpath.read(), encoding) - doc = convert_rest_html(content, txtpath, stylesheet=stylesheet, encoding=encoding) - htmlpath.open('wb').write(doc) - #log("wrote %r" % htmlpath) - #if txtpath.check(svnwc=1, versioned=1): - # info = txtpath.info() - # svninfopath.dump(info) - -if sys.version_info > (3, 0): - def _uni(s): return s -else: - def _uni(s): - return unicode(s) - -rex1 = re.compile(r'.*<body>(.*)</body>.*', re.MULTILINE | re.DOTALL) -rex2 = re.compile(r'.*<div class="document">(.*)</div>.*', re.MULTILINE | re.DOTALL) - -def strip_html_header(string, encoding='utf8'): - """ return the content of the body-tag """ - uni = unicode(string, encoding) - for rex in rex1,rex2: - match = rex.search(uni) - if not match: - break - uni = match.group(1) - return uni - -class Project: # used for confrest.py files - def __init__(self, sourcepath): - self.sourcepath = sourcepath - def process(self, path): - return process(path) - def get_htmloutputpath(self, path): - return path.new(ext='html') diff --git a/py/_plugin/pytest_resultlog.py b/py/_plugin/pytest_resultlog.py deleted file mode 100644 index 6763ebfd08..0000000000 --- a/py/_plugin/pytest_resultlog.py +++ /dev/null @@ -1,98 +0,0 @@ -"""non-xml machine-readable logging of test results. - Useful for buildbot integration code. See the `PyPy-test`_ - web page for post-processing. - -.. 
_`PyPy-test`: http://codespeak.net:8099/summary - -""" - -import py -from py.builtin import print_ - -def pytest_addoption(parser): - group = parser.getgroup("resultlog", "resultlog plugin options") - group.addoption('--resultlog', action="store", dest="resultlog", metavar="path", default=None, - help="path for machine-readable result log.") - -def pytest_configure(config): - resultlog = config.option.resultlog - if resultlog: - logfile = open(resultlog, 'w', 1) # line buffered - config._resultlog = ResultLog(config, logfile) - config.pluginmanager.register(config._resultlog) - -def pytest_unconfigure(config): - resultlog = getattr(config, '_resultlog', None) - if resultlog: - resultlog.logfile.close() - del config._resultlog - config.pluginmanager.unregister(resultlog) - -def generic_path(item): - chain = item.listchain() - gpath = [chain[0].name] - fspath = chain[0].fspath - fspart = False - for node in chain[1:]: - newfspath = node.fspath - if newfspath == fspath: - if fspart: - gpath.append(':') - fspart = False - else: - gpath.append('.') - else: - gpath.append('/') - fspart = True - name = node.name - if name[0] in '([': - gpath.pop() - gpath.append(name) - fspath = newfspath - return ''.join(gpath) - -class ResultLog(object): - def __init__(self, config, logfile): - self.config = config - self.logfile = logfile # preferably line buffered - - def write_log_entry(self, testpath, shortrepr, longrepr): - print_("%s %s" % (shortrepr, testpath), file=self.logfile) - for line in longrepr.splitlines(): - print_(" %s" % line, file=self.logfile) - - def log_outcome(self, node, shortrepr, longrepr): - testpath = generic_path(node) - self.write_log_entry(testpath, shortrepr, longrepr) - - def pytest_runtest_logreport(self, report): - res = self.config.hook.pytest_report_teststatus(report=report) - if res is not None: - code = res[1] - else: - code = report.shortrepr - if code == 'x': - longrepr = str(report.longrepr) - elif code == 'X': - longrepr = '' - elif report.passed: - longrepr = "" - elif report.failed: - longrepr = str(report.longrepr) - elif report.skipped: - longrepr = str(report.longrepr.reprcrash.message) - self.log_outcome(report.item, code, longrepr) - - def pytest_collectreport(self, report): - if not report.passed: - if report.failed: - code = "F" - else: - assert report.skipped - code = "S" - longrepr = str(report.longrepr.reprcrash) - self.log_outcome(report.collector, code, longrepr) - - def pytest_internalerror(self, excrepr): - path = excrepr.reprcrash.path - self.write_log_entry(path, '!', str(excrepr)) diff --git a/py/_plugin/pytest_runner.py b/py/_plugin/pytest_runner.py deleted file mode 100644 index 0c39ba775e..0000000000 --- a/py/_plugin/pytest_runner.py +++ /dev/null @@ -1,417 +0,0 @@ -""" -collect and run test items and create reports. 
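Per test item the runner executes three phases (setup, call, teardown) and wraps each one so that an exception is recorded rather than propagated; a simplified standalone sketch of that protocol::

    import sys

    def run_phase(func, when):
        # mirrors CallInfo: keep either the result or the exception info
        try:
            return {'when': when, 'passed': True, 'result': func()}
        except Exception:
            return {'when': when, 'passed': False, 'excinfo': sys.exc_info()}

    def run_item(setup, call, teardown):
        reports = [run_phase(setup, 'setup')]
        if reports[0]['passed']:                 # the test only runs if setup worked
            reports.append(run_phase(call, 'call'))
        reports.append(run_phase(teardown, 'teardown'))
        return reports

    reports = run_item(lambda: None, lambda: 1 / 0, lambda: None)
    assert [r['passed'] for r in reports] == [True, False, True]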
-""" - -import py, sys - -def pytest_namespace(): - return { - 'raises' : raises, - 'skip' : skip, - 'importorskip' : importorskip, - 'fail' : fail, - 'xfail' : xfail, - 'exit' : exit, - } - -# -# pytest plugin hooks - -# XXX move to pytest_sessionstart and fix py.test owns tests -def pytest_configure(config): - config._setupstate = SetupState() - -def pytest_sessionfinish(session, exitstatus): - if hasattr(session.config, '_setupstate'): - hook = session.config.hook - rep = hook.pytest__teardown_final(session=session) - if rep: - hook.pytest__teardown_final_logerror(report=rep) - -def pytest_make_collect_report(collector): - result = excinfo = None - try: - result = collector._memocollect() - except KeyboardInterrupt: - raise - except: - excinfo = py.code.ExceptionInfo() - return CollectReport(collector, result, excinfo) - -def pytest_runtest_protocol(item): - runtestprotocol(item) - return True - -def runtestprotocol(item, log=True): - rep = call_and_report(item, "setup", log) - reports = [rep] - if rep.passed: - reports.append(call_and_report(item, "call", log)) - reports.append(call_and_report(item, "teardown", log)) - return reports - -def pytest_runtest_setup(item): - item.config._setupstate.prepare(item) - -def pytest_runtest_call(item): - if not item._deprecated_testexecution(): - item.runtest() - -def pytest_runtest_makereport(item, call): - return ItemTestReport(item, call.excinfo, call.when) - -def pytest_runtest_teardown(item): - item.config._setupstate.teardown_exact(item) - -def pytest__teardown_final(session): - call = CallInfo(session.config._setupstate.teardown_all, when="teardown") - if call.excinfo: - ntraceback = call.excinfo.traceback .cut(excludepath=py._pydir) - call.excinfo.traceback = ntraceback.filter() - rep = TeardownErrorReport(call.excinfo) - return rep - -def pytest_report_teststatus(report): - if report.when in ("setup", "teardown"): - if report.failed: - # category, shortletter, verbose-word - return "error", "E", "ERROR" - elif report.skipped: - return "skipped", "s", "SKIPPED" - else: - return "", "", "" -# -# Implementation - -def call_and_report(item, when, log=True): - call = call_runtest_hook(item, when) - hook = item.ihook - report = hook.pytest_runtest_makereport(item=item, call=call) - if log and (when == "call" or not report.passed): - hook.pytest_runtest_logreport(report=report) - return report - -def call_runtest_hook(item, when): - hookname = "pytest_runtest_" + when - ihook = getattr(item.ihook, hookname) - return CallInfo(lambda: ihook(item=item), when=when) - -class CallInfo: - excinfo = None - def __init__(self, func, when): - self.when = when - try: - self.result = func() - except KeyboardInterrupt: - raise - except: - self.excinfo = py.code.ExceptionInfo() - - def __repr__(self): - if self.excinfo: - status = "exception: %s" % str(self.excinfo.value) - else: - status = "result: %r" % (self.result,) - return "<CallInfo when=%r %s>" % (self.when, status) - -class BaseReport(object): - def __repr__(self): - l = ["%s=%s" %(key, value) - for key, value in self.__dict__.items()] - return "<%s %s>" %(self.__class__.__name__, " ".join(l),) - - def toterminal(self, out): - longrepr = self.longrepr - if hasattr(longrepr, 'toterminal'): - longrepr.toterminal(out) - else: - out.line(str(longrepr)) - -class ItemTestReport(BaseReport): - failed = passed = skipped = False - - def __init__(self, item, excinfo=None, when=None): - self.item = item - self.when = when - if item and when != "setup": - self.keywords = item.readkeywords() - else: - # if we fail 
during setup it might mean - # we are not able to access the underlying object - # this might e.g. happen if we are unpickled - # and our parent collector did not collect us - # (because it e.g. skipped for platform reasons) - self.keywords = {} - if not excinfo: - self.passed = True - self.shortrepr = "." - else: - if not isinstance(excinfo, py.code.ExceptionInfo): - self.failed = True - shortrepr = "?" - longrepr = excinfo - elif excinfo.errisinstance(py.test.skip.Exception): - self.skipped = True - shortrepr = "s" - longrepr = self.item._repr_failure_py(excinfo) - else: - self.failed = True - shortrepr = self.item.shortfailurerepr - if self.when == "call": - longrepr = self.item.repr_failure(excinfo) - else: # exception in setup or teardown - longrepr = self.item._repr_failure_py(excinfo) - shortrepr = shortrepr.lower() - self.shortrepr = shortrepr - self.longrepr = longrepr - - def __repr__(self): - status = (self.passed and "passed" or - self.skipped and "skipped" or - self.failed and "failed" or - "CORRUPT") - l = [repr(self.item.name), "when=%r" % self.when, "outcome %r" % status,] - if hasattr(self, 'node'): - l.append("txnode=%s" % self.node.gateway.id) - info = " " .join(map(str, l)) - return "<ItemTestReport %s>" % info - - def getnode(self): - return self.item - -class CollectReport(BaseReport): - skipped = failed = passed = False - - def __init__(self, collector, result, excinfo=None): - self.collector = collector - if not excinfo: - self.passed = True - self.result = result - else: - style = "short" - if collector.config.getvalue("fulltrace"): - style = "long" - self.longrepr = self.collector._repr_failure_py(excinfo, - style=style) - if excinfo.errisinstance(py.test.skip.Exception): - self.skipped = True - self.reason = str(excinfo.value) - else: - self.failed = True - - def getnode(self): - return self.collector - -class TeardownErrorReport(BaseReport): - skipped = passed = False - failed = True - when = "teardown" - def __init__(self, excinfo): - self.longrepr = excinfo.getrepr(funcargs=True) - -class SetupState(object): - """ shared state for setting up/tearing down test items or collectors. """ - def __init__(self): - self.stack = [] - self._finalizers = {} - - def addfinalizer(self, finalizer, colitem): - """ attach a finalizer to the given colitem. - if colitem is None, this will add a finalizer that - is called at the end of teardown_all(). 
- """ - assert hasattr(finalizer, '__call__') - #assert colitem in self.stack - self._finalizers.setdefault(colitem, []).append(finalizer) - - def _pop_and_teardown(self): - colitem = self.stack.pop() - self._teardown_with_finalization(colitem) - - def _callfinalizers(self, colitem): - finalizers = self._finalizers.pop(colitem, None) - while finalizers: - fin = finalizers.pop() - fin() - - def _teardown_with_finalization(self, colitem): - self._callfinalizers(colitem) - if colitem: - colitem.teardown() - for colitem in self._finalizers: - assert colitem is None or colitem in self.stack - - def teardown_all(self): - while self.stack: - self._pop_and_teardown() - self._teardown_with_finalization(None) - assert not self._finalizers - - def teardown_exact(self, item): - if self.stack and item == self.stack[-1]: - self._pop_and_teardown() - else: - self._callfinalizers(item) - - def prepare(self, colitem): - """ setup objects along the collector chain to the test-method - and teardown previously setup objects.""" - needed_collectors = colitem.listchain() - while self.stack: - if self.stack == needed_collectors[:len(self.stack)]: - break - self._pop_and_teardown() - # check if the last collection node has raised an error - for col in self.stack: - if hasattr(col, '_prepare_exc'): - py.builtin._reraise(*col._prepare_exc) - for col in needed_collectors[len(self.stack):]: - self.stack.append(col) - try: - col.setup() - except Exception: - col._prepare_exc = sys.exc_info() - raise - -# ============================================================= -# Test OutcomeExceptions and helpers for creating them. - - -class OutcomeException(Exception): - """ OutcomeException and its subclass instances indicate and - contain info about test and collection outcomes. - """ - def __init__(self, msg=None, excinfo=None): - self.msg = msg - self.excinfo = excinfo - - def __repr__(self): - if self.msg: - return repr(self.msg) - return "<%s instance>" %(self.__class__.__name__,) - __str__ = __repr__ - -class Skipped(OutcomeException): - # XXX hackish: on 3k we fake to live in the builtins - # in order to have Skipped exception printing shorter/nicer - __module__ = 'builtins' - -class Failed(OutcomeException): - """ raised from an explicit call to py.test.fail() """ - __module__ = 'builtins' - -class XFailed(OutcomeException): - """ raised from an explicit call to py.test.xfail() """ - __module__ = 'builtins' - -class ExceptionFailure(Failed): - """ raised by py.test.raises on an exception-assertion mismatch. """ - def __init__(self, expr, expected, msg=None, excinfo=None): - Failed.__init__(self, msg=msg, excinfo=excinfo) - self.expr = expr - self.expected = expected - -class Exit(KeyboardInterrupt): - """ raised by py.test.exit for immediate program exits without tracebacks and reporter/summary. """ - def __init__(self, msg="unknown reason"): - self.msg = msg - KeyboardInterrupt.__init__(self, msg) - -# exposed helper methods - -def exit(msg): - """ exit testing process as if KeyboardInterrupt was triggered. """ - __tracebackhide__ = True - raise Exit(msg) - -exit.Exception = Exit - -def skip(msg=""): - """ skip an executing test with the given message. Note: it's usually - better use the py.test.mark.skipif marker to declare a test to be - skipped under certain conditions like mismatching platforms or - dependencies. See the pytest_skipping plugin for details. 
- """ - __tracebackhide__ = True - raise Skipped(msg=msg) - -skip.Exception = Skipped - -def fail(msg=""): - """ explicitely fail an currently-executing test with the given Message. """ - __tracebackhide__ = True - raise Failed(msg=msg) - -fail.Exception = Failed - -def xfail(reason=""): - """ xfail an executing test or setup functions, taking an optional - reason string. - """ - __tracebackhide__ = True - raise XFailed(reason) -xfail.Exception = XFailed - -def raises(ExpectedException, *args, **kwargs): - """ if args[0] is callable: raise AssertionError if calling it with - the remaining arguments does not raise the expected exception. - if args[0] is a string: raise AssertionError if executing the - the string in the calling scope does not raise expected exception. - for examples: - x = 5 - raises(TypeError, lambda x: x + 'hello', x=x) - raises(TypeError, "x + 'hello'") - """ - __tracebackhide__ = True - assert args - if isinstance(args[0], str): - code, = args - assert isinstance(code, str) - frame = sys._getframe(1) - loc = frame.f_locals.copy() - loc.update(kwargs) - #print "raises frame scope: %r" % frame.f_locals - try: - code = py.code.Source(code).compile() - py.builtin.exec_(code, frame.f_globals, loc) - # XXX didn'T mean f_globals == f_locals something special? - # this is destroyed here ... - except ExpectedException: - return py.code.ExceptionInfo() - else: - func = args[0] - try: - func(*args[1:], **kwargs) - except ExpectedException: - return py.code.ExceptionInfo() - k = ", ".join(["%s=%r" % x for x in kwargs.items()]) - if k: - k = ', ' + k - expr = '%s(%r%s)' %(getattr(func, '__name__', func), args, k) - raise ExceptionFailure(msg="DID NOT RAISE", - expr=args, expected=ExpectedException) - -raises.Exception = ExceptionFailure - -def importorskip(modname, minversion=None): - """ return imported module if it has a higher __version__ than the - optionally specified 'minversion' - otherwise call py.test.skip() - with a message detailing the mismatch. - """ - compile(modname, '', 'eval') # to catch syntaxerrors - try: - mod = __import__(modname, None, None, ['__doc__']) - except ImportError: - py.test.skip("could not import %r" %(modname,)) - if minversion is None: - return mod - verattr = getattr(mod, '__version__', None) - if isinstance(minversion, str): - minver = minversion.split(".") - else: - minver = list(minversion) - if verattr is None or verattr.split(".") < minver: - py.test.skip("module %r has __version__ %r, required is: %r" %( - modname, verattr, minversion)) - return mod - diff --git a/py/_plugin/pytest_skipping.py b/py/_plugin/pytest_skipping.py deleted file mode 100644 index c7de83924f..0000000000 --- a/py/_plugin/pytest_skipping.py +++ /dev/null @@ -1,347 +0,0 @@ -""" -advanced skipping for python test functions, classes or modules. - -With this plugin you can mark test functions for conditional skipping -or as "xfail", expected-to-fail. Skipping a test will avoid running it -while xfail-marked tests will run and result in an inverted outcome: -a pass becomes a failure and a fail becomes a semi-passing one. - -The need for skipping a test is usually connected to a condition. -If a test fails under all conditions then it's probably better -to mark your test as 'xfail'. - -By passing ``-rxs`` to the terminal reporter you will see extra -summary information on skips and xfail-run tests at the end of a test run. - -.. 
_skipif: - -Skipping a single function -------------------------------------------- - -Here is an example for marking a test function to be skipped -when run on a Python3 interpreter:: - - @py.test.mark.skipif("sys.version_info >= (3,0)") - def test_function(): - ... - -During test function setup the skipif condition is -evaluated by calling ``eval(expr, namespace)``. The namespace -contains the ``sys`` and ``os`` modules and the test -``config`` object. The latter allows you to skip based -on a test configuration value e.g. like this:: - - @py.test.mark.skipif("not config.getvalue('db')") - def test_function(...): - ... - -Create a shortcut for your conditional skip decorator -at module level like this:: - - win32only = py.test.mark.skipif("sys.platform != 'win32'") - - @win32only - def test_function(): - ... - - -skip groups of test functions --------------------------------------- - -As with all metadata function marking you can do it at -`whole class- or module level`_. Here is an example -for skipping all methods of a test class based on platform:: - - class TestPosixCalls: - pytestmark = py.test.mark.skipif("sys.platform == 'win32'") - - def test_function(self): - # will not be setup or run under 'win32' platform - # - -The ``pytestmark`` decorator will be applied to each test function. -If your code targets python2.6 or above you can equivalently use -the skipif decorator on classes:: - - @py.test.mark.skipif("sys.platform == 'win32'") - class TestPosixCalls: - - def test_function(self): - # will not be setup or run under 'win32' platform - # - -It is fine in general to apply multiple "skipif" decorators -on a single function - this means that if any of the conditions -apply the function will be skipped. - -.. _`whole class- or module level`: mark.html#scoped-marking - - -mark a test function as **expected to fail** -------------------------------------------------------- - -You can use the ``xfail`` marker to indicate that you -expect the test to fail:: - - @py.test.mark.xfail - def test_function(): - ... - -This test will be run but no traceback will be reported -when it fails. Instead terminal reporting will list it in the -"expected to fail" or "unexpectedly passing" sections. - -Same as with skipif_ you can also selectively expect a failure -depending on platform:: - - @py.test.mark.xfail("sys.version_info >= (3,0)") - def test_function(): - ... - -To not run a test and still regard it as "xfailed":: - - @py.test.mark.xfail(..., run=False) - -To specify an explicit reason to be shown with xfailure detail:: - - @py.test.mark.xfail(..., reason="my reason") - -imperative xfail from within a test or setup function ------------------------------------------------------- - -If you cannot declare xfail-conditions at import time -you can also imperatively produce an XFail-outcome from -within test or setup code. Example:: - - def test_function(): - if not valid_config(): - py.test.xfail("unsuppored configuration") - - -skipping on a missing import dependency --------------------------------------------------- - -You can use the following import helper at module level -or within a test or test setup function:: - - docutils = py.test.importorskip("docutils") - -If ``docutils`` cannot be imported here, this will lead to a -skip outcome of the test. You can also skip dependeing if -if a library does not come with a high enough version:: - - docutils = py.test.importorskip("docutils", minversion="0.3") - -The version will be read from the specified module's ``__version__`` attribute. 
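Putting the helpers documented above together, a minimal test module could look like the following; the ``docutils`` dependency, the ``0.3`` minimum version and the test body are illustrative values only::

    import py

    # skip all tests in this module unless docutils >= 0.3 can be imported
    docutils = py.test.importorskip("docutils", minversion="0.3")

    # a reusable conditional-skip decorator, as suggested in the skipif section
    needs_posix = py.test.mark.skipif("sys.platform == 'win32'")

    @needs_posix
    def test_has_version():
        assert hasattr(docutils, "__version__")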
- -imperative skip from within a test or setup function ------------------------------------------------------- - -If for some reason you cannot declare skip-conditions -you can also imperatively produce a Skip-outcome from -within test or setup code. Example:: - - def test_function(): - if not valid_config(): - py.test.skip("unsuppored configuration") - -""" - -import py - -def pytest_addoption(parser): - group = parser.getgroup("general") - group.addoption('--runxfail', - action="store_true", dest="runxfail", default=False, - help="run tests even if they are marked xfail") - -class MarkEvaluator: - def __init__(self, item, name): - self.item = item - self.name = name - self.holder = getattr(item.obj, name, None) - - def __bool__(self): - return bool(self.holder) - __nonzero__ = __bool__ - - def istrue(self): - if self.holder: - d = {'os': py.std.os, 'sys': py.std.sys, 'config': self.item.config} - if self.holder.args: - self.result = False - for expr in self.holder.args: - self.expr = expr - if isinstance(expr, str): - result = cached_eval(self.item.config, expr, d) - else: - result = expr - if result: - self.result = True - self.expr = expr - break - else: - self.result = True - return getattr(self, 'result', False) - - def get(self, attr, default=None): - return self.holder.kwargs.get(attr, default) - - def getexplanation(self): - expl = self.get('reason', None) - if not expl: - if not hasattr(self, 'expr'): - return "" - else: - return "condition: " + self.expr - return expl - - -def pytest_runtest_setup(item): - if not isinstance(item, py.test.collect.Function): - return - evalskip = MarkEvaluator(item, 'skipif') - if evalskip.istrue(): - py.test.skip(evalskip.getexplanation()) - item._evalxfail = MarkEvaluator(item, 'xfail') - if not item.config.getvalue("runxfail"): - if item._evalxfail.istrue(): - if not item._evalxfail.get('run', True): - py.test.skip("xfail") - -def pytest_runtest_makereport(__multicall__, item, call): - if not isinstance(item, py.test.collect.Function): - return - if not (call.excinfo and - call.excinfo.errisinstance(py.test.xfail.Exception)): - evalxfail = getattr(item, '_evalxfail', None) - if not evalxfail: - return - if call.excinfo and call.excinfo.errisinstance(py.test.xfail.Exception): - if not item.config.getvalue("runxfail"): - rep = __multicall__.execute() - rep.keywords['xfail'] = "reason: " + call.excinfo.value.msg - rep.skipped = True - rep.failed = False - return rep - if call.when == "setup": - rep = __multicall__.execute() - if rep.skipped and evalxfail.istrue(): - expl = evalxfail.getexplanation() - if not evalxfail.get("run", True): - expl = "[NOTRUN] " + expl - rep.keywords['xfail'] = expl - return rep - elif call.when == "call": - rep = __multicall__.execute() - if not item.config.getvalue("runxfail") and evalxfail.istrue(): - if call.excinfo: - rep.skipped = True - rep.failed = rep.passed = False - else: - rep.skipped = rep.passed = False - rep.failed = True - rep.keywords['xfail'] = evalxfail.getexplanation() - else: - if 'xfail' in rep.keywords: - del rep.keywords['xfail'] - return rep - -# called by terminalreporter progress reporting -def pytest_report_teststatus(report): - if 'xfail' in report.keywords: - if report.skipped: - return "xfailed", "x", "xfail" - elif report.failed: - return "xpassed", "X", "XPASS" - -# called by the terminalreporter instance/plugin -def pytest_terminal_summary(terminalreporter): - tr = terminalreporter - if not tr.reportchars: - #for name in "xfailed skipped failed xpassed": - # if not tr.stats.get(name, 
0): - # tr.write_line("HINT: use '-r' option to see extra " - # "summary info about tests") - # break - return - - lines = [] - for char in tr.reportchars: - if char == "x": - show_xfailed(terminalreporter, lines) - elif char == "X": - show_xpassed(terminalreporter, lines) - elif char == "f": - show_failed(terminalreporter, lines) - elif char == "s": - show_skipped(terminalreporter, lines) - if lines: - tr._tw.sep("=", "short test summary info") - for line in lines: - tr._tw.line(line) - -def show_failed(terminalreporter, lines): - tw = terminalreporter._tw - failed = terminalreporter.stats.get("failed") - if failed: - for rep in failed: - pos = terminalreporter.gettestid(rep.item) - lines.append("FAIL %s" %(pos, )) - -def show_xfailed(terminalreporter, lines): - xfailed = terminalreporter.stats.get("xfailed") - if xfailed: - for rep in xfailed: - pos = terminalreporter.gettestid(rep.item) - reason = rep.keywords['xfail'] - lines.append("XFAIL %s %s" %(pos, reason)) - -def show_xpassed(terminalreporter, lines): - xpassed = terminalreporter.stats.get("xpassed") - if xpassed: - for rep in xpassed: - pos = terminalreporter.gettestid(rep.item) - reason = rep.keywords['xfail'] - lines.append("XPASS %s %s" %(pos, reason)) - -def cached_eval(config, expr, d): - if not hasattr(config, '_evalcache'): - config._evalcache = {} - try: - return config._evalcache[expr] - except KeyError: - #import sys - #print >>sys.stderr, ("cache-miss: %r" % expr) - config._evalcache[expr] = x = eval(expr, d) - return x - - -def folded_skips(skipped): - d = {} - for event in skipped: - entry = event.longrepr.reprcrash - key = entry.path, entry.lineno, entry.message - d.setdefault(key, []).append(event) - l = [] - for key, events in d.items(): - l.append((len(events),) + key) - return l - -def show_skipped(terminalreporter, lines): - tr = terminalreporter - skipped = tr.stats.get('skipped', []) - if skipped: - #if not tr.hasopt('skipped'): - # tr.write_line( - # "%d skipped tests, specify -rs for more info" % - # len(skipped)) - # return - fskips = folded_skips(skipped) - if fskips: - #tr.write_sep("_", "skipped test summary") - for num, fspath, lineno, reason in fskips: - if reason.startswith("Skipped: "): - reason = reason[9:] - lines.append("SKIP [%d] %s:%d: %s" % - (num, fspath, lineno, reason)) diff --git a/py/_plugin/pytest_terminal.py b/py/_plugin/pytest_terminal.py deleted file mode 100644 index 7ed0ca85e7..0000000000 --- a/py/_plugin/pytest_terminal.py +++ /dev/null @@ -1,540 +0,0 @@ -""" -Implements terminal reporting of the full testing process. - -This is a good source for looking at the various reporting hooks. 
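Both the skipping plugin above and the terminal reporter below use the ``pytest_report_teststatus`` hook to map a test report onto a ``(category, short letter, verbose word)`` triple. A hypothetical conftest.py could use the same hook for a category of its own; the ``flaky`` keyword below is invented for illustration::

    def pytest_report_teststatus(report):
        # give reports that carry a "flaky" keyword their own summary category
        if 'flaky' in getattr(report, 'keywords', {}):
            if report.failed:
                return "flaky", "f", "FLAKY-FAIL"
            if report.passed:
                return "flaky", "p", "FLAKY-PASS"
        # returning None falls through to the default status handling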
-""" -import py -import sys - -optionalhook = py.test.mark.optionalhook - -def pytest_addoption(parser): - group = parser.getgroup("terminal reporting", "reporting", after="general") - group._addoption('-v', '--verbose', action="count", - dest="verbose", default=0, help="increase verbosity."), - group._addoption('-r', - action="store", dest="reportchars", default=None, metavar="chars", - help="show extra test summary info as specified by chars (f)ailed, " - "(s)skipped, (x)failed, (X)passed.") - group._addoption('-l', '--showlocals', - action="store_true", dest="showlocals", default=False, - help="show locals in tracebacks (disabled by default).") - group._addoption('--report', - action="store", dest="report", default=None, metavar="opts", - help="(deprecated, use -r)") - group._addoption('--tb', metavar="style", - action="store", dest="tbstyle", default='long', - type="choice", choices=['long', 'short', 'no', 'line'], - help="traceback print mode (long/short/line/no).") - group._addoption('--fulltrace', - action="store_true", dest="fulltrace", default=False, - help="don't cut any tracebacks (default is to cut).") - group._addoption('--funcargs', - action="store_true", dest="showfuncargs", default=False, - help="show available function arguments, sorted by plugin") - -def pytest_configure(config): - if config.option.collectonly: - reporter = CollectonlyReporter(config) - elif config.option.showfuncargs: - config.setsessionclass(ShowFuncargSession) - reporter = None - else: - reporter = TerminalReporter(config) - if reporter: - # XXX see remote.py's XXX - for attr in 'pytest_terminal_hasmarkup', 'pytest_terminal_fullwidth': - if hasattr(config, attr): - #print "SETTING TERMINAL OPTIONS", attr, getattr(config, attr) - name = attr.split("_")[-1] - assert hasattr(self.reporter._tw, name), name - setattr(reporter._tw, name, getattr(config, attr)) - config.pluginmanager.register(reporter, 'terminalreporter') - -def getreportopt(config): - reportopts = "" - optvalue = config.getvalue("report") - if optvalue: - py.builtin.print_("DEPRECATED: use -r instead of --report option.", - file=py.std.sys.stderr) - if optvalue: - for setting in optvalue.split(","): - setting = setting.strip() - if setting == "skipped": - reportopts += "s" - elif setting == "xfailed": - reportopts += "x" - reportchars = config.getvalue("reportchars") - if reportchars: - for char in reportchars: - if char not in reportopts: - reportopts += char - return reportopts - -class TerminalReporter: - def __init__(self, config, file=None): - self.config = config - self.stats = {} - self.curdir = py.path.local() - if file is None: - file = py.std.sys.stdout - self._tw = py.io.TerminalWriter(file) - self.currentfspath = None - self.gateway2info = {} - self.reportchars = getreportopt(config) - - def hasopt(self, char): - char = {'xfailed': 'x', 'skipped': 's'}.get(char,char) - return char in self.reportchars - - def write_fspath_result(self, fspath, res): - fspath = self.curdir.bestrelpath(fspath) - if fspath != self.currentfspath: - self._tw.line() - relpath = self.curdir.bestrelpath(fspath) - self._tw.write(relpath + " ") - self.currentfspath = fspath - self._tw.write(res) - - def write_ensure_prefix(self, prefix, extra="", **kwargs): - if self.currentfspath != prefix: - self._tw.line() - self.currentfspath = prefix - self._tw.write(prefix) - if extra: - self._tw.write(extra, **kwargs) - self.currentfspath = -2 - - def ensure_newline(self): - if self.currentfspath: - self._tw.line() - self.currentfspath = None - - def write_line(self, 
line, **markup): - line = str(line) - self.ensure_newline() - self._tw.line(line, **markup) - - def write_sep(self, sep, title=None, **markup): - self.ensure_newline() - self._tw.sep(sep, title, **markup) - - def getcategoryletterword(self, rep): - res = self.config.hook.pytest_report_teststatus(report=rep) - if res: - return res - for cat in 'skipped failed passed ???'.split(): - if getattr(rep, cat, None): - break - return cat, self.getoutcomeletter(rep), self.getoutcomeword(rep) - - def getoutcomeletter(self, rep): - return rep.shortrepr - - def getoutcomeword(self, rep): - if rep.passed: - return "PASS", dict(green=True) - elif rep.failed: - return "FAIL", dict(red=True) - elif rep.skipped: - return "SKIP" - else: - return "???", dict(red=True) - - def gettestid(self, item, relative=True): - fspath = item.fspath - chain = [x for x in item.listchain() if x.fspath == fspath] - chain = chain[1:] - names = [x.name for x in chain if x.name != "()"] - path = item.fspath - if relative: - relpath = path.relto(self.curdir) - if relpath: - path = relpath - names.insert(0, str(path)) - return "::".join(names) - - - def pytest_internalerror(self, excrepr): - for line in str(excrepr).split("\n"): - self.write_line("INTERNALERROR> " + line) - - def pytest_plugin_registered(self, plugin): - if self.config.option.traceconfig: - msg = "PLUGIN registered: %s" %(plugin,) - # XXX this event may happen during setup/teardown time - # which unfortunately captures our output here - # which garbles our output if we use self.write_line - self.write_line(msg) - - @optionalhook - def pytest_gwmanage_newgateway(self, gateway, platinfo): - #self.write_line("%s instantiated gateway from spec %r" %(gateway.id, gateway.spec._spec)) - d = {} - d['version'] = repr_pythonversion(platinfo.version_info) - d['id'] = gateway.id - d['spec'] = gateway.spec._spec - d['platform'] = platinfo.platform - if self.config.option.verbose: - d['extra'] = "- " + platinfo.executable - else: - d['extra'] = "" - d['cwd'] = platinfo.cwd - infoline = ("[%(id)s] %(spec)s -- platform %(platform)s, " - "Python %(version)s " - "cwd: %(cwd)s" - "%(extra)s" % d) - self.write_line(infoline) - self.gateway2info[gateway] = infoline - - @optionalhook - def pytest_testnodeready(self, node): - self.write_line("[%s] txnode ready to receive tests" %(node.gateway.id,)) - - @optionalhook - def pytest_testnodedown(self, node, error): - if error: - self.write_line("[%s] node down, error: %s" %(node.gateway.id, error)) - - @optionalhook - def pytest_rescheduleitems(self, items): - if self.config.option.debug: - self.write_sep("!", "RESCHEDULING %s " %(items,)) - - @optionalhook - def pytest_looponfailinfo(self, failreports, rootdirs): - if failreports: - self.write_sep("#", "LOOPONFAILING", red=True) - for report in failreports: - loc = self._getcrashline(report) - self.write_line(loc, red=True) - self.write_sep("#", "waiting for changes") - for rootdir in rootdirs: - self.write_line("### Watching: %s" %(rootdir,), bold=True) - - - def pytest_trace(self, category, msg): - if self.config.option.debug or \ - self.config.option.traceconfig and category.find("config") != -1: - self.write_line("[%s] %s" %(category, msg)) - - def pytest_deselected(self, items): - self.stats.setdefault('deselected', []).append(items) - - def pytest_itemstart(self, item, node=None): - if getattr(self.config.option, 'dist', 'no') != "no": - # for dist-testing situations itemstart means we - # queued the item for sending, not interesting (unless debugging) - if self.config.option.debug: 
- line = self._reportinfoline(item) - extra = "" - if node: - extra = "-> [%s]" % node.gateway.id - self.write_ensure_prefix(line, extra) - else: - if self.config.option.verbose: - line = self._reportinfoline(item) - self.write_ensure_prefix(line, "") - else: - # ensure that the path is printed before the - # 1st test of a module starts running - - self.write_fspath_result(self._getfspath(item), "") - - def pytest__teardown_final_logerror(self, report): - self.stats.setdefault("error", []).append(report) - - def pytest_runtest_logreport(self, report): - rep = report - cat, letter, word = self.getcategoryletterword(rep) - if not letter and not word: - # probably passed setup/teardown - return - if isinstance(word, tuple): - word, markup = word - else: - markup = {} - self.stats.setdefault(cat, []).append(rep) - if not self.config.option.verbose: - self.write_fspath_result(self._getfspath(rep.item), letter) - else: - line = self._reportinfoline(rep.item) - if not hasattr(rep, 'node'): - self.write_ensure_prefix(line, word, **markup) - else: - self.ensure_newline() - if hasattr(rep, 'node'): - self._tw.write("[%s] " % rep.node.gateway.id) - self._tw.write(word, **markup) - self._tw.write(" " + line) - self.currentfspath = -2 - - def pytest_collectreport(self, report): - if not report.passed: - if report.failed: - self.stats.setdefault("error", []).append(report) - msg = report.longrepr.reprcrash.message - self.write_fspath_result(report.collector.fspath, "E") - elif report.skipped: - self.stats.setdefault("skipped", []).append(report) - self.write_fspath_result(report.collector.fspath, "S") - - def pytest_sessionstart(self, session): - self.write_sep("=", "test session starts", bold=True) - self._sessionstarttime = py.std.time.time() - - verinfo = ".".join(map(str, sys.version_info[:3])) - msg = "platform %s -- Python %s" % (sys.platform, verinfo) - msg += " -- pytest-%s" % (py.__version__) - if self.config.option.verbose or self.config.option.debug or getattr(self.config.option, 'pastebin', None): - msg += " -- " + str(sys.executable) - self.write_line(msg) - lines = self.config.hook.pytest_report_header(config=self.config) - lines.reverse() - for line in flatten(lines): - self.write_line(line) - for i, testarg in enumerate(self.config.args): - self.write_line("test object %d: %s" %(i+1, testarg)) - - def pytest_sessionfinish(self, exitstatus, __multicall__): - __multicall__.execute() - self._tw.line("") - if exitstatus in (0, 1, 2): - self.summary_errors() - self.summary_failures() - self.config.hook.pytest_terminal_summary(terminalreporter=self) - if exitstatus == 2: - self._report_keyboardinterrupt() - self.summary_deselected() - self.summary_stats() - - def pytest_keyboard_interrupt(self, excinfo): - self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) - - def _report_keyboardinterrupt(self): - excrepr = self._keyboardinterrupt_memo - msg = excrepr.reprcrash.message - self.write_sep("!", msg) - if "KeyboardInterrupt" in msg: - if self.config.getvalue("fulltrace"): - excrepr.toterminal(self._tw) - else: - excrepr.reprcrash.toterminal(self._tw) - - def _getcrashline(self, report): - try: - return report.longrepr.reprcrash - except AttributeError: - return str(report.longrepr)[:50] - - def _reportinfoline(self, item): - collect_fspath = self._getfspath(item) - fspath, lineno, msg = self._getreportinfo(item) - if fspath and fspath != collect_fspath: - fspath = "%s <- %s" % ( - self.curdir.bestrelpath(collect_fspath), - self.curdir.bestrelpath(fspath)) - elif fspath: - fspath = 
self.curdir.bestrelpath(fspath) - if lineno is not None: - lineno += 1 - if fspath and lineno and msg: - line = "%(fspath)s:%(lineno)s: %(msg)s" - elif fspath and msg: - line = "%(fspath)s: %(msg)s" - elif fspath and lineno: - line = "%(fspath)s:%(lineno)s %(extrapath)s" - else: - line = "[noreportinfo]" - return line % locals() + " " - - def _getfailureheadline(self, rep): - if hasattr(rep, "collector"): - return str(rep.collector.fspath) - elif hasattr(rep, 'item'): - fspath, lineno, msg = self._getreportinfo(rep.item) - return msg - else: - return "test session" - - def _getreportinfo(self, item): - try: - return item.__reportinfo - except AttributeError: - pass - reportinfo = item.config.hook.pytest_report_iteminfo(item=item) - # cache on item - item.__reportinfo = reportinfo - return reportinfo - - def _getfspath(self, item): - try: - return item.fspath - except AttributeError: - fspath, lineno, msg = self._getreportinfo(item) - return fspath - - # - # summaries for sessionfinish - # - - def summary_failures(self): - tbstyle = self.config.getvalue("tbstyle") - if 'failed' in self.stats and tbstyle != "no": - self.write_sep("=", "FAILURES") - for rep in self.stats['failed']: - if tbstyle == "line": - line = self._getcrashline(rep) - self.write_line(line) - else: - msg = self._getfailureheadline(rep) - self.write_sep("_", msg) - self.write_platinfo(rep) - rep.toterminal(self._tw) - - def summary_errors(self): - if 'error' in self.stats and self.config.option.tbstyle != "no": - self.write_sep("=", "ERRORS") - for rep in self.stats['error']: - msg = self._getfailureheadline(rep) - if not hasattr(rep, 'when'): - # collect - msg = "ERROR during collection " + msg - elif rep.when == "setup": - msg = "ERROR at setup of " + msg - elif rep.when == "teardown": - msg = "ERROR at teardown of " + msg - self.write_sep("_", msg) - self.write_platinfo(rep) - rep.toterminal(self._tw) - - def write_platinfo(self, rep): - if hasattr(rep, 'node'): - self.write_line(self.gateway2info.get( - rep.node.gateway, - "node %r (platinfo not found? strange)") - [:self._tw.fullwidth-1]) - - def summary_stats(self): - session_duration = py.std.time.time() - self._sessionstarttime - - keys = "failed passed skipped deselected".split() - for key in self.stats.keys(): - if key not in keys: - keys.append(key) - parts = [] - for key in keys: - val = self.stats.get(key, None) - if val: - parts.append("%d %s" %(len(val), key)) - line = ", ".join(parts) - # XXX coloring - self.write_sep("=", "%s in %.2f seconds" %(line, session_duration)) - - def summary_deselected(self): - if 'deselected' in self.stats: - self.write_sep("=", "%d tests deselected by %r" %( - len(self.stats['deselected']), self.config.option.keyword), bold=True) - - -class CollectonlyReporter: - INDENT = " " - - def __init__(self, config, out=None): - self.config = config - if out is None: - out = py.std.sys.stdout - self.out = py.io.TerminalWriter(out) - self.indent = "" - self._failed = [] - - def outindent(self, line): - self.out.line(self.indent + str(line)) - - def pytest_internalerror(self, excrepr): - for line in str(excrepr).split("\n"): - self.out.line("INTERNALERROR> " + line) - - def pytest_collectstart(self, collector): - self.outindent(collector) - self.indent += self.INDENT - - def pytest_itemstart(self, item, node=None): - self.outindent(item) - - def pytest_collectreport(self, report): - if not report.passed: - self.outindent("!!! %s !!!" 
% report.longrepr.reprcrash.message) - self._failed.append(report) - self.indent = self.indent[:-len(self.INDENT)] - - def pytest_sessionfinish(self, session, exitstatus): - if self._failed: - self.out.sep("!", "collection failures") - for rep in self._failed: - rep.toterminal(self.out) - - -def repr_pythonversion(v=None): - if v is None: - v = sys.version_info - try: - return "%s.%s.%s-%s-%s" % v - except (TypeError, ValueError): - return str(v) - -def flatten(l): - for x in l: - if isinstance(x, (list, tuple)): - for y in flatten(x): - yield y - else: - yield x - -from py._test.session import Session -class ShowFuncargSession(Session): - def main(self, colitems): - self.fspath = py.path.local() - self.sessionstarts() - try: - self.showargs(colitems[0]) - finally: - self.sessionfinishes(exitstatus=1) - - def showargs(self, colitem): - tw = py.io.TerminalWriter() - from py._test.funcargs import getplugins - from py._test.funcargs import FuncargRequest - plugins = getplugins(colitem, withpy=True) - verbose = self.config.getvalue("verbose") - for plugin in plugins: - available = [] - for name, factory in vars(plugin).items(): - if name.startswith(FuncargRequest._argprefix): - name = name[len(FuncargRequest._argprefix):] - if name not in available: - available.append([name, factory]) - if available: - pluginname = plugin.__name__ - for name, factory in available: - loc = self.getlocation(factory) - if verbose: - funcargspec = "%s -- %s" %(name, loc,) - else: - funcargspec = name - tw.line(funcargspec, green=True) - doc = factory.__doc__ or "" - if doc: - for line in doc.split("\n"): - tw.line(" " + line.strip()) - else: - tw.line(" %s: no docstring available" %(loc,), - red=True) - - def getlocation(self, function): - import inspect - fn = py.path.local(inspect.getfile(function)) - lineno = py.builtin._getcode(function).co_firstlineno - if fn.relto(self.fspath): - fn = fn.relto(self.fspath) - return "%s:%d" %(fn, lineno+1) diff --git a/py/_plugin/pytest_tmpdir.py b/py/_plugin/pytest_tmpdir.py deleted file mode 100644 index 4c105cb42c..0000000000 --- a/py/_plugin/pytest_tmpdir.py +++ /dev/null @@ -1,22 +0,0 @@ -"""provide temporary directories to test functions. - -usage example:: - - def test_plugin(tmpdir): - tmpdir.join("hello").write("hello") - -.. _`py.path.local`: ../../path.html - -""" -import py - -def pytest_funcarg__tmpdir(request): - """return a temporary directory path object - unique to each test function invocation, - created as a sub directory of the base temporary - directory. The returned object is a `py.path.local`_ - path object. - """ - name = request.function.__name__ - x = request.config.mktemp(name, numbered=True) - return x.realpath() diff --git a/py/_plugin/pytest_unittest.py b/py/_plugin/pytest_unittest.py deleted file mode 100644 index 54209d0d90..0000000000 --- a/py/_plugin/pytest_unittest.py +++ /dev/null @@ -1,81 +0,0 @@ -""" -automatically discover and run traditional "unittest.py" style tests. - -Usage ----------------- - -This plugin collects and runs Python `unittest.py style`_ tests. -It will automatically collect ``unittest.TestCase`` subclasses -and their ``test`` methods from the test modules of a project -(usually following the ``test_*.py`` pattern). - -This plugin is enabled by default. - -.. 
_`unittest.py style`: http://docs.python.org/library/unittest.html -""" -import py -import sys - -def pytest_pycollect_makeitem(collector, name, obj): - if 'unittest' not in sys.modules: - return # nobody derived unittest.TestCase - try: - isunit = issubclass(obj, py.std.unittest.TestCase) - except KeyboardInterrupt: - raise - except Exception: - pass - else: - if isunit: - return UnitTestCase(name, parent=collector) - -class UnitTestCase(py.test.collect.Class): - def collect(self): - return [UnitTestCaseInstance("()", self)] - - def setup(self): - pass - - def teardown(self): - pass - -_dummy = object() -class UnitTestCaseInstance(py.test.collect.Instance): - def collect(self): - loader = py.std.unittest.TestLoader() - names = loader.getTestCaseNames(self.obj.__class__) - l = [] - for name in names: - callobj = getattr(self.obj, name) - if py.builtin.callable(callobj): - l.append(UnitTestFunction(name, parent=self)) - return l - - def _getobj(self): - x = self.parent.obj - return self.parent.obj(methodName='run') - -class UnitTestFunction(py.test.collect.Function): - def __init__(self, name, parent, args=(), obj=_dummy, sort_value=None): - super(UnitTestFunction, self).__init__(name, parent) - self._args = args - if obj is not _dummy: - self._obj = obj - self._sort_value = sort_value - if hasattr(self.parent, 'newinstance'): - self.parent.newinstance() - self.obj = self._getobj() - - def runtest(self): - target = self.obj - args = self._args - target(*args) - - def setup(self): - instance = py.builtin._getimself(self.obj) - instance.setUp() - - def teardown(self): - instance = py.builtin._getimself(self.obj) - instance.tearDown() - diff --git a/py/_plugin/standalonetemplate.py b/py/_plugin/standalonetemplate.py deleted file mode 100755 index 2d238b578b..0000000000 --- a/py/_plugin/standalonetemplate.py +++ /dev/null @@ -1,63 +0,0 @@ -#! 
/usr/bin/env python - -sources = """ -@SOURCES@""" - -import sys -import base64 -import zlib -import imp - -class DictImporter(object): - def __init__(self, sources): - self.sources = sources - - def find_module(self, fullname, path=None): - if fullname in self.sources: - return self - if fullname+'.__init__' in self.sources: - return self - return None - - def load_module(self, fullname): - # print "load_module:", fullname - from types import ModuleType - try: - s = self.sources[fullname] - is_pkg = False - except KeyError: - s = self.sources[fullname+'.__init__'] - is_pkg = True - - co = compile(s, fullname, 'exec') - module = sys.modules.setdefault(fullname, ModuleType(fullname)) - module.__file__ = "%s/%s" % (__file__, fullname) - module.__loader__ = self - if is_pkg: - module.__path__ = [fullname] - - do_exec(co, module.__dict__) - return sys.modules[fullname] - - def get_source(self, name): - res = self.sources.get(name) - if res is None: - res = self.sources.get(name+'.__init__') - return res - -if __name__ == "__main__": - if sys.version_info >= (3,0): - exec("def do_exec(co, loc): exec(co, loc)\n") - import pickle - sources = sources.encode("ascii") # ensure bytes - sources = pickle.loads(zlib.decompress(base64.decodebytes(sources))) - else: - import cPickle as pickle - exec("def do_exec(co, loc): exec co in loc\n") - sources = pickle.loads(zlib.decompress(base64.decodestring(sources))) - - importer = DictImporter(sources) - sys.meta_path.append(importer) - - import py - py.cmdline.pytest() diff --git a/py/_process/cmdexec.py b/py/_process/cmdexec.py index 43c71629c7..4ceb647a90 100644 --- a/py/_process/cmdexec.py +++ b/py/_process/cmdexec.py @@ -16,11 +16,11 @@ def cmdexec(cmd): if the subprocess module does not provide a proper encoding/unicode strings sys.getdefaultencoding() will be used, if that does not exist, 'UTF-8'. """ - process = subprocess.Popen(cmd, shell=True, + process = subprocess.Popen(cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = process.communicate() - if sys.version_info[0] < 3: # on py3 we get unicode strings, on py2 not + if sys.version_info[0] < 3: # on py3 we get unicode strings, on py2 not try: default_encoding = sys.getdefaultencoding() # jython may not have it except AttributeError: diff --git a/py/_process/forkedfunc.py b/py/_process/forkedfunc.py index c8b6502465..604412c2e5 100644 --- a/py/_process/forkedfunc.py +++ b/py/_process/forkedfunc.py @@ -1,10 +1,10 @@ -""" +""" ForkedFunc provides a way to run a function in a forked process and get at its return value, stdout and stderr output as well - as signals and exitstatusus. + as signals and exitstatusus. 
- XXX see if tempdir handling is sane + XXX see if tempdir handling is sane """ import py @@ -29,8 +29,8 @@ class ForkedFunc(object): pid = os.fork() if pid: # in parent process - self.pid = pid - else: # in child process + self.pid = pid + else: # in child process self._child(nice_level) def _child(self, nice_level): @@ -65,7 +65,7 @@ class ForkedFunc(object): os.close(1) os.close(2) os._exit(EXITSTATUS) - + def waitfinish(self, waiter=os.waitpid): pid, systemstatus = waiter(self.pid, 0) if systemstatus: diff --git a/py/_process/killproc.py b/py/_process/killproc.py index 8fa628cbdd..18e8310b5f 100644 --- a/py/_process/killproc.py +++ b/py/_process/killproc.py @@ -7,7 +7,7 @@ if sys.platform == "win32" or getattr(os, '_name', '') == 'nt': except ImportError: def dokill(pid): py.process.cmdexec("taskkill /F /PID %d" %(pid,)) - else: + else: def dokill(pid): PROCESS_TERMINATE = 1 handle = ctypes.windll.kernel32.OpenProcess( @@ -16,7 +16,7 @@ if sys.platform == "win32" or getattr(os, '_name', '') == 'nt': ctypes.windll.kernel32.CloseHandle(handle) else: def dokill(pid): - os.kill(pid, 15) + os.kill(pid, 15) def kill(pid): """ kill process by id. """ diff --git a/py/_std.py b/py/_std.py index bca23ea2db..97a9853323 100644 --- a/py/_std.py +++ b/py/_std.py @@ -1,9 +1,9 @@ import sys class Std(object): - """ makes top-level python modules available as an attribute, - importing them on first access. - """ + """ makes top-level python modules available as an attribute, + importing them on first access. + """ def __init__(self): self.__dict__ = sys.modules diff --git a/py/_test/__init__.py b/py/_test/__init__.py deleted file mode 100644 index 86bf9b3e17..0000000000 --- a/py/_test/__init__.py +++ /dev/null @@ -1 +0,0 @@ -""" assertion and py.test helper API.""" diff --git a/py/_test/cmdline.py b/py/_test/cmdline.py deleted file mode 100644 index 80ca99d4f7..0000000000 --- a/py/_test/cmdline.py +++ /dev/null @@ -1,24 +0,0 @@ -import py -import sys - -# -# main entry point -# - -def main(args=None): - if args is None: - args = sys.argv[1:] - config = py.test.config - try: - config.parse(args) - config.pluginmanager.do_configure(config) - session = config.initsession() - colitems = config.getinitialnodes() - exitstatus = session.main(colitems) - config.pluginmanager.do_unconfigure(config) - except config.Error: - e = sys.exc_info()[1] - sys.stderr.write("ERROR: %s\n" %(e.args[0],)) - exitstatus = 3 - py.test.config = py.test.config.__class__() - return exitstatus diff --git a/py/_test/collect.py b/py/_test/collect.py deleted file mode 100644 index 60d9a1fa1b..0000000000 --- a/py/_test/collect.py +++ /dev/null @@ -1,418 +0,0 @@ -""" -test collection nodes, forming a tree, Items are leafs. -""" -import py - -def configproperty(name): - def fget(self): - #print "retrieving %r property from %s" %(name, self.fspath) - return self.config._getcollectclass(name, self.fspath) - return property(fget) - -class HookProxy: - def __init__(self, node): - self.node = node - def __getattr__(self, name): - if name[0] == "_": - raise AttributeError(name) - hookmethod = getattr(self.node.config.hook, name) - def call_matching_hooks(**kwargs): - plugins = self.node.config._getmatchingplugins(self.node.fspath) - return hookmethod.pcall(plugins, **kwargs) - return call_matching_hooks - -class Node(object): - """ base class for all Nodes in the collection tree. - Collector subclasses have children, Items are terminal nodes. 
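``HookProxy`` above turns attribute access on ``node.ihook`` into a hook call that is dispatched to the plugins matching the node's path. A stripped-down, standalone model of that dispatch (the mini classes and the demo plugin are invented for the example; only ``pytest_report_header`` is a hook name that actually appears in this diff)::

    class MiniHookProxy:
        def __init__(self, plugins):
            self.plugins = plugins

        def __getattr__(self, name):
            if name.startswith("_"):
                raise AttributeError(name)
            def call_matching(**kwargs):
                # call every plugin that implements a hook of this name
                results = []
                for plugin in self.plugins:
                    method = getattr(plugin, name, None)
                    if method is not None:
                        results.append(method(**kwargs))
                return results
            return call_matching

    class DemoPlugin:
        def pytest_report_header(self, config):
            return "demo header"

    hook = MiniHookProxy([DemoPlugin()])
    assert hook.pytest_report_header(config=None) == ["demo header"]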
- """ - def __init__(self, name, parent=None, config=None): - self.name = name - self.parent = parent - self.config = config or parent.config - self.fspath = getattr(parent, 'fspath', None) - self.ihook = HookProxy(self) - - def _reraiseunpicklingproblem(self): - if hasattr(self, '_unpickle_exc'): - py.builtin._reraise(*self._unpickle_exc) - - # - # note to myself: Pickling is uh. - # - def __getstate__(self): - return (self.name, self.parent) - def __setstate__(self, nameparent): - name, parent = nameparent - try: - colitems = parent._memocollect() - for colitem in colitems: - if colitem.name == name: - # we are a copy that will not be returned - # by our parent - self.__dict__ = colitem.__dict__ - break - else: - raise ValueError("item %r not found in parent collection %r" %( - name, [x.name for x in colitems])) - except KeyboardInterrupt: - raise - except Exception: - # our parent can't collect us but we want unpickling to - # otherwise continue - self._reraiseunpicklingproblem() will - # reraise the problem - self._unpickle_exc = py.std.sys.exc_info() - self.name = name - self.parent = parent - self.config = parent.config - - def __repr__(self): - if getattr(self.config.option, 'debug', False): - return "<%s %r %0x>" %(self.__class__.__name__, - getattr(self, 'name', None), id(self)) - else: - return "<%s %r>" %(self.__class__.__name__, - getattr(self, 'name', None)) - - # methods for ordering nodes - - def __eq__(self, other): - if not isinstance(other, Node): - return False - return self.name == other.name and self.parent == other.parent - - def __ne__(self, other): - return not self == other - - def __hash__(self): - return hash((self.name, self.parent)) - - def setup(self): - pass - - def teardown(self): - pass - - def _memoizedcall(self, attrname, function): - exattrname = "_ex_" + attrname - failure = getattr(self, exattrname, None) - if failure is not None: - py.builtin._reraise(failure[0], failure[1], failure[2]) - if hasattr(self, attrname): - return getattr(self, attrname) - try: - res = function() - except (KeyboardInterrupt, SystemExit): - raise - except: - failure = py.std.sys.exc_info() - setattr(self, exattrname, failure) - raise - setattr(self, attrname, res) - return res - - def listchain(self): - """ return list of all parent collectors up to self, - starting from root of collection tree. """ - l = [self] - while 1: - x = l[0] - if x.parent is not None and x.parent.parent is not None: - l.insert(0, x.parent) - else: - return l - - def listnames(self): - return [x.name for x in self.listchain()] - - def getparent(self, cls): - current = self - while current and not isinstance(current, cls): - current = current.parent - return current - - def readkeywords(self): - return dict([(x, True) for x in self._keywords()]) - - def _keywords(self): - return [self.name] - - def _skipbykeyword(self, keywordexpr): - """ return True if they given keyword expression means to - skip this collector/item. - """ - if not keywordexpr: - return - chain = self.listchain() - for key in filter(None, keywordexpr.split()): - eor = key[:1] == '-' - if eor: - key = key[1:] - if not (eor ^ self._matchonekeyword(key, chain)): - return True - - def _matchonekeyword(self, key, chain): - elems = key.split(".") - # XXX O(n^2), anyone cares? 
- chain = [item.readkeywords() for item in chain if item._keywords()] - for start, _ in enumerate(chain): - if start + len(elems) > len(chain): - return False - for num, elem in enumerate(elems): - for keyword in chain[num + start]: - ok = False - if elem in keyword: - ok = True - break - if not ok: - break - if num == len(elems) - 1 and ok: - return True - return False - - def _prunetraceback(self, traceback): - return traceback - - def _repr_failure_py(self, excinfo, style=None): - excinfo.traceback = self._prunetraceback(excinfo.traceback) - # XXX should excinfo.getrepr record all data and toterminal() - # process it? - if style is None: - if self.config.option.tbstyle == "short": - style = "short" - else: - style = "long" - return excinfo.getrepr(funcargs=True, - showlocals=self.config.option.showlocals, - style=style) - - repr_failure = _repr_failure_py - shortfailurerepr = "F" - -class Collector(Node): - """ - Collector instances create children through collect() - and thus iteratively build a tree. attributes:: - - parent: attribute pointing to the parent collector - (or None if this is the root collector) - name: basename of this collector object - """ - Directory = configproperty('Directory') - Module = configproperty('Module') - - def collect(self): - """ returns a list of children (items and collectors) - for this collection node. - """ - raise NotImplementedError("abstract") - - def collect_by_name(self, name): - """ return a child matching the given name, else None. """ - for colitem in self._memocollect(): - if colitem.name == name: - return colitem - - def repr_failure(self, excinfo, outerr=None): - """ represent a failure. """ - assert outerr is None, "XXX deprecated" - return self._repr_failure_py(excinfo) - - def _memocollect(self): - """ internal helper method to cache results of calling collect(). """ - return self._memoizedcall('_collected', self.collect) - - # ********************************************************************** - # DEPRECATED METHODS - # ********************************************************************** - - def _deprecated_collect(self): - # avoid recursion: - # collect -> _deprecated_collect -> custom run() -> - # super().run() -> collect - attrname = '_depcollectentered' - if hasattr(self, attrname): - return - setattr(self, attrname, True) - method = getattr(self.__class__, 'run', None) - if method is not None and method != Collector.run: - warnoldcollect(function=method) - names = self.run() - return [x for x in [self.join(name) for name in names] if x] - - def run(self): - """ DEPRECATED: returns a list of names available from this collector. - You can return an empty list. Callers of this method - must take care to catch exceptions properly. - """ - return [colitem.name for colitem in self._memocollect()] - - def join(self, name): - """ DEPRECATED: return a child collector or item for the given name. - If the return value is None there is no such child. 
- """ - return self.collect_by_name(name) - - def _prunetraceback(self, traceback): - if hasattr(self, 'fspath'): - path = self.fspath - ntraceback = traceback.cut(path=self.fspath) - if ntraceback == traceback: - ntraceback = ntraceback.cut(excludepath=py._pydir) - traceback = ntraceback.filter() - return traceback - -class FSCollector(Collector): - def __init__(self, fspath, parent=None, config=None): - fspath = py.path.local(fspath) - super(FSCollector, self).__init__(fspath.basename, parent, config=config) - self.fspath = fspath - - def __getstate__(self): - # RootCollector.getbynames() inserts a directory which we need - # to throw out here for proper re-instantiation - if isinstance(self.parent.parent, RootCollector): - assert self.parent.fspath == self.parent.parent.fspath, self.parent - return (self.name, self.parent.parent) # shortcut - return super(Collector, self).__getstate__() - -class File(FSCollector): - """ base class for collecting tests from a file. """ - -class Directory(FSCollector): - def recfilter(self, path): - if path.check(dir=1, dotfile=0): - return path.basename not in ('CVS', '_darcs', '{arch}') - - def collect(self): - l = self._deprecated_collect() - if l is not None: - return l - l = [] - for path in self.fspath.listdir(sort=True): - res = self.consider(path) - if res is not None: - if isinstance(res, (list, tuple)): - l.extend(res) - else: - l.append(res) - return l - - def consider(self, path): - if self.ihook.pytest_ignore_collect(path=path, config=self.config): - return - if path.check(file=1): - res = self.consider_file(path) - elif path.check(dir=1): - res = self.consider_dir(path) - else: - res = None - if isinstance(res, list): - # throw out identical results - l = [] - for x in res: - if x not in l: - assert x.parent == self, (x.parent, self) - assert x.fspath == path, (x.fspath, path) - l.append(x) - res = l - return res - - def consider_file(self, path): - return self.ihook.pytest_collect_file(path=path, parent=self) - - def consider_dir(self, path, usefilters=None): - if usefilters is not None: - py.log._apiwarn("0.99", "usefilters argument not needed") - return self.ihook.pytest_collect_directory(path=path, parent=self) - -class Item(Node): - """ a basic test item. """ - def _deprecated_testexecution(self): - if self.__class__.run != Item.run: - warnoldtestrun(function=self.run) - elif self.__class__.execute != Item.execute: - warnoldtestrun(function=self.execute) - else: - return False - self.run() - return True - - def run(self): - """ deprecated, here because subclasses might call it. """ - return self.execute(self.obj) - - def execute(self, obj): - """ deprecated, here because subclasses might call it. 
""" - return obj() - - def reportinfo(self): - return self.fspath, None, "" - -def warnoldcollect(function=None): - py.log._apiwarn("1.0", - "implement collector.collect() instead of " - "collector.run() and collector.join()", - stacklevel=2, function=function) - -def warnoldtestrun(function=None): - py.log._apiwarn("1.0", - "implement item.runtest() instead of " - "item.run() and item.execute()", - stacklevel=2, function=function) - - - -class RootCollector(Directory): - def __init__(self, config): - Directory.__init__(self, config.topdir, parent=None, config=config) - self.name = None - - def __repr__(self): - return "<RootCollector fspath=%r>" %(self.fspath,) - - def getbynames(self, names): - current = self.consider(self.config.topdir) - while names: - name = names.pop(0) - if name == ".": # special "identity" name - continue - l = [] - for x in current._memocollect(): - if x.name == name: - l.append(x) - elif x.fspath == current.fspath.join(name): - l.append(x) - elif x.name == "()": - names.insert(0, name) - l.append(x) - break - if not l: - raise ValueError("no node named %r below %r" %(name, current)) - current = l[0] - return current - - def totrail(self, node): - chain = node.listchain() - names = [self._getrelpath(chain[0].fspath)] - names += [x.name for x in chain[1:]] - return names - - def fromtrail(self, trail): - return self.config._rootcol.getbynames(trail) - - def _getrelpath(self, fspath): - topdir = self.config.topdir - relpath = fspath.relto(topdir) - if not relpath: - if fspath == topdir: - relpath = "." - else: - raise ValueError("%r not relative to topdir %s" - %(self.fspath, topdir)) - return relpath - - def __getstate__(self): - return self.config - - def __setstate__(self, config): - self.__init__(config) diff --git a/py/_test/config.py b/py/_test/config.py deleted file mode 100644 index 03e27ece51..0000000000 --- a/py/_test/config.py +++ /dev/null @@ -1,291 +0,0 @@ -import py, os -from py._test.conftesthandle import Conftest -from py._test.pluginmanager import PluginManager -from py._test import parseopt -from py._test.collect import RootCollector - -def ensuretemp(string, dir=1): - """ (deprecated) return temporary directory path with - the given string as the trailing part. It is usually - better to use the 'tmpdir' function argument which will - take care to provide empty unique directories for each - test call even if the test is called multiple times. - """ - #py.log._apiwarn(">1.1", "use tmpdir function argument") - return py.test.config.ensuretemp(string, dir=dir) - -class CmdOptions(object): - """ holds cmdline options as attributes.""" - def __init__(self, **kwargs): - self.__dict__.update(kwargs) - def __repr__(self): - return "<CmdOptions %r>" %(self.__dict__,) - -class Error(Exception): - """ Test Configuration Error. """ - -class Config(object): - """ access to config values, pluginmanager and plugin hooks. 
""" - Option = py.std.optparse.Option - Error = Error - basetemp = None - _sessionclass = None - - def __init__(self, topdir=None, option=None): - self.option = option or CmdOptions() - self.topdir = topdir - self._parser = parseopt.Parser( - usage="usage: %prog [options] [file_or_dir] [file_or_dir] [...]", - processopt=self._processopt, - ) - self.pluginmanager = PluginManager() - self._conftest = Conftest(onimport=self._onimportconftest) - self.hook = self.pluginmanager.hook - - def _onimportconftest(self, conftestmodule): - self.trace("loaded conftestmodule %r" %(conftestmodule,)) - self.pluginmanager.consider_conftest(conftestmodule) - - def _getmatchingplugins(self, fspath): - allconftests = self._conftest._conftestpath2mod.values() - plugins = [x for x in self.pluginmanager.getplugins() - if x not in allconftests] - plugins += self._conftest.getconftestmodules(fspath) - return plugins - - def trace(self, msg): - if getattr(self.option, 'traceconfig', None): - self.hook.pytest_trace(category="config", msg=msg) - - def _processopt(self, opt): - if hasattr(opt, 'default') and opt.dest: - val = os.environ.get("PYTEST_OPTION_" + opt.dest.upper(), None) - if val is not None: - if opt.type == "int": - val = int(val) - elif opt.type == "long": - val = long(val) - elif opt.type == "float": - val = float(val) - elif not opt.type and opt.action in ("store_true", "store_false"): - val = eval(val) - opt.default = val - else: - name = "option_" + opt.dest - try: - opt.default = self._conftest.rget(name) - except (ValueError, KeyError): - pass - if not hasattr(self.option, opt.dest): - setattr(self.option, opt.dest, opt.default) - - def _preparse(self, args): - self.pluginmanager.consider_setuptools_entrypoints() - self.pluginmanager.consider_env() - self.pluginmanager.consider_preparse(args) - self._conftest.setinitial(args) - self.pluginmanager.do_addoption(self._parser) - - def parse(self, args): - """ parse cmdline arguments into this config object. - Note that this can only be called once per testing process. - """ - assert not hasattr(self, 'args'), ( - "can only parse cmdline args at most once per Config object") - self._preparse(args) - self._parser.hints.extend(self.pluginmanager._hints) - args = self._parser.parse_setoption(args, self.option) - if not args: - args.append(py.std.os.getcwd()) - self.topdir = gettopdir(args) - self._rootcol = RootCollector(config=self) - self._setargs(args) - - def _setargs(self, args): - self.args = list(args) - self._argfspaths = [py.path.local(decodearg(x)[0]) for x in args] - - # config objects are usually pickled across system - # barriers but they contain filesystem paths. - # upon getstate/setstate we take care to do everything - # relative to "topdir". 
- def __getstate__(self): - l = [] - for path in self.args: - path = py.path.local(path) - l.append(path.relto(self.topdir)) - return l, self.option.__dict__ - - def __setstate__(self, repr): - # we have to set py.test.config because loading - # of conftest files may use it (deprecated) - # mainly by py.test.config.addoptions() - global config_per_process - py.test.config = config_per_process = self - args, cmdlineopts = repr - cmdlineopts = CmdOptions(**cmdlineopts) - # next line will registers default plugins - self.__init__(topdir=py.path.local(), option=cmdlineopts) - self._rootcol = RootCollector(config=self) - args = [str(self.topdir.join(x)) for x in args] - self._preparse(args) - self._setargs(args) - - def ensuretemp(self, string, dir=True): - return self.getbasetemp().ensure(string, dir=dir) - - def getbasetemp(self): - if self.basetemp is None: - basetemp = self.option.basetemp - if basetemp: - basetemp = py.path.local(basetemp) - if not basetemp.check(dir=1): - basetemp.mkdir() - else: - basetemp = py.path.local.make_numbered_dir(prefix='pytest-') - self.basetemp = basetemp - return self.basetemp - - def mktemp(self, basename, numbered=False): - basetemp = self.getbasetemp() - if not numbered: - return basetemp.mkdir(basename) - else: - return py.path.local.make_numbered_dir(prefix=basename, - keep=0, rootdir=basetemp, lock_timeout=None) - - def getinitialnodes(self): - return [self.getnode(arg) for arg in self.args] - - def getnode(self, arg): - parts = decodearg(arg) - path = py.path.local(parts.pop(0)) - if not path.check(): - raise self.Error("file not found: %s" %(path,)) - topdir = self.topdir - if path != topdir and not path.relto(topdir): - raise self.Error("path %r is not relative to %r" % - (str(path), str(topdir))) - # assumtion: pytest's fs-collector tree follows the filesystem tree - names = list(filter(None, path.relto(topdir).split(path.sep))) - names += parts - try: - return self._rootcol.getbynames(names) - except ValueError: - e = py.std.sys.exc_info()[1] - raise self.Error("can't collect: %s\n%s" % (arg, e.args[0])) - - def _getcollectclass(self, name, path): - try: - cls = self._conftest.rget(name, path) - except KeyError: - return getattr(py.test.collect, name) - else: - py.log._apiwarn(">1.1", "%r was found in a conftest.py file, " - "use pytest_collect hooks instead." % (cls,)) - return cls - - def getconftest_pathlist(self, name, path=None): - """ return a matching value, which needs to be sequence - of filenames that will be returned as a list of Path - objects (they can be relative to the location - where they were found). - """ - try: - mod, relroots = self._conftest.rget_with_confmod(name, path) - except KeyError: - return None - modpath = py.path.local(mod.__file__).dirpath() - l = [] - for relroot in relroots: - if not isinstance(relroot, py.path.local): - relroot = relroot.replace("/", py.path.local.sep) - relroot = modpath.join(relroot, abs=True) - l.append(relroot) - return l - - def addoptions(self, groupname, *specs): - """ add a named group of options to the current testing session. - This function gets invoked during testing session initialization. 
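An alternative to ``Config.addoptions()`` above, and the one the plugins earlier in this diff actually use, is the ``pytest_addoption(parser)`` hook together with ``parser.getgroup()`` and ``group.addoption()``. A hypothetical conftest.py sketch of that pattern (the ``--demo-timeout`` option is invented for the example)::

    def pytest_addoption(parser):
        group = parser.getgroup("general")
        group.addoption("--demo-timeout",
            action="store", dest="demo_timeout", type="int", default=10,
            help="illustrative option registered from a conftest.py")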
- """ - py.log._apiwarn("1.0", "define pytest_addoptions(parser) to add options", stacklevel=2) - group = self._parser.getgroup(groupname) - for opt in specs: - group._addoption_instance(opt) - return self.option - - def addoption(self, *optnames, **attrs): - return self._parser.addoption(*optnames, **attrs) - - def getvalueorskip(self, name, path=None): - """ return getvalue() or call py.test.skip if no value exists. """ - try: - val = self.getvalue(name, path) - if val is None: - raise KeyError(name) - return val - except KeyError: - py.test.skip("no %r value found" %(name,)) - - def getvalue(self, name, path=None): - """ return 'name' value looked up from the 'options' - and then from the first conftest file found up - the path (including the path itself). - if path is None, lookup the value in the initial - conftest modules found during command line parsing. - """ - try: - return getattr(self.option, name) - except AttributeError: - return self._conftest.rget(name, path) - - def setsessionclass(self, cls): - if self._sessionclass is not None: - raise ValueError("sessionclass already set to: %r" %( - self._sessionclass)) - self._sessionclass = cls - - def initsession(self): - """ return an initialized session object. """ - cls = self._sessionclass - if cls is None: - from py._test.session import Session - cls = Session - session = cls(self) - self.trace("instantiated session %r" % session) - return session - -# -# helpers -# - -def gettopdir(args): - """ return the top directory for the given paths. - if the common base dir resides in a python package - parent directory of the root package is returned. - """ - fsargs = [py.path.local(decodearg(arg)[0]) for arg in args] - p = fsargs and fsargs[0] or None - for x in fsargs[1:]: - p = p.common(x) - assert p, "cannot determine common basedir of %s" %(fsargs,) - pkgdir = p.pypkgpath() - if pkgdir is None: - if p.check(file=1): - p = p.dirpath() - return p - else: - return pkgdir.dirpath() - -def decodearg(arg): - arg = str(arg) - return arg.split("::") - -def onpytestaccess(): - # it's enough to have our containing module loaded as - # it initializes a per-process config instance - # which loads default plugins which add to py.test.* - pass - -# a default per-process instance of py.test configuration -config_per_process = Config() diff --git a/py/_test/conftesthandle.py b/py/_test/conftesthandle.py deleted file mode 100644 index 8c345eb37e..0000000000 --- a/py/_test/conftesthandle.py +++ /dev/null @@ -1,113 +0,0 @@ -import py - -class Conftest(object): - """ the single place for accessing values and interacting - towards conftest modules from py.test objects. - - (deprecated) - Note that triggering Conftest instances to import - conftest.py files may result in added cmdline options. - """ - def __init__(self, onimport=None, confcutdir=None): - self._path2confmods = {} - self._onimport = onimport - self._conftestpath2mod = {} - self._confcutdir = confcutdir - - def setinitial(self, args): - """ try to find a first anchor path for looking up global values - from conftests. This function is usually called _before_ - argument parsing. conftest files may add command line options - and we thus have no completely safe way of determining - which parts of the arguments are actually related to options - and which are file system paths. We just try here to get - bootstrapped ... 
- """ - current = py.path.local() - opt = '--confcutdir' - for i in range(len(args)): - opt1 = str(args[i]) - if opt1.startswith(opt): - if opt1 == opt: - if len(args) > i: - p = current.join(args[i+1], abs=True) - elif opt1.startswith(opt + "="): - p = current.join(opt1[len(opt)+1:], abs=1) - self._confcutdir = p - break - for arg in args + [current]: - anchor = current.join(arg, abs=1) - if anchor.check(): # we found some file object - self._path2confmods[None] = self.getconftestmodules(anchor) - # let's also consider test* dirs - if anchor.check(dir=1): - for x in anchor.listdir(lambda x: x.check(dir=1, dotfile=0)): - self.getconftestmodules(x) - break - else: - assert 0, "no root of filesystem?" - - def getconftestmodules(self, path): - """ return a list of imported conftest modules for the given path. """ - try: - clist = self._path2confmods[path] - except KeyError: - if path is None: - raise ValueError("missing default confest.") - dp = path.dirpath() - if dp == path: - clist = [] - else: - cutdir = self._confcutdir - clist = self.getconftestmodules(dp) - if cutdir and path != cutdir and not path.relto(cutdir): - pass - else: - conftestpath = path.join("conftest.py") - if conftestpath.check(file=1): - clist.append(self.importconftest(conftestpath)) - self._path2confmods[path] = clist - # be defensive: avoid changes from caller side to - # affect us by always returning a copy of the actual list - return clist[:] - - def rget(self, name, path=None): - mod, value = self.rget_with_confmod(name, path) - return value - - def rget_with_confmod(self, name, path=None): - modules = self.getconftestmodules(path) - modules.reverse() - for mod in modules: - try: - return mod, getattr(mod, name) - except AttributeError: - continue - raise KeyError(name) - - def importconftest(self, conftestpath): - assert conftestpath.check(), conftestpath - try: - return self._conftestpath2mod[conftestpath] - except KeyError: - if not conftestpath.dirpath('__init__.py').check(file=1): - # HACK: we don't want any "globally" imported conftest.py, - # prone to conflicts and subtle problems - modname = str(conftestpath).replace('.', conftestpath.sep) - mod = conftestpath.pyimport(modname=modname) - else: - mod = conftestpath.pyimport() - self._conftestpath2mod[conftestpath] = mod - dirpath = conftestpath.dirpath() - if dirpath in self._path2confmods: - for path, mods in self._path2confmods.items(): - if path and path.relto(dirpath) or path == dirpath: - assert mod not in mods - mods.append(mod) - self._postimport(mod) - return mod - - def _postimport(self, mod): - if self._onimport: - self._onimport(mod) - return mod diff --git a/py/_test/funcargs.py b/py/_test/funcargs.py deleted file mode 100644 index 4209acac2d..0000000000 --- a/py/_test/funcargs.py +++ /dev/null @@ -1,176 +0,0 @@ -import py - -def getfuncargnames(function): - argnames = py.std.inspect.getargs(py.code.getrawcode(function))[0] - startindex = py.std.inspect.ismethod(function) and 1 or 0 - defaults = getattr(function, 'func_defaults', - getattr(function, '__defaults__', None)) or () - numdefaults = len(defaults) - if numdefaults: - return argnames[startindex:-numdefaults] - return argnames[startindex:] - -def fillfuncargs(function): - """ fill missing funcargs. 
""" - request = FuncargRequest(pyfuncitem=function) - request._fillfuncargs() - -def getplugins(node, withpy=False): # might by any node - plugins = node.config._getmatchingplugins(node.fspath) - if withpy: - mod = node.getparent(py.test.collect.Module) - if mod is not None: - plugins.append(mod.obj) - inst = node.getparent(py.test.collect.Instance) - if inst is not None: - plugins.append(inst.obj) - return plugins - -_notexists = object() -class CallSpec: - def __init__(self, funcargs, id, param): - self.funcargs = funcargs - self.id = id - if param is not _notexists: - self.param = param - def __repr__(self): - return "<CallSpec id=%r param=%r funcargs=%r>" %( - self.id, getattr(self, 'param', '?'), self.funcargs) - -class Metafunc: - def __init__(self, function, config=None, cls=None, module=None): - self.config = config - self.module = module - self.function = function - self.funcargnames = getfuncargnames(function) - self.cls = cls - self.module = module - self._calls = [] - self._ids = py.builtin.set() - - def addcall(self, funcargs=None, id=_notexists, param=_notexists): - assert funcargs is None or isinstance(funcargs, dict) - if id is None: - raise ValueError("id=None not allowed") - if id is _notexists: - id = len(self._calls) - id = str(id) - if id in self._ids: - raise ValueError("duplicate id %r" % id) - self._ids.add(id) - self._calls.append(CallSpec(funcargs, id, param)) - -class FuncargRequest: - _argprefix = "pytest_funcarg__" - _argname = None - - class LookupError(LookupError): - """ error on performing funcarg request. """ - - def __init__(self, pyfuncitem): - self._pyfuncitem = pyfuncitem - self.function = pyfuncitem.obj - self.module = pyfuncitem.getparent(py.test.collect.Module).obj - clscol = pyfuncitem.getparent(py.test.collect.Class) - self.cls = clscol and clscol.obj or None - self.instance = py.builtin._getimself(self.function) - self.config = pyfuncitem.config - self.fspath = pyfuncitem.fspath - if hasattr(pyfuncitem, '_requestparam'): - self.param = pyfuncitem._requestparam - self._plugins = getplugins(pyfuncitem, withpy=True) - self._funcargs = self._pyfuncitem.funcargs.copy() - self._name2factory = {} - self._currentarg = None - - def _fillfuncargs(self): - argnames = getfuncargnames(self.function) - if argnames: - assert not getattr(self._pyfuncitem, '_args', None), ( - "yielded functions cannot have funcargs") - for argname in argnames: - if argname not in self._pyfuncitem.funcargs: - self._pyfuncitem.funcargs[argname] = self.getfuncargvalue(argname) - - def cached_setup(self, setup, teardown=None, scope="module", extrakey=None): - """ cache and return result of calling setup(). - - The requested argument name, the scope and the ``extrakey`` - determine the cache key. The scope also determines when - teardown(result) will be called. valid scopes are: - scope == 'function': when the single test function run finishes. - scope == 'module': when tests in a different module are run - scope == 'session': when tests of the session have run. - """ - if not hasattr(self.config, '_setupcache'): - self.config._setupcache = {} # XXX weakref? 
- cachekey = (self._currentarg, self._getscopeitem(scope), extrakey) - cache = self.config._setupcache - try: - val = cache[cachekey] - except KeyError: - val = setup() - cache[cachekey] = val - if teardown is not None: - def finalizer(): - del cache[cachekey] - teardown(val) - self._addfinalizer(finalizer, scope=scope) - return val - - def getfuncargvalue(self, argname): - try: - return self._funcargs[argname] - except KeyError: - pass - if argname not in self._name2factory: - self._name2factory[argname] = self.config.pluginmanager.listattr( - plugins=self._plugins, - attrname=self._argprefix + str(argname) - ) - #else: we are called recursively - if not self._name2factory[argname]: - self._raiselookupfailed(argname) - funcargfactory = self._name2factory[argname].pop() - oldarg = self._currentarg - self._currentarg = argname - try: - self._funcargs[argname] = res = funcargfactory(request=self) - finally: - self._currentarg = oldarg - return res - - def _getscopeitem(self, scope): - if scope == "function": - return self._pyfuncitem - elif scope == "module": - return self._pyfuncitem.getparent(py.test.collect.Module) - elif scope == "session": - return None - raise ValueError("unknown finalization scope %r" %(scope,)) - - def _addfinalizer(self, finalizer, scope): - colitem = self._getscopeitem(scope) - self.config._setupstate.addfinalizer( - finalizer=finalizer, colitem=colitem) - - def addfinalizer(self, finalizer): - """ call the given finalizer after test function finished execution. """ - self._addfinalizer(finalizer, scope="function") - - def __repr__(self): - return "<FuncargRequest for %r>" %(self._pyfuncitem) - - def _raiselookupfailed(self, argname): - available = [] - for plugin in self._plugins: - for name in vars(plugin): - if name.startswith(self._argprefix): - name = name[len(self._argprefix):] - if name not in available: - available.append(name) - fspath, lineno, msg = self._pyfuncitem.reportinfo() - msg = "LookupError: no factory found for function argument %r" % (argname,) - msg += "\n available funcargs: %s" %(", ".join(available),) - msg += "\n use 'py.test --funcargs [testpath]' for help on them." - raise self.LookupError(msg) diff --git a/py/_test/parseopt.py b/py/_test/parseopt.py deleted file mode 100644 index 8e282f3371..0000000000 --- a/py/_test/parseopt.py +++ /dev/null @@ -1,112 +0,0 @@ -""" -thin wrapper around Python's optparse.py -adding some extra checks and ways to systematically -have Environment variables provide default values -for options. basic usage: - - >>> parser = Parser() - >>> parser.addoption("--hello", action="store_true", dest="hello") - >>> option, args = parser.parse(['--hello']) - >>> option.hello - True - >>> args - [] - -""" -import py -import optparse - -class Parser: - """ Parser for command line arguments. 
""" - - def __init__(self, usage=None, processopt=None): - self._anonymous = OptionGroup("custom options", parser=self) - self._groups = [] - self._processopt = processopt - self._usage = usage - self.hints = [] - - def processoption(self, option): - if self._processopt: - if option.dest: - self._processopt(option) - - def addnote(self, note): - self._notes.append(note) - - def getgroup(self, name, description="", after=None): - for group in self._groups: - if group.name == name: - return group - group = OptionGroup(name, description, parser=self) - i = 0 - for i, grp in enumerate(self._groups): - if grp.name == after: - break - self._groups.insert(i+1, group) - return group - - addgroup = getgroup - def addgroup(self, name, description=""): - py.log._apiwarn("1.1", "use getgroup() which gets-or-creates") - return self.getgroup(name, description) - - def addoption(self, *opts, **attrs): - """ add an optparse-style option. """ - self._anonymous.addoption(*opts, **attrs) - - def parse(self, args): - optparser = MyOptionParser(self) - groups = self._groups + [self._anonymous] - for group in groups: - if group.options: - desc = group.description or group.name - optgroup = optparse.OptionGroup(optparser, desc) - optgroup.add_options(group.options) - optparser.add_option_group(optgroup) - return optparser.parse_args([str(x) for x in args]) - - def parse_setoption(self, args, option): - parsedoption, args = self.parse(args) - for name, value in parsedoption.__dict__.items(): - setattr(option, name, value) - return args - - -class OptionGroup: - def __init__(self, name, description="", parser=None): - self.name = name - self.description = description - self.options = [] - self.parser = parser - - def addoption(self, *optnames, **attrs): - """ add an option to this group. """ - option = optparse.Option(*optnames, **attrs) - self._addoption_instance(option, shortupper=False) - - def _addoption(self, *optnames, **attrs): - option = optparse.Option(*optnames, **attrs) - self._addoption_instance(option, shortupper=True) - - def _addoption_instance(self, option, shortupper=False): - if not shortupper: - for opt in option._short_opts: - if opt[0] == '-' and opt[1].islower(): - raise ValueError("lowercase shortoptions reserved") - if self.parser: - self.parser.processoption(option) - self.options.append(option) - - -class MyOptionParser(optparse.OptionParser): - def __init__(self, parser): - self._parser = parser - optparse.OptionParser.__init__(self, usage=parser._usage) - def format_epilog(self, formatter): - hints = self._parser.hints - if hints: - s = "\n".join(["hint: " + x for x in hints]) + "\n" - s = "\n" + s + "\n" - return s - return "" diff --git a/py/_test/pluginmanager.py b/py/_test/pluginmanager.py deleted file mode 100644 index d1b6734213..0000000000 --- a/py/_test/pluginmanager.py +++ /dev/null @@ -1,353 +0,0 @@ -""" -managing loading and interacting with pytest plugins. 
-""" -import py -import inspect -from py._plugin import hookspec - -default_plugins = ( - "default runner capture mark terminal skipping tmpdir monkeypatch " - "recwarn pdb pastebin unittest helpconfig nose assertion genscript " - "junitxml doctest").split() - -def check_old_use(mod, modname): - clsname = modname[len('pytest_'):].capitalize() + "Plugin" - assert not hasattr(mod, clsname), (mod, clsname) - -class PluginManager(object): - def __init__(self): - self.registry = Registry() - self._name2plugin = {} - self._hints = [] - self.hook = HookRelay([hookspec], registry=self.registry) - self.register(self) - for spec in default_plugins: - self.import_plugin(spec) - - def _getpluginname(self, plugin, name): - if name is None: - if hasattr(plugin, '__name__'): - name = plugin.__name__.split(".")[-1] - else: - name = id(plugin) - return name - - def register(self, plugin, name=None): - assert not self.isregistered(plugin), plugin - assert not self.registry.isregistered(plugin), plugin - name = self._getpluginname(plugin, name) - if name in self._name2plugin: - return False - self._name2plugin[name] = plugin - self.call_plugin(plugin, "pytest_addhooks", {'pluginmanager': self}) - self.hook.pytest_plugin_registered(manager=self, plugin=plugin) - self.registry.register(plugin) - return True - - def unregister(self, plugin): - self.hook.pytest_plugin_unregistered(plugin=plugin) - self.registry.unregister(plugin) - for name, value in list(self._name2plugin.items()): - if value == plugin: - del self._name2plugin[name] - - def isregistered(self, plugin, name=None): - if self._getpluginname(plugin, name) in self._name2plugin: - return True - for val in self._name2plugin.values(): - if plugin == val: - return True - - def addhooks(self, spec): - self.hook._addhooks(spec, prefix="pytest_") - - def getplugins(self): - return list(self.registry) - - def skipifmissing(self, name): - if not self.hasplugin(name): - py.test.skip("plugin %r is missing" % name) - - def hasplugin(self, name): - try: - self.getplugin(name) - except KeyError: - return False - else: - return True - - def getplugin(self, name): - try: - return self._name2plugin[name] - except KeyError: - impname = canonical_importname(name) - return self._name2plugin[impname] - - # API for bootstrapping - # - def _envlist(self, varname): - val = py.std.os.environ.get(varname, None) - if val is not None: - return val.split(',') - return () - - def consider_env(self): - for spec in self._envlist("PYTEST_PLUGINS"): - self.import_plugin(spec) - - def consider_setuptools_entrypoints(self): - try: - from pkg_resources import iter_entry_points - except ImportError: - return # XXX issue a warning - for ep in iter_entry_points('pytest11'): - name = canonical_importname(ep.name) - if name in self._name2plugin: - continue - plugin = ep.load() - self.register(plugin, name=name) - - def consider_preparse(self, args): - for opt1,opt2 in zip(args, args[1:]): - if opt1 == "-p": - self.import_plugin(opt2) - - def consider_conftest(self, conftestmodule): - cls = getattr(conftestmodule, 'ConftestPlugin', None) - if cls is not None: - raise ValueError("%r: 'ConftestPlugins' only existed till 1.0.0b1, " - "were removed in 1.0.0b2" % (cls,)) - if self.register(conftestmodule, name=conftestmodule.__file__): - self.consider_module(conftestmodule) - - def consider_module(self, mod): - attr = getattr(mod, "pytest_plugins", ()) - if attr: - if not isinstance(attr, (list, tuple)): - attr = (attr,) - for spec in attr: - self.import_plugin(spec) - - def import_plugin(self, 
spec): - assert isinstance(spec, str) - modname = canonical_importname(spec) - if modname in self._name2plugin: - return - try: - mod = importplugin(modname) - except KeyboardInterrupt: - raise - except py.test.skip.Exception: - e = py.std.sys.exc_info()[1] - self._hints.append("skipped plugin %r: %s" %((modname, e.msg))) - else: - check_old_use(mod, modname) - self.register(mod) - self.consider_module(mod) - - def pytest_terminal_summary(self, terminalreporter): - tw = terminalreporter._tw - if terminalreporter.config.option.traceconfig: - for hint in self._hints: - tw.line("hint: %s" % hint) - - # - # - # API for interacting with registered and instantiated plugin objects - # - # - def listattr(self, attrname, plugins=None): - return self.registry.listattr(attrname, plugins=plugins) - - def notify_exception(self, excinfo=None): - if excinfo is None: - excinfo = py.code.ExceptionInfo() - excrepr = excinfo.getrepr(funcargs=True, showlocals=True) - return self.hook.pytest_internalerror(excrepr=excrepr) - - def do_addoption(self, parser): - mname = "pytest_addoption" - methods = self.registry.listattr(mname, reverse=True) - mc = MultiCall(methods, {'parser': parser}) - mc.execute() - - def pytest_plugin_registered(self, plugin): - dic = self.call_plugin(plugin, "pytest_namespace", {}) or {} - for name, value in dic.items(): - setattr(py.test, name, value) - py.test.__all__.append(name) - if hasattr(self, '_config'): - self.call_plugin(plugin, "pytest_addoption", - {'parser': self._config._parser}) - self.call_plugin(plugin, "pytest_configure", - {'config': self._config}) - - def call_plugin(self, plugin, methname, kwargs): - return MultiCall( - methods=self.listattr(methname, plugins=[plugin]), - kwargs=kwargs, firstresult=True).execute() - - def do_configure(self, config): - assert not hasattr(self, '_config') - self._config = config - config.hook.pytest_configure(config=self._config) - - def do_unconfigure(self, config): - config = self._config - del self._config - config.hook.pytest_unconfigure(config=config) - config.pluginmanager.unregister(self) - -def canonical_importname(name): - name = name.lower() - modprefix = "pytest_" - if not name.startswith(modprefix): - name = modprefix + name - return name - -def importplugin(importspec): - try: - return __import__(importspec) - except ImportError: - e = py.std.sys.exc_info()[1] - if str(e).find(importspec) == -1: - raise - try: - return __import__("py._plugin.%s" %(importspec), - None, None, '__doc__') - except ImportError: - e = py.std.sys.exc_info()[1] - if str(e).find(importspec) == -1: - raise - # show the original exception, not the failing internal one - return __import__(importspec) - - -class MultiCall: - """ execute a call into multiple python functions/methods. 
""" - - def __init__(self, methods, kwargs, firstresult=False): - self.methods = methods[:] - self.kwargs = kwargs.copy() - self.kwargs['__multicall__'] = self - self.results = [] - self.firstresult = firstresult - - def __repr__(self): - status = "%d results, %d meths" % (len(self.results), len(self.methods)) - return "<MultiCall %s, kwargs=%r>" %(status, self.kwargs) - - def execute(self): - while self.methods: - method = self.methods.pop() - kwargs = self.getkwargs(method) - res = method(**kwargs) - if res is not None: - self.results.append(res) - if self.firstresult: - return res - if not self.firstresult: - return self.results - - def getkwargs(self, method): - kwargs = {} - for argname in varnames(method): - try: - kwargs[argname] = self.kwargs[argname] - except KeyError: - pass # might be optional param - return kwargs - -def varnames(func): - ismethod = inspect.ismethod(func) - rawcode = py.code.getrawcode(func) - try: - return rawcode.co_varnames[ismethod:] - except AttributeError: - return () - -class Registry: - """ - Manage Plugins: register/unregister call calls to plugins. - """ - def __init__(self, plugins=None): - if plugins is None: - plugins = [] - self._plugins = plugins - - def register(self, plugin): - assert not isinstance(plugin, str) - assert not plugin in self._plugins - self._plugins.append(plugin) - - def unregister(self, plugin): - self._plugins.remove(plugin) - - def isregistered(self, plugin): - return plugin in self._plugins - - def __iter__(self): - return iter(self._plugins) - - def listattr(self, attrname, plugins=None, reverse=False): - l = [] - if plugins is None: - plugins = self._plugins - for plugin in plugins: - try: - l.append(getattr(plugin, attrname)) - except AttributeError: - continue - if reverse: - l.reverse() - return l - -class HookRelay: - def __init__(self, hookspecs, registry, prefix="pytest_"): - if not isinstance(hookspecs, list): - hookspecs = [hookspecs] - self._hookspecs = [] - self._registry = registry - for hookspec in hookspecs: - self._addhooks(hookspec, prefix) - - def _addhooks(self, hookspecs, prefix): - self._hookspecs.append(hookspecs) - added = False - for name, method in vars(hookspecs).items(): - if name.startswith(prefix): - if not method.__doc__: - raise ValueError("docstring required for hook %r, in %r" - % (method, hookspecs)) - firstresult = getattr(method, 'firstresult', False) - hc = HookCaller(self, name, firstresult=firstresult) - setattr(self, name, hc) - added = True - #print ("setting new hook", name) - if not added: - raise ValueError("did not find new %r hooks in %r" %( - prefix, hookspecs,)) - - - def _performcall(self, name, multicall): - return multicall.execute() - -class HookCaller: - def __init__(self, hookrelay, name, firstresult): - self.hookrelay = hookrelay - self.name = name - self.firstresult = firstresult - - def __repr__(self): - return "<HookCaller %r>" %(self.name,) - - def __call__(self, **kwargs): - methods = self.hookrelay._registry.listattr(self.name) - mc = MultiCall(methods, kwargs, firstresult=self.firstresult) - return self.hookrelay._performcall(self.name, mc) - - def pcall(self, plugins, **kwargs): - methods = self.hookrelay._registry.listattr(self.name, plugins=plugins) - mc = MultiCall(methods, kwargs, firstresult=self.firstresult) - return self.hookrelay._performcall(self.name, mc) - diff --git a/py/_test/pycollect.py b/py/_test/pycollect.py deleted file mode 100644 index eb60f9e853..0000000000 --- a/py/_test/pycollect.py +++ /dev/null @@ -1,399 +0,0 @@ -""" -Python related 
collection nodes. -""" -import py -import inspect -from py._test.collect import configproperty, warnoldcollect -from py._test import funcargs -from py._code.code import TerminalRepr - -class PyobjMixin(object): - def obj(): - def fget(self): - try: - return self._obj - except AttributeError: - self._obj = obj = self._getobj() - return obj - def fset(self, value): - self._obj = value - return property(fget, fset, None, "underlying python object") - obj = obj() - - def _getobj(self): - return getattr(self.parent.obj, self.name) - - def getmodpath(self, stopatmodule=True, includemodule=False): - """ return python path relative to the containing module. """ - chain = self.listchain() - chain.reverse() - parts = [] - for node in chain: - if isinstance(node, Instance): - continue - name = node.name - if isinstance(node, Module): - assert name.endswith(".py") - name = name[:-3] - if stopatmodule: - if includemodule: - parts.append(name) - break - parts.append(name) - parts.reverse() - s = ".".join(parts) - return s.replace(".[", "[") - - def _getfslineno(self): - try: - return self._fslineno - except AttributeError: - pass - obj = self.obj - # xxx let decorators etc specify a sane ordering - if hasattr(obj, 'place_as'): - obj = obj.place_as - - self._fslineno = py.code.getfslineno(obj) - return self._fslineno - - def reportinfo(self): - fspath, lineno = self._getfslineno() - modpath = self.getmodpath() - return fspath, lineno, modpath - -class PyCollectorMixin(PyobjMixin, py.test.collect.Collector): - Class = configproperty('Class') - Instance = configproperty('Instance') - Function = configproperty('Function') - Generator = configproperty('Generator') - - def funcnamefilter(self, name): - return name.startswith('test') - def classnamefilter(self, name): - return name.startswith('Test') - - def collect(self): - l = self._deprecated_collect() - if l is not None: - return l - # NB. 
we avoid random getattrs and peek in the __dict__ instead - dicts = [getattr(self.obj, '__dict__', {})] - for basecls in inspect.getmro(self.obj.__class__): - dicts.append(basecls.__dict__) - seen = {} - l = [] - for dic in dicts: - for name, obj in dic.items(): - if name in seen: - continue - seen[name] = True - if name[0] != "_": - res = self.makeitem(name, obj) - if res is None: - continue - if not isinstance(res, list): - res = [res] - l.extend(res) - l.sort(key=lambda item: item.reportinfo()[:2]) - return l - - def _deprecated_join(self, name): - if self.__class__.join != py.test.collect.Collector.join: - warnoldcollect() - return self.join(name) - - def makeitem(self, name, obj): - return self.ihook.pytest_pycollect_makeitem( - collector=self, name=name, obj=obj) - - def _istestclasscandidate(self, name, obj): - if self.classnamefilter(name) and \ - inspect.isclass(obj): - if hasinit(obj): - # XXX WARN - return False - return True - - def _genfunctions(self, name, funcobj): - module = self.getparent(Module).obj - clscol = self.getparent(Class) - cls = clscol and clscol.obj or None - metafunc = funcargs.Metafunc(funcobj, config=self.config, - cls=cls, module=module) - gentesthook = self.config.hook.pytest_generate_tests - plugins = funcargs.getplugins(self, withpy=True) - gentesthook.pcall(plugins, metafunc=metafunc) - if not metafunc._calls: - return self.Function(name, parent=self) - l = [] - for callspec in metafunc._calls: - subname = "%s[%s]" %(name, callspec.id) - function = self.Function(name=subname, parent=self, - callspec=callspec, callobj=funcobj) - l.append(function) - return l - -class Module(py.test.collect.File, PyCollectorMixin): - def _getobj(self): - return self._memoizedcall('_obj', self._importtestmodule) - - def _importtestmodule(self): - # we assume we are only called once per module - mod = self.fspath.pyimport() - #print "imported test module", mod - self.config.pluginmanager.consider_module(mod) - return mod - - def setup(self): - if getattr(self.obj, 'disabled', 0): - py.log._apiwarn(">1.1.1", "%r uses 'disabled' which is deprecated, " - "use pytestmark=..., see pytest_skipping plugin" % (self.obj,)) - py.test.skip("%r is disabled" %(self.obj,)) - if hasattr(self.obj, 'setup_module'): - #XXX: nose compat hack, move to nose plugin - # if it takes a positional arg, its probably a py.test style one - # so we pass the current module object - if inspect.getargspec(self.obj.setup_module)[0]: - self.obj.setup_module(self.obj) - else: - self.obj.setup_module() - - def teardown(self): - if hasattr(self.obj, 'teardown_module'): - #XXX: nose compat hack, move to nose plugin - # if it takes a positional arg, its probably a py.test style one - # so we pass the current module object - if inspect.getargspec(self.obj.teardown_module)[0]: - self.obj.teardown_module(self.obj) - else: - self.obj.teardown_module() - -class Class(PyCollectorMixin, py.test.collect.Collector): - - def collect(self): - l = self._deprecated_collect() - if l is not None: - return l - return [self.Instance(name="()", parent=self)] - - def setup(self): - if getattr(self.obj, 'disabled', 0): - py.log._apiwarn(">1.1.1", "%r uses 'disabled' which is deprecated, " - "use pytestmark=..., see pytest_skipping plugin" % (self.obj,)) - py.test.skip("%r is disabled" %(self.obj,)) - setup_class = getattr(self.obj, 'setup_class', None) - if setup_class is not None: - setup_class = getattr(setup_class, 'im_func', setup_class) - setup_class(self.obj) - - def teardown(self): - teardown_class = getattr(self.obj, 
'teardown_class', None) - if teardown_class is not None: - teardown_class = getattr(teardown_class, 'im_func', teardown_class) - teardown_class(self.obj) - -class Instance(PyCollectorMixin, py.test.collect.Collector): - def _getobj(self): - return self.parent.obj() - def Function(self): - return getattr(self.obj, 'Function', - PyCollectorMixin.Function.__get__(self)) # XXX for python 2.2 - def _keywords(self): - return [] - Function = property(Function) - - #def __repr__(self): - # return "<%s of '%s'>" %(self.__class__.__name__, - # self.parent.obj.__name__) - - def newinstance(self): - self.obj = self._getobj() - return self.obj - -class FunctionMixin(PyobjMixin): - """ mixin for the code common to Function and Generator. - """ - - def setup(self): - """ perform setup for this test function. """ - if inspect.ismethod(self.obj): - name = 'setup_method' - else: - name = 'setup_function' - if isinstance(self.parent, Instance): - obj = self.parent.newinstance() - self.obj = self._getobj() - else: - obj = self.parent.obj - setup_func_or_method = getattr(obj, name, None) - if setup_func_or_method is not None: - setup_func_or_method(self.obj) - - def teardown(self): - """ perform teardown for this test function. """ - if inspect.ismethod(self.obj): - name = 'teardown_method' - else: - name = 'teardown_function' - obj = self.parent.obj - teardown_func_or_meth = getattr(obj, name, None) - if teardown_func_or_meth is not None: - teardown_func_or_meth(self.obj) - - def _prunetraceback(self, traceback): - if hasattr(self, '_obj') and not self.config.option.fulltrace: - code = py.code.Code(self.obj) - path, firstlineno = code.path, code.firstlineno - ntraceback = traceback.cut(path=path, firstlineno=firstlineno) - if ntraceback == traceback: - ntraceback = ntraceback.cut(path=path) - if ntraceback == traceback: - ntraceback = ntraceback.cut(excludepath=py._pydir) - traceback = ntraceback.filter() - return traceback - - def _repr_failure_py(self, excinfo, style="long"): - if excinfo.errisinstance(funcargs.FuncargRequest.LookupError): - fspath, lineno, msg = self.reportinfo() - lines, _ = inspect.getsourcelines(self.obj) - for i, line in enumerate(lines): - if line.strip().startswith('def'): - return FuncargLookupErrorRepr(fspath, lineno, - lines[:i+1], str(excinfo.value)) - return super(FunctionMixin, self)._repr_failure_py(excinfo, - style=style) - - def repr_failure(self, excinfo, outerr=None): - assert outerr is None, "XXX outerr usage is deprecated" - return self._repr_failure_py(excinfo, - style=self.config.getvalue("tbstyle")) - - shortfailurerepr = "F" - -class FuncargLookupErrorRepr(TerminalRepr): - def __init__(self, filename, firstlineno, deflines, errorstring): - self.deflines = deflines - self.errorstring = errorstring - self.filename = filename - self.firstlineno = firstlineno - - def toterminal(self, tw): - tw.line() - for line in self.deflines: - tw.line(" " + line.strip()) - for line in self.errorstring.split("\n"): - tw.line(" " + line.strip(), red=True) - tw.line() - tw.line("%s:%d" % (self.filename, self.firstlineno+1)) - -class Generator(FunctionMixin, PyCollectorMixin, py.test.collect.Collector): - def collect(self): - # test generators are seen as collectors but they also - # invoke setup/teardown on popular request - # (induced by the common "test_*" naming shared with normal tests) - self.config._setupstate.prepare(self) - l = [] - seen = {} - for i, x in enumerate(self.obj()): - name, call, args = self.getcallargs(x) - if not py.builtin.callable(call): - raise TypeError("%r 
yielded non callable test %r" %(self.obj, call,)) - if name is None: - name = "[%d]" % i - else: - name = "['%s']" % name - if name in seen: - raise ValueError("%r generated tests with non-unique name %r" %(self, name)) - seen[name] = True - l.append(self.Function(name, self, args=args, callobj=call)) - return l - - def getcallargs(self, obj): - if not isinstance(obj, (tuple, list)): - obj = (obj,) - # explict naming - if isinstance(obj[0], py.builtin._basestring): - name = obj[0] - obj = obj[1:] - else: - name = None - call, args = obj[0], obj[1:] - return name, call, args - - -# -# Test Items -# -_dummy = object() -class Function(FunctionMixin, py.test.collect.Item): - """ a Function Item is responsible for setting up - and executing a Python callable test object. - """ - _genid = None - def __init__(self, name, parent=None, args=None, config=None, - callspec=None, callobj=_dummy): - super(Function, self).__init__(name, parent, config=config) - self._args = args - if self._isyieldedfunction(): - assert not callspec, "yielded functions (deprecated) cannot have funcargs" - else: - if callspec is not None: - self.funcargs = callspec.funcargs or {} - self._genid = callspec.id - if hasattr(callspec, "param"): - self._requestparam = callspec.param - else: - self.funcargs = {} - if callobj is not _dummy: - self._obj = callobj - self.function = getattr(self.obj, 'im_func', self.obj) - - def _getobj(self): - name = self.name - i = name.find("[") # parametrization - if i != -1: - name = name[:i] - return getattr(self.parent.obj, name) - - def _isyieldedfunction(self): - return self._args is not None - - def readkeywords(self): - d = super(Function, self).readkeywords() - d.update(py.builtin._getfuncdict(self.obj)) - return d - - def runtest(self): - """ execute the underlying test function. """ - self.ihook.pytest_pyfunc_call(pyfuncitem=self) - - def setup(self): - super(Function, self).setup() - if hasattr(self, 'funcargs'): - funcargs.fillfuncargs(self) - - def __eq__(self, other): - try: - return (self.name == other.name and - self._args == other._args and - self.parent == other.parent and - self.obj == other.obj and - getattr(self, '_genid', None) == - getattr(other, '_genid', None) - ) - except AttributeError: - pass - return False - - def __ne__(self, other): - return not self == other - - def __hash__(self): - return hash((self.parent, self.name)) - -def hasinit(obj): - init = getattr(obj, '__init__', None) - if init: - if init != object.__init__: - return True diff --git a/py/_test/session.py b/py/_test/session.py deleted file mode 100644 index 66ea640ab0..0000000000 --- a/py/_test/session.py +++ /dev/null @@ -1,135 +0,0 @@ -""" basic test session implementation. - -* drives collection of tests -* triggers executions of tests -* produces events used by reporting -""" - -import py - -# exitcodes for the command line -EXIT_OK = 0 -EXIT_TESTSFAILED = 1 -EXIT_INTERRUPTED = 2 -EXIT_INTERNALERROR = 3 -EXIT_NOHOSTS = 4 - -# imports used for genitems() -Item = py.test.collect.Item -Collector = py.test.collect.Collector - -class Session(object): - nodeid = "" - class Interrupted(KeyboardInterrupt): - """ signals an interrupted test run. 
""" - __module__ = 'builtins' # for py3 - - def __init__(self, config): - self.config = config - self.pluginmanager = config.pluginmanager # shortcut - self.pluginmanager.register(self) - self._testsfailed = 0 - self._nomatch = False - self.shouldstop = False - - def genitems(self, colitems, keywordexpr=None): - """ yield Items from iterating over the given colitems. """ - if colitems: - colitems = list(colitems) - while colitems: - next = colitems.pop(0) - if isinstance(next, (tuple, list)): - colitems[:] = list(next) + colitems - continue - assert self.pluginmanager is next.config.pluginmanager - if isinstance(next, Item): - remaining = self.filteritems([next]) - if remaining: - self.config.hook.pytest_itemstart(item=next) - yield next - else: - assert isinstance(next, Collector) - self.config.hook.pytest_collectstart(collector=next) - rep = self.config.hook.pytest_make_collect_report(collector=next) - if rep.passed: - for x in self.genitems(rep.result, keywordexpr): - yield x - self.config.hook.pytest_collectreport(report=rep) - if self.shouldstop: - raise self.Interrupted(self.shouldstop) - - def filteritems(self, colitems): - """ return items to process (some may be deselected)""" - keywordexpr = self.config.option.keyword - if not keywordexpr or self._nomatch: - return colitems - if keywordexpr[-1] == ":": - keywordexpr = keywordexpr[:-1] - remaining = [] - deselected = [] - for colitem in colitems: - if isinstance(colitem, Item): - if colitem._skipbykeyword(keywordexpr): - deselected.append(colitem) - continue - remaining.append(colitem) - if deselected: - self.config.hook.pytest_deselected(items=deselected) - if self.config.option.keyword.endswith(":"): - self._nomatch = True - return remaining - - def collect(self, colitems): - keyword = self.config.option.keyword - for x in self.genitems(colitems, keyword): - yield x - - def sessionstarts(self): - """ setup any neccessary resources ahead of the test run. """ - self.config.hook.pytest_sessionstart(session=self) - - def pytest_runtest_logreport(self, report): - if report.failed: - self._testsfailed += 1 - maxfail = self.config.getvalue("maxfail") - if maxfail and self._testsfailed >= maxfail: - self.shouldstop = "stopping after %d failures" % ( - self._testsfailed) - pytest_collectreport = pytest_runtest_logreport - - def sessionfinishes(self, exitstatus): - """ teardown any resources after a test run. """ - self.config.hook.pytest_sessionfinish( - session=self, - exitstatus=exitstatus, - ) - - def main(self, colitems): - """ main loop for running tests. 
""" - self.shouldstop = False - self.sessionstarts() - exitstatus = EXIT_OK - try: - self._mainloop(colitems) - if self._testsfailed: - exitstatus = EXIT_TESTSFAILED - self.sessionfinishes(exitstatus=exitstatus) - except KeyboardInterrupt: - excinfo = py.code.ExceptionInfo() - self.config.hook.pytest_keyboard_interrupt(excinfo=excinfo) - exitstatus = EXIT_INTERRUPTED - except: - excinfo = py.code.ExceptionInfo() - self.config.pluginmanager.notify_exception(excinfo) - exitstatus = EXIT_INTERNALERROR - if exitstatus in (EXIT_INTERNALERROR, EXIT_INTERRUPTED): - self.sessionfinishes(exitstatus=exitstatus) - return exitstatus - - def _mainloop(self, colitems): - for item in self.collect(colitems): - if not self.config.option.collectonly: - item.config.hook.pytest_runtest_protocol(item=item) - if self.shouldstop: - raise self.Interrupted(self.shouldstop) - diff --git a/py/_xmlgen.py b/py/_xmlgen.py index 854fbccae3..1bcf959ac5 100644 --- a/py/_xmlgen.py +++ b/py/_xmlgen.py @@ -1,14 +1,14 @@ """ module for generating and serializing xml and html structures -by using simple python objects. +by using simple python objects. (c) holger krekel, holger at merlinux eu. 2009 -""" +""" import py import sys, re if sys.version_info >= (3,0): - def u(s): + def u(s): return s def unicode(x): if hasattr(x, '__unicode__'): @@ -17,64 +17,64 @@ if sys.version_info >= (3,0): else: def u(s): return unicode(s) - unicode = unicode - - -class NamespaceMetaclass(type): - def __getattr__(self, name): - if name[:1] == '_': - raise AttributeError(name) - if self == Namespace: - raise ValueError("Namespace class is abstract") + unicode = unicode + + +class NamespaceMetaclass(type): + def __getattr__(self, name): + if name[:1] == '_': + raise AttributeError(name) + if self == Namespace: + raise ValueError("Namespace class is abstract") tagspec = self.__tagspec__ - if tagspec is not None and name not in tagspec: - raise AttributeError(name) + if tagspec is not None and name not in tagspec: + raise AttributeError(name) classattr = {} - if self.__stickyname__: - classattr['xmlname'] = name - cls = type(name, (self.__tagclass__,), classattr) - setattr(self, name, cls) - return cls + if self.__stickyname__: + classattr['xmlname'] = name + cls = type(name, (self.__tagclass__,), classattr) + setattr(self, name, cls) + return cls class Tag(list): - class Attr(object): - def __init__(self, **kwargs): - self.__dict__.update(kwargs) + class Attr(object): + def __init__(self, **kwargs): + self.__dict__.update(kwargs) def __init__(self, *args, **kwargs): super(Tag, self).__init__(args) - self.attr = self.Attr(**kwargs) + self.attr = self.Attr(**kwargs) def __unicode__(self): - return self.unicode(indent=0) + return self.unicode(indent=0) __str__ = __unicode__ def unicode(self, indent=2): l = [] - SimpleUnicodeVisitor(l.append, indent).visit(self) - return "".join(l) + SimpleUnicodeVisitor(l.append, indent).visit(self) + return "".join(l) def __repr__(self): - name = self.__class__.__name__ + name = self.__class__.__name__ return "<%r tag object %d>" % (name, id(self)) - + Namespace = NamespaceMetaclass('Namespace', (object, ), { - '__tagspec__': None, - '__tagclass__': Tag, - '__stickyname__': False, + '__tagspec__': None, + '__tagclass__': Tag, + '__stickyname__': False, }) -class HtmlTag(Tag): +class HtmlTag(Tag): def unicode(self, indent=2): l = [] - HtmlVisitor(l.append, indent, shortempty=False).visit(self) - return u("").join(l) + HtmlVisitor(l.append, indent, shortempty=False).visit(self) + return u("").join(l) -# exported plain 
html namespace +# exported plain html namespace class html(Namespace): __tagclass__ = HtmlTag - __stickyname__ = True - __tagspec__ = dict([(x,1) for x in ( + __stickyname__ = True + __tagspec__ = dict([(x,1) for x in ( 'a,abbr,acronym,address,applet,area,b,bdo,big,blink,' 'blockquote,body,br,button,caption,center,cite,code,col,' 'colgroup,comment,dd,del,dfn,dir,div,dl,dt,em,embed,' @@ -87,11 +87,11 @@ class html(Namespace): 'base,basefont,frame,hr,isindex,param,samp,var' ).split(',') if x]) - class Style(object): - def __init__(self, **kw): + class Style(object): + def __init__(self, **kw): for x, y in kw.items(): x = x.replace('_', '-') - setattr(self, x, y) + setattr(self, x, y) class raw(object): @@ -102,94 +102,94 @@ class raw(object): class SimpleUnicodeVisitor(object): """ recursive visitor to write unicode. """ - def __init__(self, write, indent=0, curindent=0, shortempty=True): + def __init__(self, write, indent=0, curindent=0, shortempty=True): self.write = write self.cache = {} self.visited = {} # for detection of recursion - self.indent = indent + self.indent = indent self.curindent = curindent self.parents = [] - self.shortempty = shortempty # short empty tags or not + self.shortempty = shortempty # short empty tags or not - def visit(self, node): + def visit(self, node): """ dispatcher on node's class/bases name. """ cls = node.__class__ try: - visitmethod = self.cache[cls] + visitmethod = self.cache[cls] except KeyError: - for subclass in cls.__mro__: + for subclass in cls.__mro__: visitmethod = getattr(self, subclass.__name__, None) if visitmethod is not None: break else: - visitmethod = self.object + visitmethod = self.object self.cache[cls] = visitmethod - visitmethod(node) + visitmethod(node) def object(self, obj): - #self.write(obj) + #self.write(obj) self.write(escape(unicode(obj))) def raw(self, obj): - self.write(obj.uniobj) + self.write(obj.uniobj) - def list(self, obj): + def list(self, obj): assert id(obj) not in self.visited self.visited[id(obj)] = 1 - map(self.visit, obj) + map(self.visit, obj) def Tag(self, tag): assert id(tag) not in self.visited - try: + try: tag.parent = self.parents[-1] - except IndexError: - tag.parent = None + except IndexError: + tag.parent = None self.visited[id(tag)] = 1 tagname = getattr(tag, 'xmlname', tag.__class__.__name__) if self.curindent and not self._isinline(tagname): - self.write("\n" + u(' ') * self.curindent) + self.write("\n" + u(' ') * self.curindent) if tag: - self.curindent += self.indent + self.curindent += self.indent self.write(u('<%s%s>') % (tagname, self.attributes(tag))) - self.parents.append(tag) + self.parents.append(tag) for x in tag: self.visit(x) - self.parents.pop() - self.write(u('</%s>') % tagname) - self.curindent -= self.indent + self.parents.pop() + self.write(u('</%s>') % tagname) + self.curindent -= self.indent else: - nameattr = tagname+self.attributes(tag) - if self._issingleton(tagname): + nameattr = tagname+self.attributes(tag) + if self._issingleton(tagname): self.write(u('<%s/>') % (nameattr,)) - else: + else: self.write(u('<%s></%s>') % (nameattr, tagname)) def attributes(self, tag): # serialize attributes - attrlist = dir(tag.attr) - attrlist.sort() + attrlist = dir(tag.attr) + attrlist.sort() l = [] - for name in attrlist: + for name in attrlist: res = self.repr_attribute(tag.attr, name) - if res is not None: - l.append(res) + if res is not None: + l.append(res) l.extend(self.getstyle(tag)) return u("").join(l) - def repr_attribute(self, attrs, name): - if name[:2] != '__': - value = 
getattr(attrs, name) - if name.endswith('_'): + def repr_attribute(self, attrs, name): + if name[:2] != '__': + value = getattr(attrs, name) + if name.endswith('_'): name = name[:-1] return ' %s="%s"' % (name, escape(unicode(value))) - def getstyle(self, tag): - """ return attribute list suitable for styling. """ - try: + def getstyle(self, tag): + """ return attribute list suitable for styling. """ + try: styledict = tag.style.__dict__ - except AttributeError: - return [] - else: + except AttributeError: + return [] + else: stylelist = [x+': ' + y for x,y in styledict.items()] return [u(' style="%s"') % u('; ').join(stylelist)] @@ -201,9 +201,9 @@ class SimpleUnicodeVisitor(object): """can (and will) be overridden in subclasses""" return False -class HtmlVisitor(SimpleUnicodeVisitor): - - single = dict([(x, 1) for x in +class HtmlVisitor(SimpleUnicodeVisitor): + + single = dict([(x, 1) for x in ('br,img,area,param,col,hr,meta,link,base,' 'input,frame').split(',')]) inline = dict([(x, 1) for x in @@ -211,12 +211,12 @@ class HtmlVisitor(SimpleUnicodeVisitor): 'i img input kbd label q s samp select small span strike ' 'strong sub sup textarea tt u var'.split(' '))]) - def repr_attribute(self, attrs, name): + def repr_attribute(self, attrs, name): if name == 'class_': - value = getattr(attrs, name) - if value is None: + value = getattr(attrs, name) + if value is None: return - return super(HtmlVisitor, self).repr_attribute(attrs, name) + return super(HtmlVisitor, self).repr_attribute(attrs, name) def _issingleton(self, tagname): return tagname in self.single @@ -224,11 +224,11 @@ class HtmlVisitor(SimpleUnicodeVisitor): def _isinline(self, tagname): return tagname in self.inline - + class _escape: def __init__(self): self.escape = { - u('"') : u('"'), u('<') : u('<'), u('>') : u('>'), + u('"') : u('"'), u('<') : u('<'), u('>') : u('>'), u('&') : u('&'), u("'") : u('''), } self.charef_rex = re.compile(u("|").join(self.escape.keys())) diff --git a/py/bin/_findpy.py b/py/bin/_findpy.py deleted file mode 100644 index c0c64581e0..0000000000 --- a/py/bin/_findpy.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python - -# -# find and import a version of 'py' -# -import sys -import os -from os.path import dirname as opd, exists, join, basename, abspath - -def searchpy(current): - while 1: - last = current - initpy = join(current, '__init__.py') - if not exists(initpy): - pydir = join(current, 'py') - # recognize py-package and ensure it is importable - if exists(pydir) and exists(join(pydir, '__init__.py')): - #for p in sys.path: - # if p == current: - # return True - if current != sys.path[0]: # if we are already first, then ok - sys.stderr.write("inserting into sys.path: %s\n" % current) - sys.path.insert(0, current) - return True - current = opd(current) - if last == current: - return False - -if not searchpy(abspath(os.curdir)): - if not searchpy(opd(abspath(sys.argv[0]))): - if not searchpy(opd(__file__)): - pass # let's hope it is just on sys.path - -import py - -if __name__ == '__main__': - print ("py lib is at %s" % py.__file__) diff --git a/py/bin/env.cmd b/py/bin/env.cmd deleted file mode 100644 index 1a59b40752..0000000000 --- a/py/bin/env.cmd +++ /dev/null @@ -1,2 +0,0 @@ -@echo off -for /F "usebackq delims=" %%i in (`python "%~dp0\env.py"`) do %%i diff --git a/py/bin/env.py b/py/bin/env.py deleted file mode 100644 index 09c6c7de34..0000000000 --- a/py/bin/env.py +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env python - -import sys, os, os.path - -progpath = sys.argv[0] -packagedir = 
os.path.dirname(os.path.dirname(os.path.abspath(progpath))) -packagename = os.path.basename(packagedir) -bindir = os.path.join(packagedir, 'bin') -if sys.platform == 'win32': - bindir = os.path.join(bindir, 'win32') -rootdir = os.path.dirname(packagedir) - -def prepend_path(name, value): - sep = os.path.pathsep - curpath = os.environ.get(name, '') - newpath = [value] + [ x for x in curpath.split(sep) if x and x != value ] - return setenv(name, sep.join(newpath)) - -def setenv(name, value): - shell = os.environ.get('SHELL', '') - comspec = os.environ.get('COMSPEC', '') - if shell.endswith('csh'): - cmd = 'setenv %s "%s"' % (name, value) - elif shell.endswith('sh'): - cmd = '%s="%s"; export %s' % (name, value, name) - elif comspec.endswith('cmd.exe'): - cmd = 'set %s=%s' % (name, value) - else: - assert False, 'Shell not supported.' - return cmd - -print(prepend_path('PATH', bindir)) -print(prepend_path('PYTHONPATH', rootdir)) diff --git a/py/bin/py.cleanup b/py/bin/py.cleanup deleted file mode 100755 index b285100288..0000000000 --- a/py/bin/py.cleanup +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env python -from _findpy import py -py.cmdline.pycleanup()
\ No newline at end of file
diff --git a/py/bin/py.convert_unittest b/py/bin/py.convert_unittest
deleted file mode 100755
index 3d1382ac74..0000000000
--- a/py/bin/py.convert_unittest
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/env python
-from _findpy import py
-py.cmdline.pyconvert_unittest()
\ No newline at end of file
diff --git a/py/bin/py.countloc b/py/bin/py.countloc
deleted file mode 100755
index cf06529b96..0000000000
--- a/py/bin/py.countloc
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/env python
-from _findpy import py
-py.cmdline.pycountloc()
\ No newline at end of file
diff --git a/py/bin/py.lookup b/py/bin/py.lookup
deleted file mode 100755
index c4af1cfbf4..0000000000
--- a/py/bin/py.lookup
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/env python
-from _findpy import py
-py.cmdline.pylookup()
\ No newline at end of file
diff --git a/py/bin/py.svnwcrevert b/py/bin/py.svnwcrevert
deleted file mode 100755
index 67a1ec7f62..0000000000
--- a/py/bin/py.svnwcrevert
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/env python
-from _findpy import py
-py.cmdline.pysvnwcrevert()
\ No newline at end of file
diff --git a/py/bin/py.test b/py/bin/py.test
deleted file mode 100755
index f1c3e8927e..0000000000
--- a/py/bin/py.test
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-
-# somewhat PYPY specific hack:
-# let's make sure setuptools does show a warning when our inlined 'py'
-# version shadows a properly installed one.
-import warnings
-warnings.filterwarnings("ignore",
-    "Module py was already imported", category=UserWarning)
-from _findpy import py
-py.cmdline.pytest()
diff --git a/py/bin/py.which b/py/bin/py.which
deleted file mode 100755
index f2b30cfff3..0000000000
--- a/py/bin/py.which
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/env python
-from _findpy import py
-py.cmdline.pywhich()
\ No newline at end of file
diff --git a/py/bin/win32/py.cleanup.cmd b/py/bin/win32/py.cleanup.cmd
deleted file mode 100644
index e5d97b9980..0000000000
--- a/py/bin/win32/py.cleanup.cmd
+++ /dev/null
@@ -1,2 +0,0 @@
-@echo off
-python "%~dp0\..\py.cleanup" %*
\ No newline at end of file
diff --git a/py/bin/win32/py.convert_unittest.cmd b/py/bin/win32/py.convert_unittest.cmd
deleted file mode 100644
index 08ef4543dc..0000000000
--- a/py/bin/win32/py.convert_unittest.cmd
+++ /dev/null
@@ -1,2 +0,0 @@
-@echo off
-python "%~dp0\..\py.convert_unittest" %*
\ No newline at end of file
diff --git a/py/bin/win32/py.countloc.cmd b/py/bin/win32/py.countloc.cmd
deleted file mode 100644
index 82a1f3239d..0000000000
--- a/py/bin/win32/py.countloc.cmd
+++ /dev/null
@@ -1,2 +0,0 @@
-@echo off
-python "%~dp0\..\py.countloc" %*
\ No newline at end of file
diff --git a/py/bin/win32/py.lookup.cmd b/py/bin/win32/py.lookup.cmd
deleted file mode 100644
index a8fa51728a..0000000000
--- a/py/bin/win32/py.lookup.cmd
+++ /dev/null
@@ -1,2 +0,0 @@
-@echo off
-python "%~dp0\..\py.lookup" %*
\ No newline at end of file
diff --git a/py/bin/win32/py.svnwcrevert.cmd b/py/bin/win32/py.svnwcrevert.cmd
deleted file mode 100644
index d681b9c0e9..0000000000
--- a/py/bin/win32/py.svnwcrevert.cmd
+++ /dev/null
@@ -1,2 +0,0 @@
-@echo off
-python "%~dp0\..\py.svnwcrevert" %*
\ No newline at end of file
diff --git a/py/bin/win32/py.test.cmd b/py/bin/win32/py.test.cmd
deleted file mode 100644
index 36a1b9daed..0000000000
--- a/py/bin/win32/py.test.cmd
+++ /dev/null
@@ -1,2 +0,0 @@
-@echo off
-python "%~dp0\..\py.test" %*
\ No newline at end of file
diff --git a/py/bin/win32/py.which.cmd b/py/bin/win32/py.which.cmd
deleted file mode 100644
index 27af42fa68..0000000000
--- a/py/bin/win32/py.which.cmd
+++ /dev/null
@@ -1,2 +0,0 @@
-@echo off
-python "%~dp0\..\py.which" %*
\ No newline at end of file
diff --git a/py/test.py b/py/test.py
new file mode 100644
index 0000000000..aa5beb1789
--- /dev/null
+++ b/py/test.py
@@ -0,0 +1,10 @@
+import sys
+if __name__ == '__main__':
+    import pytest
+    sys.exit(pytest.main())
+else:
+    import sys, pytest
+    sys.modules['py.test'] = pytest
+
+# for more API entry points see the 'tests' definition
+# in __init__.py
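
The new py/test.py keeps the old "import py.test" spelling working even though pytest-2.0 is its own top-level module: run as a script it just calls pytest.main(), and when imported it registers the pytest module in sys.modules under the name 'py.test'. A minimal sketch of how such a sys.modules alias behaves (the names newlib and legacy_api below are made up for illustration; the actual patch aliases 'py.test' to pytest):

    import sys
    import types

    # stand-in for the "new" library that should answer to an old name
    newlib = types.ModuleType("newlib")
    newlib.main = lambda args=None: 0

    # register it under the legacy name, the same trick py/test.py uses
    # with sys.modules['py.test'] = pytest
    sys.modules["legacy_api"] = newlib

    import legacy_api           # resolved from sys.modules, no file needed
    assert legacy_api is newlib
    assert legacy_api.main() == 0

Because the import machinery consults sys.modules before any finders, old code that imports the legacy name keeps getting the new module object; the comment at the end of py/test.py points to __init__.py for the remaining API entry points.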