diff -ruN a/doc/juliadoc/juliadoc/__init__.py b/doc/juliadoc/juliadoc/__init__.py
--- a/doc/juliadoc/juliadoc/__init__.py 1970-01-01 01:00:00.000000000 +0100
+++ b/doc/juliadoc/juliadoc/__init__.py 2014-09-16 21:28:29.352868875 +0200
@@ -0,0 +1,21 @@
+import os
+
+def get_theme_dir():
+ """
+ Returns path to directory containing this package's theme.
+
+ This is designed to be used when setting the ``html_theme_path``
+ option within Sphinx's ``conf.py`` file.
+ """
+ return os.path.abspath(os.path.join(os.path.dirname(__file__), "theme"))
+
+def default_sidebars():
+ """
+    Returns a dictionary mapping page name patterns to the template
+    lists used to render the sidebar on the index page and sub-pages.
+ """
+ return {
+ '**': ['localtoc.html', 'relations.html', 'searchbox.html'],
+ 'index': ['searchbox.html'],
+ 'search': [],
+ }
diff -ruN a/doc/juliadoc/juliadoc/jldoctest.py b/doc/juliadoc/juliadoc/jldoctest.py
--- a/doc/juliadoc/juliadoc/jldoctest.py 1970-01-01 01:00:00.000000000 +0100
+++ b/doc/juliadoc/juliadoc/jldoctest.py 2014-09-16 21:28:29.416869546 +0200
@@ -0,0 +1,889 @@
+
+import re
+import sys
+import time
+import codecs
+
+try:
+ import StringIO as sio
+except ImportError:
+ import io as sio
+
+from os import path
+import traceback
+
+import doctest
+from doctest import *
+from doctest import _SpoofOut, _indent, TestResults, _exception_traceback
+from doctest import OPTIONFLAGS_BY_NAME
+from subprocess import Popen, PIPE, STDOUT
+
+from docutils import nodes
+from docutils.parsers.rst import directives
+
+from sphinx.builders import Builder
+from sphinx.util import force_decode
+from sphinx.util.nodes import set_source_info
+from sphinx.util.compat import Directive
+from sphinx.util.console import bold
+from sphinx.util.pycompat import bytes
+from sphinx.ext.doctest import TestDirective, TestGroup, TestCode, \
+ TestsetupDirective, TestcleanupDirective, DoctestDirective, \
+ TestcodeDirective, TestoutputDirective
+
+class DocTestParser:
+ """
+ A class used to parse strings containing doctest examples.
+ """
+ # This regular expression is used to find doctest examples in a
+ # string. It defines three groups: `source` is the source code
+ # (including leading indentation and prompts); `indent` is the
+ # indentation of the first (PS1) line of the source code; and
+ # `want` is the expected output (including leading indentation).
+ _EXAMPLE_RE = re.compile(r'''
+ # Source consists of a PS1 line followed by zero or more PS2 lines.
+ (?P<source>
+ (?:^(?P<indent> [ ]*) julia>[ ] .+) # PS1 line
+ (?:\n (?P=indent)? [ ]{7,13} .+)*) # PS2 lines
+ \n?
+ # Want consists of any non-blank lines that do not start with PS1.
+ (?P<want> (?:(?![ ]*$) # Not a blank line
+ (?![ ]*julia>) # Not a line starting with PS1
+ .+$\n? # But any other line
+ )*)
+ ''', re.MULTILINE | re.VERBOSE)
+
+ # A regular expression for handling `want` strings that contain
+ # expected exceptions. It divides `want` into three pieces:
+ # - the traceback header line (`hdr`)
+ # - the traceback stack (`stack`)
+ # - the exception message (`msg`), as generated by
+ # traceback.format_exception_only()
+ # `msg` may have multiple lines. We assume/require that the
+ # exception message is the first non-indented line starting with a word
+ # character following the traceback header line.
+ _EXCEPTION_RE = re.compile(r"""
+ # Grab the traceback header. Different versions of Python have
+ # said different things on the first traceback line.
+ ^(?P<hdr> Traceback\ \(
+ (?: most\ recent\ call\ last
+ | innermost\ last
+ ) \) :
+ )
+ \s* $ # toss trailing whitespace on the header.
+ (?P<stack> .*?) # don't blink: absorb stuff until...
+ ^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
+ """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+
+ # A callable returning a true value iff its argument is a blank line
+ # or contains a single comment.
+ _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
+
+ def parse(self, string, name='<string>'):
+ """
+ Divide the given string into examples and intervening text,
+ and return them as a list of alternating Examples and strings.
+ Line numbers for the Examples are 0-based. The optional
+ argument `name` is a name identifying this string, and is only
+ used for error messages.
+ """
+ string = string.expandtabs()
+ # If all lines begin with the same indentation, then strip it.
+ min_indent = self._min_indent(string)
+ if min_indent > 0:
+ string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+ output = []
+ charno, lineno = 0, 0
+ # Find all doctest examples in the string:
+ for m in self._EXAMPLE_RE.finditer(string):
+ # Add the pre-example text to `output`.
+ output.append(string[charno:m.start()])
+ # Update lineno (lines before this example)
+ lineno += string.count('\n', charno, m.start())
+ # Extract info from the regexp match.
+ (source, options, want, exc_msg) = \
+ self._parse_example(m, name, lineno)
+ # Create an Example, and add it to the list.
+ if not self._IS_BLANK_OR_COMMENT(source):
+ output.append( Example(source, want, exc_msg,
+ lineno=lineno,
+ indent=min_indent+len(m.group('indent')),
+ options=options) )
+ # Update lineno (lines inside this example)
+ lineno += string.count('\n', m.start(), m.end())
+ # Update charno.
+ charno = m.end()
+ # Add any remaining post-example text to `output`.
+ output.append(string[charno:])
+ return output
+
+ def get_doctest(self, string, globs, name, filename, lineno):
+ """
+ Extract all doctest examples from the given string, and
+ collect them into a `DocTest` object.
+
+ `globs`, `name`, `filename`, and `lineno` are attributes for
+ the new `DocTest` object. See the documentation for `DocTest`
+ for more information.
+ """
+ return DocTest(self.get_examples(string, name), globs,
+ name, filename, lineno, string)
+
+ def get_examples(self, string, name='<string>'):
+ """
+ Extract all doctest examples from the given string, and return
+ them as a list of `Example` objects. Line numbers are
+ 0-based, because it's most common in doctests that nothing
+ interesting appears on the same line as opening triple-quote,
+ and so the first interesting line is called \"line 1\" then.
+
+ The optional argument `name` is a name identifying this
+ string, and is only used for error messages.
+ """
+ return [x for x in self.parse(string, name)
+ if isinstance(x, Example)]
+
+ def _parse_example(self, m, name, lineno):
+ """
+        Given a regular expression match from `_EXAMPLE_RE` (`m`),
+        return a tuple `(source, options, want, exc_msg)`: `source` is
+        the matched example's source code (with prompts and indentation
+        stripped); `options` maps option flags to values; `want` is the
+        expected output; `exc_msg` is the expected exception message.
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+ """
+ # Get the example's indentation level.
+ indent = len(m.group('indent'))
+
+ # Divide source into lines; check that they're properly
+ # indented; and then strip their indentation & prompts.
+ source_lines = m.group('source').split('\n')
+ self._check_prompt_blank(source_lines, indent, name, lineno)
+ self._check_prefix(source_lines[1:], ' '*indent, name, lineno)
+ source = '\n'.join([sl[indent+7:] for sl in source_lines])
+
+ # Divide want into lines; check that it's properly indented; and
+ # then strip the indentation. Spaces before the last newline should
+ # be preserved, so plain rstrip() isn't good enough.
+ want = m.group('want')
+ want_lines = want.split('\n')
+ if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
+ del want_lines[-1] # forget final newline & spaces after it
+ self._check_prefix(want_lines, ' '*indent, name,
+ lineno + len(source_lines))
+ want = '\n'.join([wl[indent:] for wl in want_lines])
+
+ # If `want` contains a traceback message, then extract it.
+ m = self._EXCEPTION_RE.match(want)
+ if m:
+ exc_msg = m.group('msg')
+ else:
+ exc_msg = None
+
+ # Extract options from the source.
+ options = self._find_options(source, name, lineno)
+
+ return source, options, want, exc_msg
+
+ # This regular expression looks for option directives in the
+ # source code of an example. Option directives are comments
+ # starting with "doctest:". Warning: this may give false
+ # positives for string-literals that contain the string
+ # "#doctest:". Eliminating these false positives would require
+ # actually parsing the string; but we limit them by ignoring any
+ # line containing "#doctest:" that is *followed* by a quote mark.
+ _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
+ re.MULTILINE)
+
+ def _find_options(self, source, name, lineno):
+ """
+ Return a dictionary containing option overrides extracted from
+ option directives in the given source string.
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+ """
+ options = {}
+ # (note: with the current regexp, this will match at most once:)
+ for m in self._OPTION_DIRECTIVE_RE.finditer(source):
+ option_strings = m.group(1).replace(',', ' ').split()
+ for option in option_strings:
+ if (option[0] not in '+-' or
+ option[1:] not in OPTIONFLAGS_BY_NAME):
+ raise ValueError('line %r of the doctest for %s '
+ 'has an invalid option: %r' %
+ (lineno+1, name, option))
+ flag = OPTIONFLAGS_BY_NAME[option[1:]]
+ options[flag] = (option[0] == '+')
+ if options and self._IS_BLANK_OR_COMMENT(source):
+ raise ValueError('line %r of the doctest for %s has an option '
+ 'directive on a line with no example: %r' %
+ (lineno, name, source))
+ return options
+
+ # This regular expression finds the indentation of every non-blank
+ # line in a string.
+ _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
+
+ def _min_indent(self, s):
+ "Return the minimum indentation of any non-blank line in `s`"
+ indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
+ if len(indents) > 0:
+ return min(indents)
+ else:
+ return 0
+
+ def _check_prompt_blank(self, lines, indent, name, lineno):
+ """
+ Given the lines of a source string (including prompts and
+ leading indentation), check to make sure that every prompt is
+ followed by a space character. If any line is not followed by
+ a space character, then raise ValueError.
+ """
+ n = len("julia>")
+ for i, line in enumerate(lines):
+ if len(line) >= indent+n+1 and line[indent+n] != ' ':
+ raise ValueError('line %r of the docstring for %s '
+ 'lacks blank after %s: %r' %
+ (lineno+i+1, name,
+ line[indent:indent+n], line))
+
+ def _check_prefix(self, lines, prefix, name, lineno):
+ """
+ Check that every line in the given list starts with the given
+ prefix; if any line does not, then raise a ValueError.
+ """
+ for i, line in enumerate(lines):
+ if line and not line.startswith(prefix):
+ raise ValueError('line %r of the docstring for %s has '
+ 'inconsistent leading whitespace: %r' %
+ (lineno+i+1, name, line))
+
+parser = DocTestParser()
+
+class SphinxDocTestRunner(object):
+ """
+ A class used to run DocTest test cases, and accumulate statistics.
+ The `run` method is used to process a single DocTest case. It
+ returns a tuple `(f, t)`, where `t` is the number of test cases
+ tried, and `f` is the number of test cases that failed.
+
+ >>> tests = DocTestFinder().find(_TestClass)
+ >>> runner = DocTestRunner(verbose=False)
+ >>> tests.sort(key = lambda test: test.name)
+ >>> for test in tests:
+ ... print test.name, '->', runner.run(test)
+ _TestClass -> TestResults(failed=0, attempted=2)
+ _TestClass.__init__ -> TestResults(failed=0, attempted=2)
+ _TestClass.get -> TestResults(failed=0, attempted=2)
+ _TestClass.square -> TestResults(failed=0, attempted=1)
+
+ The `summarize` method prints a summary of all the test cases that
+ have been run by the runner, and returns an aggregated `(f, t)`
+ tuple:
+
+ >>> runner.summarize(verbose=1)
+ 4 items passed all tests:
+ 2 tests in _TestClass
+ 2 tests in _TestClass.__init__
+ 2 tests in _TestClass.get
+ 1 tests in _TestClass.square
+ 7 tests in 4 items.
+ 7 passed and 0 failed.
+ Test passed.
+ TestResults(failed=0, attempted=7)
+
+ The aggregated number of tried examples and failed examples is
+ also available via the `tries` and `failures` attributes:
+
+ >>> runner.tries
+ 7
+ >>> runner.failures
+ 0
+
+ The comparison between expected outputs and actual outputs is done
+ by an `OutputChecker`. This comparison may be customized with a
+ number of option flags; see the documentation for `testmod` for
+ more information. If the option flags are insufficient, then the
+ comparison may also be customized by passing a subclass of
+ `OutputChecker` to the constructor.
+
+ The test runner's display output can be controlled in two ways.
+    First, an output function (`out`) can be passed to
+ `TestRunner.run`; this function will be called with strings that
+ should be displayed. It defaults to `sys.stdout.write`. If
+ capturing the output is not sufficient, then the display output
+ can be also customized by subclassing DocTestRunner, and
+ overriding the methods `report_start`, `report_success`,
+ `report_unexpected_exception`, and `report_failure`.
+ """
+ # This divider string is used to separate failure messages, and to
+ # separate sections of the summary.
+ DIVIDER = "*" * 70
+
+ def __init__(self, checker=None, verbose=False, optionflags=0):
+ """
+ Create a new test runner.
+
+ Optional keyword arg `checker` is the `OutputChecker` that
+ should be used to compare the expected outputs and actual
+ outputs of doctest examples.
+
+ Optional keyword arg 'verbose' prints lots of stuff if true,
+ only failures if false; by default, it's true iff '-v' is in
+ sys.argv.
+
+ Optional argument `optionflags` can be used to control how the
+ test runner compares expected output to actual output, and how
+ it displays failures. See the documentation for `testmod` for
+ more information.
+ """
+ self._checker = checker or OutputChecker()
+ self._verbose = verbose
+ self.optionflags = optionflags
+ self.original_optionflags = optionflags
+
+ # Keep track of the examples we've run.
+ self.tries = 0
+ self.failures = 0
+ self._name2ft = {}
+
+ # Create a fake output target for capturing doctest output.
+ self._fakeout = _SpoofOut()
+
+ #/////////////////////////////////////////////////////////////////
+ # Reporting methods
+ #/////////////////////////////////////////////////////////////////
+
+ def report_start(self, out, test, example):
+ """
+ Report that the test runner is about to process the given
+ example. (Only displays a message if verbose=True)
+ """
+ if self._verbose:
+ if example.want:
+ out('Trying:\n' + _indent(example.source) +
+ 'Expecting:\n' + _indent(example.want))
+ else:
+ out('Trying:\n' + _indent(example.source) +
+ 'Expecting nothing\n')
+
+ def report_success(self, out, test, example, got):
+ """
+ Report that the given example ran successfully. (Only
+ displays a message if verbose=True)
+ """
+ if self._verbose:
+ out("ok\n")
+
+ def report_failure(self, out, test, example, got):
+ """
+ Report that the given example failed.
+ """
+ try:
+ out(self._failure_header(test, example) +
+ self._checker.output_difference(example, got, self.optionflags))
+ except:
+ raise Exception(example.want, got)
+
+ def report_unexpected_exception(self, out, test, example, exc_info):
+ """
+ Report that the given example raised an unexpected exception.
+ """
+ out(self._failure_header(test, example) +
+ 'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
+
+ def _failure_header(self, test, example):
+ out = [self.DIVIDER]
+ if test.filename:
+ if test.lineno is not None and example.lineno is not None:
+ lineno = test.lineno + example.lineno + 1
+ else:
+ lineno = '?'
+ out.append('File "%s", line %s, in %s' %
+ (test.filename, lineno, test.name))
+ else:
+ out.append('Line %s, in %s' % (example.lineno+1, test.name))
+ out.append('Failed example:')
+ source = example.source
+ out.append(_indent(source))
+ return '\n'.join(out)
+
+ #/////////////////////////////////////////////////////////////////
+ # DocTest Running
+ #/////////////////////////////////////////////////////////////////
+
+ def __run(self, test, out):
+ """
+        Run the examples in `test` by feeding them to the running
+        Julia subprocess (`self.julia`).  Write the outcome of each
+        example with one of the `DocTestRunner.report_*` methods, using
+        the writer function `out`.  Return a tuple
+        `(f, t)`, where `t` is the number of examples tried, and `f`
+        is the number of examples that failed.
+ """
+ # Keep track of the number of failures and tries.
+ failures = tries = 0
+
+ # Save the option flags (since option directives can be used
+ # to modify them).
+ original_optionflags = self.optionflags
+
+ SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
+
+ check = self._checker.check_output
+
+ # Process each example.
+ for examplenum, example in enumerate(test.examples):
+
+ # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
+ # reporting after the first failure.
+ quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
+ failures > 0)
+
+ # Merge in the example's options.
+ self.optionflags = original_optionflags
+ if example.options:
+ for (optionflag, val) in example.options.items():
+ if val:
+ self.optionflags |= optionflag
+ else:
+ self.optionflags &= ~optionflag
+
+ # If 'SKIP' is set, then skip this example.
+ if self.optionflags & SKIP:
+ continue
+
+ # Record that we started this example.
+ tries += 1
+ if not quiet:
+ self.report_start(out, test, example)
+
+ # Run the example in the given context, and record
+ # any exception that gets raised. (But don't intercept
+ # keyboard interrupts.)
+ got = ""
+ try:
+ # Don't blink! This is where the user's code gets run.
+ src = example.source.strip().encode('utf-8').replace('"""',r'\"""')
+ # restore ans cleared by the separator println
+ self.julia.stdin.write('ans=_ans;')
+ # run command
+ show = 'true' if src[-1] != ';' else 'false'
+ cmd = 'Base.eval_user_input(Base.parse_input_line(raw""" ' \
+ + src + ' """),' + show + ');'
+ self.julia.stdin.write(cmd)
+ # save ans, and make sure no more output is generated
+ self.julia.stdin.write('_ans=ans; nothing\n')
+ # read separator
+ sep = 'fjsdiij3oi123j42'
+ self.julia.stdin.write('println("' + sep + '")\n')
+ got = []
+ line = ''
+ while line[:-1] != sep:
+ got.append(line)
+ line = self.julia.stdout.readline().decode('utf-8').rstrip() + '\n'
+ got = ''.join(got).expandtabs()
+ exception = None
+ except KeyboardInterrupt:
+ raise
+ except:
+ exception = sys.exc_info()
+
+ #got = self._fakeout.getvalue() # the actual output
+ self._fakeout.truncate(0)
+ outcome = FAILURE # guilty until proved innocent or insane
+
+ # If the example executed without raising any exceptions,
+ # verify its output.
+ if exception is None:
+ if check(example.want, got, self.optionflags):
+ outcome = SUCCESS
+
+ # The example raised an exception: check if it was expected.
+ else:
+ exc_info = sys.exc_info()
+ exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
+ if not quiet:
+ got += _exception_traceback(exc_info)
+
+ # If `example.exc_msg` is None, then we weren't expecting
+ # an exception.
+ if example.exc_msg is None:
+ outcome = BOOM
+
+ # We expected an exception: see whether it matches.
+ elif check(example.exc_msg, exc_msg, self.optionflags):
+ outcome = SUCCESS
+
+ # Another chance if they didn't care about the detail.
+ elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
+ m1 = re.match(r'(?:[^:]*\.)?([^:]*:)', example.exc_msg)
+ m2 = re.match(r'(?:[^:]*\.)?([^:]*:)', exc_msg)
+ if m1 and m2 and check(m1.group(1), m2.group(1),
+ self.optionflags):
+ outcome = SUCCESS
+
+ # Report the outcome.
+ if outcome is SUCCESS:
+ if not quiet:
+ self.report_success(out, test, example, got)
+ elif outcome is FAILURE:
+ if not quiet:
+ self.report_failure(out, test, example, got)
+ failures += 1
+ elif outcome is BOOM:
+ if not quiet:
+ self.report_unexpected_exception(out, test, example,
+ exc_info)
+ failures += 1
+ else:
+ assert False, ("unknown outcome", outcome)
+
+ # Restore the option flags (in case they were modified)
+ self.optionflags = original_optionflags
+
+ # Record and return the number of failures and tries.
+ self.__record_outcome(test, failures, tries)
+ return TestResults(failures, tries)
+
+ def __record_outcome(self, test, f, t):
+ """
+ Record the fact that the given DocTest (`test`) generated `f`
+ failures out of `t` tried examples.
+ """
+ f2, t2 = self._name2ft.get(test.name, (0,0))
+ self._name2ft[test.name] = (f+f2, t+t2)
+ self.failures += f
+ self.tries += t
+
+ def run(self, test, out=None):
+ """
+ Run the examples in `test`, and display the results using the
+ writer function `out`.
+
+        The output of each example is checked using the checker's
+        `OutputChecker.check_output`, and the results are formatted by
+        the `DocTestRunner.report_*` methods.
+ """
+ self.test = test
+
+ save_stdout = sys.stdout
+ if out is None:
+ out = save_stdout.write
+ sys.stdout = self._fakeout
+
+ try:
+ return self.__run(test, out)
+ finally:
+ sys.stdout = save_stdout
+
+ #/////////////////////////////////////////////////////////////////
+ # Summarization
+ #/////////////////////////////////////////////////////////////////
+ def summarize(self, out, verbose=None):
+ """
+ Print a summary of all the test cases that have been run by
+ this DocTestRunner, and return a tuple `(f, t)`, where `f` is
+ the total number of failed examples, and `t` is the total
+ number of tried examples.
+
+ The optional `verbose` argument controls how detailed the
+ summary is. If the verbosity is not specified, then the
+ DocTestRunner's verbosity is used.
+ """
+ string_io = sio.StringIO()
+ old_stdout = sys.stdout
+ sys.stdout = string_io
+ try:
+ if verbose is None:
+ verbose = self._verbose
+ notests = []
+ passed = []
+ failed = []
+ totalt = totalf = 0
+ for x in self._name2ft.items():
+ name, (f, t) = x
+ assert f <= t
+ totalt += t
+ totalf += f
+ if t == 0:
+ notests.append(name)
+ elif f == 0:
+ passed.append( (name, t) )
+ else:
+ failed.append(x)
+ if verbose:
+ if notests:
+ print(len(notests), "items had no tests:")
+ notests.sort()
+ for thing in notests:
+ print(" ", thing)
+ if passed:
+ print(len(passed), "items passed all tests:")
+ passed.sort()
+ for thing, count in passed:
+ print(" %3d tests in %s" % (count, thing))
+ if failed:
+ print(self.DIVIDER)
+ print(len(failed), "items had failures:")
+ failed.sort()
+ for thing, (f, t) in failed:
+ print(" %3d of %3d in %s" % (f, t, thing))
+ if verbose:
+ print(totalt, "tests in", len(self._name2ft), "items.")
+ print(totalt - totalf, "passed and", totalf, "failed.")
+ if totalf:
+ print("***Test Failed***", totalf, "failures.")
+ elif verbose:
+ print("Test passed.")
+ res = TestResults(totalf, totalt)
+ finally:
+ sys.stdout = old_stdout
+ out(string_io.getvalue())
+ return res
+
+class DocTestBuilder(Builder):
+ """
+ Runs test snippets in the documentation.
+ """
+ name = 'doctest'
+
+ def init(self):
+ # default options
+ self.opt = doctest.DONT_ACCEPT_TRUE_FOR_1 | doctest.ELLIPSIS | \
+ doctest.IGNORE_EXCEPTION_DETAIL
+
+ # HACK HACK HACK
+ # doctest compiles its snippets with type 'single'. That is nice
+ # for doctest examples but unusable for multi-statement code such
+ # as setup code -- to be able to use doctest error reporting with
+ # that code nevertheless, we monkey-patch the "compile" it uses.
+ doctest.compile = self.compile
+
+ self.type = 'single'
+
+ self.total_failures = 0
+ self.total_tries = 0
+ self.setup_failures = 0
+ self.setup_tries = 0
+ self.cleanup_failures = 0
+ self.cleanup_tries = 0
+
+ date = time.strftime('%Y-%m-%d %H:%M:%S')
+
+ self.outfile = codecs.open(path.join(self.outdir, 'output.txt'),
+ 'w', encoding='utf-8')
+ self.outfile.write('''\
+Results of doctest builder run on %s
+==================================%s
+''' % (date, '='*len(date)))
+
+ def _out(self, text):
+ self.info(text, nonl=True)
+ self.outfile.write(text)
+
+ def _warn_out(self, text):
+ self.info(text, nonl=True)
+ if self.app.quiet:
+ self.warn(text)
+ if isinstance(text, bytes):
+ text = force_decode(text, None)
+ self.outfile.write(text)
+
+ def get_target_uri(self, docname, typ=None):
+ return ''
+
+ def get_outdated_docs(self):
+ return self.env.found_docs
+
+ def finish(self):
+ # write executive summary
+ def s(v):
+ return v != 1 and 's' or ''
+ self._out('''
+Doctest summary
+===============
+%5d test%s
+%5d failure%s in tests
+%5d failure%s in setup code
+%5d failure%s in cleanup code
+''' % (self.total_tries, s(self.total_tries),
+ self.total_failures, s(self.total_failures),
+ self.setup_failures, s(self.setup_failures),
+ self.cleanup_failures, s(self.cleanup_failures)))
+ self.outfile.close()
+
+ if self.total_failures or self.setup_failures or self.cleanup_failures:
+ self.app.statuscode = 1
+
+ def write(self, build_docnames, updated_docnames, method='update'):
+ if build_docnames is None:
+ build_docnames = sorted(self.env.all_docs)
+
+ self.info(bold('running tests...'))
+ for docname in build_docnames:
+ # no need to resolve the doctree
+ doctree = self.env.get_doctree(docname)
+ self.test_doc(docname, doctree)
+
+ def test_doc(self, docname, doctree):
+ groups = {}
+ add_to_all_groups = []
+ self.setup_runner = SphinxDocTestRunner(verbose=False,
+ optionflags=self.opt)
+ self.test_runner = SphinxDocTestRunner(verbose=False,
+ optionflags=self.opt)
+ self.cleanup_runner = SphinxDocTestRunner(verbose=False,
+ optionflags=self.opt)
+
+ self.test_runner._fakeout = self.setup_runner._fakeout
+ self.cleanup_runner._fakeout = self.setup_runner._fakeout
+
+ def condition(node):
+ return isinstance(node, (nodes.literal_block, nodes.comment)) \
+ and node.has_key('testnodetype')
+ for node in doctree.traverse(condition):
+ source = node.has_key('test') and node['test'] or node.astext()
+ if not source:
+ self.warn('no code/output in %s block at %s:%s' %
+ (node.get('testnodetype', 'doctest'),
+ self.env.doc2path(docname), node.line))
+ code = TestCode(source, type=node.get('testnodetype', 'doctest'),
+ lineno=node.line, options=node.get('options'))
+ node_groups = node.get('groups', ['default'])
+ if '*' in node_groups:
+ add_to_all_groups.append(code)
+ continue
+ for groupname in node_groups:
+ if groupname not in groups:
+ groups[groupname] = TestGroup(groupname)
+ groups[groupname].add_code(code)
+ for code in add_to_all_groups:
+ for group in groups.itervalues():
+ group.add_code(code)
+ if self.config.doctest_global_setup:
+ code = TestCode(self.config.doctest_global_setup,
+ 'testsetup', lineno=0)
+ for group in groups.itervalues():
+ group.add_code(code, prepend=True)
+ if self.config.doctest_global_cleanup:
+ code = TestCode(self.config.doctest_global_cleanup,
+ 'testcleanup', lineno=0)
+ for group in groups.itervalues():
+ group.add_code(code)
+ if not groups:
+ return
+
+ self._out('\nDocument: %s\n----------%s\n' %
+ (docname, '-'*len(docname)))
+ for group in groups.itervalues():
+ self.test_group(group, self.env.doc2path(docname, base=None))
+ # Separately count results from setup code
+ res_f, res_t = self.setup_runner.summarize(self._out, verbose=False)
+ self.setup_failures += res_f
+ self.setup_tries += res_t
+ if self.test_runner.tries:
+ res_f, res_t = self.test_runner.summarize(self._out, verbose=True)
+ self.total_failures += res_f
+ self.total_tries += res_t
+ if self.cleanup_runner.tries:
+ res_f, res_t = self.cleanup_runner.summarize(self._out,
+ verbose=True)
+ self.cleanup_failures += res_f
+ self.cleanup_tries += res_t
+
+ def compile(self, code, name, type, flags, dont_inherit):
+ return compile(code, name, self.type, flags, dont_inherit)
+
+ def test_group(self, group, filename):
+
+ j = Popen(["../julia"], stdin=PIPE, stdout=PIPE, stderr=STDOUT)
+ j.stdin.write("macro raw_mstr(s) s end\n")
+ j.stdin.write("_ans = nothing\n")
+ self.setup_runner.julia = j
+ self.test_runner.julia = j
+ self.cleanup_runner.julia = j
+
+ def run_setup_cleanup(runner, testcodes, what):
+ examples = []
+ for testcode in testcodes:
+ examples.append(doctest.Example(testcode.code, '',
+ lineno=testcode.lineno))
+ if not examples:
+ return True
+ # simulate a doctest with the code
+ sim_doctest = doctest.DocTest(examples, {},
+ '%s (%s code)' % (group.name, what),
+ filename, 0, None)
+ old_f = runner.failures
+ self.type = 'exec' # the snippet may contain multiple statements
+ runner.run(sim_doctest, out=self._warn_out)
+ if runner.failures > old_f:
+ return False
+ return True
+
+ # run the setup code
+ if not run_setup_cleanup(self.setup_runner, group.setup, 'setup'):
+ # if setup failed, don't run the group
+ return
+
+ # run the tests
+ for code in group.tests:
+ if len(code) == 1:
+ # ordinary doctests (code/output interleaved)
+ try:
+ test = parser.get_doctest(code[0].code, {}, group.name,
+ filename, code[0].lineno)
+ except Exception:
+ self.warn('ignoring invalid doctest code: %r' %
+ code[0].code,
+ '%s:%s' % (filename, code[0].lineno))
+ raise
+ continue
+ if not test.examples:
+ continue
+ for example in test.examples:
+ # apply directive's comparison options
+ new_opt = code[0].options.copy()
+ new_opt.update(example.options)
+ example.options = new_opt
+ self.type = 'single' # as for ordinary doctests
+ else:
+ # testcode and output separate
+ output = code[1] and code[1].code or ''
+ options = code[1] and code[1].options or {}
+ # disable <BLANKLINE> processing as it is not needed
+ options[doctest.DONT_ACCEPT_BLANKLINE] = True
+ # find out if we're testing an exception
+ m = parser._EXCEPTION_RE.match(output)
+ if m:
+ exc_msg = m.group('msg')
+ else:
+ exc_msg = None
+ example = doctest.Example(code[0].code, output,
+ exc_msg=exc_msg,
+ lineno=code[0].lineno,
+ options=options)
+ test = doctest.DocTest([example], {}, group.name,
+ filename, code[0].lineno, None)
+ self.type = 'exec' # multiple statements again
+ self.test_runner.run(test, out=self._warn_out)
+
+ # run the cleanup
+ run_setup_cleanup(self.cleanup_runner, group.cleanup, 'cleanup')
+
+ j.kill()
+
+def setup(app):
+ app.add_directive('testsetup', TestsetupDirective)
+ app.add_directive('testcleanup', TestcleanupDirective)
+ app.add_directive('doctest', DoctestDirective)
+ app.add_directive('testcode', TestcodeDirective)
+ app.add_directive('testoutput', TestoutputDirective)
+ app.add_builder(DocTestBuilder)
+ app.add_config_value('doctest_global_setup', '', False)
+ app.add_config_value('doctest_global_cleanup', '', False)
diff -ruN a/doc/juliadoc/juliadoc/jlhelp.py b/doc/juliadoc/juliadoc/jlhelp.py
--- a/doc/juliadoc/juliadoc/jlhelp.py 1970-01-01 01:00:00.000000000 +0100
+++ b/doc/juliadoc/juliadoc/jlhelp.py 2014-09-16 21:28:29.397869346 +0200
@@ -0,0 +1,112 @@
+import codecs
+from os import path
+
+from docutils import nodes
+from sphinx.builders.text import TextBuilder
+from sphinx.writers.text import TextTranslator
+from sphinx.writers.text import TextWriter
+
+from sphinx.util.osutil import ensuredir
+from sphinx.util.console import bold, purple, darkgreen, term_width_line
+
+def jl_escape(text):
+ # XXX: crude & fragile -- escapes backslash, $ and " so the text can be embedded in a Julia string literal
+ return text.replace('\\',r'\\').replace('$',"\\$").replace('"',"\\\"")
+
+class JuliaHelpTranslator(TextTranslator):
+
+ def __init__(self, document, builder):
+ TextTranslator.__init__(self, document, builder)
+ self.in_desc = False
+
+ def add_text(self, text, escape=True, force=False):
+ if self.in_desc or force:
+ etext = jl_escape(text) if escape else text
+ self.states[-1].append((-1, etext))
+
+ def visit_title(self, node):
+ raise nodes.SkipNode
+
+ def visit_desc(self, node):
+ if node.attributes['objtype'] == 'attribute':
+ return
+ self.in_desc = True
+ self.new_state(0)
+
+ def visit_desc_signature(self, node):
+ self._current_module = node.attributes.get('module', None)
+ self._current_class = node.attributes.get('class', None)
+ TextTranslator.visit_desc_signature(self, node)
+
+ def visit_desc_name(self, node):
+ self._desc_name = node.astext()
+ TextTranslator.visit_desc_name(self, node)
+
+ def depart_desc(self, node):
+ if node.attributes['objtype'] == 'attribute':
+ return
+ self.add_text('"),\n', escape=False)
+ if self._current_module is not None:
+ module = self._current_module
+ else:
+ module = ''
+ name = self._desc_name
+ if self._current_class:
+ name = self._current_class
+ self.end_state(first='("%s","%s","' % ( \
+ jl_escape(module), \
+ jl_escape(name)))
+ self.in_desc = False
+
+class JuliaHelpWriter(TextWriter):
+
+ def translate(self):
+ visitor = JuliaHelpTranslator(self.document, self.builder)
+ self.document.walkabout(visitor)
+ self.output = visitor.body
+
+class JuliaHelpBuilder(TextBuilder):
+ name = "jlhelp"
+ out_suffix = ".jl"
+
+ def write(self, *ignored):
+ # build all matching documents at once; only pages under stdlib/ are included
+ docnames = set([doc for doc in self.env.found_docs if doc.startswith("stdlib")])
+
+ self.info(bold('preparing documents... '), nonl=True)
+ self.prepare_writing(docnames)
+ self.info('done')
+
+ # write target files
+ warnings = []
+ self.env.set_warnfunc(lambda *args: warnings.append(args))
+
+ outfilename = path.join(self.outdir, self.name + self.out_suffix)
+ ensuredir(path.dirname(outfilename))
+ try:
+ f = codecs.open(outfilename, 'w', 'utf-8')
+ try:
+ f.write('# automatically generated from files in doc/stdlib/ -- do not edit here\n\n' +
+ '{\n\n')
+
+ for docname in self.status_iterator(
+ sorted(docnames), 'processing... ', darkgreen, len(docnames)):
+ doctree = self.env.get_and_resolve_doctree(docname, self)
+ self.writer.write(doctree, f)
+ f.write("\n")
+
+ f.write('\n}\n')
+ finally:
+ f.close()
+ except (IOError, OSError) as err:
+ self.warn("error writing file %s: %s" % (outfilename, err))
+
+ for warning in warnings:
+ self.warn(*warning)
+ self.env.set_warnfunc(self.warn)
+
+ def prepare_writing(self, docnames):
+ self.writer = JuliaHelpWriter(self)
+
+def setup(app):
+ app.add_builder(JuliaHelpBuilder)
diff -ruN a/doc/juliadoc/juliadoc/julia.py b/doc/juliadoc/juliadoc/julia.py
--- a/doc/juliadoc/juliadoc/julia.py 1970-01-01 01:00:00.000000000 +0100
+++ b/doc/juliadoc/juliadoc/julia.py 2014-09-16 21:28:29.352868875 +0200
@@ -0,0 +1,24 @@
+# Julia domain for Sphinx
+# http://sphinx.pocoo.org/domains.html
+
+import re
+import sphinx.domains.python
+
+sphinx.domains.python.py_sig_re = re.compile(
+ r'''^ ([\w.]+\.)? # class name(s)
+ ([^\s(]+) \s* # thing name
+ (?: \((.*)\) # optional: arguments
+ (?:\s* -> \s* (.*))? # return annotation
+ )? $ # and nothing more
+ ''', re.VERBOSE | re.UNICODE)
+
+class JuliaDomain(sphinx.domains.python.PythonDomain):
+ """Julia language domain."""
+ name = 'jl'
+ label = 'Julia'
+
+JuliaDomain.directives['type'] = JuliaDomain.directives['class']
+
+def setup(app):
+ app.add_domain(JuliaDomain)
+
diff -ruN a/doc/juliadoc/juliadoc/theme/julia/layout.html b/doc/juliadoc/juliadoc/theme/julia/layout.html
--- a/doc/juliadoc/juliadoc/theme/julia/layout.html 1970-01-01 01:00:00.000000000 +0100
+++ b/doc/juliadoc/juliadoc/theme/julia/layout.html 2014-09-16 21:28:29.381869179 +0200
@@ -0,0 +1,164 @@
+{# TEMPLATE VAR SETTINGS #}
+{%- set css_files = css_files + ["_static/julia.css"] %}
+{%- set url_root = pathto('', 1) %}
+{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
+{%- if not embedded and docstitle %}
+ {%- set titlesuffix = " — "|safe + docstitle|e %}
+{%- else %}
+ {%- set titlesuffix = "" %}
+{%- endif %}
+
+<!DOCTYPE html>
+<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
+<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
+<head>
+ <meta charset="utf-8">
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
+ {% block htmltitle %}
+ <title>{{ title|striptags|e }}{{ titlesuffix }}</title>
+ {% endblock %}
+
+ {# FAVICON #}
+ {% if favicon %}
+ <link rel="shortcut icon" href="{{ pathto('_static/' + favicon, 1) }}"/>
+ {% endif %}
+
+ {# CSS #}
+ <link href='https://fonts.googleapis.com/css?family=Lato:400,700|Roboto+Slab:400,700|Inconsolata:400,700' rel='stylesheet' type='text/css'>
+
+ {# OPENSEARCH #}
+ {% if not embedded %}
+ {% if use_opensearch %}
+ <link rel="search" type="application/opensearchdescription+xml" title="{% trans docstitle=docstitle|e %}Search within {{ docstitle }}{% endtrans %}" href="{{ pathto('_static/opensearch.xml', 1) }}"/>
+ {% endif %}
+
+ {% endif %}
+
+ {# RTD hosts this file, so only load it on non-RTD builds #}
+ {% if not READTHEDOCS %}
+ <link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" />
+ {% endif %}
+
+ {% for cssfile in css_files %}
+ <link rel="stylesheet" href="{{ pathto(cssfile, 1) }}" type="text/css" />
+ {% endfor %}
+
+ {%- block linktags %}
+ {%- if hasdoc('about') %}
+ <link rel="author" title="{{ _('About these documents') }}"
+ href="{{ pathto('about') }}"/>
+ {%- endif %}
+ {%- if hasdoc('genindex') %}
+ <link rel="index" title="{{ _('Index') }}"
+ href="{{ pathto('genindex') }}"/>
+ {%- endif %}
+ {%- if hasdoc('search') %}
+ <link rel="search" title="{{ _('Search') }}" href="{{ pathto('search') }}"/>
+ {%- endif %}
+ {%- if hasdoc('copyright') %}
+ <link rel="copyright" title="{{ _('Copyright') }}" href="{{ pathto('copyright') }}"/>
+ {%- endif %}
+ <link rel="top" title="{{ docstitle|e }}" href="{{ pathto('index') }}"/>
+ {%- if parents %}
+ <link rel="up" title="{{ parents[-1].title|striptags|e }}" href="{{ parents[-1].link|e }}"/>
+ {%- endif %}
+ {%- if next %}
+ <link rel="next" title="{{ next.title|striptags|e }}" href="{{ next.link|e }}"/>
+ {%- endif %}
+ {%- if prev %}
+ <link rel="prev" title="{{ prev.title|striptags|e }}" href="{{ prev.link|e }}"/>
+ {%- endif %}
+ {%- endblock %}
+ {%- block extrahead %} {% endblock %}
+
+ {# Keep modernizr in head - http://modernizr.com/docs/#installing #}
+ <script src="https://cdnjs.cloudflare.com/ajax/libs/modernizr/2.6.2/modernizr.min.js"></script>
+
+</head>
+
+<body class="wy-body-for-nav" role="document">
+
+ <div class="wy-grid-for-nav">
+
+ {# SIDE NAV, TOGGLES ON MOBILE #}
+ <nav data-toggle="wy-nav-shift" class="wy-nav-side">
+ <div class="wy-side-nav-search">
+ <a href="http://julialang.org/"><img src="{{ pathto('_static/julia-logo.svg', 1) }}" class="logo"></a>
+ <!--
+ <a href="{{ pathto(master_doc) }}" class="fa fa-home"> {{ project }}</a>
+ -->
+ {% include "searchbox.html" %}
+ </div>
+
+ <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
+ {% set toctree = toctree(maxdepth=2, collapse=False, includehidden=True) %}
+ {% if toctree %}
+ {{ toctree }}
+ {% else %}
+ <!-- Local TOC -->
+ <div class="local-toc">{{ toc }}</div>
+ {% endif %}
+ </div>
+
+ </nav>
+
+ <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
+
+ {# MOBILE NAV, TRIGGERS SIDE NAV ON TOGGLE #}
+ <nav class="wy-nav-top" role="navigation" aria-label="top navigation">
+ <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
+ <a href="{{ pathto(master_doc) }}">{{ project }}</a>
+ </nav>
+
+
+ {# PAGE CONTENT #}
+ <div class="wy-nav-content">
+ <div class="rst-content">
+ {% include "breadcrumbs.html" %}
+ <div role="main" class="document">
+ {% block body %}{% endblock %}
+ </div>
+ {% include "footer.html" %}
+ </div>
+ </div>
+
+ </section>
+
+ </div>
+ {% include "versions.html" %}
+
+ {% if not embedded %}
+
+ <script type="text/javascript">
+ var DOCUMENTATION_OPTIONS = {
+ URL_ROOT:'{{ url_root }}',
+ VERSION:'{{ release|e }}',
+ COLLAPSE_INDEX:false,
+ FILE_SUFFIX:'{{ '' if no_search_suffix else file_suffix }}',
+ HAS_SOURCE: {{ has_source|lower }}
+ };
+ </script>
+ {%- for scriptfile in script_files %}
+ <script type="text/javascript" src="{{ pathto(scriptfile, 1) }}"></script>
+ {%- endfor %}
+
+ {% endif %}
+
+ {# RTD hosts this file, so only load it on non-RTD builds #}
+ {% if not READTHEDOCS %}
+ <script type="text/javascript" src="{{ pathto('_static/js/theme.js', 1) }}"></script>
+ {% endif %}
+
+ {# STICKY NAVIGATION #}
+ {% if theme_sticky_navigation %}
+ <script type="text/javascript">
+ jQuery(function () {
+ SphinxRtdTheme.StickyNav.enable();
+ });
+ </script>
+ {% endif %}
+
+ {%- block footer %} {% endblock %}
+
+</body>
+</html>
diff -ruN a/doc/juliadoc/juliadoc/theme/julia/static/julia.css_t b/doc/juliadoc/juliadoc/theme/julia/static/julia.css_t
--- a/doc/juliadoc/juliadoc/theme/julia/static/julia.css_t 1970-01-01 01:00:00.000000000 +0100
+++ b/doc/juliadoc/juliadoc/theme/julia/static/julia.css_t 2014-09-16 21:28:29.372869085 +0200
@@ -0,0 +1,60 @@
+@import url("css/theme.css");
+
+.wy-nav-content,
+.wy-nav-content-wrap {
+ color: #222;
+ background: #fff;
+}
+
+body,
+.wy-body-for-nav,
+.wy-nav-side,
+.wy-side-nav-search,
+.wy-side-nav-search img {
+ background: #fcfcfc;
+}
+
+.wy-menu-vertical li.current a,
+.wy-side-nav-search,
+.wy-nav-side {
+ border-right: none;
+ box-shadow: inset -14px 0px 5px -12px rgb(210,210,210);
+}
+
+.wy-menu-vertical li.current > a,
+.wy-menu-vertical li.current > a:hover {
+ background: #fff;
+ box-shadow: none;
+}
+
+.wy-side-nav-search img {
+ width: 158px;
+ height: 106px;
+ border-radius: 0;
+ margin: 1em;
+ padding: 0;
+}
+
+.wy-side-nav-search input[type="text"] {
+ border-color: rgb(201, 201, 201);
+}
+
+div.section {
+ font-family: georgia, serif;
+}
+
+h1,h2,h3,h4,h5,h6 {
+ font-family: helvetica, arial, 'freesans clean', sans-serif;
+}
+
+.rst-content dl:not(.docutils) {
+ border-top: solid 1px #f8f8f8;
+}
+
+.rst-content dl:not(.docutils) dt {
+ background: #fdfdfd;
+ border-top: none;
+ color: rgb(64, 64, 64);
+ display: block;
+ margin: 1px 0;
+}
diff -ruN a/doc/juliadoc/juliadoc/theme/julia/static/julia-logo.svg b/doc/juliadoc/juliadoc/theme/julia/static/julia-logo.svg
--- a/doc/juliadoc/juliadoc/theme/julia/static/julia-logo.svg 1970-01-01 01:00:00.000000000 +0100
+++ b/doc/juliadoc/juliadoc/theme/julia/static/julia-logo.svg 2014-09-16 21:28:29.372869085 +0200
@@ -0,0 +1,62 @@
+<?xml version="1.0" encoding="utf-8"?>
+<svg version="1.1" xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
+ width="158px" height="106px" viewBox="0 0 310 216"
+ enable-background="new 0 0 310 216"
+ xml:space="preserve">
+
+<!-- blue dot -->
+<circle fill="#6b85dd" stroke="#4266d5" stroke-width="3" cx="50.5" cy="58.665" r="16.5"/>
+<!-- red dot -->
+<circle fill="#d66661" stroke="#c93d39" stroke-width="3" cx="212.459" cy="60.249" r="16.5"/>
+<!-- green dot -->
+<circle fill="#6bab5b" stroke="#3b972e" stroke-width="3" cx="233.834" cy="23.874" r="16.5"/>
+<!-- purple dot -->
+<circle fill="#aa7dc0" stroke="#945bb0" stroke-width="3" cx="255.459" cy="59.999" r="16.5"/>
+
+<!-- "j" -->
+<path fill="#252525" d="M37.216,138.427c0-15.839,0.006-31.679-0.018-47.517c-0.001-0.827,0.169-1.234,1.043-1.47
+ c7.876-2.127,15.739-4.308,23.606-6.47c1.33-0.366,1.333-0.36,1.333,1.019c0,25.758,0.015,51.517-0.012,77.274
+ c-0.006,5.514,0.245,11.032-0.272,16.543c-0.628,6.69-2.15,13.092-6.438,18.506c-3.781,4.771-8.898,7.25-14.767,8.338
+ c-6.599,1.222-13.251,1.552-19.934,0.938c-4.616-0.423-9.045-1.486-12.844-4.363c-2.863-2.168-4.454-4.935-3.745-8.603
+ c0.736-3.806,3.348-5.978,6.861-7.127c2.262-0.74,4.628-0.872,6.994-0.53c1.823,0.264,3.42,1.023,4.779,2.288
+ c1.38,1.284,2.641,2.674,3.778,4.177c0.872,1.15,1.793,2.256,2.991,3.086c2.055,1.426,4,0.965,5.213-1.216
+ c0.819-1.473,0.997-3.106,1.173-4.731c0.255-2.348,0.255-4.707,0.256-7.062C37.218,167.145,37.216,152.786,37.216,138.427z"/>
+
+<!-- "u" -->
+<path fill="#252525" d="M125.536,162.479c-2.908,2.385-5.783,4.312-8.88,5.904c-10.348,5.323-20.514,4.521-30.324-1.253
+ c-6.71-3.95-11.012-9.849-12.52-17.606c-0.236-1.213-0.363-2.438-0.363-3.688c0.01-19.797,0.017-39.593-0.02-59.39
+ c-0.002-1.102,0.285-1.357,1.363-1.351c7.798,0.049,15.597,0.044,23.396,0.003c0.95-0.005,1.177,0.25,1.175,1.183
+ c-0.027,19.356-0.025,38.713-0.018,58.07c0.002,6.34,3.599,10.934,9.672,12.42c2.13,0.521,4.19,0.396,6.173-0.6
+ c4.26-2.139,7.457-5.427,10.116-9.307c0.333-0.487,0.224-1,0.224-1.51c0.007-19.635,0.016-39.271-0.02-58.904
+ c-0.002-1.083,0.255-1.369,1.353-1.361c7.838,0.052,15.677,0.045,23.515,0.004c0.916-0.005,1.103,0.244,1.102,1.124
+ c-0.025,27.677-0.026,55.353,0.002,83.024c0.001,0.938-0.278,1.099-1.139,1.095c-7.918-0.028-15.837-0.028-23.756-0.001
+ c-0.815,0.003-1.1-0.166-1.073-1.037C125.581,167.117,125.536,164.928,125.536,162.479z"/>
+
+<!-- "l" -->
+<path fill="#252525" d="M187.423,107.08c0,20.637-0.011,41.273,0.026,61.91c0.003,1.119-0.309,1.361-1.381,1.355
+ c-7.799-0.052-15.598-0.047-23.396-0.008c-0.898,0.008-1.117-0.222-1.115-1.115c0.021-39.074,0.021-78.147,0-117.226
+ c0-0.811,0.189-1.169,1.006-1.392c7.871-2.149,15.73-4.327,23.584-6.545c1.045-0.295,1.308-0.17,1.306,0.985
+ C187.412,65.727,187.423,86.403,187.423,107.08z"/>
+
+<!-- "i" -->
+<path fill="#252525" d="M223.46,126.477c0,14.155-0.011,28.312,0.021,42.467c0.002,1.027-0.164,1.418-1.332,1.408
+ c-7.838-0.061-15.676-0.047-23.516-0.01c-0.881,0.004-1.121-0.189-1.119-1.104c0.026-26.153,0.025-52.307,0-78.458
+ c0-0.776,0.203-1.101,0.941-1.302c7.984-2.172,15.972-4.35,23.938-6.596c1.049-0.296,1.08,0.031,1.078,0.886
+ C223.454,98.004,223.46,112.239,223.46,126.477z"/>
+
+<!-- "a" -->
+<path fill="#252525" d="M277.695,163.6c-0.786,0.646-1.404,1.125-2,1.635c-4.375,3.746-9.42,5.898-15.16,6.42
+ c-5.792,0.527-11.479,0.244-16.934-2.047c-12.08-5.071-15.554-17.188-11.938-27.448c1.799-5.111,5.472-8.868,9.831-11.94
+ c5.681-4.003,12.009-6.732,18.504-9.074c5.576-2.014,11.186-3.939,16.955-5.347c0.445-0.104,0.773-0.243,0.757-0.854
+ c-0.136-4.389,0.261-8.79-0.479-13.165c-1.225-7.209-6.617-10.013-12.895-9.348c-0.516,0.055-1.029,0.129-1.536,0.241
+ c-4.877,1.081-7.312,4.413-7.374,10.127c-0.02,1.729-0.229,3.418-0.693,5.084c-0.906,3.229-2.969,5.354-6.168,6.266
+ c-3.422,0.979-6.893,0.998-10.23-0.305c-6.529-2.543-8.877-10.164-5.12-16.512c2.249-3.799,5.606-6.4,9.461-8.405
+ c6.238-3.246,12.914-4.974,19.896-5.537c7.565-0.61,15.096-0.366,22.49,1.507c4.285,1.085,8.312,2.776,11.744,5.657
+ c4.473,3.749,6.776,8.647,6.812,14.374c0.139,21.477,0.096,42.951,0.143,64.428c0.002,0.799-0.248,0.983-1.021,0.98
+ c-8.035-0.025-16.074-0.023-24.113-0.001c-0.716,0.002-0.973-0.146-0.941-0.915C277.736,167.562,277.695,165.698,277.695,163.6z
+ M277.695,126.393c-4.793,2.104-9.25,4.373-13.287,7.408c-2.151,1.618-4.033,3.483-5.732,5.581
+ c-4.229,5.226-1.988,13.343,1.693,16.599c1.592,1.406,3.359,1.906,5.419,1.521c1.621-0.307,3.149-0.857,4.549-1.734
+ c1.521-0.951,2.949-2.072,4.539-2.887c2.31-1.18,2.97-2.861,2.894-5.445C277.561,140.484,277.695,133.527,277.695,126.393z"/>
+
+</svg>
diff -ruN a/doc/juliadoc/juliadoc/theme/julia/theme.conf b/doc/juliadoc/juliadoc/theme/julia/theme.conf
--- a/doc/juliadoc/juliadoc/theme/julia/theme.conf 1970-01-01 01:00:00.000000000 +0100
+++ b/doc/juliadoc/juliadoc/theme/julia/theme.conf 2014-09-16 21:28:29.372869085 +0200
@@ -0,0 +1,3 @@
+[theme]
+inherit = sphinx_rtd_theme
+stylesheet = julia.css
diff -ruN a/doc/juliadoc/juliadoc/theme/julia/versions.html b/doc/juliadoc/juliadoc/theme/julia/versions.html
--- a/doc/juliadoc/juliadoc/theme/julia/versions.html 1970-01-01 01:00:00.000000000 +0100
+++ b/doc/juliadoc/juliadoc/theme/julia/versions.html 2014-09-16 21:28:29.372869085 +0200
@@ -0,0 +1 @@
+{# disable #}
diff -ruN a/doc/juliadoc/setup.py b/doc/juliadoc/setup.py
--- a/doc/juliadoc/setup.py 1970-01-01 01:00:00.000000000 +0100
+++ b/doc/juliadoc/setup.py 2014-09-16 21:28:29.342868770 +0200
@@ -0,0 +1,9 @@
+from distutils.core import setup
+
+setup(
+ name='JuliaDoc',
+ version='0.0.0',
+ packages=['juliadoc',],
+ license='MIT',
+ long_description=open('README.md').read(),
+)
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -13,6 +13,13 @@
import sys, os, re
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+
+juliadoc_dir = '{0}/juliadoc/'.format(os.path.abspath('.'))
+sys.path.append(juliadoc_dir)
+
import juliadoc
import sphinx_rtd_theme
--- a/doc/Makefile 2015-02-18 22:45:06.572802504 +0100
+++ b/doc/Makefile 2015-02-18 22:45:09.556835161 +0100
@@ -19,15 +19,12 @@
SPHINX_BUILD = $(JULIA_ENV)/bin/sphinx-build
$(ACTIVATE):
- $(MAKE) -C $(JULIAHOME)/deps install-virtualenv
touch -c $@
$(SPHINX_BUILD): $(ACTIVATE) requirements.txt
- . $(ACTIVATE) && pip install sphinx==1.2.3 \
- && pip install -r requirements.txt
touch -c $@
-SPHINXBUILD = . $(ACTIVATE) && sphinx-build
+SPHINXBUILD = sphinx-build
.PHONY: help clean cleanall html dirhtml singlehtml pickle json htmlhelp qthelp devhelp \
epub latex latexpdf text man changes linkcheck doctest gettext