M Makefile +4 -0
@@ 72,6 72,8 @@ cleanbutpackages:
rm -rf build mercurial/locale
$(MAKE) -C doc clean
$(MAKE) -C contrib/chg distclean
+ rm -rf rust/target
+ rm -f mercurial/rustext.so
clean: cleanbutpackages
rm -rf packages
@@ 178,6 180,7 @@ packaging_targets := \
docker-fedora20 \
docker-fedora21 \
docker-fedora28 \
+ docker-fedora29 \
docker-ubuntu-trusty \
docker-ubuntu-trusty-ppa \
docker-ubuntu-xenial \
@@ 189,6 192,7 @@ packaging_targets := \
fedora20 \
fedora21 \
fedora28 \
+ fedora29 \
linux-wheels \
linux-wheels-x86_64 \
linux-wheels-i686 \
M contrib/all-revsets.txt +15 -0
@@ 139,3 139,18 @@ secret()
# test finding common ancestors
heads(commonancestors(last(head(), 2)))
heads(commonancestors(head()))
+
+# more heads testing
+heads(all())
+heads(-10000:-1)
+(-5000:-1000) and heads(-10000:-1)
+heads(matching(tip, "author"))
+heads(matching(tip, "author")) and -10000:-1
+(-10000:-1) and heads(matching(tip, "author"))
+# more roots testing
+roots(all())
+roots(-10000:-1)
+(-5000:-1000) and roots(-10000:-1)
+roots(matching(tip, "author"))
+roots(matching(tip, "author")) and -10000:-1
+(-10000:-1) and roots(matching(tip, "author"))
M contrib/catapipe.py +9 -1
@@ 6,6 6,14 @@
# GNU General Public License version 2 or any later version.
"""Tool read primitive events from a pipe to produce a catapult trace.
+Usage:
+ Terminal 1: $ catapipe.py /tmp/mypipe /tmp/trace.json
+ Terminal 2: $ HGCATAPULTSERVERPIPE=/tmp/mypipe hg root
+ <ctrl-c catapipe.py in Terminal 1>
+ $ catapult/tracing/bin/trace2html /tmp/trace.json # produce /tmp/trace.html
+ <open trace.html in your browser of choice; the WASD keys are very useful>
+ (catapult is located at https://github.com/catapult-project/catapult)
+
For now the event stream supports
START $SESSIONID ...
@@ 24,7 32,7 @@ run-tests.py.
Typically you'll want to place the path to the named pipe in the
HGCATAPULTSERVERPIPE environment variable, which both run-tests and hg
-understand.
+understand. To trace *only* run-tests, use HGTESTCATAPULTSERVERPIPE instead.
"""
from __future__ import absolute_import, print_function
M contrib/check-commit +1 -1
@@ 34,7 34,7 @@ errors = [
(commitheader + r"(?!merge with )[^#]\S+[^:] ",
"summary line doesn't start with 'topic: '"),
(afterheader + r"[A-Z][a-z]\S+", "don't capitalize summary lines"),
- (afterheader + r"[^\n]*: *[A-Z][a-z]\S+", "don't capitalize summary lines"),
+ (afterheader + r"^\S+: *[A-Z][a-z]\S+", "don't capitalize summary lines"),
(afterheader + r"\S*[^A-Za-z0-9-_]\S*: ",
"summary keyword should be most user-relevant one-word command or topic"),
(afterheader + r".*\.\s*\n", "don't add trailing period on summary line"),
M contrib/clang-format-ignorelist +0 -1
@@ 3,7 3,6 @@
mercurial/cext/dirs.c
mercurial/cext/manifest.c
mercurial/cext/osutil.c
-mercurial/cext/revlog.c
# Vendored code that we should never format:
contrib/python-zstandard/c-ext/bufferutil.c
contrib/python-zstandard/c-ext/compressionchunker.c
A => contrib/discovery-helper.sh +64 -0
@@ 0,0 1,64 @@
+#!/bin/bash
+#
+# produces two repositories with different common and missing subsets
+#
+# $ discovery-helper.sh REPO NBHEADS DEPTH
+#
+# The Goal is to produce two repositories with some common part and some
+# exclusive part on each side. Provide a source repository REPO, it will
+# produce two repositories REPO-left and REPO-right.
+#
+# Each repository will be missing some revisions exclusive to NBHEADS of the
+# repo topological heads. These heads and revisions exclusive to them (up to
+# DEPTH depth) are stripped.
+#
+# The "left" repository will use the NBHEADS first heads (sorted by
+# description). The "right" repository will use the last NBHEADS heads.
+#
+# To find out how many topological heads a repo has, use:
+#
+# $ hg heads -t -T '{rev}\n' | wc -l
+#
+# Example:
+#
+# The `pypy-2018-09-01` repository has 192 heads. To produce two repositories
+# with 92 common heads and ~50 exclusive heads on each side.
+#
+# $ ./discovery-helper.sh pypy-2018-09-01 50 10
+
+set -euo pipefail
+
+if [ $# -lt 3 ]; then
+ echo "usage: `basename $0` REPO NBHEADS DEPTH"
+ exit 64
+fi
+
+repo="$1"
+shift
+
+nbheads="$1"
+shift
+
+depth="$1"
+shift
+
+leftrepo="${repo}-left"
+rightrepo="${repo}-right"
+
+left="first(sort(heads(all()), 'desc'), $nbheads)"
+right="last(sort(heads(all()), 'desc'), $nbheads)"
+
+leftsubset="ancestors($left, $depth) and only($left, heads(all() - $left))"
+rightsubset="ancestors($right, $depth) and only($right, heads(all() - $right))"
+
+echo '### building left repository:' "${leftrepo}"
+echo '# cloning'
+hg clone --noupdate "${repo}" "${leftrepo}"
+echo '# stripping' '"'${leftsubset}'"'
+hg -R "${leftrepo}" --config extensions.strip= strip --rev "$leftsubset" --no-backup
+
+echo '### building right repository:' "${rightrepo}"
+echo '# cloning'
+hg clone --noupdate "${repo}" "${rightrepo}"
+echo '# stripping:' '"'${rightsubset}'"'
+hg -R "${rightrepo}" --config extensions.strip= strip --rev "$rightsubset" --no-backup
M contrib/fuzz/Makefile +49 -17
@@ 4,7 4,7 @@ CXX = clang++
all: bdiff mpatch xdiff
fuzzutil.o: fuzzutil.cc fuzzutil.h
- $(CXX) $(CXXFLAGS) -g -O1 -fsanitize=fuzzer-no-link,address \
+ $(CXX) $(CXXFLAGS) -g -O1 \
-std=c++17 \
-I../../mercurial -c -o fuzzutil.o fuzzutil.cc
@@ 12,6 12,11 @@ fuzzutil-oss-fuzz.o: fuzzutil.cc fuzzuti
$(CXX) $(CXXFLAGS) -std=c++17 \
-I../../mercurial -c -o fuzzutil-oss-fuzz.o fuzzutil.cc
+pyutil.o: pyutil.cc pyutil.h
+ $(CXX) $(CXXFLAGS) -g -O1 \
+ `$$OUT/sanpy/bin/python-config --cflags` \
+ -I../../mercurial -c -o pyutil.o pyutil.cc
+
bdiff.o: ../../mercurial/bdiff.c
$(CC) $(CFLAGS) -fsanitize=fuzzer-no-link,address -c -o bdiff.o \
../../mercurial/bdiff.c
@@ 70,59 75,86 @@ xdiff_fuzzer: xdiff.cc fuzz-xdiffi.o fuz
fuzz-xdiffi.o fuzz-xprepare.o fuzz-xutils.o fuzzutil-oss-fuzz.o \
-lFuzzingEngine -o $$OUT/xdiff_fuzzer
-# TODO use the $OUT env var instead of hardcoding /out
-/out/sanpy/bin/python:
- cd /Python-2.7.15/ && ./configure --without-pymalloc --prefix=$$OUT/sanpy CFLAGS='-O1 -fno-omit-frame-pointer -g -fwrapv -fstack-protector-strong' LDFLAGS=-lasan && ASAN_OPTIONS=detect_leaks=0 make && make install
-
-sanpy: /out/sanpy/bin/python
-
-manifest.o: sanpy ../../mercurial/cext/manifest.c
+manifest.o: ../../mercurial/cext/manifest.c
$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-I../../mercurial \
-c -o manifest.o ../../mercurial/cext/manifest.c
-charencode.o: sanpy ../../mercurial/cext/charencode.c
+charencode.o: ../../mercurial/cext/charencode.c
$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-I../../mercurial \
-c -o charencode.o ../../mercurial/cext/charencode.c
-parsers.o: sanpy ../../mercurial/cext/parsers.c
+parsers.o: ../../mercurial/cext/parsers.c
$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-I../../mercurial \
-c -o parsers.o ../../mercurial/cext/parsers.c
-dirs.o: sanpy ../../mercurial/cext/dirs.c
+dirs.o: ../../mercurial/cext/dirs.c
$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-I../../mercurial \
-c -o dirs.o ../../mercurial/cext/dirs.c
-pathencode.o: sanpy ../../mercurial/cext/pathencode.c
+pathencode.o: ../../mercurial/cext/pathencode.c
$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-I../../mercurial \
-c -o pathencode.o ../../mercurial/cext/pathencode.c
-revlog.o: sanpy ../../mercurial/cext/revlog.c
+revlog.o: ../../mercurial/cext/revlog.c
$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-I../../mercurial \
-c -o revlog.o ../../mercurial/cext/revlog.c
-manifest_fuzzer: sanpy manifest.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o
+manifest_fuzzer: manifest.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
$(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-Wno-register -Wno-macro-redefined \
-I../../mercurial manifest.cc \
- manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o \
+ manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
-lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
-o $$OUT/manifest_fuzzer
manifest_corpus.zip:
python manifest_corpus.py $$OUT/manifest_fuzzer_seed_corpus.zip
+revlog_fuzzer: revlog.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
+ $(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+ -Wno-register -Wno-macro-redefined \
+ -I../../mercurial revlog.cc \
+ manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
+ -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+ -o $$OUT/revlog_fuzzer
+
+revlog_corpus.zip:
+ python revlog_corpus.py $$OUT/revlog_fuzzer_seed_corpus.zip
+
+dirstate_fuzzer: dirstate.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
+ $(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+ -Wno-register -Wno-macro-redefined \
+ -I../../mercurial dirstate.cc \
+ manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
+ -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+ -o $$OUT/dirstate_fuzzer
+
+dirstate_corpus.zip:
+ python dirstate_corpus.py $$OUT/dirstate_fuzzer_seed_corpus.zip
+
+fm1readmarkers_fuzzer: fm1readmarkers.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
+ $(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+ -Wno-register -Wno-macro-redefined \
+ -I../../mercurial fm1readmarkers.cc \
+ manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
+ -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+ -o $$OUT/fm1readmarkers_fuzzer
+
+fm1readmarkers_corpus.zip:
+ python fm1readmarkers_corpus.py $$OUT/fm1readmarkers_fuzzer_seed_corpus.zip
+
clean:
$(RM) *.o *_fuzzer \
bdiff \
mpatch \
xdiff
-oss-fuzz: bdiff_fuzzer mpatch_fuzzer mpatch_corpus.zip xdiff_fuzzer manifest_fuzzer manifest_corpus.zip
+oss-fuzz: bdiff_fuzzer mpatch_fuzzer mpatch_corpus.zip xdiff_fuzzer manifest_fuzzer manifest_corpus.zip revlog_fuzzer revlog_corpus.zip dirstate_fuzzer dirstate_corpus.zip fm1readmarkers_fuzzer fm1readmarkers_corpus.zip
-.PHONY: all clean oss-fuzz sanpy
+.PHONY: all clean oss-fuzz
A => contrib/fuzz/dirstate.cc +48 -0
@@ 0,0 1,48 @@
+#include <Python.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <string>
+
+#include "pyutil.h"
+
+extern "C" {
+
+static PyCodeObject *code;
+
+extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
+{
+ contrib::initpy(*argv[0]);
+ code = (PyCodeObject *)Py_CompileString(R"py(
+from parsers import parse_dirstate
+try:
+ dmap = {}
+ copymap = {}
+ p = parse_dirstate(dmap, copymap, data)
+except Exception as e:
+ pass
+ # uncomment this print if you're editing this Python code
+ # to debug failures.
+ # print e
+)py",
+ "fuzzer", Py_file_input);
+ return 0;
+}
+
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
+{
+ PyObject *text =
+ PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
+ PyObject *locals = PyDict_New();
+ PyDict_SetItemString(locals, "data", text);
+ PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
+ if (!res) {
+ PyErr_Print();
+ }
+ Py_XDECREF(res);
+ Py_DECREF(locals);
+ Py_DECREF(text);
+ return 0; // Non-zero return values are reserved for future use.
+}
+}
A => contrib/fuzz/dirstate_corpus.py +18 -0
@@ 0,0 1,18 @@
+from __future__ import absolute_import, print_function
+
+import argparse
+import os
+import zipfile
+
+ap = argparse.ArgumentParser()
+ap.add_argument("out", metavar="some.zip", type=str, nargs=1)
+args = ap.parse_args()
+
+reporoot = os.path.normpath(os.path.join(os.path.dirname(__file__),
+ '..', '..'))
+dirstate = os.path.join(reporoot, '.hg', 'dirstate')
+
+with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
+ if os.path.exists(dirstate):
+ with open(dirstate) as f:
+ zf.writestr("dirstate", f.read())
A => contrib/fuzz/fm1readmarkers.cc +60 -0
@@ 0,0 1,60 @@
+#include <Python.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <string>
+
+#include "pyutil.h"
+
+extern "C" {
+
+static PyCodeObject *code;
+
+extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
+{
+ contrib::initpy(*argv[0]);
+ code = (PyCodeObject *)Py_CompileString(R"py(
+from parsers import fm1readmarkers
+def maybeint(s, default):
+ try:
+ return int(s)
+ except ValueError:
+ return default
+try:
+ parts = data.split('\0', 2)
+ if len(parts) == 3:
+ offset, stop, data = parts
+ elif len(parts) == 2:
+ stop, data = parts
+ offset = 0
+ else:
+ offset = stop = 0
+ offset, stop = maybeint(offset, 0), maybeint(stop, len(data))
+ fm1readmarkers(data, offset, stop)
+except Exception as e:
+ pass
+ # uncomment this print if you're editing this Python code
+ # to debug failures.
+ # print e
+)py",
+ "fuzzer", Py_file_input);
+ return 0;
+}
+
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
+{
+ PyObject *text =
+ PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
+ PyObject *locals = PyDict_New();
+ PyDict_SetItemString(locals, "data", text);
+ PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
+ if (!res) {
+ PyErr_Print();
+ }
+ Py_XDECREF(res);
+ Py_DECREF(locals);
+ Py_DECREF(text);
+ return 0; // Non-zero return values are reserved for future use.
+}
+}
A => contrib/fuzz/fm1readmarkers_corpus.py +36 -0
@@ 0,0 1,36 @@
+from __future__ import absolute_import, print_function
+
+import argparse
+import zipfile
+
+ap = argparse.ArgumentParser()
+ap.add_argument("out", metavar="some.zip", type=str, nargs=1)
+args = ap.parse_args()
+
+with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
+ zf.writestr(
+ 'smallish_obsstore',
+ (
+ # header: fm1readmarkers should start at offset 1, and
+ # read until byte 597.
+ '1\x00597\x00'
+ # body of obsstore file
+ '\x01\x00\x00\x00vA\xd7\x02+C\x1a<)\x01,\x00\x00\x01\x03\x03\xe6'
+ '\x92\xde)x\x16\xd1Xph\xc7\xa7[\xe5\xe2\x1a\xab\x1e6e\xaf\xc2\xae'
+ '\xe7\xbc\x83\xe1\x88\xa5\xda\xce>O\xbd\x04\xe9\x03\xc4o\xeb\x03'
+ '\x01\t\x05\x04\x1fef18operationamenduserAugie Fackler <raf@duri'
+ 'n42.com>\x00\x00\x00vA\xd7\x02-\x8aD\xaf-\x01,\x00\x00\x01\x03\x03'
+ '\x17*\xca\x8f\x9e}i\xe0i\xbb\xdf\x9fb\x03\xd2XG?\xd3h\x98\x89\x1a'
+ '=2\xeb\xc3\xc5<\xb3\x9e\xcc\x0e;#\xee\xc3\x10ux\x03\x01\t\x05\x04'
+ '\x1fef18operationamenduserAugie Fackler <raf@durin42.com>\x00\x00'
+ '\x00vA\xd7\x02Mn\xd9%\xea\x01,\x00\x00\x01\x03\x03\x98\x89\x1a='
+ '2\xeb\xc3\xc5<\xb3\x9e\xcc\x0e;#\xee\xc3\x10ux\xe0*\xcaT\x86Z8J'
+ '\x85)\x97\xff7\xcc)\xc1\x7f\x19\x0c\x01\x03\x01\t\x05\x04\x1fef'
+ '18operationamenduserAugie Fackler <raf@durin42.com>\x00\x00\x00'
+ 'yA\xd7\x02MtA\xbfj\x01,\x00\x00\x01\x03\x03\xe0*\xcaT\x86Z8J\x85'
+ ')\x97\xff7\xcc)\xc1\x7f\x19\x0c\x01\x00\x94\x01\xa9\n\xf80\x92\xa3'
+ 'j\xc5X\xb1\xc9:\xd51\xb8*\xa9\x03\x01\t\x08\x04\x1fef11operatio'
+ 'nhistedituserAugie Fackler <raf@durin42.com>\x00\x00\x00yA\xd7\x02'
+ 'MtA\xd4\xe1\x01,\x00\x00\x01\x03\x03"\xa5\xcb\x86\xb6\xf4\xbaO\xa0'
+ 'sH\xe7?\xcb\x9b\xc2n\xcfI\x9e\x14\xf0D\xf0!\x18DN\xcd\x97\x016\xa5'
+ '\xef\xa06\xcb\x884\x8a\x03\x01\t\x08\x04\x1fef14operationhisted'))
M contrib/fuzz/manifest.cc +4 -32
@@ 3,43 3,17 @@
#include <stdlib.h>
#include <unistd.h>
+#include "pyutil.h"
+
#include <string>
extern "C" {
-/* TODO: use Python 3 for this fuzzing? */
-PyMODINIT_FUNC initparsers(void);
-
-static char cpypath[8192] = "\0";
-
static PyCodeObject *code;
-static PyObject *mainmod;
-static PyObject *globals;
extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
{
- const std::string subdir = "/sanpy/lib/python2.7";
- /* HACK ALERT: we need a full Python installation built without
- pymalloc and with ASAN, so we dump one in
- $OUT/sanpy/lib/python2.7. This helps us wire that up. */
- std::string selfpath(*argv[0]);
- std::string pypath;
- auto pos = selfpath.rfind("/");
- if (pos == std::string::npos) {
- char wd[8192];
- getcwd(wd, 8192);
- pypath = std::string(wd) + subdir;
- } else {
- pypath = selfpath.substr(0, pos) + subdir;
- }
- strncpy(cpypath, pypath.c_str(), pypath.size());
- setenv("PYTHONPATH", cpypath, 1);
- setenv("PYTHONNOUSERSITE", "1", 1);
- /* prevent Python from looking up users in the fuzz environment */
- setenv("PYTHONUSERBASE", cpypath, 1);
- Py_SetPythonHome(cpypath);
- Py_InitializeEx(0);
- initparsers();
+ contrib::initpy(*argv[0]);
code = (PyCodeObject *)Py_CompileString(R"py(
from parsers import lazymanifest
try:
@@ 60,8 34,6 @@ except Exception as e:
# print e
)py",
"fuzzer", Py_file_input);
- mainmod = PyImport_AddModule("__main__");
- globals = PyModule_GetDict(mainmod);
return 0;
}
@@ 71,7 43,7 @@ int LLVMFuzzerTestOneInput(const uint8_t
PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
PyObject *locals = PyDict_New();
PyDict_SetItemString(locals, "mdata", mtext);
- PyObject *res = PyEval_EvalCode(code, globals, locals);
+ PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
if (!res) {
PyErr_Print();
}
A => contrib/fuzz/pyutil.cc +49 -0
@@ 0,0 1,49 @@
+#include "pyutil.h"
+
+#include <string>
+
+namespace contrib
+{
+
+static char cpypath[8192] = "\0";
+
+static PyObject *mainmod;
+static PyObject *globals;
+
+/* TODO: use Python 3 for this fuzzing? */
+PyMODINIT_FUNC initparsers(void);
+
+void initpy(const char *cselfpath)
+{
+ const std::string subdir = "/sanpy/lib/python2.7";
+ /* HACK ALERT: we need a full Python installation built without
+ pymalloc and with ASAN, so we dump one in
+ $OUT/sanpy/lib/python2.7. This helps us wire that up. */
+ std::string selfpath(cselfpath);
+ std::string pypath;
+ auto pos = selfpath.rfind("/");
+ if (pos == std::string::npos) {
+ char wd[8192];
+ getcwd(wd, 8192);
+ pypath = std::string(wd) + subdir;
+ } else {
+ pypath = selfpath.substr(0, pos) + subdir;
+ }
+ strncpy(cpypath, pypath.c_str(), pypath.size());
+ setenv("PYTHONPATH", cpypath, 1);
+ setenv("PYTHONNOUSERSITE", "1", 1);
+ /* prevent Python from looking up users in the fuzz environment */
+ setenv("PYTHONUSERBASE", cpypath, 1);
+ Py_SetPythonHome(cpypath);
+ Py_InitializeEx(0);
+ mainmod = PyImport_AddModule("__main__");
+ globals = PyModule_GetDict(mainmod);
+ initparsers();
+}
+
+PyObject *pyglobals()
+{
+ return globals;
+}
+
+} // namespace contrib
A => contrib/fuzz/pyutil.h +9 -0
@@ 0,0 1,9 @@
+#include <Python.h>
+
+namespace contrib
+{
+
+void initpy(const char *cselfpath);
+PyObject *pyglobals();
+
+} /* namespace contrib */
A => contrib/fuzz/revlog.cc +47 -0
@@ 0,0 1,47 @@
+#include <Python.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <string>
+
+#include "pyutil.h"
+
+extern "C" {
+
+static PyCodeObject *code;
+
+extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
+{
+ contrib::initpy(*argv[0]);
+ code = (PyCodeObject *)Py_CompileString(R"py(
+from parsers import parse_index2
+for inline in (True, False):
+ try:
+ index, cache = parse_index2(data, inline)
+ except Exception as e:
+ pass
+ # uncomment this print if you're editing this Python code
+ # to debug failures.
+ # print e
+)py",
+ "fuzzer", Py_file_input);
+ return 0;
+}
+
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
+{
+ PyObject *text =
+ PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
+ PyObject *locals = PyDict_New();
+ PyDict_SetItemString(locals, "data", text);
+ PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
+ if (!res) {
+ PyErr_Print();
+ }
+ Py_XDECREF(res);
+ Py_DECREF(locals);
+ Py_DECREF(text);
+ return 0; // Non-zero return values are reserved for future use.
+}
+}
A => contrib/fuzz/revlog_corpus.py +28 -0
@@ 0,0 1,28 @@
+from __future__ import absolute_import, print_function
+
+import argparse
+import os
+import zipfile
+
+ap = argparse.ArgumentParser()
+ap.add_argument("out", metavar="some.zip", type=str, nargs=1)
+args = ap.parse_args()
+
+reporoot = os.path.normpath(os.path.join(os.path.dirname(__file__),
+ '..', '..'))
+# typically a standalone index
+changelog = os.path.join(reporoot, '.hg', 'store', '00changelog.i')
+# an inline revlog with only a few revisions
+contributing = os.path.join(
+ reporoot, '.hg', 'store', 'data', 'contrib', 'fuzz', 'mpatch.cc.i')
+
+print(changelog, os.path.exists(changelog))
+print(contributing, os.path.exists(contributing))
+
+with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
+ if os.path.exists(changelog):
+ with open(changelog) as f:
+ zf.writestr("00changelog.i", f.read())
+ if os.path.exists(contributing):
+ with open(contributing) as f:
+ zf.writestr("contributing.i", f.read())
M contrib/fuzz/xdiff.cc +5 -0
@@ 22,6 22,11 @@ int hunk_consumer(long a1, long a2, long
int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
{
+ // Don't allow fuzzer inputs larger than 100k, since we'll just bog
+ // down and not accomplish much.
+ if (Size > 100000) {
+ return 0;
+ }
auto maybe_inputs = SplitInputs(Data, Size);
if (!maybe_inputs) {
return 0;
M contrib/hgclient.py +15 -2
@@ 27,12 27,18 @@ else:
stringio = cStringIO.StringIO
bprint = print
-def connectpipe(path=None):
+def connectpipe(path=None, extraargs=()):
cmdline = [b'hg', b'serve', b'--cmdserver', b'pipe']
if path:
cmdline += [b'-R', path]
+ cmdline.extend(extraargs)
- server = subprocess.Popen(cmdline, stdin=subprocess.PIPE,
+ def tonative(cmdline):
+ if os.name != r'nt':
+ return cmdline
+ return [arg.decode("utf-8") for arg in cmdline]
+
+ server = subprocess.Popen(tonative(cmdline), stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
return server
@@ 114,6 120,8 @@ def runcommand(server, args, output=stdo
writeblock(server, input.read(data))
elif ch == b'L':
writeblock(server, input.readline(data))
+ elif ch == b'm':
+ bprint(b"message: %r" % data)
elif ch == b'r':
ret, = struct.unpack('>i', data)
if ret != 0:
@@ 132,3 140,8 @@ def check(func, connect=connectpipe):
finally:
server.stdin.close()
server.wait()
+
+def checkwith(connect=connectpipe, **kwargs):
+ def wrap(func):
+ return check(func, lambda: connect(**kwargs))
+ return wrap
M contrib/import-checker.py +8 -6
@@ 40,8 40,6 @@ allowsymbolimports = (
# third-party imports should be directly imported
'mercurial.thirdparty',
'mercurial.thirdparty.attr',
- 'mercurial.thirdparty.cbor',
- 'mercurial.thirdparty.cbor.cbor2',
'mercurial.thirdparty.zope',
'mercurial.thirdparty.zope.interface',
)
@@ 260,10 258,12 @@ def list_stdlib_modules():
break
else:
stdlib_prefixes.add(dirname)
+ sourceroot = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
for libpath in sys.path:
- # We want to walk everything in sys.path that starts with
- # something in stdlib_prefixes.
- if not any(libpath.startswith(p) for p in stdlib_prefixes):
+ # We want to walk everything in sys.path that starts with something in
+ # stdlib_prefixes, but not directories from the hg sources.
+ if (os.path.abspath(libpath).startswith(sourceroot)
+ or not any(libpath.startswith(p) for p in stdlib_prefixes)):
continue
for top, dirs, files in os.walk(libpath):
for i, d in reversed(list(enumerate(dirs))):
@@ 674,6 674,8 @@ def embedded(f, modname, src):
# "starts" is "line number" (1-origin), but embedded() is
# expected to return "line offset" (0-origin). Therefore, this
# yields "starts - 1".
+ if not isinstance(modname, str):
+ modname = modname.decode('utf8')
yield code, "%s[%d]" % (modname, starts), name, starts - 1
def sources(f, modname):
@@ 694,7 696,7 @@ def sources(f, modname):
if py or f.endswith('.t'):
with open(f, 'rb') as src:
for script, modname, t, line in embedded(f, modname, src):
- yield script, modname, t, line
+ yield script, modname.encode('utf8'), t, line
def main(argv):
if len(argv) < 2 or (argv[1] == '-' and len(argv) > 2):
M contrib/packaging/Makefile +2 -1
@@ 14,7 14,8 @@ UBUNTU_CODENAMES := \
FEDORA_RELEASES := \
20 \
21 \
- 28
+ 28 \
+ 29
CENTOS_RELEASES := \
5 \
M contrib/packaging/docker/centos5 +2 -2
@@ 1,7 1,7 @@
FROM centos:centos5
-RUN groupadd -g 1000 build && \
- useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build
+RUN groupadd -g %GID% build && \
+ useradd -u %UID% -g %GID% -s /bin/bash -d /build -m build
RUN \
sed -i 's/^mirrorlist/#mirrorlist/' /etc/yum.repos.d/*.repo && \
M contrib/packaging/docker/centos6 +2 -2
@@ 1,7 1,7 @@
FROM centos:centos6
-RUN groupadd -g 1000 build && \
- useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build
+RUN groupadd -g %GID% build && \
+ useradd -u %UID% -g %GID% -s /bin/bash -d /build -m build
RUN yum install -y \
gcc \
M contrib/packaging/docker/centos7 +2 -2
@@ 1,7 1,7 @@
FROM centos:centos7
-RUN groupadd -g 1000 build && \
- useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build
+RUN groupadd -g %GID% build && \
+ useradd -u %UID% -g %GID% -s /bin/bash -d /build -m build
RUN yum install -y \
gcc \
M contrib/packaging/docker/fedora28 => contrib/packaging/docker/fedora29 +1 -1
@@ 1,4 1,4 @@
-FROM fedora:28
+FROM fedora:29
RUN groupadd -g 1000 build && \
useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build
M contrib/packaging/dockerrpm +9 -1
@@ 10,7 10,15 @@ DOCKER=$($BUILDDIR/hg-docker docker-path
CONTAINER=hg-docker-$PLATFORM
-$BUILDDIR/hg-docker build $BUILDDIR/docker/$PLATFORM $CONTAINER
+if [[ -z "${HG_DOCKER_OWN_USER}" ]]; then
+ DOCKERUID=1000
+ DOCKERGID=1000
+else
+ DOCKERUID=$(id -u)
+ DOCKERGID=$(id -g)
+fi
+
+$BUILDDIR/hg-docker build --build-arg UID=$DOCKERUID --build-arg GID=$DOCKERGID $BUILDDIR/docker/$PLATFORM $CONTAINER
RPMBUILDDIR=$ROOTDIR/packages/$PLATFORM
$ROOTDIR/contrib/packaging/buildrpm --rpmbuilddir $RPMBUILDDIR --prepare $*
M contrib/packaging/hg-docker +7 -2
@@ 47,7 47,7 @@ def get_dockerfile(path: pathlib.Path, a
df = fh.read()
for k, v in args:
- df = df.replace(b'%%%s%%' % k, v)
+ df = df.replace(bytes('%%%s%%' % k.decode(), 'utf-8'), v)
return df
@@ 72,7 72,12 @@ def build_docker_image(dockerfile: pathl
]
print('executing: %r' % args)
- subprocess.run(args, input=dockerfile, check=True)
+ p = subprocess.Popen(args, stdin=subprocess.PIPE)
+ p.communicate(input=dockerfile)
+ if p.returncode:
+        raise subprocess.CalledProcessError(
+ p.returncode, 'failed to build docker image: %s %s' \
+ % (p.stdout, p.stderr))
def command_build(args):
build_args = []
A => contrib/perf-utils/perf-revlog-write-plot.py +127 -0
@@ 0,0 1,127 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 Paul Morelle <Paul.Morelle@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+#
+# This script use the output of `hg perfrevlogwrite -T json --details` to draw
+# various plot related to write performance in a revlog
+#
+# usage: perf-revlog-write-plot.py details.json
+from __future__ import absolute_import, print_function
+import json
+import re
+
+import numpy as np
+import scipy.signal
+
+from matplotlib import (
+ pyplot as plt,
+ ticker as mticker,
+)
+
+
+def plot(data, title=None):
+ items = {}
+ re_title = re.compile(r'^revisions #\d+ of \d+, rev (\d+)$')
+ for item in data:
+ m = re_title.match(item['title'])
+ if m is None:
+ continue
+
+ rev = int(m.group(1))
+ items[rev] = item
+
+ min_rev = min(items.keys())
+ max_rev = max(items.keys())
+ ary = np.empty((2, max_rev - min_rev + 1))
+ for rev, item in items.items():
+ ary[0][rev - min_rev] = rev
+ ary[1][rev - min_rev] = item['wall']
+
+ fig = plt.figure()
+ comb_plt = fig.add_subplot(211)
+ other_plt = fig.add_subplot(212)
+
+ comb_plt.plot(ary[0],
+ np.cumsum(ary[1]),
+ color='red',
+ linewidth=1,
+ label='comb')
+
+ plots = []
+ p = other_plt.plot(ary[0],
+ ary[1],
+ color='red',
+ linewidth=1,
+ label='wall')
+ plots.append(p)
+
+ colors = {
+ 10: ('green', 'xkcd:grass green'),
+ 100: ('blue', 'xkcd:bright blue'),
+ 1000: ('purple', 'xkcd:dark pink'),
+ }
+ for n, color in colors.items():
+ avg_n = np.convolve(ary[1], np.full(n, 1. / n), 'valid')
+ p = other_plt.plot(ary[0][n - 1:],
+ avg_n,
+ color=color[0],
+ linewidth=1,
+ label='avg time last %d' % n)
+ plots.append(p)
+
+ med_n = scipy.signal.medfilt(ary[1], n + 1)
+ p = other_plt.plot(ary[0],
+ med_n,
+ color=color[1],
+ linewidth=1,
+ label='median time last %d' % n)
+ plots.append(p)
+
+ formatter = mticker.ScalarFormatter()
+ formatter.set_scientific(False)
+ formatter.set_useOffset(False)
+
+ comb_plt.grid()
+ comb_plt.xaxis.set_major_formatter(formatter)
+ comb_plt.legend()
+
+ other_plt.grid()
+ other_plt.xaxis.set_major_formatter(formatter)
+ leg = other_plt.legend()
+ leg2plot = {}
+ for legline, plot in zip(leg.get_lines(), plots):
+ legline.set_picker(5)
+ leg2plot[legline] = plot
+
+ def onpick(event):
+ legline = event.artist
+ plot = leg2plot[legline]
+ visible = not plot[0].get_visible()
+ for l in plot:
+ l.set_visible(visible)
+
+ if visible:
+ legline.set_alpha(1.0)
+ else:
+ legline.set_alpha(0.2)
+ fig.canvas.draw()
+ if title is not None:
+ fig.canvas.set_window_title(title)
+ fig.canvas.mpl_connect('pick_event', onpick)
+
+ plt.show()
+
+
+if __name__ == '__main__':
+ import sys
+
+ if len(sys.argv) > 1:
+ print('reading from %r' % sys.argv[1])
+ with open(sys.argv[1], 'r') as fp:
+ plot(json.load(fp), title=sys.argv[1])
+ else:
+ print('reading from stdin')
+ plot(json.load(sys.stdin))
M contrib/perf.py +607 -58
@@ 24,8 24,10 @@ import functools
import gc
import os
import random
+import shutil
import struct
import sys
+import tempfile
import threading
import time
from mercurial import (
@@ 35,6 37,7 @@ from mercurial import (
copies,
error,
extensions,
+ hg,
mdiff,
merge,
revlog,
@@ 65,6 68,11 @@ try:
from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
pass
+try:
+ from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
+except ImportError:
+ pass
+
def identity(a):
return a
@@ 273,7 281,9 @@ def gettimer(ui, opts=None):
displayall = ui.configbool(b"perf", b"all-timing", False)
return functools.partial(_timer, fm, displayall=displayall), fm
-def stub_timer(fm, func, title=None):
+def stub_timer(fm, func, setup=None, title=None):
+ if setup is not None:
+ setup()
func()
@contextlib.contextmanager
@@ 287,12 297,14 @@ def timeone():
a, b = ostart, ostop
r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
-def _timer(fm, func, title=None, displayall=False):
+def _timer(fm, func, setup=None, title=None, displayall=False):
gc.collect()
results = []
begin = util.timer()
count = 0
while True:
+ if setup is not None:
+ setup()
with timeone() as item:
r = func()
count += 1
@@ 453,11 465,19 @@ def repocleartagscachefunc(repo):
# utilities to clear cache
-def clearfilecache(repo, attrname):
- unfi = repo.unfiltered()
- if attrname in vars(unfi):
- delattr(unfi, attrname)
- unfi._filecache.pop(attrname, None)
+def clearfilecache(obj, attrname):
+ unfiltered = getattr(obj, 'unfiltered', None)
+ if unfiltered is not None:
+ obj = obj.unfiltered()
+ if attrname in vars(obj):
+ delattr(obj, attrname)
+ obj._filecache.pop(attrname, None)
+
+def clearchangelog(repo):
+ if repo is not repo.unfiltered():
+ object.__setattr__(repo, r'_clcachekey', None)
+ object.__setattr__(repo, r'_clcache', None)
+ clearfilecache(repo.unfiltered(), 'changelog')
# perf commands
@@ 524,23 544,23 @@ def perfheads(ui, repo, **opts):
timer(d)
fm.end()
-@command(b'perftags', formatteropts)
+@command(b'perftags', formatteropts+
+ [
+ (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
+ ])
def perftags(ui, repo, **opts):
- import mercurial.changelog
- import mercurial.manifest
-
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
- svfs = getsvfs(repo)
repocleartagscache = repocleartagscachefunc(repo)
+ clearrevlogs = opts[b'clear_revlogs']
+ def s():
+ if clearrevlogs:
+ clearchangelog(repo)
+ clearfilecache(repo.unfiltered(), 'manifest')
+ repocleartagscache()
def t():
- repo.changelog = mercurial.changelog.changelog(svfs)
- rootmanifest = mercurial.manifest.manifestrevlog(svfs)
- repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
- rootmanifest)
- repocleartagscache()
return len(repo.tags())
- timer(t)
+ timer(t, setup=s)
fm.end()
@command(b'perfancestors', formatteropts)
@@ 567,15 587,38 @@ def perfancestorset(ui, repo, revset, **
timer(d)
fm.end()
-@command(b'perfbookmarks', formatteropts)
+@command(b'perfdiscovery', formatteropts, b'PATH')
+def perfdiscovery(ui, repo, path, **opts):
+ """benchmark discovery between local repo and the peer at given path
+ """
+ repos = [repo, None]
+ timer, fm = gettimer(ui, opts)
+ path = ui.expandpath(path)
+
+ def s():
+ repos[1] = hg.peer(ui, opts, path)
+ def d():
+ setdiscovery.findcommonheads(ui, *repos)
+ timer(d, setup=s)
+ fm.end()
+
+@command(b'perfbookmarks', formatteropts +
+ [
+ (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
+ ])
def perfbookmarks(ui, repo, **opts):
"""benchmark parsing bookmarks from disk to memory"""
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
- def d():
+
+ clearrevlogs = opts[b'clear_revlogs']
+ def s():
+ if clearrevlogs:
+ clearchangelog(repo)
clearfilecache(repo, b'_bookmarks')
+ def d():
repo._bookmarks
- timer(d)
+ timer(d, setup=s)
fm.end()
@command(b'perfbundleread', formatteropts, b'BUNDLE')
@@ 697,9 740,9 @@ def perfbundleread(ui, repo, bundlepath,
fm.end()
@command(b'perfchangegroupchangelog', formatteropts +
- [(b'', b'version', b'02', b'changegroup version'),
+ [(b'', b'cgversion', b'02', b'changegroup version'),
(b'r', b'rev', b'', b'revisions to add to changegroup')])
-def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
+def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
"""Benchmark producing a changelog group for a changegroup.
This measures the time spent processing the changelog during a
@@ 712,7 755,7 @@ def perfchangegroupchangelog(ui, repo, v
opts = _byteskwargs(opts)
cl = repo.changelog
nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
- bundler = changegroup.getbundler(version, repo)
+ bundler = changegroup.getbundler(cgversion, repo)
def d():
state, chunks = bundler._generatechangelog(cl, nodes)
@@ 819,6 862,7 @@ def perfmergecalculate(ui, repo, rev, **
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
+ """benchmark the copy tracing logic"""
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
ctx1 = scmutil.revsingle(repo, rev1, rev1)
@@ 952,18 996,48 @@ def perfchangeset(ui, repo, rev, **opts)
timer(d)
fm.end()
-@command(b'perfindex', formatteropts)
+@command(b'perfignore', formatteropts)
+def perfignore(ui, repo, **opts):
+ """benchmark operation related to computing ignore"""
+ opts = _byteskwargs(opts)
+ timer, fm = gettimer(ui, opts)
+ dirstate = repo.dirstate
+
+ def setupone():
+ dirstate.invalidate()
+ clearfilecache(dirstate, b'_ignore')
+
+ def runone():
+ dirstate._ignore
+
+ timer(runone, setup=setupone, title=b"load")
+ fm.end()
+
+@command(b'perfindex', [
+ (b'', b'rev', b'', b'revision to be looked up (default tip)'),
+ ] + formatteropts)
def perfindex(ui, repo, **opts):
import mercurial.revlog
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
- n = repo[b"tip"].node()
- svfs = getsvfs(repo)
+ if opts[b'rev'] is None:
+ n = repo[b"tip"].node()
+ else:
+ rev = scmutil.revsingle(repo, opts[b'rev'])
+ n = repo[rev].node()
+
+ unfi = repo.unfiltered()
+ # find the filecache func directly
+ # This avoids polluting the benchmark with the filecache logic
+ makecl = unfi.__class__.changelog.func
+ def setup():
+ # probably not necessary, but for good measure
+ clearchangelog(unfi)
def d():
- cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
+ cl = makecl(unfi)
cl.rev(n)
- timer(d)
+ timer(d, setup=setup)
fm.end()
@command(b'perfstartup', formatteropts)
@@ 1144,6 1218,82 @@ def perftemplating(ui, repo, testedtempl
timer(format)
fm.end()
+@command(b'perfhelper-pathcopies', formatteropts +
+ [
+ (b'r', b'revs', [], b'restrict search to these revisions'),
+ (b'', b'timing', False, b'provides extra data (costly)'),
+ ])
+def perfhelperpathcopies(ui, repo, revs=[], **opts):
+ """find statistics about potential parameters for the `perftracecopies`
+
+ This command finds source-destination pairs relevant for copy tracing testing.
+ It reports values for some of the parameters that impact copy tracing time.
+
+ If `--timing` is set, rename detection is run and the associated timing
+ will be reported. The extra details come at the cost of a slower command
+ execution.
+
+ Since the rename detection is only run once, other factors might easily
+ affect the precision of the timing. However it should give a good
+ approximation of which revision pairs are very costly.
+ """
+ opts = _byteskwargs(opts)
+ fm = ui.formatter(b'perf', opts)
+ dotiming = opts[b'timing']
+
+ if dotiming:
+ header = '%12s %12s %12s %12s %12s %12s\n'
+ output = ("%(source)12s %(destination)12s "
+ "%(nbrevs)12d %(nbmissingfiles)12d "
+ "%(nbrenamedfiles)12d %(time)18.5f\n")
+ header_names = ("source", "destination", "nb-revs", "nb-files",
+ "nb-renames", "time")
+ fm.plain(header % header_names)
+ else:
+ header = '%12s %12s %12s %12s\n'
+ output = ("%(source)12s %(destination)12s "
+ "%(nbrevs)12d %(nbmissingfiles)12d\n")
+ fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
+
+ if not revs:
+ revs = ['all()']
+ revs = scmutil.revrange(repo, revs)
+
+ roi = repo.revs('merge() and %ld', revs)
+ for r in roi:
+ ctx = repo[r]
+ p1 = ctx.p1().rev()
+ p2 = ctx.p2().rev()
+ bases = repo.changelog._commonancestorsheads(p1, p2)
+ for p in (p1, p2):
+ for b in bases:
+ base = repo[b]
+ parent = repo[p]
+ missing = copies._computeforwardmissing(base, parent)
+ if not missing:
+ continue
+ data = {
+ b'source': base.hex(),
+ b'destination': parent.hex(),
+ b'nbrevs': len(repo.revs('%d::%d', b, p)),
+ b'nbmissingfiles': len(missing),
+ }
+ if dotiming:
+ begin = util.timer()
+ renames = copies.pathcopies(base, parent)
+ end = util.timer()
+ # not very stable timing since we did only one run
+ data['time'] = end - begin
+ data['nbrenamedfiles'] = len(renames)
+ fm.startitem()
+ fm.data(**data)
+ out = data.copy()
+ out['source'] = fm.hexfunc(base.node())
+ out['destination'] = fm.hexfunc(parent.node())
+ fm.plain(output % out)
+
+ fm.end()
+
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
opts = _byteskwargs(opts)
@@ 1402,7 1552,7 @@ def perfdiffwd(ui, repo, **opts):
ui.popbuffer()
diffopt = diffopt.encode('ascii')
title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
- timer(d, title)
+ timer(d, title=title)
fm.end()
@command(b'perfrevlogindex', revlogopts + formatteropts,
@@ 1553,7 1703,7 @@ def perfrevlogrevisions(ui, repo, file_=
dist = opts[b'dist']
if reverse:
- beginrev, endrev = endrev, beginrev
+ beginrev, endrev = endrev - 1, beginrev - 1
dist = -1 * dist
for x in _xrange(beginrev, endrev, dist):
@@ 1565,6 1715,241 @@ def perfrevlogrevisions(ui, repo, file_=
timer(d)
fm.end()
+@command(b'perfrevlogwrite', revlogopts + formatteropts +
+ [(b's', b'startrev', 1000, b'revision to start writing at'),
+ (b'', b'stoprev', -1, b'last revision to write'),
+ (b'', b'count', 3, b'last revision to write'),
+ (b'', b'details', False, b'print timing for every revisions tested'),
+ (b'', b'source', b'full', b'the kind of data feed in the revlog'),
+ (b'', b'lazydeltabase', True, b'try the provided delta first'),
+ (b'', b'clear-caches', True, b'clear revlog cache between calls'),
+ ],
+ b'-c|-m|FILE')
+def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
+ """Benchmark writing a series of revisions to a revlog.
+
+ Possible source values are:
+ * `full`: add from a full text (default).
+ * `parent-1`: add from a delta to the first parent
+ * `parent-2`: add from a delta to the second parent if it exists
+ (use a delta from the first parent otherwise)
+ * `parent-smallest`: add from the smallest delta (either p1 or p2)
+ * `storage`: add from the existing precomputed deltas
+ """
+ opts = _byteskwargs(opts)
+
+ rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
+ rllen = getlen(ui)(rl)
+ if startrev < 0:
+ startrev = rllen + startrev
+ if stoprev < 0:
+ stoprev = rllen + stoprev
+
+ lazydeltabase = opts['lazydeltabase']
+ source = opts['source']
+ clearcaches = opts['clear_caches']
+ validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
+ b'storage')
+ if source not in validsource:
+ raise error.Abort('invalid source type: %s' % source)
+
+ ### actually gather results
+ count = opts['count']
+ if count <= 0:
+ raise error.Abort('invalide run count: %d' % count)
+ allresults = []
+ for c in range(count):
+ timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
+ lazydeltabase=lazydeltabase,
+ clearcaches=clearcaches)
+ allresults.append(timing)
+
+ ### consolidate the results in a single list
+ results = []
+ for idx, (rev, t) in enumerate(allresults[0]):
+ ts = [t]
+ for other in allresults[1:]:
+ orev, ot = other[idx]
+ assert orev == rev
+ ts.append(ot)
+ results.append((rev, ts))
+ resultcount = len(results)
+
+ ### Compute and display relevant statistics
+
+ # get a formatter
+ fm = ui.formatter(b'perf', opts)
+ displayall = ui.configbool(b"perf", b"all-timing", False)
+
+ # print individual details if requested
+ if opts['details']:
+ for idx, item in enumerate(results, 1):
+ rev, data = item
+ title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
+ formatone(fm, data, title=title, displayall=displayall)
+
+ # sorts results by median time
+ results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
+ # list of (name, index) to display
+ relevants = [
+ ("min", 0),
+ ("10%", resultcount * 10 // 100),
+ ("25%", resultcount * 25 // 100),
+ ("50%", resultcount * 70 // 100),
+ ("75%", resultcount * 75 // 100),
+ ("90%", resultcount * 90 // 100),
+ ("95%", resultcount * 95 // 100),
+ ("99%", resultcount * 99 // 100),
+ ("99.9%", resultcount * 999 // 1000),
+ ("99.99%", resultcount * 9999 // 10000),
+ ("99.999%", resultcount * 99999 // 100000),
+ ("max", -1),
+ ]
+ if not ui.quiet:
+ for name, idx in relevants:
+ data = results[idx]
+ title = '%s of %d, rev %d' % (name, resultcount, data[0])
+ formatone(fm, data[1], title=title, displayall=displayall)
+
+ # XXX summing that many float will not be very precise, we ignore this fact
+ # for now
+ totaltime = []
+ for item in allresults:
+ totaltime.append((sum(x[1][0] for x in item),
+ sum(x[1][1] for x in item),
+ sum(x[1][2] for x in item),)
+ )
+ formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
+ displayall=displayall)
+ fm.end()
+
+class _faketr(object):
+ def add(s, x, y, z=None):
+ return None
+
+def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
+ lazydeltabase=True, clearcaches=True):
+ timings = []
+ tr = _faketr()
+ with _temprevlog(ui, orig, startrev) as dest:
+ dest._lazydeltabase = lazydeltabase
+ revs = list(orig.revs(startrev, stoprev))
+ total = len(revs)
+ topic = 'adding'
+ if runidx is not None:
+ topic += ' (run #%d)' % runidx
+ # Support both old and new progress API
+ if util.safehasattr(ui, 'makeprogress'):
+ progress = ui.makeprogress(topic, unit='revs', total=total)
+ def updateprogress(pos):
+ progress.update(pos)
+ def completeprogress():
+ progress.complete()
+ else:
+ def updateprogress(pos):
+ ui.progress(topic, pos, unit='revs', total=total)
+ def completeprogress():
+ ui.progress(topic, None, unit='revs', total=total)
+
+ for idx, rev in enumerate(revs):
+ updateprogress(idx)
+ addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
+ if clearcaches:
+ dest.index.clearcaches()
+ dest.clearcaches()
+ with timeone() as r:
+ dest.addrawrevision(*addargs, **addkwargs)
+ timings.append((rev, r[0]))
+ updateprogress(total)
+ completeprogress()
+ return timings
+
+def _getrevisionseed(orig, rev, tr, source):
+ from mercurial.node import nullid
+
+ linkrev = orig.linkrev(rev)
+ node = orig.node(rev)
+ p1, p2 = orig.parents(node)
+ flags = orig.flags(rev)
+ cachedelta = None
+ text = None
+
+ if source == b'full':
+ text = orig.revision(rev)
+ elif source == b'parent-1':
+ baserev = orig.rev(p1)
+ cachedelta = (baserev, orig.revdiff(p1, rev))
+ elif source == b'parent-2':
+ parent = p2
+ if p2 == nullid:
+ parent = p1
+ baserev = orig.rev(parent)
+ cachedelta = (baserev, orig.revdiff(parent, rev))
+ elif source == b'parent-smallest':
+ p1diff = orig.revdiff(p1, rev)
+ parent = p1
+ diff = p1diff
+ if p2 != nullid:
+ p2diff = orig.revdiff(p2, rev)
+ if len(p1diff) > len(p2diff):
+ parent = p2
+ diff = p2diff
+ baserev = orig.rev(parent)
+ cachedelta = (baserev, diff)
+ elif source == b'storage':
+ baserev = orig.deltaparent(rev)
+ cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
+
+ return ((text, tr, linkrev, p1, p2),
+ {'node': node, 'flags': flags, 'cachedelta': cachedelta})
+
+@contextlib.contextmanager
+def _temprevlog(ui, orig, truncaterev):
+ from mercurial import vfs as vfsmod
+
+ if orig._inline:
+ raise error.Abort('not supporting inline revlog (yet)')
+
+ origindexpath = orig.opener.join(orig.indexfile)
+ origdatapath = orig.opener.join(orig.datafile)
+ indexname = 'revlog.i'
+ dataname = 'revlog.d'
+
+ tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
+ try:
+ # copy the data file in a temporary directory
+ ui.debug('copying data in %s\n' % tmpdir)
+ destindexpath = os.path.join(tmpdir, 'revlog.i')
+ destdatapath = os.path.join(tmpdir, 'revlog.d')
+ shutil.copyfile(origindexpath, destindexpath)
+ shutil.copyfile(origdatapath, destdatapath)
+
+ # remove the data we want to add again
+ ui.debug('truncating data to be rewritten\n')
+ with open(destindexpath, 'ab') as index:
+ index.seek(0)
+ index.truncate(truncaterev * orig._io.size)
+ with open(destdatapath, 'ab') as data:
+ data.seek(0)
+ data.truncate(orig.start(truncaterev))
+
+ # instantiate a new revlog from the temporary copy
+ ui.debug('truncating adding to be rewritten\n')
+ vfs = vfsmod.vfs(tmpdir)
+ vfs.options = getattr(orig.opener, 'options', None)
+
+ dest = revlog.revlog(vfs,
+ indexfile=indexname,
+ datafile=dataname)
+ if dest._inline:
+ raise error.Abort('not supporting inline revlog (yet)')
+ # make sure internals are initialized
+ dest.revision(len(dest) - 1)
+ yield dest
+ del dest, vfs
+ finally:
+ shutil.rmtree(tmpdir, True)
+
@command(b'perfrevlogchunks', revlogopts + formatteropts +
[(b'e', b'engines', b'', b'compression engines to use'),
(b's', b'startrev', 0, b'revision to start at')],
@@ 1692,10 2077,11 @@ def perfrevlogrevision(ui, repo, file_,
Obtaining a revlog revision consists of roughly the following steps:
1. Compute the delta chain
- 2. Obtain the raw chunks for that delta chain
- 3. Decompress each raw chunk
- 4. Apply binary patches to obtain fulltext
- 5. Verify hash of fulltext
+ 2. Slice the delta chain if applicable
+ 3. Obtain the raw chunks for that delta chain
+ 4. Decompress each raw chunk
+ 5. Apply binary patches to obtain fulltext
+ 6. Verify hash of fulltext
This command measures the time spent in each of these phases.
"""
@@ 1723,17 2109,18 @@ def perfrevlogrevision(ui, repo, file_,
inline = r._inline
iosize = r._io.size
buffer = util.buffer
- offset = start(chain[0])
chunks = []
ladd = chunks.append
-
- for rev in chain:
- chunkstart = start(rev)
- if inline:
- chunkstart += (rev + 1) * iosize
- chunklength = length(rev)
- ladd(buffer(data, chunkstart - offset, chunklength))
+ for idx, item in enumerate(chain):
+ offset = start(item[0])
+ bits = data[idx]
+ for rev in item:
+ chunkstart = start(rev)
+ if inline:
+ chunkstart += (rev + 1) * iosize
+ chunklength = length(rev)
+ ladd(buffer(bits, chunkstart - offset, chunklength))
return chunks
@@ 1745,7 2132,12 @@ def perfrevlogrevision(ui, repo, file_,
def doread(chain):
if not cache:
r.clearcaches()
- segmentforrevs(chain[0], chain[-1])
+ for item in slicedchain:
+ segmentforrevs(item[0], item[-1])
+
+ def doslice(r, chain, size):
+ for s in slicechunk(r, chain, targetsize=size):
+ pass
def dorawchunks(data, chain):
if not cache:
@@ 1772,9 2164,19 @@ def perfrevlogrevision(ui, repo, file_,
r.clearcaches()
r.revision(node)
+ try:
+ from mercurial.revlogutils.deltas import slicechunk
+ except ImportError:
+ slicechunk = getattr(revlog, '_slicechunk', None)
+
+ size = r.length(rev)
chain = r._deltachain(rev)[0]
- data = segmentforrevs(chain[0], chain[-1])[1]
- rawchunks = getrawchunks(data, chain)
+ if not getattr(r, '_withsparseread', False):
+ slicedchain = (chain,)
+ else:
+ slicedchain = tuple(slicechunk(r, chain, targetsize=size))
+ data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
+ rawchunks = getrawchunks(data, slicedchain)
bins = r._chunks(chain)
text = bytes(bins[0])
bins = bins[1:]
@@ 1784,16 2186,23 @@ def perfrevlogrevision(ui, repo, file_,
(lambda: dorevision(), b'full'),
(lambda: dodeltachain(rev), b'deltachain'),
(lambda: doread(chain), b'read'),
- (lambda: dorawchunks(data, chain), b'rawchunks'),
+ ]
+
+ if getattr(r, '_withsparseread', False):
+ slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
+ benches.append(slicing)
+
+ benches.extend([
+ (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
(lambda: dodecompress(rawchunks), b'decompress'),
(lambda: dopatch(text, bins), b'patch'),
(lambda: dohash(text), b'hash'),
- ]
+ ])
+ timer, fm = gettimer(ui, opts)
for fn, title in benches:
- timer, fm = gettimer(ui, opts)
timer(fn, title=title)
- fm.end()
+ fm.end()
@command(b'perfrevset',
[(b'C', b'clear', False, b'clear volatile cache between each call.'),
@@ 1929,13 2338,120 @@ def perfbranchmap(ui, repo, *filternames
branchcachewrite.restore()
fm.end()
+@command(b'perfbranchmapupdate', [
+ (b'', b'base', [], b'subset of revision to start from'),
+ (b'', b'target', [], b'subset of revision to end with'),
+ (b'', b'clear-caches', False, b'clear cache between each runs')
+ ] + formatteropts)
+def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
+ """benchmark branchmap update for <base> revs to <target> revs
+
+ If `--clear-caches` is passed, the following items will be reset before
+ each update:
+ * the changelog instance and associated indexes
+ * the rev-branch-cache instance
+
+ Examples:
+
+ # update for the one last revision
+ $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
+
+ # update for change coming with a new branch
+ $ hg perfbranchmapupdate --base 'stable' --target 'default'
+ """
+ from mercurial import branchmap
+ from mercurial import repoview
+ opts = _byteskwargs(opts)
+ timer, fm = gettimer(ui, opts)
+ clearcaches = opts[b'clear_caches']
+ unfi = repo.unfiltered()
+ x = [None] # used to pass data between closure
+
+ # we use a `list` here to avoid possible side effect from smartset
+ baserevs = list(scmutil.revrange(repo, base))
+ targetrevs = list(scmutil.revrange(repo, target))
+ if not baserevs:
+ raise error.Abort(b'no revisions selected for --base')
+ if not targetrevs:
+ raise error.Abort(b'no revisions selected for --target')
+
+ # make sure the target branchmap also contains the one in the base
+ targetrevs = list(set(baserevs) | set(targetrevs))
+ targetrevs.sort()
+
+ cl = repo.changelog
+ allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
+ allbaserevs.sort()
+ alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
+
+ newrevs = list(alltargetrevs.difference(allbaserevs))
+ newrevs.sort()
+
+ allrevs = frozenset(unfi.changelog.revs())
+ basefilterrevs = frozenset(allrevs.difference(allbaserevs))
+ targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
+
+ def basefilter(repo, visibilityexceptions=None):
+ return basefilterrevs
+
+ def targetfilter(repo, visibilityexceptions=None):
+ return targetfilterrevs
+
+ msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
+ ui.status(msg % (len(allbaserevs), len(newrevs)))
+ if targetfilterrevs:
+ msg = b'(%d revisions still filtered)\n'
+ ui.status(msg % len(targetfilterrevs))
+
+ try:
+ repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
+ repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
+
+ baserepo = repo.filtered(b'__perf_branchmap_update_base')
+ targetrepo = repo.filtered(b'__perf_branchmap_update_target')
+
+ # try to find an existing branchmap to reuse
+ subsettable = getbranchmapsubsettable()
+ candidatefilter = subsettable.get(None)
+ while candidatefilter is not None:
+ candidatebm = repo.filtered(candidatefilter).branchmap()
+ if candidatebm.validfor(baserepo):
+ filtered = repoview.filterrevs(repo, candidatefilter)
+ missing = [r for r in allbaserevs if r in filtered]
+ base = candidatebm.copy()
+ base.update(baserepo, missing)
+ break
+ candidatefilter = subsettable.get(candidatefilter)
+ else:
+ # no suitable subset was found
+ base = branchmap.branchcache()
+ base.update(baserepo, allbaserevs)
+
+ def setup():
+ x[0] = base.copy()
+ if clearcaches:
+ unfi._revbranchcache = None
+ clearchangelog(repo)
+
+ def bench():
+ x[0].update(targetrepo, newrevs)
+
+ timer(bench, setup=setup)
+ fm.end()
+ finally:
+ repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
+ repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
+
@command(b'perfbranchmapload', [
(b'f', b'filter', b'', b'Specify repoview filter'),
(b'', b'list', False, b'List brachmap filter caches'),
+ (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
+
] + formatteropts)
-def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
+def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
"""benchmark reading the branchmap"""
opts = _byteskwargs(opts)
+ clearrevlogs = opts[b'clear_revlogs']
if list:
for name, kind, st in repo.cachevfs.readdir(stat=True):
@@ 1944,16 2460,31 @@ def perfbranchmapread(ui, repo, filter=b
ui.status(b'%s - %s\n'
% (filtername, util.bytecount(st.st_size)))
return
- if filter:
+ if not filter:
+ filter = None
+ subsettable = getbranchmapsubsettable()
+ if filter is None:
+ repo = repo.unfiltered()
+ else:
repo = repoview.repoview(repo, filter)
- else:
- repo = repo.unfiltered()
+
+ repo.branchmap() # make sure we have a relevant, up to date branchmap
+
+ currentfilter = filter
# try once without timer, the filter may not be cached
- if branchmap.read(repo) is None:
- raise error.Abort(b'No brachmap cached for %s repo'
- % (filter or b'unfiltered'))
+ while branchmap.read(repo) is None:
+ currentfilter = subsettable.get(currentfilter)
+ if currentfilter is None:
+ raise error.Abort(b'No branchmap cached for %s repo'
+ % (filter or b'unfiltered'))
+ repo = repo.filtered(currentfilter)
timer, fm = gettimer(ui, opts)
- timer(lambda: branchmap.read(repo) and None)
+ def setup():
+ if clearrevlogs:
+ clearchangelog(repo)
+ def bench():
+ branchmap.read(repo)
+ timer(bench, setup=setup)
fm.end()
@command(b'perfloadmarkers')
@@ 2124,3 2655,21 @@ def uisetup(ui):
hint=b"use 3.5 or later")
return orig(repo, cmd, file_, opts)
extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
+
+@command(b'perfprogress', formatteropts + [
+ (b'', b'topic', b'topic', b'topic for progress messages'),
+ (b'c', b'total', 1000000, b'total value we are progressing to'),
+], norepo=True)
+def perfprogress(ui, topic=None, total=None, **opts):
+ """printing of progress bars"""
+ opts = _byteskwargs(opts)
+
+ timer, fm = gettimer(ui, opts)
+
+ def doprogress():
+ with ui.makeprogress(topic, total=total) as progress:
+ for i in pycompat.xrange(total):
+ progress.increment()
+
+ timer(doprogress)
+ fm.end()
M contrib/python3-whitelist +61 -0
@@ 1,4 1,5 @@
test-abort-checkin.t
+test-absorb-edit-lines.t
test-absorb-filefixupstate.py
test-absorb-phase.t
test-absorb-rename.t
@@ 30,6 31,7 @@ test-bisect.t
test-bisect2.t
test-bisect3.t
test-blackbox.t
+test-bookflow.t
test-bookmarks-current.t
test-bookmarks-merge.t
test-bookmarks-pushpull.t
@@ 62,6 64,7 @@ test-check-commit.t
test-check-config.py
test-check-config.t
test-check-execute.t
+test-check-help.t
test-check-interfaces.py
test-check-module-imports.t
test-check-py3-compat.t
@@ 116,6 119,7 @@ test-convert-tagsbranch-topology.t
test-copy-move-merge.t
test-copy.t
test-copytrace-heuristics.t
+test-custom-filters.t
test-debugbuilddag.t
test-debugbundle.t
test-debugcommands.t
@@ 193,9 197,18 @@ test-execute-bit.t
test-export.t
test-extdata.t
test-extdiff.t
+test-extension-timing.t
test-extensions-afterloaded.t
test-extensions-wrapfunction.py
test-extra-filelog-entry.t
+test-fastannotate-corrupt.t
+test-fastannotate-diffopts.t
+test-fastannotate-hg.t
+test-fastannotate-perfhack.t
+test-fastannotate-protocol.t
+test-fastannotate-renames.t
+test-fastannotate-revmap.py
+test-fastannotate.t
test-fetch.t
test-filebranch.t
test-filecache.py
@@ 206,6 219,19 @@ test-fix-topology.t
test-fix.t
test-flags.t
test-fncache.t
+test-gendoc-da.t
+test-gendoc-de.t
+test-gendoc-el.t
+test-gendoc-fr.t
+test-gendoc-it.t
+test-gendoc-ja.t
+test-gendoc-pt_BR.t
+test-gendoc-ro.t
+test-gendoc-ru.t
+test-gendoc-sv.t
+test-gendoc-zh_CN.t
+test-gendoc-zh_TW.t
+test-gendoc.t
test-generaldelta.t
test-getbundle.t
test-git-export.t
@@ 217,6 243,7 @@ test-gpg.t
test-graft.t
test-grep.t
test-hardlinks.t
+test-help-hide.t
test-help.t
test-hg-parseurl.py
test-hghave.t
@@ 261,6 288,7 @@ test-i18n.t
test-identify.t
test-impexp-branch.t
test-import-bypass.t
+test-import-context.t
test-import-eol.t
test-import-merge.t
test-import-unknown.t
@@ 301,16 329,22 @@ test-largefiles-cache.t
test-largefiles-misc.t
test-largefiles-small-disk.t
test-largefiles-update.t
+test-largefiles-wireproto.t
test-largefiles.t
+test-lfconvert.t
+test-lfs-bundle.t
test-lfs-largefiles.t
test-lfs-pointer.py
+test-lfs.t
test-linelog.py
test-linerange.py
test-locate.t
test-lock-badness.t
+test-log-exthook.t
test-log-linerange.t
test-log.t
test-logexchange.t
+test-logtoprocess.t
test-lrucachedict.py
test-mactext.t
test-mailmap.t
@@ 394,6 428,8 @@ test-narrow-pull.t
test-narrow-rebase.t
test-narrow-shallow-merges.t
test-narrow-shallow.t
+test-narrow-share.t
+test-narrow-sparse.t
test-narrow-strip.t
test-narrow-trackedcmd.t
test-narrow-update.t
@@ 474,6 510,7 @@ test-push-checkheads-unpushed-D5.t
test-push-checkheads-unpushed-D6.t
test-push-checkheads-unpushed-D7.t
test-push-http.t
+test-push-race.t
test-push-warn.t
test-push.t
test-pushvars.t
@@ 512,6 549,28 @@ test-releasenotes-formatting.t
test-releasenotes-merging.t
test-releasenotes-parsing.t
test-relink.t
+test-remotefilelog-bad-configs.t
+test-remotefilelog-bgprefetch.t
+test-remotefilelog-blame.t
+test-remotefilelog-bundle2.t
+test-remotefilelog-bundles.t
+test-remotefilelog-cacheprocess.t
+test-remotefilelog-clone-tree.t
+test-remotefilelog-clone.t
+test-remotefilelog-gcrepack.t
+test-remotefilelog-http.t
+test-remotefilelog-keepset.t
+test-remotefilelog-local.t
+test-remotefilelog-log.t
+test-remotefilelog-partial-shallow.t
+test-remotefilelog-permissions.t
+test-remotefilelog-permisssions.t
+test-remotefilelog-prefetch.t
+test-remotefilelog-pull-noshallow.t
+test-remotefilelog-share.t
+test-remotefilelog-sparse.t
+test-remotefilelog-tags.t
+test-remotefilelog-wireproto.t
test-remove.t
test-removeemptydirs.t
test-rename-after-merge.t
@@ 541,11 600,13 @@ test-revset-outgoing.t
test-rollback.t
test-run-tests.py
test-run-tests.t
+test-rust-ancestor.py
test-schemes.t
test-serve.t
test-setdiscovery.t
test-share.t
test-shelve.t
+test-shelve2.t
test-show-stack.t
test-show-work.t
test-show.t
M contrib/revsetbenchmarks.py +3 -1
@@ 56,9 56,11 @@ def hg(cmd, repo=None):
def perf(revset, target=None, contexts=False):
"""run benchmark for this very revset"""
try:
- args = ['perfrevset', revset]
+ args = ['perfrevset']
if contexts:
args.append('--contexts')
+ args.append('--')
+ args.append(revset)
output = hg(args, repo=target)
return parseoutput(output)
except subprocess.CalledProcessError as exc:
M contrib/wix/help.wxs +1 -0
@@ 47,6 47,7 @@
<File Id="internals.censor.txt" Name="censor.txt" />
<File Id="internals.changegroups.txt" Name="changegroups.txt" />
<File Id="internals.config.txt" Name="config.txt" />
+ <File Id="internals.extensions.txt" Name="extensions.txt" />
<File Id="internals.linelog.txt" Name="linelog.txt" />
<File Id="internals.requirements.txt" Name="requirements.txt" />
<File Id="internals.revlogs.txt" Name="revlogs.txt" />
M doc/docchecker +25 -15
@@ 9,18 9,28 @@
from __future__ import absolute_import, print_function
+import os
import re
import sys
-leadingline = re.compile(r'(^\s*)(\S.*)$')
+try:
+ import msvcrt
+ msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+ msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
+except ImportError:
+ pass
+
+stdout = getattr(sys.stdout, 'buffer', sys.stdout)
+
+leadingline = re.compile(br'(^\s*)(\S.*)$')
checks = [
- (r""":hg:`[^`]*'[^`]*`""",
- """warning: please avoid nesting ' in :hg:`...`"""),
- (r'\w:hg:`',
- 'warning: please have a space before :hg:'),
- (r"""(?:[^a-z][^'.])hg ([^,;"`]*'(?!hg)){2}""",
- '''warning: please use " instead of ' for hg ... "..."'''),
+ (br""":hg:`[^`]*'[^`]*`""",
+ b"""warning: please avoid nesting ' in :hg:`...`"""),
+ (br'\w:hg:`',
+ b'warning: please have a space before :hg:'),
+ (br"""(?:[^a-z][^'.])hg ([^,;"`]*'(?!hg)){2}""",
+ b'''warning: please use " instead of ' for hg ... "..."'''),
]
def check(line):
@@ 29,25 39,25 @@ def check(line):
if re.search(match, line):
messages.append(msg)
if messages:
- print(line)
+ stdout.write(b'%s\n' % line)
for msg in messages:
- print(msg)
+ stdout.write(b'%s\n' % msg)
def work(file):
- (llead, lline) = ('', '')
+ (llead, lline) = (b'', b'')
for line in file:
# this section unwraps lines
match = leadingline.match(line)
if not match:
check(lline)
- (llead, lline) = ('', '')
+ (llead, lline) = (b'', b'')
continue
lead, line = match.group(1), match.group(2)
if (lead == llead):
- if (lline != ''):
- lline += ' ' + line
+ if (lline != b''):
+ lline += b' ' + line
else:
lline = line
else:
@@ 58,9 68,9 @@ def work(file):
def main():
for f in sys.argv[1:]:
try:
- with open(f) as file:
+ with open(f, 'rb') as file:
work(file)
except BaseException as e:
- print("failed to process %s: %s" % (f, e))
+ sys.stdout.write(r"failed to process %s: %s\n" % (f, e))
main()
M doc/gendoc.py +89 -79
@@ 10,11 10,18 @@ import os
import sys
import textwrap
+try:
+ import msvcrt
+ msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+ msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
+except ImportError:
+ pass
+
# This script is executed during installs and may not have C extensions
# available. Relax C module requirements.
-os.environ['HGMODULEPOLICY'] = 'allow'
+os.environ[r'HGMODULEPOLICY'] = r'allow'
# import from the live mercurial repo
-sys.path.insert(0, "..")
+sys.path.insert(0, r"..")
from mercurial import demandimport; demandimport.enable()
# Load util so that the locale path is set by i18n.setdatapath() before
# calling _().
@@ 22,9 29,11 @@ from mercurial import util
util.datapath
from mercurial import (
commands,
+ encoding,
extensions,
help,
minirst,
+ pycompat,
ui as uimod,
)
from mercurial.i18n import (
@@ 39,19 48,19 @@ loaddoc = help.loaddoc
def get_desc(docstr):
if not docstr:
- return "", ""
+ return b"", b""
# sanitize
- docstr = docstr.strip("\n")
+ docstr = docstr.strip(b"\n")
docstr = docstr.rstrip()
shortdesc = docstr.splitlines()[0].strip()
- i = docstr.find("\n")
+ i = docstr.find(b"\n")
if i != -1:
desc = docstr[i + 2:]
else:
desc = shortdesc
- desc = textwrap.dedent(desc)
+ desc = textwrap.dedent(desc.decode('latin1')).encode('latin1')
return (shortdesc, desc)
@@ 61,91 70,93 @@ def get_opts(opts):
shortopt, longopt, default, desc, optlabel = opt
else:
shortopt, longopt, default, desc = opt
- optlabel = _("VALUE")
+ optlabel = _(b"VALUE")
allopts = []
if shortopt:
- allopts.append("-%s" % shortopt)
+ allopts.append(b"-%s" % shortopt)
if longopt:
- allopts.append("--%s" % longopt)
+ allopts.append(b"--%s" % longopt)
if isinstance(default, list):
- allopts[-1] += " <%s[+]>" % optlabel
+ allopts[-1] += b" <%s[+]>" % optlabel
elif (default is not None) and not isinstance(default, bool):
- allopts[-1] += " <%s>" % optlabel
- if '\n' in desc:
+ allopts[-1] += b" <%s>" % optlabel
+ if b'\n' in desc:
# only remove line breaks and indentation
- desc = ' '.join(l.lstrip() for l in desc.split('\n'))
- desc += default and _(" (default: %s)") % default or ""
- yield (", ".join(allopts), desc)
+ desc = b' '.join(l.lstrip() for l in desc.split(b'\n'))
+ desc += default and _(b" (default: %s)") % bytes(default) or b""
+ yield (b", ".join(allopts), desc)
def get_cmd(cmd, cmdtable):
d = {}
attr = cmdtable[cmd]
- cmds = cmd.lstrip("^").split("|")
+ cmds = cmd.lstrip(b"^").split(b"|")
- d['cmd'] = cmds[0]
- d['aliases'] = cmd.split("|")[1:]
- d['desc'] = get_desc(gettext(attr[0].__doc__))
- d['opts'] = list(get_opts(attr[1]))
+ d[b'cmd'] = cmds[0]
+ d[b'aliases'] = cmd.split(b"|")[1:]
+ d[b'desc'] = get_desc(gettext(pycompat.getdoc(attr[0])))
+ d[b'opts'] = list(get_opts(attr[1]))
- s = 'hg ' + cmds[0]
+ s = b'hg ' + cmds[0]
if len(attr) > 2:
- if not attr[2].startswith('hg'):
- s += ' ' + attr[2]
+ if not attr[2].startswith(b'hg'):
+ s += b' ' + attr[2]
else:
s = attr[2]
- d['synopsis'] = s.strip()
+ d[b'synopsis'] = s.strip()
return d
def showdoc(ui):
# print options
- ui.write(minirst.section(_("Options")))
+ ui.write(minirst.section(_(b"Options")))
multioccur = False
for optstr, desc in get_opts(globalopts):
- ui.write("%s\n %s\n\n" % (optstr, desc))
- if optstr.endswith("[+]>"):
+ ui.write(b"%s\n %s\n\n" % (optstr, desc))
+ if optstr.endswith(b"[+]>"):
multioccur = True
if multioccur:
- ui.write(_("\n[+] marked option can be specified multiple times\n"))
- ui.write("\n")
+ ui.write(_(b"\n[+] marked option can be specified multiple times\n"))
+ ui.write(b"\n")
# print cmds
- ui.write(minirst.section(_("Commands")))
+ ui.write(minirst.section(_(b"Commands")))
commandprinter(ui, table, minirst.subsection)
# print help topics
# The config help topic is included in the hgrc.5 man page.
- helpprinter(ui, helptable, minirst.section, exclude=['config'])
+ helpprinter(ui, helptable, minirst.section, exclude=[b'config'])
- ui.write(minirst.section(_("Extensions")))
- ui.write(_("This section contains help for extensions that are "
- "distributed together with Mercurial. Help for other "
- "extensions is available in the help system."))
- ui.write(("\n\n"
- ".. contents::\n"
- " :class: htmlonly\n"
- " :local:\n"
- " :depth: 1\n\n"))
+ ui.write(minirst.section(_(b"Extensions")))
+ ui.write(_(b"This section contains help for extensions that are "
+ b"distributed together with Mercurial. Help for other "
+ b"extensions is available in the help system."))
+ ui.write((b"\n\n"
+ b".. contents::\n"
+ b" :class: htmlonly\n"
+ b" :local:\n"
+ b" :depth: 1\n\n"))
for extensionname in sorted(allextensionnames()):
mod = extensions.load(ui, extensionname, None)
ui.write(minirst.subsection(extensionname))
- ui.write("%s\n\n" % gettext(mod.__doc__))
+ ui.write(b"%s\n\n" % gettext(pycompat.getdoc(mod)))
cmdtable = getattr(mod, 'cmdtable', None)
if cmdtable:
- ui.write(minirst.subsubsection(_('Commands')))
+ ui.write(minirst.subsubsection(_(b'Commands')))
commandprinter(ui, cmdtable, minirst.subsubsubsection)
def showtopic(ui, topic):
extrahelptable = [
- (["common"], '', loaddoc('common'), help.TOPIC_CATEGORY_MISC),
- (["hg.1"], '', loaddoc('hg.1'), help.TOPIC_CATEGORY_CONFIG),
- (["hg-ssh.8"], '', loaddoc('hg-ssh.8'), help.TOPIC_CATEGORY_CONFIG),
- (["hgignore.5"], '', loaddoc('hgignore.5'), help.TOPIC_CATEGORY_CONFIG),
- (["hgrc.5"], '', loaddoc('hgrc.5'), help.TOPIC_CATEGORY_CONFIG),
- (["hgignore.5.gendoc"], '', loaddoc('hgignore'),
+ ([b"common"], b'', loaddoc(b'common'), help.TOPIC_CATEGORY_MISC),
+ ([b"hg.1"], b'', loaddoc(b'hg.1'), help.TOPIC_CATEGORY_CONFIG),
+ ([b"hg-ssh.8"], b'', loaddoc(b'hg-ssh.8'), help.TOPIC_CATEGORY_CONFIG),
+ ([b"hgignore.5"], b'', loaddoc(b'hgignore.5'),
help.TOPIC_CATEGORY_CONFIG),
- (["hgrc.5.gendoc"], '', loaddoc('config'), help.TOPIC_CATEGORY_CONFIG),
+ ([b"hgrc.5"], b'', loaddoc(b'hgrc.5'), help.TOPIC_CATEGORY_CONFIG),
+ ([b"hgignore.5.gendoc"], b'', loaddoc(b'hgignore'),
+ help.TOPIC_CATEGORY_CONFIG),
+ ([b"hgrc.5.gendoc"], b'', loaddoc(b'config'),
+ help.TOPIC_CATEGORY_CONFIG),
]
helpprinter(ui, helptable + extrahelptable, None, include=[topic])
@@ 157,74 168,73 @@ def helpprinter(ui, helptable, sectionfu
if include and names[0] not in include:
continue
for name in names:
- ui.write(".. _%s:\n" % name)
- ui.write("\n")
+ ui.write(b".. _%s:\n" % name)
+ ui.write(b"\n")
if sectionfunc:
ui.write(sectionfunc(sec))
if callable(doc):
doc = doc(ui)
ui.write(doc)
- ui.write("\n")
+ ui.write(b"\n")
def commandprinter(ui, cmdtable, sectionfunc):
h = {}
for c, attr in cmdtable.items():
- f = c.split("|")[0]
- f = f.lstrip("^")
+ f = c.split(b"|")[0]
+ f = f.lstrip(b"^")
h[f] = c
cmds = h.keys()
- cmds.sort()
- for f in cmds:
- if f.startswith("debug"):
+ for f in sorted(cmds):
+ if f.startswith(b"debug"):
continue
d = get_cmd(h[f], cmdtable)
- ui.write(sectionfunc(d['cmd']))
+ ui.write(sectionfunc(d[b'cmd']))
# short description
- ui.write(d['desc'][0])
+ ui.write(d[b'desc'][0])
# synopsis
- ui.write("::\n\n")
- synopsislines = d['synopsis'].splitlines()
+ ui.write(b"::\n\n")
+ synopsislines = d[b'synopsis'].splitlines()
for line in synopsislines:
# some commands (such as rebase) have a multi-line
# synopsis
- ui.write(" %s\n" % line)
- ui.write('\n')
+ ui.write(b" %s\n" % line)
+ ui.write(b'\n')
# description
- ui.write("%s\n\n" % d['desc'][1])
+ ui.write(b"%s\n\n" % d[b'desc'][1])
# options
- opt_output = list(d['opts'])
+ opt_output = list(d[b'opts'])
if opt_output:
opts_len = max([len(line[0]) for line in opt_output])
- ui.write(_("Options:\n\n"))
+ ui.write(_(b"Options:\n\n"))
multioccur = False
for optstr, desc in opt_output:
if desc:
- s = "%-*s %s" % (opts_len, optstr, desc)
+ s = b"%-*s %s" % (opts_len, optstr, desc)
else:
s = optstr
- ui.write("%s\n" % s)
- if optstr.endswith("[+]>"):
+ ui.write(b"%s\n" % s)
+ if optstr.endswith(b"[+]>"):
multioccur = True
if multioccur:
- ui.write(_("\n[+] marked option can be specified"
- " multiple times\n"))
- ui.write("\n")
+ ui.write(_(b"\n[+] marked option can be specified"
+ b" multiple times\n"))
+ ui.write(b"\n")
# aliases
- if d['aliases']:
- ui.write(_(" aliases: %s\n\n") % " ".join(d['aliases']))
+ if d[b'aliases']:
+ ui.write(_(b" aliases: %s\n\n") % b" ".join(d[b'aliases']))
def allextensionnames():
- return extensions.enabled().keys() + extensions.disabled().keys()
+ return set(extensions.enabled().keys()) | set(extensions.disabled().keys())
if __name__ == "__main__":
- doc = 'hg.1.gendoc'
+ doc = b'hg.1.gendoc'
if len(sys.argv) > 1:
- doc = sys.argv[1]
+ doc = encoding.strtolocal(sys.argv[1])
ui = uimod.ui.load()
- if doc == 'hg.1.gendoc':
+ if doc == b'hg.1.gendoc':
showdoc(ui)
else:
- showtopic(ui, sys.argv[1])
+ showtopic(ui, encoding.strtolocal(sys.argv[1]))
M hgext/absorb.py +6 -4
@@ 489,7 489,8 @@ class filefixupstate(object):
if l[colonpos - 1:colonpos + 2] != ' : ':
raise error.Abort(_('malformed line: %s') % l)
linecontent = l[colonpos + 2:]
- for i, ch in enumerate(l[leftpadpos:colonpos - 1]):
+ for i, ch in enumerate(
+ pycompat.bytestr(l[leftpadpos:colonpos - 1])):
if ch == 'y':
contents[visiblefctxs[i][0]] += linecontent
# chunkstats is hard to calculate if anything changes, therefore
@@ 971,9 972,10 @@ def absorb(ui, repo, stack=None, targetc
label='absorb.description')
fm.end()
if not opts.get('dry_run'):
- if not opts.get('apply_changes'):
- if ui.promptchoice("apply changes (yn)? $$ &Yes $$ &No", default=1):
- raise error.Abort(_('absorb cancelled\n'))
+ if (not opts.get('apply_changes') and
+ state.ctxaffected and
+ ui.promptchoice("apply changes (yn)? $$ &Yes $$ &No", default=1)):
+ raise error.Abort(_('absorb cancelled\n'))
state.apply()
if state.commit():
M hgext/amend.py +2 -0
@@ 36,6 36,8 @@ command = registrar.command(cmdtable)
('e', 'edit', None, _('invoke editor on commit messages')),
('i', 'interactive', None, _('use interactive mode')),
('n', 'note', '', _('store a note on the amend')),
+ ('D', 'currentdate', None,
+ _('record the current date as commit date')),
] + cmdutil.walkopts + cmdutil.commitopts + cmdutil.commitopts2,
_('[OPTION]... [FILE]...'),
helpcategory=command.CATEGORY_COMMITTING,
M hgext/beautifygraph.py +0 -2
@@ 31,8 31,6 @@ testedwith = 'ships-with-hg-core'
def prettyedge(before, edge, after):
if edge == '~':
return '\xE2\x95\xA7' # U+2567 ╧
- if edge == 'X':
- return '\xE2\x95\xB3' # U+2573 ╳
if edge == '/':
return '\xE2\x95\xB1' # U+2571 ╱
if edge == '-':
M hgext/blackbox.py +71 -130
@@ 33,11 33,15 @@ Examples::
# rotate up to N log files when the current one gets too big
maxfiles = 3
+ [blackbox]
+ # Include nanoseconds in log entries with %f (see Python function
+ # datetime.datetime.strftime)
+ date-format = '%Y-%m-%d @ %H:%M:%S.%f'
+
"""
from __future__ import absolute_import
-import errno
import re
from mercurial.i18n import _
@@ 45,10 49,8 @@ from mercurial.node import hex
from mercurial import (
encoding,
- pycompat,
+ loggingutil,
registrar,
- ui as uimod,
- util,
)
from mercurial.utils import (
dateutil,
@@ 82,131 84,69 @@ configitem('blackbox', 'maxfiles',
configitem('blackbox', 'track',
default=lambda: ['*'],
)
+configitem('blackbox', 'date-format',
+ default='%Y/%m/%d %H:%M:%S',
+)
-lastui = None
+_lastlogger = loggingutil.proxylogger()
-def _openlogfile(ui, vfs):
- def rotate(oldpath, newpath):
- try:
- vfs.unlink(newpath)
- except OSError as err:
- if err.errno != errno.ENOENT:
- ui.debug("warning: cannot remove '%s': %s\n" %
- (newpath, err.strerror))
- try:
- if newpath:
- vfs.rename(oldpath, newpath)
- except OSError as err:
- if err.errno != errno.ENOENT:
- ui.debug("warning: cannot rename '%s' to '%s': %s\n" %
- (newpath, oldpath, err.strerror))
+class blackboxlogger(object):
+ def __init__(self, ui, repo):
+ self._repo = repo
+ self._trackedevents = set(ui.configlist('blackbox', 'track'))
+ self._maxfiles = ui.configint('blackbox', 'maxfiles')
+ self._maxsize = ui.configbytes('blackbox', 'maxsize')
+ self._inlog = False
- maxsize = ui.configbytes('blackbox', 'maxsize')
- name = 'blackbox.log'
- if maxsize > 0:
+ def tracked(self, event):
+ return b'*' in self._trackedevents or event in self._trackedevents
+
+ def log(self, ui, event, msg, opts):
+ # self._log() -> ctx.dirty() may create new subrepo instance, which
+ # ui is derived from baseui. So the recursion guard in ui.log()
+ # doesn't work as it's local to the ui instance.
+ if self._inlog:
+ return
+ self._inlog = True
try:
- st = vfs.stat(name)
- except OSError:
- pass
- else:
- if st.st_size >= maxsize:
- path = vfs.join(name)
- maxfiles = ui.configint('blackbox', 'maxfiles')
- for i in pycompat.xrange(maxfiles - 1, 1, -1):
- rotate(oldpath='%s.%d' % (path, i - 1),
- newpath='%s.%d' % (path, i))
- rotate(oldpath=path,
- newpath=maxfiles > 0 and path + '.1')
- return vfs(name, 'a')
-
-def wrapui(ui):
- class blackboxui(ui.__class__):
- @property
- def _bbvfs(self):
- vfs = None
- repo = getattr(self, '_bbrepo', None)
- if repo:
- vfs = repo.vfs
- if not vfs.isdir('.'):
- vfs = None
- return vfs
-
- @util.propertycache
- def track(self):
- return self.configlist('blackbox', 'track')
-
- def debug(self, *msg, **opts):
- super(blackboxui, self).debug(*msg, **opts)
- if self.debugflag:
- self.log('debug', '%s', ''.join(msg))
-
- def log(self, event, *msg, **opts):
- global lastui
- super(blackboxui, self).log(event, *msg, **opts)
+ self._log(ui, event, msg, opts)
+ finally:
+ self._inlog = False
- if not '*' in self.track and not event in self.track:
- return
-
- if self._bbvfs:
- ui = self
- else:
- # certain ui instances exist outside the context of
- # a repo, so just default to the last blackbox that
- # was seen.
- ui = lastui
-
- if not ui:
- return
- vfs = ui._bbvfs
- if not vfs:
- return
+ def _log(self, ui, event, msg, opts):
+ default = ui.configdate('devel', 'default-date')
+ date = dateutil.datestr(default, ui.config('blackbox', 'date-format'))
+ user = procutil.getuser()
+ pid = '%d' % procutil.getpid()
+ rev = '(unknown)'
+ changed = ''
+ ctx = self._repo[None]
+ parents = ctx.parents()
+ rev = ('+'.join([hex(p.node()) for p in parents]))
+ if (ui.configbool('blackbox', 'dirty') and
+ ctx.dirty(missing=True, merge=False, branch=False)):
+ changed = '+'
+ if ui.configbool('blackbox', 'logsource'):
+ src = ' [%s]' % event
+ else:
+ src = ''
+ try:
+ fmt = '%s %s @%s%s (%s)%s> %s'
+ args = (date, user, rev, changed, pid, src, msg)
+ with loggingutil.openlogfile(
+ ui, self._repo.vfs, name='blackbox.log',
+ maxfiles=self._maxfiles, maxsize=self._maxsize) as fp:
+ fp.write(fmt % args)
+ except (IOError, OSError) as err:
+ # deactivate this to avoid failed logging again
+ self._trackedevents.clear()
+ ui.debug('warning: cannot write to blackbox.log: %s\n' %
+ encoding.strtolocal(err.strerror))
+ return
+ _lastlogger.logger = self
- repo = getattr(ui, '_bbrepo', None)
- if not lastui or repo:
- lastui = ui
- if getattr(ui, '_bbinlog', False):
- # recursion and failure guard
- return
- ui._bbinlog = True
- default = self.configdate('devel', 'default-date')
- date = dateutil.datestr(default, '%Y/%m/%d %H:%M:%S')
- user = procutil.getuser()
- pid = '%d' % procutil.getpid()
- formattedmsg = msg[0] % msg[1:]
- rev = '(unknown)'
- changed = ''
- if repo:
- ctx = repo[None]
- parents = ctx.parents()
- rev = ('+'.join([hex(p.node()) for p in parents]))
- if (ui.configbool('blackbox', 'dirty') and
- ctx.dirty(missing=True, merge=False, branch=False)):
- changed = '+'
- if ui.configbool('blackbox', 'logsource'):
- src = ' [%s]' % event
- else:
- src = ''
- try:
- fmt = '%s %s @%s%s (%s)%s> %s'
- args = (date, user, rev, changed, pid, src, formattedmsg)
- with _openlogfile(ui, vfs) as fp:
- fp.write(fmt % args)
- except (IOError, OSError) as err:
- self.debug('warning: cannot write to blackbox.log: %s\n' %
- encoding.strtolocal(err.strerror))
- # do not restore _bbinlog intentionally to avoid failed
- # logging again
- else:
- ui._bbinlog = False
-
- def setrepo(self, repo):
- self._bbrepo = repo
-
- ui.__class__ = blackboxui
- uimod.ui = blackboxui
-
-def uisetup(ui):
- wrapui(ui)
+def uipopulate(ui):
+ ui.setlogger(b'blackbox', _lastlogger)
def reposetup(ui, repo):
# During 'hg pull' a httppeer repo is created to represent the remote repo.
@@ 215,14 155,15 @@ def reposetup(ui, repo):
if not repo.local():
return
- if util.safehasattr(ui, 'setrepo'):
- ui.setrepo(repo)
+ # Since blackbox.log is stored in the repo directory, the logger should be
+ # instantiated per repository.
+ logger = blackboxlogger(ui, repo)
+ ui.setlogger(b'blackbox', logger)
- # Set lastui even if ui.log is not called. This gives blackbox a
- # fallback place to log.
- global lastui
- if lastui is None:
- lastui = ui
+ # Set _lastlogger even if ui.log is not called. This gives blackbox a
+ # fallback place to log
+ if _lastlogger.logger is None:
+ _lastlogger.logger = logger
repo._wlockfreeprefix.add('blackbox.log')
A => hgext/bookflow.py +104 -0
@@ 0,0 1,104 @@
+"""implements bookmark-based branching (EXPERIMENTAL)
+
+ - Disables creation of new branches (config: enable_branches=False).
+ - Requires an active bookmark on commit (config: require_bookmark=True).
+ - Doesn't move the active bookmark on update, only on commit.
+ - Requires '--rev' for moving an existing bookmark.
+ - Protects special bookmarks (config: protect=@).
+
+ flow related commands
+
+ :hg book NAME: create a new bookmark
+ :hg book NAME -r REV: move bookmark to revision (fast-forward)
+ :hg up|co NAME: switch to bookmark
+ :hg push -B .: push active bookmark
+"""
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+ bookmarks,
+ commands,
+ error,
+ extensions,
+ registrar,
+)
+
+MY_NAME = 'bookflow'
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem(MY_NAME, 'protect', ['@'])
+configitem(MY_NAME, 'require-bookmark', True)
+configitem(MY_NAME, 'enable-branches', False)
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+def commit_hook(ui, repo, **kwargs):
+ active = repo._bookmarks.active
+ if active:
+ if active in ui.configlist(MY_NAME, 'protect'):
+ raise error.Abort(
+ _('cannot commit, bookmark %s is protected') % active)
+ if not cwd_at_bookmark(repo, active):
+ raise error.Abort(
+ _('cannot commit, working directory out of sync with active bookmark'),
+ hint=_("run 'hg up %s'") % active)
+ elif ui.configbool(MY_NAME, 'require-bookmark', True):
+ raise error.Abort(_('cannot commit without an active bookmark'))
+ return 0
+
+def bookmarks_update(orig, repo, parents, node):
+ if len(parents) == 2:
+ # called during commit
+ return orig(repo, parents, node)
+ else:
+ # called during update
+ return False
+
+def bookmarks_addbookmarks(
+ orig, repo, tr, names, rev=None, force=False, inactive=False):
+ if not rev:
+ marks = repo._bookmarks
+ for name in names:
+ if name in marks:
+ raise error.Abort(_(
+ "bookmark %s already exists, to move use the --rev option"
+ ) % name)
+ return orig(repo, tr, names, rev, force, inactive)
+
+def commands_commit(orig, ui, repo, *args, **opts):
+ commit_hook(ui, repo)
+ return orig(ui, repo, *args, **opts)
+
+def commands_pull(orig, ui, repo, *args, **opts):
+ rc = orig(ui, repo, *args, **opts)
+ active = repo._bookmarks.active
+ if active and not cwd_at_bookmark(repo, active):
+ ui.warn(_(
+ "working directory out of sync with active bookmark, run "
+ "'hg up %s'"
+ ) % active)
+ return rc
+
+def commands_branch(orig, ui, repo, label=None, **opts):
+ if label and not opts.get(r'clean') and not opts.get(r'rev'):
+ raise error.Abort(
+ _("creating named branches is disabled and you should use bookmarks"),
+ hint="see 'hg help bookflow'")
+ return orig(ui, repo, label, **opts)
+
+def cwd_at_bookmark(repo, mark):
+ mark_id = repo._bookmarks[mark]
+ cur_id = repo.lookup('.')
+ return cur_id == mark_id
+
+def uisetup(ui):
+ extensions.wrapfunction(bookmarks, 'update', bookmarks_update)
+ extensions.wrapfunction(bookmarks, 'addbookmarks', bookmarks_addbookmarks)
+ extensions.wrapcommand(commands.table, 'commit', commands_commit)
+ extensions.wrapcommand(commands.table, 'pull', commands_pull)
+ if not ui.configbool(MY_NAME, 'enable-branches'):
+ extensions.wrapcommand(commands.table, 'branch', commands_branch)
M hgext/convert/filemap.py +15 -1
@@ 270,6 270,9 @@ class filemap_source(common.converter_so
self.children[p] = self.children.get(p, 0) + 1
return c
+ def numcommits(self):
+ return self.base.numcommits()
+
def _cachedcommit(self, rev):
if rev in self.commits:
return self.commits[rev]
@@ 302,7 305,18 @@ class filemap_source(common.converter_so
for f in files:
if self.filemapper(f):
return True
- return False
+
+ # The include directive is documented to include nothing else (though
+ # valid branch closes are included).
+ if self.filemapper.include:
+ return False
+
+ # Allow empty commits in the source revision through. The getchanges()
+ # method doesn't even bother calling this if it determines that the
+ # close marker is significant (i.e. all of the branch ancestors weren't
+ # eliminated). Therefore if there *is* a close marker, getchanges()
+ # doesn't consider it significant, and this revision should be dropped.
+ return not files and 'close' not in self.commits[rev].extra
def mark_not_wanted(self, rev, p):
# Mark rev as not interesting and update data structures.
M hgext/convert/hg.py +3 -0
@@ 597,6 597,9 @@ class mercurial_source(common.converter_
saverev=self.saverev,
phase=ctx.phase())
+ def numcommits(self):
+ return len(self.repo)
+
def gettags(self):
# This will get written to .hgtags, filter non global tags out.
tags = [t for t in self.repo.tagslist()
M hgext/extdiff.py +35 -25
@@ 139,7 139,7 @@ def snapshot(ui, repo, files, node, tmpr
repo.ui.setconfig("ui", "archivemeta", False)
archival.archive(repo, base, node, 'files',
- matchfn=scmutil.matchfiles(repo, files),
+ match=scmutil.matchfiles(repo, files),
subrepos=listsubrepos)
for fn in sorted(files):
@@ 152,6 152,29 @@ def snapshot(ui, repo, files, node, tmpr
fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest)))
return dirname, fnsandstat
+def formatcmdline(cmdline, repo_root, do3way,
+ parent1, plabel1, parent2, plabel2, child, clabel):
+ # Function to quote file/dir names in the argument string.
+ # When not operating in 3-way mode, an empty string is
+ # returned for parent2
+ replace = {'parent': parent1, 'parent1': parent1, 'parent2': parent2,
+ 'plabel1': plabel1, 'plabel2': plabel2,
+ 'child': child, 'clabel': clabel,
+ 'root': repo_root}
+ def quote(match):
+ pre = match.group(2)
+ key = match.group(3)
+ if not do3way and key == 'parent2':
+ return pre
+ return pre + procutil.shellquote(replace[key])
+
+ # Match parent2 first, so 'parent1?' will match both parent1 and parent
+ regex = (br'''(['"]?)([^\s'"$]*)'''
+ br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
+ if not do3way and not re.search(regex, cmdline):
+ cmdline += ' $parent1 $child'
+ return re.sub(regex, quote, cmdline)
+
def dodiff(ui, repo, cmdline, pats, opts):
'''Do the actual diff:
@@ 281,28 304,14 @@ def dodiff(ui, repo, cmdline, pats, opts
label1b = None
fnsandstat = []
- # Function to quote file/dir names in the argument string.
- # When not operating in 3-way mode, an empty string is
- # returned for parent2
- replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
- 'plabel1': label1a, 'plabel2': label1b,
- 'clabel': label2, 'child': dir2,
- 'root': repo.root}
- def quote(match):
- pre = match.group(2)
- key = match.group(3)
- if not do3way and key == 'parent2':
- return pre
- return pre + procutil.shellquote(replace[key])
-
- # Match parent2 first, so 'parent1?' will match both parent1 and parent
- regex = (br'''(['"]?)([^\s'"$]*)'''
- br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
- if not do3way and not re.search(regex, cmdline):
- cmdline += ' $parent1 $child'
- cmdline = re.sub(regex, quote, cmdline)
-
- ui.debug('running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
+ # Run the external tool on the 2 temp directories or the patches
+ cmdline = formatcmdline(
+ cmdline, repo.root, do3way=do3way,
+ parent1=dir1a, plabel1=label1a,
+ parent2=dir1b, plabel2=label1b,
+ child=dir2, clabel=label2)
+ ui.debug('running %r in %s\n' % (pycompat.bytestr(cmdline),
+ tmproot))
ui.system(cmdline, cwd=tmproot, blockedtag='extdiff')
for copy_fn, working_fn, st in fnsandstat:
@@ 383,8 392,9 @@ class savedcmd(object):
def __init__(self, path, cmdline):
# We can't pass non-ASCII through docstrings (and path is
- # in an unknown encoding anyway)
- docpath = stringutil.escapestr(path)
+ # in an unknown encoding anyway), but avoid double separators on
+ # Windows
+ docpath = stringutil.escapestr(path).replace(b'\\\\', b'\\')
self.__doc__ %= {r'path': pycompat.sysstr(stringutil.uirepr(docpath))}
self._cmdline = cmdline
M hgext/fastannotate/commands.py +3 -3
@@ 261,8 261,9 @@ def debugbuildannotatecache(ui, repo, *p
repo.prefetchfastannotate(paths)
else:
# server, or full repo
+ progress = ui.makeprogress(_('building'), total=len(paths))
for i, path in enumerate(paths):
- ui.progress(_('building'), i, total=len(paths))
+ progress.update(i)
with facontext.annotatecontext(repo, path) as actx:
try:
if actx.isuptodate(rev):
@@ 281,5 282,4 @@ def debugbuildannotatecache(ui, repo, *p
# cache for other files.
ui.warn(_('fastannotate: %s: failed to '
'build cache: %r\n') % (path, ex))
- # clear the progress bar
- ui.write()
+ progress.complete()
M hgext/fastannotate/context.py +8 -10
@@ 138,7 138,7 @@ def hashdiffopts(diffopts):
(k, getattr(diffopts, k))
for k in mdiff.diffopts.defaults
))
- return hashlib.sha1(diffoptstr).hexdigest()[:6]
+ return node.hex(hashlib.sha1(diffoptstr).digest())[:6]
_defaultdiffopthash = hashdiffopts(mdiff.defaultopts)
@@ 156,6 156,7 @@ class annotateopts(object):
}
def __init__(self, **opts):
+ opts = pycompat.byteskwargs(opts)
for k, v in self.defaults.iteritems():
setattr(self, k, opts.get(k, v))
@@ 397,7 398,8 @@ class _annotatecontext(object):
# 3rd DFS does the actual annotate
visit = initvisit[:]
- progress = 0
+ progress = self.ui.makeprogress(('building cache'),
+ total=len(newmainbranch))
while visit:
f = visit[-1]
if f in hist:
@@ 436,10 438,7 @@ class _annotatecontext(object):
del pcache[f]
if ismainbranch: # need to write to linelog
- if not self.ui.quiet:
- progress += 1
- self.ui.progress(_('building cache'), progress,
- total=len(newmainbranch))
+ progress.increment()
bannotated = None
if len(pl) == 2 and self.opts.followmerge: # merge
bannotated = curr[0]
@@ 449,8 448,7 @@ class _annotatecontext(object):
elif showpath: # not append linelog, but we need to record path
self._node2path[f.node()] = f.path()
- if progress: # clean progress bar
- self.ui.write()
+ progress.complete()
result = [
((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l)
@@ 604,7 602,7 @@ class _annotatecontext(object):
the best case, the user provides a node and we don't need to read the
filelog or construct any filecontext.
"""
- if isinstance(f, str):
+ if isinstance(f, bytes):
hsh = f
else:
hsh = f.node()
@@ 627,7 625,7 @@ class _annotatecontext(object):
if showpath:
result = self._addpathtoresult(result)
if showlines:
- if isinstance(f, str): # f: node or fctx
+ if isinstance(f, bytes): # f: node or fctx
llrev = self.revmap.hsh2rev(f)
fctx = self._resolvefctx(f, self.revmap.rev2path(llrev))
else:
M hgext/fastannotate/formatter.py +11 -8
@@ 39,23 39,26 @@ class defaultformatter(object):
orig = hexfunc
hexfunc = lambda x: None if x is None else orig(x)
wnode = hexfunc(repo[None].p1().node()) + '+'
- wrev = str(repo[None].p1().rev())
+ wrev = '%d' % repo[None].p1().rev()
wrevpad = ''
if not opts.get('changeset'): # only show + if changeset is hidden
wrev += '+'
wrevpad = ' '
- revenc = lambda x: wrev if x is None else str(x) + wrevpad
- csetenc = lambda x: wnode if x is None else str(x) + ' '
+ revenc = lambda x: wrev if x is None else ('%d' % x) + wrevpad
+ def csetenc(x):
+ if x is None:
+ return wnode
+ return pycompat.bytestr(x) + ' '
else:
- revenc = csetenc = str
+ revenc = csetenc = pycompat.bytestr
# opt name, separator, raw value (for json/plain), encoder (for plain)
opmap = [('user', ' ', lambda x: getctx(x).user(), ui.shortuser),
('number', ' ', lambda x: getctx(x).rev(), revenc),
('changeset', ' ', lambda x: hexfunc(x[0]), csetenc),
('date', ' ', lambda x: getctx(x).date(), datefunc),
- ('file', ' ', lambda x: x[2], str),
- ('line_number', ':', lambda x: x[1] + 1, str)]
+ ('file', ' ', lambda x: x[2], pycompat.bytestr),
+ ('line_number', ':', lambda x: x[1] + 1, pycompat.bytestr)]
fieldnamemap = {'number': 'rev', 'changeset': 'node'}
funcmap = [(get, sep, fieldnamemap.get(op, op), enc)
for op, sep, get, enc in opmap
@@ 100,7 103,7 @@ class defaultformatter(object):
result += ': ' + self.ui.label('-' + lines[i],
'diff.deleted')
- if result[-1] != '\n':
+ if result[-1:] != '\n':
result += '\n'
self.ui.write(result)
@@ 125,7 128,7 @@ class jsonformatter(defaultformatter):
if annotatedresult:
self._writecomma()
- pieces = [(name, map(f, annotatedresult))
+ pieces = [(name, pycompat.maplist(f, annotatedresult))
for f, sep, name, enc in self.funcmap]
if lines is not None:
pieces.append(('line', lines))
M hgext/fastannotate/protocol.py +2 -2
@@ 98,10 98,10 @@ def _parseresponse(payload):
state = 0 # 0: vfspath, 1: size
vfspath = size = ''
while i < l:
- ch = payload[i]
+ ch = payload[i:i + 1]
if ch == '\0':
if state == 1:
- result[vfspath] = buffer(payload, i + 1, int(size))
+ result[vfspath] = payload[i + 1:i + 1 + int(size)]
i += int(size)
state = 0
vfspath = size = ''
M hgext/fastannotate/revmap.py +1 -1
@@ 207,7 207,7 @@ class revmap(object):
path = self.rev2path(rev)
if path is None:
raise error.CorruptedFileError('cannot find path for %s' % rev)
- f.write(path + '\0')
+ f.write(path + b'\0')
f.write(hsh)
@staticmethod
M hgext/fix.py +96 -22
@@ 15,13 15,15 @@ formatting fixes to modified lines in C+
[fix]
clang-format:command=clang-format --assume-filename={rootpath}
clang-format:linerange=--lines={first}:{last}
- clang-format:fileset=set:**.cpp or **.hpp
+ clang-format:pattern=set:**.cpp or **.hpp
The :command suboption forms the first part of the shell command that will be
used to fix a file. The content of the file is passed on standard input, and the
-fixed file content is expected on standard output. If there is any output on
-standard error, the file will not be affected. Some values may be substituted
-into the command::
+fixed file content is expected on standard output. Any output on standard error
+will be displayed as a warning. If the exit status is not zero, the file will
+not be affected. A placeholder warning is displayed if there is a non-zero exit
+status but no standard error output. Some values may be substituted into the
+command::
{rootpath} The path of the file being fixed, relative to the repo root
{basename} The name of the file being fixed, without the directory path
@@ 34,16 36,42 @@ substituted into the command::
{first} The 1-based line number of the first line in the modified range
{last} The 1-based line number of the last line in the modified range
-The :fileset suboption determines which files will be passed through each
-configured tool. See :hg:`help fileset` for possible values. If there are file
-arguments to :hg:`fix`, the intersection of these filesets is used.
+The :pattern suboption determines which files will be passed through each
+configured tool. See :hg:`help patterns` for possible values. If there are file
+arguments to :hg:`fix`, the intersection of these patterns is used.
There is also a configurable limit for the maximum size of file that will be
processed by :hg:`fix`::
[fix]
- maxfilesize=2MB
+ maxfilesize = 2MB
+
+Normally, execution of configured tools will continue after a failure (indicated
+by a non-zero exit status). It can also be configured to abort after the first
+such failure, so that no files will be affected if any tool fails. This abort
+will also cause :hg:`fix` to exit with a non-zero status::
+
+ [fix]
+ failure = abort
+When multiple tools are configured to affect a file, they execute in an order
+defined by the :priority suboption. The priority suboption has a default value
+of zero for each tool. Tools are executed in order of descending priority. The
+execution order of tools with equal priority is unspecified. For example, you
+could use the 'sort' and 'head' utilities to keep only the 10 smallest numbers
+in a text file by ensuring that 'sort' runs before 'head'::
+
+ [fix]
+ sort:command = sort -n
+ head:command = head -n 10
+ sort:pattern = numbers.txt
+ head:pattern = numbers.txt
+ sort:priority = 2
+ head:priority = 1
+
+To account for changes made by each tool, the line numbers used for incremental
+formatting are recomputed before executing the next tool. So, each tool may see
+different values for the arguments added by the :linerange suboption.
"""
from __future__ import absolute_import
@@ 90,16 118,36 @@ configtable = {}
configitem = registrar.configitem(configtable)
# Register the suboptions allowed for each configured fixer.
-FIXER_ATTRS = ('command', 'linerange', 'fileset')
+FIXER_ATTRS = {
+ 'command': None,
+ 'linerange': None,
+ 'fileset': None,
+ 'pattern': None,
+ 'priority': 0,
+}
-for key in FIXER_ATTRS:
- configitem('fix', '.*(:%s)?' % key, default=None, generic=True)
+for key, default in FIXER_ATTRS.items():
+ configitem('fix', '.*(:%s)?' % key, default=default, generic=True)
# A good default size allows most source code files to be fixed, but avoids
# letting fixer tools choke on huge inputs, which could be surprising to the
# user.
configitem('fix', 'maxfilesize', default='2MB')
+# Allow fix commands to exit non-zero if an executed fixer tool exits non-zero.
+# This helps users do shell scripts that stop when a fixer tool signals a
+# problem.
+configitem('fix', 'failure', default='continue')
+
+def checktoolfailureaction(ui, message, hint=None):
+ """Abort with 'message' if fix.failure=abort"""
+ action = ui.config('fix', 'failure')
+ if action not in ('continue', 'abort'):
+ raise error.Abort(_('unknown fix.failure action: %s') % (action,),
+ hint=_('use "continue" or "abort"'))
+ if action == 'abort':
+ raise error.Abort(message, hint=hint)
+
allopt = ('', 'all', False, _('fix all non-public non-obsolete revisions'))
baseopt = ('', 'base', [], _('revisions to diff against (overrides automatic '
'selection, and applies to every revision being '
@@ 465,9 513,14 @@ def fixfile(ui, opts, fixers, fixctx, pa
showstderr(ui, fixctx.rev(), fixername, stderr)
if proc.returncode == 0:
newdata = newerdata
- elif not stderr:
- showstderr(ui, fixctx.rev(), fixername,
- _('exited with status %d\n') % (proc.returncode,))
+ else:
+ if not stderr:
+ message = _('exited with status %d\n') % (proc.returncode,)
+ showstderr(ui, fixctx.rev(), fixername, message)
+ checktoolfailureaction(
+ ui, _('no fixes will be applied'),
+ hint=_('use --config fix.failure=continue to apply any '
+ 'successful fixes anyway'))
return newdata
def showstderr(ui, rev, fixername, stderr):
@@ 533,6 586,17 @@ def replacerev(ui, repo, ctx, filedata,
newp1node = replacements.get(p1ctx.node(), p1ctx.node())
newp2node = replacements.get(p2ctx.node(), p2ctx.node())
+ # We don't want to create a revision that has no changes from the original,
+ # but we should if the original revision's parent has been replaced.
+ # Otherwise, we would produce an orphan that needs no actual human
+ # intervention to evolve. We can't rely on commit() to avoid creating the
+ # unneeded revision because the extra field added below produces a new hash
+ # regardless of file content changes.
+ if (not filedata and
+ p1ctx.node() not in replacements and
+ p2ctx.node() not in replacements):
+ return
+
def filectxfn(repo, memctx, path):
if path not in ctx:
return None
@@ 549,6 613,9 @@ def replacerev(ui, repo, ctx, filedata,
isexec=fctx.isexec(),
copied=copied)
+ extra = ctx.extra().copy()
+ extra['fix_source'] = ctx.hex()
+
memctx = context.memctx(
repo,
parents=(newp1node, newp2node),
@@ 557,7 624,7 @@ def replacerev(ui, repo, ctx, filedata,
filectxfn=filectxfn,
user=ctx.user(),
date=ctx.date(),
- extra=ctx.extra(),
+ extra=extra,
branch=ctx.branch(),
editor=None)
sucnode = memctx.commit()
@@ 573,14 640,21 @@ def getfixers(ui):
Each value is a Fixer object with methods that implement the behavior of the
fixer's config suboptions. Does not validate the config values.
"""
- result = {}
+ fixers = {}
for name in fixernames(ui):
- result[name] = Fixer()
+ fixers[name] = Fixer()
attrs = ui.configsuboptions('fix', name)[1]
- for key in FIXER_ATTRS:
- setattr(result[name], pycompat.sysstr('_' + key),
- attrs.get(key, ''))
- return result
+ if 'fileset' in attrs and 'pattern' not in attrs:
+ ui.warn(_('the fix.tool:fileset config name is deprecated; '
+ 'please rename it to fix.tool:pattern\n'))
+ attrs['pattern'] = attrs['fileset']
+ for key, default in FIXER_ATTRS.items():
+ setattr(fixers[name], pycompat.sysstr('_' + key),
+ attrs.get(key, default))
+ fixers[name]._priority = int(fixers[name]._priority)
+ return collections.OrderedDict(
+ sorted(fixers.items(), key=lambda item: item[1]._priority,
+ reverse=True))
def fixernames(ui):
"""Returns the names of [fix] config options that have suboptions"""
@@ 595,7 669,7 @@ class Fixer(object):
def affects(self, opts, fixctx, path):
"""Should this fixer run on the file at the given path and context?"""
- return scmutil.match(fixctx, [self._fileset], opts)(path)
+ return scmutil.match(fixctx, [self._pattern], opts)(path)
def command(self, ui, path, rangesfn):
"""A shell command to use to invoke this fixer on the given file/lines
M hgext/highlight/__init__.py +1 -1
@@ 87,7 87,7 @@ def generate_css(web):
]))
return web.res.sendresponse()
-def extsetup():
+def extsetup(ui):
# monkeypatch in the new version
extensions.wrapfunction(webcommands, '_filerevision',
filerevision_highlight)
M hgext/histedit.py +619 -27
@@ 183,7 183,17 @@ unexpectedly::
from __future__ import absolute_import
+# chistedit dependencies that are not available everywhere
+try:
+ import fcntl
+ import termios
+except ImportError:
+ fcntl = None
+ termios = None
+
+import functools
import os
+import struct
from mercurial.i18n import _
from mercurial import (
@@ 197,7 207,7 @@ from mercurial import (
exchange,
extensions,
hg,
- lock,
+ logcmdutil,
merge as mergemod,
mergeutil,
node,
@@ 210,11 220,11 @@ from mercurial import (
util,
)
from mercurial.utils import (
+ dateutil,
stringutil,
)
pickle = util.pickle
-release = lock.release
cmdtable = {}
command = registrar.command(cmdtable)
@@ 235,6 245,9 @@ configitem('histedit', 'linelen',
configitem('histedit', 'singletransaction',
default=False,
)
+configitem('ui', 'interface.histedit',
+ default=None,
+)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
@@ 294,21 307,17 @@ Commands:
return ''.join(['# %s\n' % l if l else '#\n' for l in lines])
class histeditstate(object):
- def __init__(self, repo, parentctxnode=None, actions=None, keep=None,
- topmost=None, replacements=None, lock=None, wlock=None):
+ def __init__(self, repo):
self.repo = repo
- self.actions = actions
- self.keep = keep
- self.topmost = topmost
- self.parentctxnode = parentctxnode
- self.lock = lock
- self.wlock = wlock
+ self.actions = None
+ self.keep = None
+ self.topmost = None
+ self.parentctxnode = None
+ self.lock = None
+ self.wlock = None
self.backupfile = None
self.stateobj = statemod.cmdstate(repo, 'histedit-state')
- if replacements is None:
- self.replacements = []
- else:
- self.replacements = replacements
+ self.replacements = []
def read(self):
"""Load histedit state from disk and set fields appropriately."""
@@ 519,9 528,12 @@ class histeditaction(object):
editor = self.commiteditor()
commit = commitfuncfor(repo, rulectx)
-
+ if repo.ui.configbool('rewrite', 'update-timestamp'):