956ec6f1320d — Augie Fackler (4.8rc0)
merge to stable for 4.8 release freeze
807 files changed, 61280 insertions(+), 24823 deletions(-)

M .hgignore
M Makefile
M contrib/bash_completion
M contrib/byteify-strings.py
A => contrib/catapipe.py
M contrib/check-code.py
M contrib/check-commit
M contrib/check-config.py
M contrib/chg/hgclient.c
M contrib/clang-format-ignorelist
M contrib/dumprevlog
M contrib/fuzz/Makefile
A => contrib/fuzz/manifest.cc
A => contrib/fuzz/manifest_corpus.py
M contrib/hgclient.py
M contrib/import-checker.py
M contrib/packaging/Makefile
M contrib/packaging/builddeb
M contrib/perf.py
M contrib/python-hook-examples.py
M contrib/python-zstandard/MANIFEST.in
M contrib/python-zstandard/NEWS.rst
M contrib/python-zstandard/README.rst
A => contrib/python-zstandard/c-ext/compressionchunker.c
M contrib/python-zstandard/c-ext/compressionparams.c
M contrib/python-zstandard/c-ext/compressionreader.c
M contrib/python-zstandard/c-ext/compressionwriter.c
M contrib/python-zstandard/c-ext/compressobj.c
M contrib/python-zstandard/c-ext/compressor.c
M contrib/python-zstandard/c-ext/constants.c
M contrib/python-zstandard/c-ext/decompressionreader.c
M contrib/python-zstandard/c-ext/decompressobj.c
M contrib/python-zstandard/c-ext/decompressor.c
M contrib/python-zstandard/c-ext/python-zstandard.h
M contrib/python-zstandard/make_cffi.py
M contrib/python-zstandard/setup_zstd.py
M contrib/python-zstandard/tests/test_compressor.py
M contrib/python-zstandard/tests/test_compressor_fuzzing.py
M contrib/python-zstandard/tests/test_data_structures.py
M contrib/python-zstandard/tests/test_data_structures_fuzzing.py
M contrib/python-zstandard/tests/test_decompressor.py
M contrib/python-zstandard/tests/test_module_attributes.py
M contrib/python-zstandard/tests/test_train_dictionary.py
M contrib/python-zstandard/zstandard/__init__.py
M contrib/python-zstandard/zstd.c
M contrib/python-zstandard/zstd/common/bitstream.h
M contrib/python-zstandard/zstd/common/compiler.h
M contrib/python-zstandard/zstd/common/cpu.h
A => contrib/python-zstandard/zstd/common/debug.c
A => contrib/python-zstandard/zstd/common/debug.h
M contrib/python-zstandard/zstd/common/entropy_common.c
M contrib/python-zstandard/zstd/common/fse.h
M contrib/python-zstandard/zstd/common/fse_decompress.c
M contrib/python-zstandard/zstd/common/huf.h
M contrib/python-zstandard/zstd/common/mem.h
M contrib/python-zstandard/zstd/common/pool.c
M contrib/python-zstandard/zstd/common/pool.h
M contrib/python-zstandard/zstd/common/xxhash.c
M contrib/python-zstandard/zstd/common/zstd_common.c
M contrib/python-zstandard/zstd/common/zstd_internal.h
M contrib/python-zstandard/zstd/compress/fse_compress.c
A => contrib/python-zstandard/zstd/compress/hist.c
A => contrib/python-zstandard/zstd/compress/hist.h
M contrib/python-zstandard/zstd/compress/huf_compress.c
M contrib/python-zstandard/zstd/compress/zstd_compress.c
M contrib/python-zstandard/zstd/compress/zstd_compress_internal.h
M contrib/python-zstandard/zstd/compress/zstd_double_fast.c
M contrib/python-zstandard/zstd/compress/zstd_double_fast.h
M contrib/python-zstandard/zstd/compress/zstd_fast.c
M contrib/python-zstandard/zstd/compress/zstd_fast.h
M contrib/python-zstandard/zstd/compress/zstd_lazy.c
M contrib/python-zstandard/zstd/compress/zstd_lazy.h
M contrib/python-zstandard/zstd/compress/zstd_ldm.c
M contrib/python-zstandard/zstd/compress/zstd_ldm.h
M contrib/python-zstandard/zstd/compress/zstd_opt.c
M contrib/python-zstandard/zstd/compress/zstd_opt.h
M contrib/python-zstandard/zstd/compress/zstdmt_compress.c
M contrib/python-zstandard/zstd/compress/zstdmt_compress.h
M contrib/python-zstandard/zstd/decompress/huf_decompress.c
M contrib/python-zstandard/zstd/decompress/zstd_decompress.c
M contrib/python-zstandard/zstd/dictBuilder/cover.c
A => contrib/python-zstandard/zstd/dictBuilder/cover.h
M contrib/python-zstandard/zstd/dictBuilder/divsufsort.c
A => contrib/python-zstandard/zstd/dictBuilder/fastcover.c
M contrib/python-zstandard/zstd/dictBuilder/zdict.c
M contrib/python-zstandard/zstd/dictBuilder/zdict.h
M contrib/python-zstandard/zstd/zstd.h
M contrib/python-zstandard/zstd_cffi.py
M contrib/python3-whitelist
A => contrib/relnotes
M contrib/revsetbenchmarks.py
M contrib/showstack.py
M contrib/simplemerge
A => contrib/testparseutil.py
M contrib/undumprevlog
M contrib/wix/help.wxs
M contrib/zsh_completion
M doc/check-seclevel.py
M doc/gendoc.py
M doc/runrst
M hg
M hgdemandimport/demandimportpy2.py
A => hgdemandimport/tracing.py
A => hgext/absorb.py
M hgext/acl.py
M hgext/amend.py
M hgext/beautifygraph.py
M hgext/blackbox.py
M hgext/censor.py
M hgext/children.py
M hgext/churn.py
A => hgext/closehead.py
M hgext/commitextras.py
M hgext/convert/common.py
M hgext/convert/cvs.py
M hgext/convert/cvsps.py
M hgext/convert/gnuarch.py
M hgext/convert/hg.py
M hgext/convert/monotone.py
M hgext/convert/subversion.py
M hgext/eol.py
M hgext/extdiff.py
M hgext/factotum.py
A => hgext/fastannotate/__init__.py
A => hgext/fastannotate/commands.py
A => hgext/fastannotate/context.py
A => hgext/fastannotate/error.py
A => hgext/fastannotate/formatter.py
A => hgext/fastannotate/protocol.py
A => hgext/fastannotate/revmap.py
A => hgext/fastannotate/support.py
M hgext/fetch.py
M hgext/fix.py
M hgext/fsmonitor/__init__.py
M hgext/fsmonitor/pywatchman/__init__.py
M hgext/githelp.py
M hgext/gpg.py
M hgext/graphlog.py
M hgext/hgk.py
M hgext/histedit.py
M hgext/infinitepush/__init__.py
M hgext/infinitepush/common.py
M hgext/infinitepush/store.py
M hgext/journal.py
M hgext/keyword.py
M hgext/largefiles/basestore.py
M hgext/largefiles/lfcommands.py
M hgext/largefiles/lfutil.py
M hgext/largefiles/overrides.py
M hgext/largefiles/uisetup.py
M hgext/lfs/__init__.py
M hgext/lfs/blobstore.py
M hgext/lfs/pointer.py
M hgext/lfs/wireprotolfsserver.py
M hgext/lfs/wrapper.py
M hgext/logtoprocess.py
M hgext/mq.py
M hgext/narrow/TODO.rst
M hgext/narrow/__init__.py
M hgext/narrow/narrowbundle2.py
R hgext/narrow/narrowchangegroup.py => 
M hgext/narrow/narrowcommands.py
R hgext/narrow/narrowcopies.py => 
M hgext/narrow/narrowdirstate.py
R hgext/narrow/narrowpatch.py => 
M hgext/narrow/narrowrepo.py
R hgext/narrow/narrowrevlog.py => 
M hgext/narrow/narrowtemplates.py
M hgext/narrow/narrowwirepeer.py
M hgext/notify.py
M hgext/patchbomb.py
M contrib/phabricator.py => hgext/phabricator.py
M hgext/purge.py
M hgext/rebase.py
M hgext/record.py
M hgext/releasenotes.py
M hgext/relink.py
M hgext/remotenames.py
M hgext/schemes.py
M hgext/share.py
M hgext/shelve.py
M hgext/show.py
M hgext/sparse.py
M hgext/split.py
A => hgext/sqlitestore.py
M hgext/strip.py
M hgext/transplant.py
M hgext/uncommit.py
M hgext/win32text.py
M i18n/check-translation.py
M i18n/hggettext
M i18n/polib.py
M i18n/posplit
M mercurial/__init__.py
M mercurial/ancestor.py
M mercurial/archival.py
M mercurial/bookmarks.py
M mercurial/branchmap.py
M mercurial/bundle2.py
M mercurial/bundlerepo.py
M mercurial/cext/mpatch.c
M mercurial/cext/osutil.c
M mercurial/cext/parsers.c
M mercurial/cext/revlog.c
M mercurial/changegroup.py
M mercurial/changelog.py
M mercurial/chgserver.py
M mercurial/cmdutil.py
M mercurial/color.py
M mercurial/commands.py
M mercurial/commandserver.py
M mercurial/compat.h
M mercurial/configitems.py
M mercurial/context.py
M mercurial/copies.py
M mercurial/crecord.py
M mercurial/dagop.py
M mercurial/dagparser.py
R mercurial/dagutil.py => 
M mercurial/debugcommands.py
M mercurial/default.d/mergetools.rc
M mercurial/diffhelper.py
M mercurial/dirstate.py
M mercurial/dirstateguard.py
M mercurial/dispatch.py
M mercurial/encoding.py
M mercurial/error.py
M mercurial/exchange.py
A => mercurial/exchangev2.py
M mercurial/exewrapper.c
M mercurial/extensions.py
M mercurial/filelog.py
M mercurial/filemerge.py
M mercurial/fileset.py
M mercurial/fileset.py => mercurial/filesetlang.py
M mercurial/formatter.py
M mercurial/graphmod.py
M mercurial/help.py
M mercurial/help/config.txt
A => mercurial/help/internals/cbor.txt
M mercurial/help/internals/changegroups.txt
A => mercurial/help/internals/linelog.txt
M mercurial/help/internals/wireprotocol.txt
M mercurial/help/internals/wireprotocol.txt => mercurial/help/internals/wireprotocolrpc.txt
M mercurial/help/internals/wireprotocol.txt => mercurial/help/internals/wireprotocolv2.txt
M mercurial/help/merge-tools.txt
M mercurial/hg.py
M mercurial/hgweb/common.py
M mercurial/hgweb/hgweb_mod.py
M mercurial/hgweb/hgwebdir_mod.py
M mercurial/hgweb/server.py
M mercurial/hgweb/webcommands.py
M mercurial/hgweb/webutil.py
M mercurial/hook.py
M mercurial/httppeer.py
M mercurial/i18n.py
M mercurial/keepalive.py
A => mercurial/linelog.py
M mercurial/localrepo.py
M mercurial/logcmdutil.py
M mercurial/lsprof.py
M mercurial/lsprofcalltree.py
M mercurial/mail.py
M mercurial/manifest.py
M mercurial/match.py
M mercurial/mdiff.py
M mercurial/merge.py
M mercurial/minifileset.py
M mercurial/minirst.py
M mercurial/narrowspec.py
M mercurial/node.py
M mercurial/obsolete.py
M mercurial/obsutil.py
M mercurial/parser.py
M mercurial/patch.py
M mercurial/phases.py
M mercurial/policy.py
M mercurial/posix.py
M mercurial/profiling.py
M mercurial/pure/osutil.py
M mercurial/pure/parsers.py
M mercurial/pvec.py
M mercurial/pycompat.py
M mercurial/registrar.py
M mercurial/repair.py
M mercurial/repository.py
M mercurial/repoview.py
M mercurial/revlog.py
A => mercurial/revlogutils/__init__.py
A => mercurial/revlogutils/constants.py
M mercurial/revlog.py => mercurial/revlogutils/deltas.py
M mercurial/revset.py
M mercurial/revsetlang.py
M mercurial/scmutil.py
M mercurial/server.py
M mercurial/setdiscovery.py
M mercurial/simplemerge.py
M mercurial/smartset.py
M mercurial/sparse.py
M mercurial/sshpeer.py
M mercurial/state.py
M mercurial/statichttprepo.py
M mercurial/statprof.py
M mercurial/store.py
M mercurial/streamclone.py
M mercurial/subrepo.py
M mercurial/templatefilters.py
M mercurial/templatefuncs.py
M mercurial/templatekw.py
M mercurial/templater.py
M mercurial/templates/json/map
M mercurial/templates/map-cmdline.bisect
M mercurial/templates/map-cmdline.default
M mercurial/templates/map-cmdline.phases
M mercurial/templates/map-cmdline.status
M mercurial/templateutil.py
A => mercurial/testing/__init__.py
A => mercurial/testing/storage.py
M mercurial/transaction.py
M mercurial/treediscovery.py
M mercurial/ui.py
M mercurial/unionrepo.py
M mercurial/upgrade.py
M mercurial/url.py
M mercurial/urllibcompat.py
M mercurial/util.py
M mercurial/utils/cborutil.py
M mercurial/utils/dateutil.py
M mercurial/utils/procutil.py
A => mercurial/utils/storageutil.py
M mercurial/utils/stringutil.py
M mercurial/verify.py
M mercurial/vfs.py
M mercurial/win32.py
M mercurial/windows.py
M mercurial/wireprotoframing.py
M mercurial/wireprotoserver.py
M mercurial/wireprototypes.py
M mercurial/wireprotov1peer.py
M mercurial/wireprotov1server.py
M mercurial/wireprotov2peer.py
M mercurial/wireprotov2server.py
M rust/Cargo.lock
M rust/Cargo.toml
A => rust/chg/Cargo.lock
A => rust/chg/Cargo.toml
A => rust/chg/build.rs
A => rust/chg/src/attachio.rs
A => rust/chg/src/clientext.rs
A => rust/chg/src/lib.rs
A => rust/chg/src/locator.rs
A => rust/chg/src/main.rs
A => rust/chg/src/message.rs
A => rust/chg/src/procutil.rs
A => rust/chg/src/runcommand.rs
A => rust/chg/src/sendfds.c
M contrib/chg/procutil.c => rust/chg/src/sighandlers.c
A => rust/chg/src/uihandler.rs
A => rust/hg-core/Cargo.toml
A => rust/hg-core/rustfmt.toml
A => rust/hg-core/src/ancestors.rs
A => rust/hg-core/src/lib.rs
A => rust/hg-direct-ffi/Cargo.toml
A => rust/hg-direct-ffi/rustfmt.toml
A => rust/hg-direct-ffi/src/ancestors.rs
A => rust/hg-direct-ffi/src/lib.rs
M setup.py
A => tests/artifacts/PURPOSE
A => tests/artifacts/cache/big-file-churn.hg.md5
A => tests/artifacts/scripts/generate-churning-bundle.py
M tests/badserverext.py
M tests/bruterebase.py
M tests/bzr-definitions
M tests/common-pattern.py
M tests/drawdag.py
M tests/dummysmtpd.py
M tests/f
M tests/fsmonitor-run-tests.py
M tests/get-with-headers.py
M tests/heredoctest.py
M tests/hghave.py
M tests/hgweberror.py
A => tests/phabricator/accept-4564.json
A => tests/phabricator/phabread-4480.json
A => tests/phabricator/phabread-conduit-error.json
A => tests/phabricator/phabsend-create-alpha.json
A => tests/phabricator/phabsend-update-alpha-create-beta.json
A => tests/printrevset.py
A => tests/pullext.py
M tests/run-tests.py
M tests/simplestorerepo.py
M tests/svn-safe-append.py
M tests/svnxml.py
A => tests/test-absorb-edit-lines.t
A => tests/test-absorb-filefixupstate.py
A => tests/test-absorb-phase.t
A => tests/test-absorb-rename.t
A => tests/test-absorb-strip.t
A => tests/test-absorb.t
M tests/test-add.t
M tests/test-addremove-similar.t
M tests/test-addremove.t
M tests/test-alias.t
M tests/test-amend.t
M tests/test-ancestor.py
M tests/test-ancestor.py.out
M tests/test-annotate.t
M tests/test-archive.t
M tests/test-audit-path.t
M tests/test-backout.t
M tests/test-bad-extension.t
M tests/test-bad-pull.t
M tests/test-basic.t
M tests/test-bdiff.py
M tests/test-blackbox.t
M tests/test-bookmarks-current.t
M tests/test-bookmarks-pushpull.t
M tests/test-bookmarks.t
M tests/test-branches.t
M tests/test-bundle-r.t
M tests/test-bundle-type.t
M tests/test-bundle.t
M tests/test-bundle2-exchange.t
M tests/test-bundle2-format.t
M tests/test-bundle2-pushback.t
M tests/test-bundle2-remote-changegroup.t
M tests/test-casefolding.t
M tests/test-cat.t
M tests/test-cbor.py
M tests/test-censor.t
M tests/test-check-code.t
M tests/test-check-config.t
M tests/test-check-help.t
M tests/test-check-interfaces.py
M tests/test-check-module-imports.t
M tests/test-check-py3-compat.t
M tests/test-chg.t
M tests/test-churn.t
M tests/test-clone-cgi.t
M tests/test-clone-pull-corruption.t
M tests/test-clone-r.t
M tests/test-clone-uncompressed.t
M tests/test-clone.t
M tests/test-clonebundles.t
A => tests/test-close-head.t
M tests/test-commandserver.t
M tests/test-commit-amend.t
M tests/test-commit-interactive-curses.t
M tests/test-commit-interactive.t
M tests/test-commit-multiple.t
M tests/test-commit.t
M tests/test-completion.t
M tests/test-conflict.t
M tests/test-confused-revert.t
M tests/test-context-metadata.t
M tests/test-contrib-check-code.t
M tests/test-contrib-check-commit.t
M tests/test-contrib-dumprevlog.t
M tests/test-contrib-perf.t
A => tests/test-contrib-relnotes.t
A => tests/test-contrib-testparseutil.t
M tests/test-contrib.t
M tests/test-convert-bzr-ghosts.t
M tests/test-convert-bzr-merges.t
M tests/test-convert-bzr-treeroot.t
M tests/test-convert-bzr.t
M tests/test-convert-clonebranches.t
M tests/test-convert-filemap.t
M tests/test-convert-git.t
M tests/test-convert-hg-source.t
M tests/test-convert-hg-svn.t
M tests/test-convert-mtn.t
M tests/test-convert-p4-filetypes.t
M tests/test-convert-svn-branches.t
M tests/test-convert-svn-encoding.t
M tests/test-convert-svn-move.t
M tests/test-convert-svn-sink.t
M tests/test-convert-svn-source.t
M tests/test-convert.t
M tests/test-copy-move-merge.t
M tests/test-copy.t
M tests/test-copytrace-heuristics.t
M tests/test-debugcommands.t
M tests/test-debugindexdot.t
M tests/test-devel-warnings.t
M tests/test-diff-binary-file.t
M tests/test-diff-color.t
M tests/test-diff-newlines.t
M tests/test-diff-upgrade.t
M tests/test-dirstate-race.t
M tests/test-dispatch.t
M tests/test-duplicateoptions.py
M tests/test-empty.t
M tests/test-encoding-align.t
M tests/test-encoding.t
M tests/test-eol.t
M tests/test-excessive-merge.t
M tests/test-exchange-obsmarkers-case-A1.t
M tests/test-exchange-obsmarkers-case-A2.t
M tests/test-exchange-obsmarkers-case-A3.t
M tests/test-exchange-obsmarkers-case-A4.t
M tests/test-exchange-obsmarkers-case-A5.t
M tests/test-exchange-obsmarkers-case-B3.t
M tests/test-exchange-obsmarkers-case-B5.t
M tests/test-exchange-obsmarkers-case-C2.t
M tests/test-exchange-obsmarkers-case-D1.t
M tests/test-exchange-obsmarkers-case-D4.t
M tests/test-extdiff.t
M tests/test-extension.t => tests/test-extension-timing.t
M tests/test-extension.t
A => tests/test-fastannotate-corrupt.t
A => tests/test-fastannotate-diffopts.t
A => tests/test-fastannotate-hg.t
A => tests/test-fastannotate-perfhack.t
A => tests/test-fastannotate-protocol.t
A => tests/test-fastannotate-renames.t
A => tests/test-fastannotate-revmap.py
A => tests/test-fastannotate.t
M tests/test-filebranch.t
M tests/test-fileset-generated.t
M tests/test-fileset.t
M tests/test-fix-topology.t
M tests/test-fix.t
M tests/test-flagprocessor.t
M tests/test-fncache.t
M tests/test-gendoc.t
M tests/test-generaldelta.t
M tests/test-globalopts.t
M tests/test-glog-beautifygraph.t
M tests/test-glog-topological.t
M tests/test-glog.t
M tests/test-graft.t
M tests/test-grep.t
M tests/test-hardlinks.t
M tests/test-help.t
M tests/test-hghave.t
M tests/test-hgignore.t
M tests/test-hgrc.t
M tests/test-hgweb-commands.t
M tests/test-hgweb-json.t
M tests/test-hgweb-no-path-info.t
M tests/test-hgweb-no-request-uri.t
M tests/test-hgweb-non-interactive.t
M tests/test-hgweb-raw.t
M tests/test-hgweb.t
M tests/test-hgwebdir.t
M tests/test-highlight.t
M tests/test-histedit-arguments.t
M tests/test-histedit-base.t
M tests/test-histedit-commute.t
M tests/test-histedit-obsolete.t
M tests/test-hook.t
M tests/test-http-api-httpv2.t
M tests/test-http-api.t
M tests/test-http-bad-server.t
M tests/test-http-bundle1.t
M tests/test-http-clone-r.t
M tests/test-http-permissions.t
M tests/test-http-protocol.t
M tests/test-http-proxy.t
M tests/test-http.t
M tests/test-https.t
M tests/test-i18n.t
M tests/test-identify.t
M tests/test-impexp-branch.t
M tests/test-import-bypass.t
M tests/test-import-context.t
M tests/test-import-eol.t
M tests/test-import-git.t
M tests/test-import-merge.t
M tests/test-import.t
M tests/test-imports-checker.t
M tests/test-incoming-outgoing.t
M tests/test-infinitepush-bundlestore.t
M tests/test-infinitepush-ci.t
M tests/test-infinitepush.t
M tests/test-inherit-mode.t
M tests/test-install.t
M tests/test-issue1175.t
M tests/test-issue4074.t
A => tests/test-issue5979.t
M tests/test-issue660.t
M tests/test-journal-exists.t
M tests/test-journal.t
M tests/test-keyword.t
M tests/test-largefiles-misc.t
M tests/test-largefiles-update.t
M tests/test-largefiles-wireproto.t
M tests/test-largefiles.t
M tests/test-lfconvert.t
M tests/test-lfs-largefiles.t
M tests/test-lfs-pointer.py
M tests/test-lfs-serve-access.t
M tests/test-lfs-serve.t
M tests/test-lfs-test-server.t
M tests/test-lfs.t
A => tests/test-linelog.py
M tests/test-locate.t
M tests/test-lock.py
M tests/test-log-exthook.t
M tests/test-log.t
M tests/test-logexchange.t
M tests/test-logtoprocess.t
M tests/test-lrucachedict.py
R tests/test-lrucachedict.py.out => 
M tests/test-mactext.t
M tests/test-manifest.t
M tests/test-match.py
M tests/test-merge-changedelete.t
M tests/test-merge-default.t
M tests/test-merge-force.t
A => tests/test-merge-no-file-change.t
M tests/test-merge-remove.t
M tests/test-merge-subrepos.t
M tests/test-merge-symlinks.t
M tests/test-merge-tools.t
M tests/test-merge1.t
M tests/test-minifileset.py
M tests/test-minirst.py
M tests/test-mq-eol.t
M tests/test-mq-missingfiles.t
M tests/test-mq-pull-from-bundle.t
M tests/test-mq-qimport.t
M tests/test-mq-qpush-fail.t
M tests/test-mq-subrepo-svn.t
M tests/test-mq.t
M tests/test-mv-cp-st-diff.t
M tests/test-narrow-clone-no-ellipsis.t
M tests/test-narrow-clone-non-narrow-server.t
A => tests/test-narrow-clone-stream.t
M tests/test-narrow-clone.t
M tests/test-narrow-commit.t
M tests/test-narrow-debugcommands.t
M tests/test-narrow-debugrebuilddirstate.t
M tests/test-narrow-exchange.t
M tests/test-narrow-expanddirstate.t
M tests/test-narrow-patterns.t
M tests/test-narrow-pull.t
M tests/test-narrow-rebase.t
M tests/test-narrow-strip.t
A => tests/test-narrow-trackedcmd.t
M tests/test-narrow-widen.t => tests/test-narrow-widen-no-ellipsis.t
M tests/test-narrow-widen.t
M tests/test-narrow.t
M tests/test-newcgi.t
M tests/test-newercgi.t
M tests/test-no-symlinks.t
M tests/test-nointerrupt.t
M tests/test-notify-changegroup.t
M tests/test-notify.t
M tests/test-obsmarker-template.t
M tests/test-obsolete-bundle-strip.t
M tests/test-obsolete-changeset-exchange.t
M tests/test-obsolete-checkheads.t
M tests/test-obsolete-distributed.t
M tests/test-obsolete.t
M tests/test-oldcgi.t
M tests/test-origbackup-conflict.t
M tests/test-pager-legacy.t
M tests/test-pager.t
M tests/test-parseindex.t
M tests/test-parseindex2.py
R tests/test-parseindex2.py.out => 
M tests/test-patch-offset.t
M tests/test-patch.t
M tests/test-patchbomb-bookmark.t
M tests/test-patchbomb-tls.t
M tests/test-patchbomb.t
M tests/test-pathconflicts-basic.t
M tests/test-pending.t
M tests/test-permissions.t
A => tests/test-phabricator.t
M tests/test-phases-exchange.t
M tests/test-phases.t
M tests/test-profile.t
M tests/test-progress.t
M tests/test-propertycache.py
M tests/test-pull-bundle.t
M tests/test-pull-permission.t
M tests/test-pull-pull-corruption.t
M tests/test-pull.t
M tests/test-purge.t
M tests/test-push-cgi.t
M tests/test-push-checkheads-partial-C1.t
M tests/test-push-checkheads-partial-C2.t
M tests/test-push-checkheads-partial-C3.t
M tests/test-push-checkheads-partial-C4.t
M tests/test-push-checkheads-pruned-B2.t
M tests/test-push-checkheads-pruned-B3.t
M tests/test-push-checkheads-pruned-B4.t
M tests/test-push-checkheads-pruned-B5.t
M tests/test-push-checkheads-pruned-B8.t
M tests/test-push-checkheads-superceed-A2.t
M tests/test-push-checkheads-superceed-A3.t
M tests/test-push-checkheads-superceed-A6.t
M tests/test-push-checkheads-superceed-A7.t
M tests/test-push-checkheads-unpushed-D2.t
M tests/test-push-checkheads-unpushed-D3.t
M tests/test-push-checkheads-unpushed-D4.t
M tests/test-push-checkheads-unpushed-D5.t
M tests/test-push-race.t
M tests/test-push-warn.t
M tests/test-push.t
R tests/test-py3-commands.t => 
A => tests/test-rebase-backup.t
M tests/test-rebase-base-flag.t
M tests/test-rebase-collapse.t
M tests/test-rebase-conflicts.t
M tests/test-rebase-dest.t
M tests/test-rebase-inmemory.t
M tests/test-rebase-named-branches.t
M tests/test-rebase-newancestor.t
M tests/test-rebase-obsolete.t
M tests/test-rebase-parameters.t
M tests/test-rebase-partial.t
M tests/test-rebase-scenario-global.t
M tests/test-rebuildstate.t
M tests/test-relink.t
M tests/test-remove.t
M tests/test-removeemptydirs.t
M tests/test-rename-merge2.t
M tests/test-rename.t
M tests/test-repair-strip.t
M tests/test-requires.t
M tests/test-resolve.t
M tests/test-revert-interactive.t
M tests/test-revert.t
A => tests/test-revisions.t
M tests/test-revlog-ancestry.py
M tests/test-revlog-ancestry.py.out
M tests/test-revlog-raw.py
M tests/test-revlog.t
M tests/test-revset.t
M tests/test-revset2.t
M tests/test-rollback.t
M tests/test-run-tests.t
M tests/test-schemes.t
M tests/test-serve.t
M tests/test-setdiscovery.t
M tests/test-share.t
M tests/test-shelve.t
M tests/test-show-work.t
M tests/test-simple-update.t
M tests/test-single-head.t
M tests/test-sparse-clone.t
M tests/test-sparse-merges.t
M tests/test-sparse-profiles.t
A => tests/test-sparse-revlog.t
M tests/test-sparse.t
M tests/test-split.t
A => tests/test-sqlitestore.t
M tests/test-ssh-bundle1.t
M tests/test-ssh-clone-r.t
M tests/test-ssh-proto-unbundle.t
M tests/test-ssh-proto.t
A => tests/test-ssh-repoerror.t
M tests/test-ssh.t
M tests/test-static-http.t
M tests/test-status-color.t
M tests/test-status-inprocess.py
M tests/test-status-rev.t
M tests/test-status.t
A => tests/test-storage.py
M tests/test-stream-bundle-v2.t
M tests/test-strict.t
M tests/test-strip-cross.t
M tests/test-strip.t
M tests/test-subrepo-deep-nested-change.t
M tests/test-subrepo-missing.t
M tests/test-subrepo-svn.t
M tests/test-subrepo.t
M tests/test-symlink-os-yes-fs-no.py.out
M tests/test-tag.t
M tests/test-template-functions.t
M tests/test-template-keywords.t
M tests/test-template-map.t
M tests/test-tools.t
M tests/test-transplant.t
M tests/test-treemanifest.t
M tests/test-unionrepo.t
M tests/test-update-names.t
M tests/test-upgrade-repo.t
A => tests/test-util.py
M tests/test-verify.t
M tests/test-walk.t
M tests/test-win32text.t
A => tests/test-wireproto-caching.t
M tests/test-wireproto-clientreactor.py
M tests/test-wireproto-command-branchmap.t
M tests/test-wireproto-command-capabilities.t
A => tests/test-wireproto-command-changesetdata.t
A => tests/test-wireproto-command-filedata.t
A => tests/test-wireproto-command-filesdata.t
M tests/test-wireproto-command-heads.t
M tests/test-wireproto-command-known.t
M tests/test-wireproto-command-listkeys.t
M tests/test-wireproto-command-lookup.t
A => tests/test-wireproto-command-manifestdata.t
M tests/test-wireproto-command-pushkey.t
A => tests/test-wireproto-command-rawstorefiledata.t
A => tests/test-wireproto-content-redirects.t
A => tests/test-wireproto-exchangev2-shallow.t
A => tests/test-wireproto-exchangev2.t
M tests/test-wireproto-framing.py
M tests/test-wireproto-serverreactor.py
M tests/wireprotohelpers.sh
A => tests/wireprotosimplecache.py
M .hgignore +2 -0
@@ 19,6 19,7 @@ syntax: glob
 *.zip
 \#*\#
 .\#*
+tests/artifacts/cache/big-file-churn.hg
 tests/.coverage*
 tests/.testtimes*
 tests/.hypothesis

          
@@ 55,6 56,7 @@ locale/*/LC_MESSAGES/hg.mo
 hgext/__index__.py
 
 rust/target/
+rust/*/target/
 
 # Generated wheels
 wheelhouse/

          
M Makefile +3 -2
@@ 9,7 9,8 @@ PYTHON=python
 $(eval HGROOT := $(shell pwd))
 HGPYTHONS ?= $(HGROOT)/build/pythons
 PURE=
-PYFILES:=$(shell find mercurial hgext doc -name '*.py')
+PYFILESCMD=find mercurial hgext doc -name '*.py'
+PYFILES:=$(shell $(PYFILESCMD))
 DOCFILES=mercurial/help/*.txt
 export LANGUAGE=C
 export LC_ALL=C

          
@@ 145,7 146,7 @@ i18n/hg.pot: $(PYFILES) $(DOCFILES) i18n
         # parse them even though they are not marked for translation.
         # Extracting with an explicit encoding of ISO-8859-1 will make
         # xgettext "parse" and ignore them.
-	echo $(PYFILES) | xargs \
+	$(PYFILESCMD) | xargs \
 	  xgettext --package-name "Mercurial" \
 	  --msgid-bugs-address "<mercurial-devel@mercurial-scm.org>" \
 	  --copyright-holder "Matt Mackall <mpm@selenic.com> and others" \

          
M contrib/bash_completion +13 -1
@@ 152,7 152,7 @@ shopt -s extglob
 {
     local cur prev cmd cmd_index opts i aliashg
     # global options that receive an argument
-    local global_args='--cwd|-R|--repository'
+    local global_args='--cwd|-R|--repository|--color|--config|--encoding|--encodingmode|--pager'
     local hg="$1"
     local canonical=0
 

          
@@ 206,6 206,18 @@ shopt -s extglob
             _hg_fix_wordlist
             return
         ;;
+        --color)
+            local choices='true false yes no always auto never debug'
+            COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W '$choices' -- "$cur"))
+            _hg_fix_wordlist
+            return
+        ;;
+        --pager)
+            local choices='true false yes no always auto never'
+            COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W '$choices' -- "$cur"))
+            _hg_fix_wordlist
+            return
+        ;;
     esac
 
     if [ -z "$cmd" ] || [ $COMP_CWORD -eq $i ]; then

          
M contrib/byteify-strings.py +5 -0
@@ 169,6 169,11 @@ def replacetokens(tokens, opts):
                 yield adjusttokenpos(t._replace(string=fn[4:]), coloffset)
                 continue
 
+        # Looks like "if __name__ == '__main__'".
+        if (t.type == token.NAME and t.string == '__name__'
+            and _isop(i + 1, '==')):
+            _ensuresysstr(i + 2)
+
         # Emit unmodified token.
         yield adjusttokenpos(t, coloffset)
 

          
A => contrib/catapipe.py +90 -0
@@ 0,0 1,90 @@ 
+#!/usr/bin/env python3
+#
+# Copyright 2018 Google LLC.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""Tool read primitive events from a pipe to produce a catapult trace.
+
+For now the event stream supports
+
+  START $SESSIONID ...
+
+and
+
+  END $SESSIONID ...
+
+events. Everything after the SESSIONID (which must not contain spaces)
+is used as a label for the event. Events are timestamped as of when
+they arrive in this process and are then used to produce catapult
+traces that can be loaded in Chrome's about:tracing utility. It's
+important that the event stream *into* this process stay simple,
+because we have to emit it from the shell scripts produced by
+run-tests.py.
+
+Typically you'll want to place the path to the named pipe in the
+HGCATAPULTSERVERPIPE environment variable, which both run-tests and hg
+understand.
+"""
+from __future__ import absolute_import, print_function
+
+import argparse
+import json
+import os
+import timeit
+
+_TYPEMAP = {
+    'START': 'B',
+    'END': 'E',
+}
+
+_threadmap = {}
+
+# Timeit already contains the whole logic about which timer to use based on
+# Python version and OS
+timer = timeit.default_timer
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('pipe', type=str, nargs=1,
+                        help='Path of named pipe to create and listen on.')
+    parser.add_argument('output', default='trace.json', type=str, nargs='?',
+                        help='Path of json file to create where the traces '
+                             'will be stored.')
+    parser.add_argument('--debug', default=False, action='store_true',
+                        help='Print useful debug messages')
+    args = parser.parse_args()
+    fn = args.pipe[0]
+    os.mkfifo(fn)
+    try:
+        with open(fn) as f, open(args.output, 'w') as out:
+            out.write('[\n')
+            start = timer()
+            while True:
+                ev = f.readline().strip()
+                if not ev:
+                    continue
+                now = timer()
+                if args.debug:
+                    print(ev)
+                verb, session, label = ev.split(' ', 2)
+                if session not in _threadmap:
+                    _threadmap[session] = len(_threadmap)
+                pid = _threadmap[session]
+                ts_micros = (now - start) * 1000000
+                out.write(json.dumps(
+                    {
+                        "name": label,
+                        "cat": "misc",
+                        "ph": _TYPEMAP[verb],
+                        "ts": ts_micros,
+                        "pid": pid,
+                        "tid": 1,
+                        "args": {}
+                    }))
+                out.write(',\n')
+    finally:
+        os.unlink(fn)
+
+if __name__ == '__main__':
+    main()
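
A rough illustration of the event stream described in the module docstring above: a producer only has to write START/END lines into the named pipe, and catapipe timestamps each line on arrival and emits one catapult event per line ("ph" is "B" for START and "E" for END, per _TYPEMAP). A minimal producer sketch, not part of the patch; the fallback pipe path and the session id are made up:

  # hypothetical producer feeding catapipe; illustrative only
  import os

  pipe = os.environ.get('HGCATAPULTSERVERPIPE', '/tmp/catapult.pipe')
  with open(pipe, 'w') as fp:
      fp.write('START sess1 run hg status\n')
      fp.flush()
      # ... the work being traced happens here ...
      fp.write('END sess1 run hg status\n')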

          
M contrib/check-code.py +3 -2
@@ 30,7 30,7 @@ if sys.version_info[0] < 3:
     opentext = open
 else:
     def opentext(f):
-        return open(f, encoding='ascii')
+        return open(f, encoding='latin1')
 try:
     xrange
 except NameError:

          
@@ 503,7 503,7 @@ py3pats = [
   [
     (r'os\.environ', "use encoding.environ instead (py3)", r'#.*re-exports'),
     (r'os\.name', "use pycompat.osname instead (py3)"),
-    (r'os\.getcwd', "use pycompat.getcwd instead (py3)"),
+    (r'os\.getcwd', "use encoding.getcwd instead (py3)", r'#.*re-exports'),
     (r'os\.sep', "use pycompat.ossep instead (py3)"),
     (r'os\.pathsep', "use pycompat.ospathsep instead (py3)"),
     (r'os\.altsep', "use pycompat.osaltsep instead (py3)"),

          
@@ 511,6 511,7 @@ py3pats = [
     (r'getopt\.getopt', "use pycompat.getoptb instead (py3)"),
     (r'os\.getenv', "use encoding.environ.get instead"),
     (r'os\.setenv', "modifying the environ dict is not preferred"),
+    (r'(?<!pycompat\.)xrange', "use pycompat.xrange instead (py3)"),
   ],
   # warnings
   [],

          
M contrib/check-commit +0 -2
@@ 39,8 39,6 @@ errors = [
      "summary keyword should be most user-relevant one-word command or topic"),
     (afterheader + r".*\.\s*\n", "don't add trailing period on summary line"),
     (afterheader + r".{79,}", "summary line too long (limit is 78)"),
-    (r"\n\+\n( |\+)\n", "adds double empty line"),
-    (r"\n \n\+\n", "adds double empty line"),
     # Forbid "_" in function name.
     #
     # We skip the check for cffi related functions. They use names mapping the

          
M contrib/check-config.py +32 -15
@@ 42,6 42,14 @@ ignorere = re.compile(br'''
     config:\s(?P<config>\S+\.\S+)$
     ''', re.VERBOSE | re.MULTILINE)
 
+if sys.version_info[0] > 2:
+    def mkstr(b):
+        if isinstance(b, str):
+            return b
+        return b.decode('utf8')
+else:
+    mkstr = lambda x: x
+
 def main(args):
     for f in args:
         sect = b''

          
@@ 92,7 100,7 @@ def main(args):
             # look for ignore markers
             m = ignorere.search(l)
             if m:
-                if m.group('reason') == 'inconsistent':
+                if m.group('reason') == b'inconsistent':
                     allowinconsistent.add(m.group('config'))
                 else:
                     documented[m.group('config')] = 1

          
@@ 104,36 112,45 @@ def main(args):
                 ctype = m.group('ctype')
                 if not ctype:
                     ctype = 'str'
-                name = m.group('section') + "." + m.group('option')
+                name = m.group('section') + b"." + m.group('option')
                 default = m.group('default')
-                if default in (None, 'False', 'None', '0', '[]', '""', "''"):
-                    default = ''
+                if default in (
+                        None, b'False', b'None', b'0', b'[]', b'""', b"''"):
+                    default = b''
                 if re.match(b'[a-z.]+$', default):
-                    default = '<variable>'
+                    default = b'<variable>'
                 if (name in foundopts and (ctype, default) != foundopts[name]
                     and name not in allowinconsistent):
-                    print(l.rstrip())
-                    print("conflict on %s: %r != %r" % (name, (ctype, default),
-                                                        foundopts[name]))
-                    print("at %s:%d:" % (f, linenum))
+                    print(mkstr(l.rstrip()))
+                    fctype, fdefault = foundopts[name]
+                    print("conflict on %s: %r != %r" % (
+                        mkstr(name),
+                        (mkstr(ctype), mkstr(default)),
+                        (mkstr(fctype), mkstr(fdefault))))
+                    print("at %s:%d:" % (mkstr(f), linenum))
                 foundopts[name] = (ctype, default)
-                carryover = ''
+                carryover = b''
             else:
                 m = re.search(configpartialre, line)
                 if m:
                     carryover = line
                 else:
-                    carryover = ''
+                    carryover = b''
 
     for name in sorted(foundopts):
         if name not in documented:
-            if not (name.startswith("devel.") or
-                    name.startswith("experimental.") or
-                    name.startswith("debug.")):
+            if not (name.startswith(b"devel.") or
+                    name.startswith(b"experimental.") or
+                    name.startswith(b"debug.")):
                 ctype, default = foundopts[name]
                 if default:
+                    if isinstance(default, bytes):
+                        default = mkstr(default)
                     default = ' [%s]' % default
-                print("undocumented: %s (%s)%s" % (name, ctype, default))
+                elif isinstance(default, bytes):
+                    default = mkstr(default)
+                print("undocumented: %s (%s)%s" % (
+                    mkstr(name), mkstr(ctype), default))
 
 if __name__ == "__main__":
     if len(sys.argv) > 1:

          
M contrib/chg/hgclient.c +7 -6
@@ 32,7 32,7 @@ enum { CAP_GETENCODING = 0x0001,
        CAP_ATTACHIO = 0x0100,
        CAP_CHDIR = 0x0200,
        CAP_SETENV = 0x0800,
-       CAP_SETUMASK = 0x1000,
+       CAP_SETUMASK2 = 0x1000,
        CAP_VALIDATE = 0x2000,
        CAP_SETPROCNAME = 0x4000,
 };

          
@@ 48,7 48,7 @@ static const cappair_t captable[] = {
     {"attachio", CAP_ATTACHIO},
     {"chdir", CAP_CHDIR},
     {"setenv", CAP_SETENV},
-    {"setumask", CAP_SETUMASK},
+    {"setumask2", CAP_SETUMASK2},
     {"validate", CAP_VALIDATE},
     {"setprocname", CAP_SETPROCNAME},
     {NULL, 0}, /* terminator */

          
@@ 425,10 425,11 @@ static void forwardumask(hgclient_t *hgc
 	mode_t mask = umask(0);
 	umask(mask);
 
-	static const char command[] = "setumask\n";
-	sendall(hgc->sockfd, command, sizeof(command) - 1);
 	uint32_t data = htonl(mask);
-	sendall(hgc->sockfd, &data, sizeof(data));
+	enlargecontext(&hgc->ctx, sizeof(data));
+	memcpy(hgc->ctx.data, &data, sizeof(data));
+	hgc->ctx.datasize = sizeof(data);
+	writeblockrequest(hgc, "setumask2");
 }
 
 /*!

          
@@ 508,7 509,7 @@ hgclient_t *hgc_open(const char *socknam
 		attachio(hgc);
 	if (hgc->capflags & CAP_CHDIR)
 		chdirtocwd(hgc);
-	if (hgc->capflags & CAP_SETUMASK)
+	if (hgc->capflags & CAP_SETUMASK2)
 		forwardumask(hgc);
 
 	return hgc;

          
M contrib/clang-format-ignorelist +7 -0
@@ 6,6 6,7 @@ mercurial/cext/osutil.c
 mercurial/cext/revlog.c
 # Vendored code that we should never format:
 contrib/python-zstandard/c-ext/bufferutil.c
+contrib/python-zstandard/c-ext/compressionchunker.c
 contrib/python-zstandard/c-ext/compressiondict.c
 contrib/python-zstandard/c-ext/compressionparams.c
 contrib/python-zstandard/c-ext/compressionreader.c

          
@@ 25,6 26,8 @@ contrib/python-zstandard/zstd.c
 contrib/python-zstandard/zstd/common/bitstream.h
 contrib/python-zstandard/zstd/common/compiler.h
 contrib/python-zstandard/zstd/common/cpu.h
+contrib/python-zstandard/zstd/common/debug.c
+contrib/python-zstandard/zstd/common/debug.h
 contrib/python-zstandard/zstd/common/entropy_common.c
 contrib/python-zstandard/zstd/common/error_private.c
 contrib/python-zstandard/zstd/common/error_private.h

          
@@ 42,6 45,8 @@ contrib/python-zstandard/zstd/common/zst
 contrib/python-zstandard/zstd/common/zstd_errors.h
 contrib/python-zstandard/zstd/common/zstd_internal.h
 contrib/python-zstandard/zstd/compress/fse_compress.c
+contrib/python-zstandard/zstd/compress/hist.c
+contrib/python-zstandard/zstd/compress/hist.h
 contrib/python-zstandard/zstd/compress/huf_compress.c
 contrib/python-zstandard/zstd/compress/zstd_compress.c
 contrib/python-zstandard/zstd/compress/zstd_compress_internal.h

          
@@ 64,8 69,10 @@ contrib/python-zstandard/zstd/deprecated
 contrib/python-zstandard/zstd/deprecated/zbuff_decompress.c
 contrib/python-zstandard/zstd/deprecated/zbuff.h
 contrib/python-zstandard/zstd/dictBuilder/cover.c
+contrib/python-zstandard/zstd/dictBuilder/cover.h
 contrib/python-zstandard/zstd/dictBuilder/divsufsort.c
 contrib/python-zstandard/zstd/dictBuilder/divsufsort.h
+contrib/python-zstandard/zstd/dictBuilder/fastcover.c
 contrib/python-zstandard/zstd/dictBuilder/zdict.c
 contrib/python-zstandard/zstd/dictBuilder/zdict.h
 contrib/python-zstandard/zstd/zstd.h

          
M contrib/dumprevlog +18 -12
@@ 6,7 6,9 @@ from __future__ import absolute_import, 
 
 import sys
 from mercurial import (
+    encoding,
     node,
+    pycompat,
     revlog,
 )
 from mercurial.utils import (

          
@@ 16,22 18,26 @@ from mercurial.utils import (
 for fp in (sys.stdin, sys.stdout, sys.stderr):
     procutil.setbinary(fp)
 
-def binopen(path, mode='rb'):
-    if 'b' not in mode:
-        mode = mode + 'b'
-    return open(path, mode)
+def binopen(path, mode=b'rb'):
+    if b'b' not in mode:
+        mode = mode + b'b'
+    return open(path, pycompat.sysstr(mode))
+
+def printb(data, end=b'\n'):
+    sys.stdout.flush()
+    pycompat.stdout.write(data + end)
 
 for f in sys.argv[1:]:
-    r = revlog.revlog(binopen, f)
+    r = revlog.revlog(binopen, encoding.strtolocal(f))
     print("file:", f)
     for i in r:
         n = r.node(i)
         p = r.parents(n)
         d = r.revision(n)
-        print("node:", node.hex(n))
-        print("linkrev:", r.linkrev(i))
-        print("parents:", node.hex(p[0]), node.hex(p[1]))
-        print("length:", len(d))
-        print("-start-")
-        print(d)
-        print("-end-")
+        printb(b"node: %s" % node.hex(n))
+        printb(b"linkrev: %d" % r.linkrev(i))
+        printb(b"parents: %s %s" % (node.hex(p[0]), node.hex(p[1])))
+        printb(b"length: %d" % len(d))
+        printb(b"-start-")
+        printb(d)
+        printb(b"-end-")

          
M contrib/fuzz/Makefile +49 -2
@@ 70,12 70,59 @@ xdiff_fuzzer: xdiff.cc fuzz-xdiffi.o fuz
 	  fuzz-xdiffi.o fuzz-xprepare.o fuzz-xutils.o fuzzutil-oss-fuzz.o \
 	  -lFuzzingEngine -o $$OUT/xdiff_fuzzer
 
+# TODO use the $OUT env var instead of hardcoding /out
+/out/sanpy/bin/python:
+	cd /Python-2.7.15/ && ./configure --without-pymalloc --prefix=$$OUT/sanpy CFLAGS='-O1 -fno-omit-frame-pointer -g -fwrapv -fstack-protector-strong' LDFLAGS=-lasan  && ASAN_OPTIONS=detect_leaks=0 make && make install
+
+sanpy: /out/sanpy/bin/python
+
+manifest.o: sanpy ../../mercurial/cext/manifest.c
+	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -I../../mercurial \
+	  -c -o manifest.o ../../mercurial/cext/manifest.c
+
+charencode.o: sanpy ../../mercurial/cext/charencode.c
+	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -I../../mercurial \
+	  -c -o charencode.o ../../mercurial/cext/charencode.c
+
+parsers.o: sanpy ../../mercurial/cext/parsers.c
+	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -I../../mercurial \
+	  -c -o parsers.o ../../mercurial/cext/parsers.c
+
+dirs.o: sanpy ../../mercurial/cext/dirs.c
+	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -I../../mercurial \
+	  -c -o dirs.o ../../mercurial/cext/dirs.c
+
+pathencode.o: sanpy ../../mercurial/cext/pathencode.c
+	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -I../../mercurial \
+	  -c -o pathencode.o ../../mercurial/cext/pathencode.c
+
+revlog.o: sanpy ../../mercurial/cext/revlog.c
+	$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -I../../mercurial \
+	  -c -o revlog.o ../../mercurial/cext/revlog.c
+
+manifest_fuzzer: sanpy manifest.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o
+	$(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+	  -Wno-register -Wno-macro-redefined \
+	  -I../../mercurial manifest.cc \
+	  manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o \
+	  -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+	  -o $$OUT/manifest_fuzzer
+
+manifest_corpus.zip:
+	python manifest_corpus.py $$OUT/manifest_fuzzer_seed_corpus.zip
+
 clean:
 	$(RM) *.o *_fuzzer \
 	  bdiff \
 	  mpatch \
 	  xdiff
 
-oss-fuzz: bdiff_fuzzer mpatch_fuzzer mpatch_corpus.zip xdiff_fuzzer
+oss-fuzz: bdiff_fuzzer mpatch_fuzzer mpatch_corpus.zip xdiff_fuzzer manifest_fuzzer manifest_corpus.zip
 
-.PHONY: all clean oss-fuzz
+.PHONY: all clean oss-fuzz sanpy

          
A => contrib/fuzz/manifest.cc +83 -0
@@ 0,0 1,83 @@ 
+#include <Python.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <string>
+
+extern "C" {
+
+/* TODO: use Python 3 for this fuzzing? */
+PyMODINIT_FUNC initparsers(void);
+
+static char cpypath[8192] = "\0";
+
+static PyCodeObject *code;
+static PyObject *mainmod;
+static PyObject *globals;
+
+extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
+{
+	const std::string subdir = "/sanpy/lib/python2.7";
+	/* HACK ALERT: we need a full Python installation built without
+	   pymalloc and with ASAN, so we dump one in
+	   $OUT/sanpy/lib/python2.7. This helps us wire that up. */
+	std::string selfpath(*argv[0]);
+	std::string pypath;
+	auto pos = selfpath.rfind("/");
+	if (pos == std::string::npos) {
+		char wd[8192];
+		getcwd(wd, 8192);
+		pypath = std::string(wd) + subdir;
+	} else {
+		pypath = selfpath.substr(0, pos) + subdir;
+	}
+	strncpy(cpypath, pypath.c_str(), pypath.size());
+	setenv("PYTHONPATH", cpypath, 1);
+	setenv("PYTHONNOUSERSITE", "1", 1);
+	/* prevent Python from looking up users in the fuzz environment */
+	setenv("PYTHONUSERBASE", cpypath, 1);
+	Py_SetPythonHome(cpypath);
+	Py_InitializeEx(0);
+	initparsers();
+	code = (PyCodeObject *)Py_CompileString(R"py(
+from parsers import lazymanifest
+try:
+  lm = lazymanifest(mdata)
+  # iterate the whole thing, which causes the code to fully parse
+  # every line in the manifest
+  list(lm.iterentries())
+  lm[b'xyzzy'] = (b'\0' * 20, 'x')
+  # do an insert, text should change
+  assert lm.text() != mdata, "insert should change text and didn't: %r %r" % (lm.text(), mdata)
+  del lm[b'xyzzy']
+  # should be back to the same
+  assert lm.text() == mdata, "delete should have restored text but didn't: %r %r" % (lm.text(), mdata)
+except Exception as e:
+  pass
+  # uncomment this print if you're editing this Python code
+  # to debug failures.
+  # print e
+)py",
+	                                        "fuzzer", Py_file_input);
+	mainmod = PyImport_AddModule("__main__");
+	globals = PyModule_GetDict(mainmod);
+	return 0;
+}
+
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
+{
+	PyObject *mtext =
+	    PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
+	PyObject *locals = PyDict_New();
+	PyDict_SetItemString(locals, "mdata", mtext);
+	PyObject *res = PyEval_EvalCode(code, globals, locals);
+	if (!res) {
+		PyErr_Print();
+	}
+	Py_XDECREF(res);
+	Py_DECREF(locals);
+	Py_DECREF(mtext);
+	return 0; // Non-zero return values are reserved for future use.
+}
+}
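
For reference, the embedded Python script above is the core of the fuzz target: it parses the input as a raw manifest (one entry per line: path, NUL byte, 40-character hex node, optional flag character), then checks that inserting an entry changes lazymanifest.text() and that deleting it restores the original bytes. A minimal standalone sketch of the same round trip, mirroring the embedded script; it assumes the Python 2.7 "sanpy" environment built by the Makefile above, and the mercurial.cext.parsers import path is an assumption (the fuzzer imports the extension directly as "parsers"):

  # illustrative only; mirrors the fuzzer's embedded script
  from mercurial.cext.parsers import lazymanifest

  mdata = b'foo\0' + b'a' * 40 + b'\n'  # path NUL hexnode newline
  lm = lazymanifest(mdata)
  list(lm.iterentries())                # force every line to be parsed
  lm[b'xyzzy'] = (b'\0' * 20, 'x')      # insert: text() changes
  assert lm.text() != mdata
  del lm[b'xyzzy']                      # delete: original text restored
  assert lm.text() == mdata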

          
A => contrib/fuzz/manifest_corpus.py +30 -0
@@ 0,0 1,30 @@ 
+from __future__ import absolute_import, print_function
+
+import argparse
+import zipfile
+
+ap = argparse.ArgumentParser()
+ap.add_argument("out", metavar="some.zip", type=str, nargs=1)
+args = ap.parse_args()
+
+with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
+    zf.writestr("manifest_zero",
+'''PKG-INFO\09b3ed8f2b81095a13064402e930565f083346e9a
+README\080b6e76643dcb44d4bc729e932fc464b3e36dbe3
+hg\0b6444347c629cc058d478023905cfb83b7f5bb9d
+mercurial/__init__.py\0b80de5d138758541c5f05265ad144ab9fa86d1db
+mercurial/byterange.py\017f5a9fbd99622f31a392c33ac1e903925dc80ed
+mercurial/fancyopts.py\0b6f52e23e356748c5039313d8b639cda16bf67ba
+mercurial/hg.py\023cc12f225f1b42f32dc0d897a4f95a38ddc8f4a
+mercurial/mdiff.py\0a05f65c44bfbeec6a42336cd2ff0b30217899ca3
+mercurial/revlog.py\0217bc3fde6d82c0210cf56aeae11d05a03f35b2b
+mercurial/transaction.py\09d180df101dc14ce3dd582fd998b36c98b3e39aa
+notes.txt\0703afcec5edb749cf5cec67831f554d6da13f2fb
+setup.py\0ccf3f6daf0f13101ca73631f7a1769e328b472c9
+tkmerge\03c922edb43a9c143682f7bc7b00f98b3c756ebe7
+''')
+    zf.writestr("badmanifest_shorthashes",
+                "narf\0aa\nnarf2\0aaa\n")
+    zf.writestr("badmanifest_nonull",
+                "narf\0cccccccccccccccccccccccccccccccccccccccc\n"
+                "narf2aaaaaaaaaaaaaaaaaaaa\n")

          
M contrib/hgclient.py +37 -26
@@ 1,7 1,10 @@ 
 # A minimal client for Mercurial's command server
 
 from __future__ import absolute_import, print_function
+
+import io
 import os
+import re
 import signal
 import socket
 import struct

          
@@ 9,17 12,25 @@ import subprocess
 import sys
 import time
 
-try:
-    import cStringIO as io
-    stringio = io.StringIO
-except ImportError:
-    import io
-    stringio = io.StringIO
+if sys.version_info[0] >= 3:
+    stdout = sys.stdout.buffer
+    stderr = sys.stderr.buffer
+    stringio = io.BytesIO
+    def bprint(*args):
+        # remove b'' as well for ease of test migration
+        pargs = [re.sub(br'''\bb(['"])''', br'\1', b'%s' % a) for a in args]
+        stdout.write(b' '.join(pargs) + b'\n')
+else:
+    import cStringIO
+    stdout = sys.stdout
+    stderr = sys.stderr
+    stringio = cStringIO.StringIO
+    bprint = print
 
 def connectpipe(path=None):
-    cmdline = ['hg', 'serve', '--cmdserver', 'pipe']
+    cmdline = [b'hg', b'serve', b'--cmdserver', b'pipe']
     if path:
-        cmdline += ['-R', path]
+        cmdline += [b'-R', path]
 
     server = subprocess.Popen(cmdline, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE)

          
@@ 41,9 52,9 @@ class unixconnection(object):
 class unixserver(object):
     def __init__(self, sockpath, logpath=None, repopath=None):
         self.sockpath = sockpath
-        cmdline = ['hg', 'serve', '--cmdserver', 'unix', '-a', sockpath]
+        cmdline = [b'hg', b'serve', b'--cmdserver', b'unix', b'-a', sockpath]
         if repopath:
-            cmdline += ['-R', repopath]
+            cmdline += [b'-R', repopath]
         if logpath:
             stdout = open(logpath, 'a')
             stderr = subprocess.STDOUT

          
@@ 64,7 75,7 @@ class unixserver(object):
         self.server.wait()
 
 def writeblock(server, data):
-    server.stdin.write(struct.pack('>I', len(data)))
+    server.stdin.write(struct.pack(b'>I', len(data)))
     server.stdin.write(data)
     server.stdin.flush()
 

          
@@ 73,48 84,48 @@ def readchannel(server):
     if not data:
         raise EOFError
     channel, length = struct.unpack('>cI', data)
-    if channel in 'IL':
+    if channel in b'IL':
         return channel, length
     else:
         return channel, server.stdout.read(length)
 
 def sep(text):
-    return text.replace('\\', '/')
+    return text.replace(b'\\', b'/')
 
-def runcommand(server, args, output=sys.stdout, error=sys.stderr, input=None,
+def runcommand(server, args, output=stdout, error=stderr, input=None,
                outfilter=lambda x: x):
-    print('*** runcommand', ' '.join(args))
-    sys.stdout.flush()
-    server.stdin.write('runcommand\n')
-    writeblock(server, '\0'.join(args))
+    bprint(b'*** runcommand', b' '.join(args))
+    stdout.flush()
+    server.stdin.write(b'runcommand\n')
+    writeblock(server, b'\0'.join(args))
 
     if not input:
         input = stringio()
 
     while True:
         ch, data = readchannel(server)
-        if ch == 'o':
+        if ch == b'o':
             output.write(outfilter(data))
             output.flush()
-        elif ch == 'e':
+        elif ch == b'e':
             error.write(data)
             error.flush()
-        elif ch == 'I':
+        elif ch == b'I':
             writeblock(server, input.read(data))
-        elif ch == 'L':
+        elif ch == b'L':
             writeblock(server, input.readline(data))
-        elif ch == 'r':
+        elif ch == b'r':
             ret, = struct.unpack('>i', data)
             if ret != 0:
-                print(' [%d]' % ret)
+                bprint(b' [%d]' % ret)
             return ret
         else:
-            print("unexpected channel %c: %r" % (ch, data))
+            bprint(b"unexpected channel %c: %r" % (ch, data))
             if ch.isupper():
                 return
 
 def check(func, connect=connectpipe):
-    sys.stdout.flush()
+    stdout.flush()
     server = connect()
     try:
         return func(server)
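
For context, this module is a test helper: check() spawns a command server and hands it to a test function, runcommand() speaks the block protocol shown above (each message is a one-byte channel identifier plus a big-endian 32-bit length), and bprint() normalizes output between Python 2 and 3. A rough sketch of the typical calling pattern from a command-server test; the function name and the command are illustrative:

  # illustrative use of the helper from a command-server test
  from hgclient import check, readchannel, runcommand

  @check
  def hellomessage(server):
      # read the hello banner from the 'o' channel, then run a command
      ch, data = readchannel(server)
      runcommand(server, [b'id'])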

          
M contrib/import-checker.py +18 -54
@@ 5,7 5,6 @@ from __future__ import absolute_import, 
 import ast
 import collections
 import os
-import re
 import sys
 
 # Import a minimal set of stdlib modules needed for list_stdlib_modules()

          
@@ 18,6 17,8 @@ if True: # disable lexical sorting check
         basehttpserver = None
     import zlib
 
+import testparseutil
+
 # Whitelist of modules that symbols can be directly imported from.
 allowsymbolimports = (
     '__future__',

          
@@ 28,6 29,8 @@ allowsymbolimports = (
     'mercurial.hgweb.request',
     'mercurial.i18n',
     'mercurial.node',
+    # for revlog to re-export constant to extensions
+    'mercurial.revlogutils.constants',
     # for cffi modules to re-export pure functions
     'mercurial.pure.base85',
     'mercurial.pure.bdiff',

          
@@ 36,6 39,7 @@ allowsymbolimports = (
     'mercurial.pure.parsers',
     # third-party imports should be directly imported
     'mercurial.thirdparty',
+    'mercurial.thirdparty.attr',
     'mercurial.thirdparty.cbor',
     'mercurial.thirdparty.cbor.cbor2',
     'mercurial.thirdparty.zope',

          
@@ 656,61 660,21 @@ def embedded(f, modname, src):
     ...   b'  > EOF',
     ... ]
     >>> test(b"example.t", lines)
-    example[2] doctest.py 2
-    "from __future__ import print_function\\n' multiline\\nstring'\\n"
-    example[7] foo.py 7
+    example[2] doctest.py 1
+    "from __future__ import print_function\\n' multiline\\nstring'\\n\\n"
+    example[8] foo.py 7
     'from __future__ import print_function\\n'
     """
-    inlinepython = 0
-    shpython = 0
-    script = []
-    prefix = 6
-    t = ''
-    n = 0
-    for l in src:
-        n += 1
-        if not l.endswith(b'\n'):
-            l += b'\n'
-        if l.startswith(b'  >>> '): # python inlines
-            if shpython:
-                print("%s:%d: Parse Error" % (f, n))
-            if not inlinepython:
-                # We've just entered a Python block.
-                inlinepython = n
-                t = b'doctest.py'
-            script.append(l[prefix:])
-            continue
-        if l.startswith(b'  ... '): # python inlines
-            script.append(l[prefix:])
-            continue
-        cat = re.search(br"\$ \s*cat\s*>\s*(\S+\.py)\s*<<\s*EOF", l)
-        if cat:
-            if inlinepython:
-                yield b''.join(script), (b"%s[%d]" %
-                       (modname, inlinepython)), t, inlinepython
-                script = []
-                inlinepython = 0
-            shpython = n
-            t = cat.group(1)
-            continue
-        if shpython and l.startswith(b'  > '): # sh continuation
-            if l == b'  > EOF\n':
-                yield b''.join(script), (b"%s[%d]" %
-                       (modname, shpython)), t, shpython
-                script = []
-                shpython = 0
-            else:
-                script.append(l[4:])
-            continue
-        # If we have an empty line or a command for sh, we end the
-        # inline script.
-        if inlinepython and (l == b'  \n'
-                             or l.startswith(b'  $ ')):
-            yield b''.join(script), (b"%s[%d]" %
-                   (modname, inlinepython)), t, inlinepython
-            script = []
-            inlinepython = 0
-            continue
+    errors = []
+    for name, starts, ends, code in testparseutil.pyembedded(f, src, errors):
+        if not name:
+            # use 'doctest.py', in order to make already existing
+            # doctest above pass instantly
+            name = 'doctest.py'
+        # "starts" is "line number" (1-origin), but embedded() is
+        # expected to return "line offset" (0-origin). Therefore, this
+        # yields "starts - 1".
+        yield code, "%s[%d]" % (modname, starts), name, starts - 1
 
 def sources(f, modname):
     """Yields possibly multiple sources from a filepath

          
M contrib/packaging/Makefile +2 -2
@@ 120,8 120,8 @@ define centos_targets
 centos$(1):
 	mkdir -p $$(HGROOT)/packages/centos$(1)
 	./buildrpm $$(if $$(filter $(1),$$(CENTOS_WITH_PYTHON_RELEASES)),--withpython)
-	cp $$(HGROOT)/rpmbuild/RPMS/*/* $$(HGROOT)/packages/centos$(1)
-	cp $$(HGROOT)/rpmbuild/SRPMS/* $$(HGROOT)/packages/centos$(1)
+	cp $$(HGROOT)/contrib/packaging/rpmbuild/RPMS/*/* $$(HGROOT)/packages/centos$(1)
+	cp $$(HGROOT)/contrib/packaging/rpmbuild/SRPMS/* $$(HGROOT)/packages/centos$(1)
 
 .PHONY: docker-centos$(1)
 docker-centos$(1):

          
M contrib/packaging/builddeb +13 -4
@@ 13,6 13,13 @@ CLEANUP=1
 DISTID=`(lsb_release -is 2> /dev/null | tr '[:upper:]' '[:lower:]') || echo debian`
 CODENAME=`lsb_release -cs 2> /dev/null || echo unknown`
 DEBFLAGS=-b
+
+cleanup() {
+    if [ "$CLEANUP" ]; then
+        rm -r "$ROOTDIR/debian";
+    fi
+}
+
 while [ "$1" ]; do
     case "$1" in
     --distid )

          
@@ 44,12 51,14 @@ while [ "$1" ]; do
     esac
 done
 
-trap "if [ '$CLEANUP' ] ; then rm -r '$PWD/debian' ; fi" EXIT
+cd "$ROOTDIR"
+
+trap 'cleanup' EXIT
 
 set -u
 
 if [ ! -d .hg ]; then
-    echo 'You are not inside a Mercurial repository!' 1>&2
+    printf "You are inside %s, which is not the root of a Mercurial repository\n" $(pwd) 1>&2
     exit 1
 fi
 

          
@@ 71,7 80,7 @@ changelog=debian/changelog
 
 if [ "$BUILD" ]; then
     if [ -d debian ] ; then
-        echo "Error! debian control directory already exists!"
+        printf "Error! debian control directory already exists at %s/debian\n" $(pwd)
         exit 1
     fi
 

          
@@ 102,5 111,5 @@ if [ "$CLEANUP" ] ; then
           -type f -newer $control -print0 2>/dev/null | \
       xargs -Inarf -0 mv narf "$OUTPUTDIR"
     echo "Built packages for $debver:"
-    find "$OUTPUTDIR" -type f -newer $control -name '*.deb'
+    find "$PWD"/"$OUTPUTDIR" -type f -newer $control -name '*.deb'
 fi

          
M contrib/perf.py +605 -363
@@ 19,6 19,7 @@ 
 #   Mercurial
 
 from __future__ import absolute_import
+import contextlib
 import functools
 import gc
 import os

          
@@ 64,12 65,29 @@ try:
     from mercurial import scmutil # since 1.9 (or 8b252e826c68)
 except ImportError:
     pass
+
+def identity(a):
+    return a
+
 try:
     from mercurial import pycompat
     getargspec = pycompat.getargspec  # added to module after 4.5
+    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
+    _sysstr = pycompat.sysstr         # since 4.0 (or 2219f4f82ede)
+    _xrange = pycompat.xrange         # since 4.8 (or 7eba8f83129b)
+    fsencode = pycompat.fsencode      # since 3.9 (or f4a5e0e86a7e)
+    if pycompat.ispy3:
+        _maxint = sys.maxsize  # per py3 docs for replacing maxint
+    else:
+        _maxint = sys.maxint
 except (ImportError, AttributeError):
     import inspect
     getargspec = inspect.getargspec
+    _byteskwargs = identity
+    fsencode = identity               # no py3 support
+    _maxint = sys.maxint              # no py3 support
+    _sysstr = lambda x: x             # no py3 support
+    _xrange = xrange
 
 try:
     # 4.7+

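Most of the perf.py hunks below add an "opts = _byteskwargs(opts)" line because, under Python 3, **kwargs arrive with str keys while the commands index opts with bytes literals. A minimal sketch of the idea, assuming the real pycompat.byteskwargs simply re-encodes the keys (the exact codec may differ):

    # hypothetical stand-in for pycompat.byteskwargs: str keys -> bytes keys
    def byteskwargs_sketch(dic):
        return dict((k.encode('latin-1'), v) for k, v in dic.items())

    opts = byteskwargs_sketch({'dry_run': True})
    assert opts[b'dry_run'] is True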
          
@@ 95,7 113,7 @@ except (AttributeError, ImportError):
 # available since 1.9.3 (or 94b200a11cf7)
 _undefined = object()
 def safehasattr(thing, attr):
-    return getattr(thing, attr, _undefined) is not _undefined
+    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
 setattr(util, 'safehasattr', safehasattr)
 
 # for "historical portability":

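safehasattr() now routes the attribute name through _sysstr() because Python 3's getattr() accepts only str names, while perf.py passes bytes throughout. A short illustration, assuming _sysstr amounts to a bytes-to-str decode:

    class _Dummy(object):
        revlog = object()

    name = b'revlog'
    # getattr(_Dummy, name, None) raises TypeError on Python 3: the attribute
    # name must be str, so it is decoded first
    assert getattr(_Dummy, name.decode('latin-1'), None) is _Dummy.revlog
    assert getattr(_Dummy, 'missing', None) is None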
          
@@ 103,7 121,7 @@ setattr(util, 'safehasattr', safehasattr
 # since ae5d60bb70c9
 if safehasattr(time, 'perf_counter'):
     util.timer = time.perf_counter
-elif os.name == 'nt':
+elif os.name == b'nt':
     util.timer = time.clock
 else:
     util.timer = time.time

          
@@ 123,9 141,9 @@ formatteropts = getattr(cmdutil, "format
 # since 1.9 (or a79fea6b3e77).
 revlogopts = getattr(cmdutil, "debugrevlogopts",
                      getattr(commands, "debugrevlogopts", [
-        ('c', 'changelog', False, ('open changelog')),
-        ('m', 'manifest', False, ('open manifest')),
-        ('', 'dir', False, ('open directory manifest')),
+        (b'c', b'changelog', False, (b'open changelog')),
+        (b'm', b'manifest', False, (b'open manifest')),
+        (b'', b'dir', False, (b'open directory manifest')),
         ]))
 
 cmdtable = {}

          
@@ 134,20 152,20 @@ cmdtable = {}
 # define parsealiases locally, because cmdutil.parsealiases has been
 # available since 1.5 (or 6252852b4332)
 def parsealiases(cmd):
-    return cmd.lstrip("^").split("|")
+    return cmd.split(b"|")
 
 if safehasattr(registrar, 'command'):
     command = registrar.command(cmdtable)
 elif safehasattr(cmdutil, 'command'):
     command = cmdutil.command(cmdtable)
-    if 'norepo' not in getargspec(command).args:
+    if b'norepo' not in getargspec(command).args:
         # for "historical portability":
         # wrap original cmdutil.command, because "norepo" option has
         # been available since 3.1 (or 75a96326cecb)
         _command = command
         def command(name, options=(), synopsis=None, norepo=False):
             if norepo:
-                commands.norepo += ' %s' % ' '.join(parsealiases(name))
+                commands.norepo += b' %s' % b' '.join(parsealiases(name))
             return _command(name, list(options), synopsis)
 else:
     # for "historical portability":

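parsealiases() above drops the old lstrip("^") because the "^" marker (which used to flag commands shown in the short help list) is no longer part of command names in this Mercurial version; what remains is a plain split on the alias separator. The alias names below are made up for illustration:

    def parsealiases(cmd):
        return cmd.split(b"|")

    assert parsealiases(b'perfwalk') == [b'perfwalk']
    assert parsealiases(b'perfexample|pe') == [b'perfexample', b'pe']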
          
@@ 160,7 178,7 @@ else:
             else:
                 cmdtable[name] = func, list(options)
             if norepo:
-                commands.norepo += ' %s' % ' '.join(parsealiases(name))
+                commands.norepo += b' %s' % b' '.join(parsealiases(name))
             return func
         return decorator
 

          
@@ 169,23 187,23 @@ try:
     import mercurial.configitems
     configtable = {}
     configitem = mercurial.registrar.configitem(configtable)
-    configitem('perf', 'presleep',
+    configitem(b'perf', b'presleep',
         default=mercurial.configitems.dynamicdefault,
     )
-    configitem('perf', 'stub',
+    configitem(b'perf', b'stub',
         default=mercurial.configitems.dynamicdefault,
     )
-    configitem('perf', 'parentscount',
+    configitem(b'perf', b'parentscount',
         default=mercurial.configitems.dynamicdefault,
     )
-    configitem('perf', 'all-timing',
+    configitem(b'perf', b'all-timing',
         default=mercurial.configitems.dynamicdefault,
     )
 except (ImportError, AttributeError):
     pass
 
 def getlen(ui):
-    if ui.configbool("perf", "stub", False):
+    if ui.configbool(b"perf", b"stub", False):
         return lambda x: 1
     return len
 

          
@@ 197,14 215,14 @@ def gettimer(ui, opts=None):
 
     # enforce an idle period before execution to counteract power management
     # experimental config: perf.presleep
-    time.sleep(getint(ui, "perf", "presleep", 1))
+    time.sleep(getint(ui, b"perf", b"presleep", 1))
 
     if opts is None:
         opts = {}
     # redirect all to stderr unless buffer api is in use
     if not ui._buffers:
         ui = ui.copy()
-        uifout = safeattrsetter(ui, 'fout', ignoremissing=True)
+        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
         if uifout:
             # for "historical portability":
             # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)

          
@@ 213,7 231,7 @@ def gettimer(ui, opts=None):
     # get a formatter
     uiformatter = getattr(ui, 'formatter', None)
     if uiformatter:
-        fm = uiformatter('perf', opts)
+        fm = uiformatter(b'perf', opts)
     else:
         # for "historical portability":
         # define formatter locally, because ui.formatter has been

          
@@ 244,66 262,81 @@ def gettimer(ui, opts=None):
                 self._ui.write(text, **opts)
             def end(self):
                 pass
-        fm = defaultformatter(ui, 'perf', opts)
+        fm = defaultformatter(ui, b'perf', opts)
 
     # stub function, runs code only once instead of in a loop
     # experimental config: perf.stub
-    if ui.configbool("perf", "stub", False):
+    if ui.configbool(b"perf", b"stub", False):
         return functools.partial(stub_timer, fm), fm
 
     # experimental config: perf.all-timing
-    displayall = ui.configbool("perf", "all-timing", False)
+    displayall = ui.configbool(b"perf", b"all-timing", False)
     return functools.partial(_timer, fm, displayall=displayall), fm
 
 def stub_timer(fm, func, title=None):
     func()
 
+@contextlib.contextmanager
+def timeone():
+    r = []
+    ostart = os.times()
+    cstart = util.timer()
+    yield r
+    cstop = util.timer()
+    ostop = os.times()
+    a, b = ostart, ostop
+    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
+
 def _timer(fm, func, title=None, displayall=False):
     gc.collect()
     results = []
     begin = util.timer()
     count = 0
     while True:
-        ostart = os.times()
-        cstart = util.timer()
-        r = func()
+        with timeone() as item:
+            r = func()
+        count += 1
+        results.append(item[0])
         cstop = util.timer()
-        ostop = os.times()
-        count += 1
-        a, b = ostart, ostop
-        results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
         if cstop - begin > 3 and count >= 100:
             break
         if cstop - begin > 10 and count >= 3:
             break
 
+    formatone(fm, results, title=title, result=r,
+              displayall=displayall)
+
+def formatone(fm, timings, title=None, result=None, displayall=False):
+
+    count = len(timings)
+
     fm.startitem()
 
     if title:
-        fm.write('title', '! %s\n', title)
-    if r:
-        fm.write('result', '! result: %s\n', r)
+        fm.write(b'title', b'! %s\n', title)
+    if result:
+        fm.write(b'result', b'! result: %s\n', result)
     def display(role, entry):
-        prefix = ''
-        if role != 'best':
-            prefix = '%s.' % role
-        fm.plain('!')
-        fm.write(prefix + 'wall', ' wall %f', entry[0])
-        fm.write(prefix + 'comb', ' comb %f', entry[1] + entry[2])
-        fm.write(prefix + 'user', ' user %f', entry[1])
-        fm.write(prefix + 'sys',  ' sys %f', entry[2])
-        fm.write(prefix + 'count',  ' (%s of %d)', role, count)
-        fm.plain('\n')
-    results.sort()
-    min_val = results[0]
-    display('best', min_val)
+        prefix = b''
+        if role != b'best':
+            prefix = b'%s.' % role
+        fm.plain(b'!')
+        fm.write(prefix + b'wall', b' wall %f', entry[0])
+        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
+        fm.write(prefix + b'user', b' user %f', entry[1])
+        fm.write(prefix + b'sys',  b' sys %f', entry[2])
+        fm.write(prefix + b'count',  b' (%s of %%d)' % role, count)
+        fm.plain(b'\n')
+    timings.sort()
+    min_val = timings[0]
+    display(b'best', min_val)
     if displayall:
-        max_val = results[-1]
-        display('max', max_val)
-        avg = tuple([sum(x) / count for x in zip(*results)])
-        display('avg', avg)
-        median = results[len(results) // 2]
-        display('median', median)
+        max_val = timings[-1]
+        display(b'max', max_val)
+        avg = tuple([sum(x) / count for x in zip(*timings)])
+        display(b'avg', avg)
+        median = timings[len(timings) // 2]
+        display(b'median', median)
 
 # utilities for historical portability
 

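The timing loop is now factored into the timeone() context manager and the formatone() reporter above. A self-contained sketch of the same measuring pattern using only the standard library (not the perf.py code itself):

    import contextlib
    import os
    import time

    @contextlib.contextmanager
    def timeone_sketch():
        # fills a one-element list with (wall, user-cpu delta, system-cpu delta)
        r = []
        ostart = os.times()
        cstart = time.perf_counter()
        yield r
        cstop = time.perf_counter()
        ostop = os.times()
        r.append((cstop - cstart, ostop[0] - ostart[0], ostop[1] - ostart[1]))

    with timeone_sketch() as item:
        sum(x * x for x in range(10 ** 6))
    wall, user, sys_ = item[0]
    print('! wall %f comb %f user %f sys %f' % (wall, user + sys_, user, sys_))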
          
@@ 316,7 349,7 @@ def getint(ui, section, name, default):
     try:
         return int(v)
     except ValueError:
-        raise error.ConfigError(("%s.%s is not an integer ('%s')")
+        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                 % (section, name, v))
 
 def safeattrsetter(obj, name, ignoremissing=False):

          
@@ 337,15 370,15 @@ def safeattrsetter(obj, name, ignoremiss
     if not util.safehasattr(obj, name):
         if ignoremissing:
             return None
-        raise error.Abort(("missing attribute %s of %s might break assumption"
-                           " of performance measurement") % (name, obj))
+        raise error.Abort((b"missing attribute %s of %s might break assumption"
+                           b" of performance measurement") % (name, obj))
 
-    origvalue = getattr(obj, name)
+    origvalue = getattr(obj, _sysstr(name))
     class attrutil(object):
         def set(self, newvalue):
-            setattr(obj, name, newvalue)
+            setattr(obj, _sysstr(name), newvalue)
         def restore(self):
-            setattr(obj, name, origvalue)
+            setattr(obj, _sysstr(name), origvalue)
 
     return attrutil()
 

          
@@ 364,8 397,8 @@ def getbranchmapsubsettable():
     # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
     # branchmap and repoview modules exist, but subsettable attribute
     # doesn't)
-    raise error.Abort(("perfbranchmap not available with this Mercurial"),
-                      hint="use 2.5 or later")
+    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
+                      hint=b"use 2.5 or later")
 
 def getsvfs(repo):
     """Return appropriate object to access files under .hg/store

          
@@ 392,22 425,22 @@ def getvfs(repo):
 def repocleartagscachefunc(repo):
     """Return the function to clear tags cache according to repo internal API
     """
-    if util.safehasattr(repo, '_tagscache'): # since 2.0 (or 9dca7653b525)
+    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
         # in this case, setattr(repo, '_tagscache', None) or so isn't
         # correct way to clear tags cache, because existing code paths
         # expect _tagscache to be a structured object.
         def clearcache():
             # _tagscache has been filteredpropertycache since 2.5 (or
             # 98c867ac1330), and delattr() can't work in such case
-            if '_tagscache' in vars(repo):
-                del repo.__dict__['_tagscache']
+            if b'_tagscache' in vars(repo):
+                del repo.__dict__[b'_tagscache']
         return clearcache
 
-    repotags = safeattrsetter(repo, '_tags', ignoremissing=True)
+    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
     if repotags: # since 1.4 (or 5614a628d173)
         return lambda : repotags.set(None)
 
-    repotagscache = safeattrsetter(repo, 'tagscache', ignoremissing=True)
+    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
     if repotagscache: # since 0.6 (or d7df759d0e97)
         return lambda : repotagscache.set(None)
 

          
@@ 416,7 449,7 @@ def repocleartagscachefunc(repo):
     # - repo.tags of such Mercurial isn't "callable", and repo.tags()
     #   in perftags() causes failure soon
     # - perf.py itself has been available since 1.1 (or eb240755386d)
-    raise error.Abort(("tags API of this hg command is unknown"))
+    raise error.Abort((b"tags API of this hg command is unknown"))
 
 # utilities to clear cache
 

          
@@ 428,56 461,61 @@ def clearfilecache(repo, attrname):
 
 # perf commands
 
-@command('perfwalk', formatteropts)
+@command(b'perfwalk', formatteropts)
 def perfwalk(ui, repo, *pats, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     m = scmutil.match(repo[None], pats, {})
     timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                               ignored=False))))
     fm.end()
 
-@command('perfannotate', formatteropts)
+@command(b'perfannotate', formatteropts)
 def perfannotate(ui, repo, f, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
-    fc = repo['.'][f]
+    fc = repo[b'.'][f]
     timer(lambda: len(fc.annotate(True)))
     fm.end()
 
-@command('perfstatus',
-         [('u', 'unknown', False,
-           'ask status to look for unknown files')] + formatteropts)
+@command(b'perfstatus',
+         [(b'u', b'unknown', False,
+           b'ask status to look for unknown files')] + formatteropts)
 def perfstatus(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     #m = match.always(repo.root, repo.getcwd())
     #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
     #                                                False))))
     timer, fm = gettimer(ui, opts)
-    timer(lambda: sum(map(len, repo.status(unknown=opts['unknown']))))
+    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
     fm.end()
 
-@command('perfaddremove', formatteropts)
+@command(b'perfaddremove', formatteropts)
 def perfaddremove(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     try:
         oldquiet = repo.ui.quiet
         repo.ui.quiet = True
         matcher = scmutil.match(repo[None])
-        opts['dry_run'] = True
-        timer(lambda: scmutil.addremove(repo, matcher, "", opts))
+        opts[b'dry_run'] = True
+        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
     finally:
         repo.ui.quiet = oldquiet
         fm.end()
 
 def clearcaches(cl):
     # behave somewhat consistently across internal API changes
-    if util.safehasattr(cl, 'clearcaches'):
+    if util.safehasattr(cl, b'clearcaches'):
         cl.clearcaches()
-    elif util.safehasattr(cl, '_nodecache'):
+    elif util.safehasattr(cl, b'_nodecache'):
         from mercurial.node import nullid, nullrev
         cl._nodecache = {nullid: nullrev}
         cl._nodepos = None
 
-@command('perfheads', formatteropts)
+@command(b'perfheads', formatteropts)
 def perfheads(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     cl = repo.changelog
     def d():

          
@@ 486,23 524,28 @@ def perfheads(ui, repo, **opts):
     timer(d)
     fm.end()
 
-@command('perftags', formatteropts)
+@command(b'perftags', formatteropts)
 def perftags(ui, repo, **opts):
     import mercurial.changelog
     import mercurial.manifest
+
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     svfs = getsvfs(repo)
     repocleartagscache = repocleartagscachefunc(repo)
     def t():
         repo.changelog = mercurial.changelog.changelog(svfs)
-        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo)
+        rootmanifest = mercurial.manifest.manifestrevlog(svfs)
+        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
+                                                          rootmanifest)
         repocleartagscache()
         return len(repo.tags())
     timer(t)
     fm.end()
 
-@command('perfancestors', formatteropts)
+@command(b'perfancestors', formatteropts)
 def perfancestors(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     heads = repo.changelog.headrevs()
     def d():

          
@@ 511,8 554,9 @@ def perfancestors(ui, repo, **opts):
     timer(d)
     fm.end()
 
-@command('perfancestorset', formatteropts)
+@command(b'perfancestorset', formatteropts)
 def perfancestorset(ui, repo, revset, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     revs = repo.revs(revset)
     heads = repo.changelog.headrevs()

          
@@ 523,17 567,18 @@ def perfancestorset(ui, repo, revset, **
     timer(d)
     fm.end()
 
-@command('perfbookmarks', formatteropts)
+@command(b'perfbookmarks', formatteropts)
 def perfbookmarks(ui, repo, **opts):
     """benchmark parsing bookmarks from disk to memory"""
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     def d():
-        clearfilecache(repo, '_bookmarks')
+        clearfilecache(repo, b'_bookmarks')
         repo._bookmarks
     timer(d)
     fm.end()
 
-@command('perfbundleread', formatteropts, 'BUNDLE')
+@command(b'perfbundleread', formatteropts, b'BUNDLE')
 def perfbundleread(ui, repo, bundlepath, **opts):
     """Benchmark reading of bundle files.
 

          
@@ 546,9 591,11 @@ def perfbundleread(ui, repo, bundlepath,
         streamclone,
     )
 
+    opts = _byteskwargs(opts)
+
     def makebench(fn):
         def run():
-            with open(bundlepath, 'rb') as fh:
+            with open(bundlepath, b'rb') as fh:
                 bundle = exchange.readbundle(ui, fh, bundlepath)
                 fn(bundle)
 

          
@@ 556,7 603,7 @@ def perfbundleread(ui, repo, bundlepath,
 
     def makereadnbytes(size):
         def run():
-            with open(bundlepath, 'rb') as fh:
+            with open(bundlepath, b'rb') as fh:
                 bundle = exchange.readbundle(ui, fh, bundlepath)
                 while bundle.read(size):
                     pass

          
@@ 565,7 612,7 @@ def perfbundleread(ui, repo, bundlepath,
 
     def makestdioread(size):
         def run():
-            with open(bundlepath, 'rb') as fh:
+            with open(bundlepath, b'rb') as fh:
                 while fh.read(size):
                     pass
 

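makestdioread() above (like the makereadnbytes() and makebench() helpers around it) is a closure factory: each call returns a zero-argument benchmark that reads the bundle in fixed-size chunks, so every benchmark can be timed through the same (fn, title) loop. A standalone sketch of the pattern; the file name is purely illustrative:

    # one benchmark callable per read size, mirroring makestdioread()
    def makereader(path, size):
        def run():
            with open(path, 'rb') as fh:
                while fh.read(size):
                    pass
        return run

    benches = [(makereader('some-bundle.hg', size), 'read(%dk)' % (size // 1024))
               for size in (8192, 16384, 32768, 131072)]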
          
@@ 601,7 648,7 @@ def perfbundleread(ui, repo, bundlepath,
 
     def makepartreadnbytes(size):
         def run():
-            with open(bundlepath, 'rb') as fh:
+            with open(bundlepath, b'rb') as fh:
                 bundle = exchange.readbundle(ui, fh, bundlepath)
                 for part in bundle.iterparts():
                     while part.read(size):

          
@@ 610,49 657,49 @@ def perfbundleread(ui, repo, bundlepath,
         return run
 
     benches = [
-        (makestdioread(8192), 'read(8k)'),
-        (makestdioread(16384), 'read(16k)'),
-        (makestdioread(32768), 'read(32k)'),
-        (makestdioread(131072), 'read(128k)'),
+        (makestdioread(8192), b'read(8k)'),
+        (makestdioread(16384), b'read(16k)'),
+        (makestdioread(32768), b'read(32k)'),
+        (makestdioread(131072), b'read(128k)'),
     ]
 
-    with open(bundlepath, 'rb') as fh:
+    with open(bundlepath, b'rb') as fh:
         bundle = exchange.readbundle(ui, fh, bundlepath)
 
         if isinstance(bundle, changegroup.cg1unpacker):
             benches.extend([
-                (makebench(deltaiter), 'cg1 deltaiter()'),
-                (makebench(iterchunks), 'cg1 getchunks()'),
-                (makereadnbytes(8192), 'cg1 read(8k)'),
-                (makereadnbytes(16384), 'cg1 read(16k)'),
-                (makereadnbytes(32768), 'cg1 read(32k)'),
-                (makereadnbytes(131072), 'cg1 read(128k)'),
+                (makebench(deltaiter), b'cg1 deltaiter()'),
+                (makebench(iterchunks), b'cg1 getchunks()'),
+                (makereadnbytes(8192), b'cg1 read(8k)'),
+                (makereadnbytes(16384), b'cg1 read(16k)'),
+                (makereadnbytes(32768), b'cg1 read(32k)'),
+                (makereadnbytes(131072), b'cg1 read(128k)'),
             ])
         elif isinstance(bundle, bundle2.unbundle20):
             benches.extend([
-                (makebench(forwardchunks), 'bundle2 forwardchunks()'),
-                (makebench(iterparts), 'bundle2 iterparts()'),
-                (makebench(iterpartsseekable), 'bundle2 iterparts() seekable'),
-                (makebench(seek), 'bundle2 part seek()'),
-                (makepartreadnbytes(8192), 'bundle2 part read(8k)'),
-                (makepartreadnbytes(16384), 'bundle2 part read(16k)'),
-                (makepartreadnbytes(32768), 'bundle2 part read(32k)'),
-                (makepartreadnbytes(131072), 'bundle2 part read(128k)'),
+                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
+                (makebench(iterparts), b'bundle2 iterparts()'),
+                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
+                (makebench(seek), b'bundle2 part seek()'),
+                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
+                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
+                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
+                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
             ])
         elif isinstance(bundle, streamclone.streamcloneapplier):
-            raise error.Abort('stream clone bundles not supported')
+            raise error.Abort(b'stream clone bundles not supported')
         else:
-            raise error.Abort('unhandled bundle type: %s' % type(bundle))
+            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
 
     for fn, title in benches:
         timer, fm = gettimer(ui, opts)
         timer(fn, title=title)
         fm.end()
 
-@command('perfchangegroupchangelog', formatteropts +
-         [('', 'version', '02', 'changegroup version'),
-          ('r', 'rev', '', 'revisions to add to changegroup')])
-def perfchangegroupchangelog(ui, repo, version='02', rev=None, **opts):
+@command(b'perfchangegroupchangelog', formatteropts +
+         [(b'', b'version', b'02', b'changegroup version'),
+          (b'r', b'rev', b'', b'revisions to add to changegroup')])
+def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
     """Benchmark producing a changelog group for a changegroup.
 
     This measures the time spent processing the changelog during a

          
@@ 662,92 709,99 @@ def perfchangegroupchangelog(ui, repo, v
 
     By default, all revisions are added to the changegroup.
     """
+    opts = _byteskwargs(opts)
     cl = repo.changelog
-    revs = [cl.lookup(r) for r in repo.revs(rev or 'all()')]
+    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
     bundler = changegroup.getbundler(version, repo)
 
-    def lookup(node):
-        # The real bundler reads the revision in order to access the
-        # manifest node and files list. Do that here.
-        cl.read(node)
-        return node
-
     def d():
-        for chunk in bundler.group(revs, cl, lookup):
+        state, chunks = bundler._generatechangelog(cl, nodes)
+        for chunk in chunks:
             pass
 
     timer, fm = gettimer(ui, opts)
-    timer(d)
+
+    # Terminal printing can interfere with timing. So disable it.
+    with ui.configoverride({(b'progress', b'disable'): True}):
+        timer(d)
+
     fm.end()
 
-@command('perfdirs', formatteropts)
+@command(b'perfdirs', formatteropts)
 def perfdirs(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     dirstate = repo.dirstate
-    'a' in dirstate
+    b'a' in dirstate
     def d():
-        dirstate.hasdir('a')
+        dirstate.hasdir(b'a')
         del dirstate._map._dirs
     timer(d)
     fm.end()
 
-@command('perfdirstate', formatteropts)
+@command(b'perfdirstate', formatteropts)
 def perfdirstate(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
-    "a" in repo.dirstate
+    b"a" in repo.dirstate
     def d():
         repo.dirstate.invalidate()
-        "a" in repo.dirstate
+        b"a" in repo.dirstate
     timer(d)
     fm.end()
 
-@command('perfdirstatedirs', formatteropts)
+@command(b'perfdirstatedirs', formatteropts)
 def perfdirstatedirs(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
-    "a" in repo.dirstate
+    b"a" in repo.dirstate
     def d():
-        repo.dirstate.hasdir("a")
+        repo.dirstate.hasdir(b"a")
         del repo.dirstate._map._dirs
     timer(d)
     fm.end()
 
-@command('perfdirstatefoldmap', formatteropts)
+@command(b'perfdirstatefoldmap', formatteropts)
 def perfdirstatefoldmap(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     dirstate = repo.dirstate
-    'a' in dirstate
+    b'a' in dirstate
     def d():
-        dirstate._map.filefoldmap.get('a')
+        dirstate._map.filefoldmap.get(b'a')
         del dirstate._map.filefoldmap
     timer(d)
     fm.end()
 
-@command('perfdirfoldmap', formatteropts)
+@command(b'perfdirfoldmap', formatteropts)
 def perfdirfoldmap(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     dirstate = repo.dirstate
-    'a' in dirstate
+    b'a' in dirstate
     def d():
-        dirstate._map.dirfoldmap.get('a')
+        dirstate._map.dirfoldmap.get(b'a')
         del dirstate._map.dirfoldmap
         del dirstate._map._dirs
     timer(d)
     fm.end()
 
-@command('perfdirstatewrite', formatteropts)
+@command(b'perfdirstatewrite', formatteropts)
 def perfdirstatewrite(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     ds = repo.dirstate
-    "a" in ds
+    b"a" in ds
     def d():
         ds._dirty = True
         ds.write(repo.currenttransaction())
     timer(d)
     fm.end()
 
-@command('perfmergecalculate',
-         [('r', 'rev', '.', 'rev to merge against')] + formatteropts)
+@command(b'perfmergecalculate',
+         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
 def perfmergecalculate(ui, repo, rev, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     wctx = repo[None]
     rctx = scmutil.revsingle(repo, rev, rev)

          
@@ 763,8 817,9 @@ def perfmergecalculate(ui, repo, rev, **
     timer(d)
     fm.end()
 
-@command('perfpathcopies', [], "REV REV")
+@command(b'perfpathcopies', [], b"REV REV")
 def perfpathcopies(ui, repo, rev1, rev2, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     ctx1 = scmutil.revsingle(repo, rev1, rev1)
     ctx2 = scmutil.revsingle(repo, rev2, rev2)

          
@@ 773,26 828,27 @@ def perfpathcopies(ui, repo, rev1, rev2,
     timer(d)
     fm.end()
 
-@command('perfphases',
-         [('', 'full', False, 'include file reading time too'),
-         ], "")
+@command(b'perfphases',
+         [(b'', b'full', False, b'include file reading time too'),
+          ], b"")
 def perfphases(ui, repo, **opts):
     """benchmark phasesets computation"""
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     _phases = repo._phasecache
-    full = opts.get('full')
+    full = opts.get(b'full')
     def d():
         phases = _phases
         if full:
-            clearfilecache(repo, '_phasecache')
+            clearfilecache(repo, b'_phasecache')
             phases = repo._phasecache
         phases.invalidate()
         phases.loadphaserevs(repo)
     timer(d)
     fm.end()
 
-@command('perfphasesremote',
-         [], "[DEST]")
+@command(b'perfphasesremote',
+         [], b"[DEST]")
 def perfphasesremote(ui, repo, dest=None, **opts):
     """benchmark time needed to analyse phases of the remote server"""
     from mercurial.node import (

          
@@ 803,16 859,17 @@ def perfphasesremote(ui, repo, dest=None
         hg,
         phases,
     )
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
 
-    path = ui.paths.getpath(dest, default=('default-push', 'default'))
+    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
     if not path:
-        raise error.Abort(('default repository not configured!'),
-                         hint=("see 'hg help config.paths'"))
+        raise error.Abort((b'default repository not configured!'),
+                          hint=(b"see 'hg help config.paths'"))
     dest = path.pushloc or path.loc
-    branches = (path.branch, opts.get('branch') or [])
-    ui.status(('analysing phase of %s\n') % util.hidepassword(dest))
-    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
+    branches = (path.branch, opts.get(b'branch') or [])
+    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
+    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
     other = hg.peer(repo, opts, dest)
 
     # easier to perform discovery through the operation

          
@@ 822,25 879,25 @@ def perfphasesremote(ui, repo, dest=None
     remotesubset = op.fallbackheads
 
     with other.commandexecutor() as e:
-        remotephases = e.callcommand('listkeys',
-                       {'namespace': 'phases'}).result()
+        remotephases = e.callcommand(b'listkeys',
+                       {b'namespace': b'phases'}).result()
     del other
-    publishing = remotephases.get('publishing', False)
+    publishing = remotephases.get(b'publishing', False)
     if publishing:
-        ui.status(('publishing: yes\n'))
+        ui.status((b'publishing: yes\n'))
     else:
-        ui.status(('publishing: no\n'))
+        ui.status((b'publishing: no\n'))
 
     nodemap = repo.changelog.nodemap
     nonpublishroots = 0
     for nhex, phase in remotephases.iteritems():
-        if nhex == 'publishing': # ignore data related to publish option
+        if nhex == b'publishing': # ignore data related to publish option
             continue
         node = bin(nhex)
         if node in nodemap and int(phase):
             nonpublishroots += 1
-    ui.status(('number of roots: %d\n') % len(remotephases))
-    ui.status(('number of known non public roots: %d\n') % nonpublishroots)
+    ui.status((b'number of roots: %d\n') % len(remotephases))
+    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
     def d():
         phases.remotephasessummary(repo,
                                    remotesubset,

          
@@ 848,23 905,45 @@ def perfphasesremote(ui, repo, dest=None
     timer(d)
     fm.end()
 
-@command('perfmanifest', [], 'REV')
-def perfmanifest(ui, repo, rev, **opts):
+@command(b'perfmanifest',[
+            (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
+            (b'', b'clear-disk', False, b'clear on-disk caches too'),
+         ] + formatteropts, b'REV|NODE')
+def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
     """benchmark the time to read a manifest from disk and return a usable
     dict-like object
 
     Manifest caches are cleared before retrieval."""
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
-    ctx = scmutil.revsingle(repo, rev, rev)
-    t = ctx.manifestnode()
+    if not manifest_rev:
+        ctx = scmutil.revsingle(repo, rev, rev)
+        t = ctx.manifestnode()
+    else:
+        from mercurial.node import bin
+
+        if len(rev) == 40:
+            t = bin(rev)
+        else:
+            try:
+                rev = int(rev)
+
+                if util.safehasattr(repo.manifestlog, b'getstorage'):
+                    t = repo.manifestlog.getstorage(b'').node(rev)
+                else:
+                    t = repo.manifestlog._revlog.lookup(rev)
+            except ValueError:
+                raise error.Abort(b'manifest revision must be integer or full '
+                                  b'node')
     def d():
-        repo.manifestlog.clearcaches()
+        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
         repo.manifestlog[t].read()
     timer(d)
     fm.end()
 
-@command('perfchangeset', formatteropts)
+@command(b'perfchangeset', formatteropts)
 def perfchangeset(ui, repo, rev, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     n = scmutil.revsingle(repo, rev).node()
     def d():

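perfmanifest above now accepts either a changeset revision or, with --manifest-rev, a manifest node/revision directly; the new argument handling treats a 40-character value as a full hex node and anything else as an integer revision. A small, hypothetical illustration of just that dispatch (the helper name is made up):

    from binascii import unhexlify

    def classify_manifest_arg(arg):
        # 40 hex characters -> binary node; otherwise it must parse as an int rev
        if len(arg) == 40:
            return 'node', unhexlify(arg)
        return 'rev', int(arg)

    assert classify_manifest_arg(b'5') == ('rev', 5)
    assert classify_manifest_arg(b'1' * 40) == ('node', unhexlify(b'1' * 40))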
          
@@ 873,50 952,54 @@ def perfchangeset(ui, repo, rev, **opts)
     timer(d)
     fm.end()
 
-@command('perfindex', formatteropts)
+@command(b'perfindex', formatteropts)
 def perfindex(ui, repo, **opts):
     import mercurial.revlog
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
-    n = repo["tip"].node()
+    n = repo[b"tip"].node()
     svfs = getsvfs(repo)
     def d():
-        cl = mercurial.revlog.revlog(svfs, "00changelog.i")
+        cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
         cl.rev(n)
     timer(d)
     fm.end()
 
-@command('perfstartup', formatteropts)
+@command(b'perfstartup', formatteropts)
 def perfstartup(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
-    cmd = sys.argv[0]
     def d():
-        if os.name != 'nt':
-            os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
+        if os.name != r'nt':
+            os.system(b"HGRCPATH= %s version -q > /dev/null" %
+                      fsencode(sys.argv[0]))
         else:
-            os.environ['HGRCPATH'] = ' '
-            os.system("%s version -q > NUL" % cmd)
+            os.environ[r'HGRCPATH'] = r' '
+            os.system(r"%s version -q > NUL" % sys.argv[0])
     timer(d)
     fm.end()
 
-@command('perfparents', formatteropts)
+@command(b'perfparents', formatteropts)
 def perfparents(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     # control the number of commits perfparents iterates over
     # experimental config: perf.parentscount
-    count = getint(ui, "perf", "parentscount", 1000)
+    count = getint(ui, b"perf", b"parentscount", 1000)
     if len(repo.changelog) < count:
-        raise error.Abort("repo needs %d commits for this test" % count)
+        raise error.Abort(b"repo needs %d commits for this test" % count)
     repo = repo.unfiltered()
-    nl = [repo.changelog.node(i) for i in xrange(count)]
+    nl = [repo.changelog.node(i) for i in _xrange(count)]
     def d():
         for n in nl:
             repo.changelog.parents(n)
     timer(d)
     fm.end()
 
-@command('perfctxfiles', formatteropts)
+@command(b'perfctxfiles', formatteropts)
 def perfctxfiles(ui, repo, x, **opts):
+    opts = _byteskwargs(opts)
     x = int(x)
     timer, fm = gettimer(ui, opts)
     def d():

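In perfstartup above, sys.argv[0] is a str under Python 3 but is interpolated into a bytes format string, hence the fsencode() wrapper (os.fsencode on Python 3; the identity fallback covers Python 2, where str is already bytes). A minimal illustration:

    import os
    import sys

    cmd = os.fsencode(sys.argv[0])   # bytes path of the running script
    line = b"HGRCPATH= %s version -q > /dev/null" % cmd
    assert isinstance(line, bytes)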
          
@@ 924,8 1007,9 @@ def perfctxfiles(ui, repo, x, **opts):
     timer(d)
     fm.end()
 
-@command('perfrawfiles', formatteropts)
+@command(b'perfrawfiles', formatteropts)
 def perfrawfiles(ui, repo, x, **opts):
+    opts = _byteskwargs(opts)
     x = int(x)
     timer, fm = gettimer(ui, opts)
     cl = repo.changelog

          
@@ 934,77 1018,119 @@ def perfrawfiles(ui, repo, x, **opts):
     timer(d)
     fm.end()
 
-@command('perflookup', formatteropts)
+@command(b'perflookup', formatteropts)
 def perflookup(ui, repo, rev, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     timer(lambda: len(repo.lookup(rev)))
     fm.end()
 
-@command('perfrevrange', formatteropts)
+@command(b'perflinelogedits',
+         [(b'n', b'edits', 10000, b'number of edits'),
+          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
+          ], norepo=True)
+def perflinelogedits(ui, **opts):
+    from mercurial import linelog
+
+    opts = _byteskwargs(opts)
+
+    edits = opts[b'edits']
+    maxhunklines = opts[b'max_hunk_lines']
+
+    maxb1 = 100000
+    random.seed(0)
+    randint = random.randint
+    currentlines = 0
+    arglist = []
+    for rev in _xrange(edits):
+        a1 = randint(0, currentlines)
+        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
+        b1 = randint(0, maxb1)
+        b2 = randint(b1, b1 + maxhunklines)
+        currentlines += (b2 - b1) - (a2 - a1)
+        arglist.append((rev, a1, a2, b1, b2))
+
+    def d():
+        ll = linelog.linelog()
+        for args in arglist:
+            ll.replacelines(*args)
+
+    timer, fm = gettimer(ui, opts)
+    timer(d)
+    fm.end()
+
+@command(b'perfrevrange', formatteropts)
 def perfrevrange(ui, repo, *specs, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     revrange = scmutil.revrange
     timer(lambda: len(revrange(repo, specs)))
     fm.end()
 
-@command('perfnodelookup', formatteropts)
+@command(b'perfnodelookup', formatteropts)
 def perfnodelookup(ui, repo, rev, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     import mercurial.revlog
     mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
     n = scmutil.revsingle(repo, rev).node()
-    cl = mercurial.revlog.revlog(getsvfs(repo), "00changelog.i")
+    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
     def d():
         cl.rev(n)
         clearcaches(cl)
     timer(d)
     fm.end()
 
-@command('perflog',
-         [('', 'rename', False, 'ask log to follow renames')] + formatteropts)
+@command(b'perflog',
+         [(b'', b'rename', False, b'ask log to follow renames')
+         ] + formatteropts)
 def perflog(ui, repo, rev=None, **opts):
+    opts = _byteskwargs(opts)
     if rev is None:
         rev=[]
     timer, fm = gettimer(ui, opts)
     ui.pushbuffer()
-    timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
-                               copies=opts.get('rename')))
+    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
+                               copies=opts.get(b'rename')))
     ui.popbuffer()
     fm.end()
 
-@command('perfmoonwalk', formatteropts)
+@command(b'perfmoonwalk', formatteropts)
 def perfmoonwalk(ui, repo, **opts):
     """benchmark walking the changelog backwards
 
     This also loads the changelog data for each revision in the changelog.
     """
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     def moonwalk():
-        for i in xrange(len(repo), -1, -1):
+        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
             ctx = repo[i]
             ctx.branch() # read changelog data (in addition to the index)
     timer(moonwalk)
     fm.end()
 
-@command('perftemplating',
-         [('r', 'rev', [], 'revisions to run the template on'),
-         ] + formatteropts)
+@command(b'perftemplating',
+         [(b'r', b'rev', [], b'revisions to run the template on'),
+          ] + formatteropts)
 def perftemplating(ui, repo, testedtemplate=None, **opts):
     """test the rendering time of a given template"""
     if makelogtemplater is None:
-        raise error.Abort(("perftemplating not available with this Mercurial"),
-                          hint="use 4.3 or later")
+        raise error.Abort((b"perftemplating not available with this Mercurial"),
+                          hint=b"use 4.3 or later")
+
+    opts = _byteskwargs(opts)
 
     nullui = ui.copy()
-    nullui.fout = open(os.devnull, 'wb')
+    nullui.fout = open(os.devnull, r'wb')
     nullui.disablepager()
-    revs = opts.get('rev')
+    revs = opts.get(b'rev')
     if not revs:
-        revs = ['all()']
+        revs = [b'all()']
     revs = list(scmutil.revrange(repo, revs))
 
-    defaulttemplate = ('{date|shortdate} [{rev}:{node|short}]'
-                       ' {author|person}: {desc|firstline}\n')
+    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
+                       b' {author|person}: {desc|firstline}\n')
     if testedtemplate is None:
         testedtemplate = defaulttemplate
     displayer = makelogtemplater(nullui, repo, testedtemplate)

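The synthetic workload built by perflinelogedits above keeps a running line count so that every generated a-range falls inside the file produced by the previous edits; the bookkeeping is just the net length change of each hunk. A tiny standalone check of that invariant with made-up hunks:

    # replacing lines a1..a2 with b1..b2 changes the length by (b2-b1) - (a2-a1)
    currentlines = 0
    for a1, a2, b1, b2 in [(0, 0, 0, 3), (1, 2, 0, 1)]:
        assert 0 <= a1 <= a2 <= currentlines
        currentlines += (b2 - b1) - (a2 - a1)
    assert currentlines == 3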
          
@@ 1018,14 1144,16 @@ def perftemplating(ui, repo, testedtempl
     timer(format)
     fm.end()
 
-@command('perfcca', formatteropts)
+@command(b'perfcca', formatteropts)
 def perfcca(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
     fm.end()
 
-@command('perffncacheload', formatteropts)
+@command(b'perffncacheload', formatteropts)
 def perffncacheload(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     s = repo.store
     def d():

          
@@ 1033,14 1161,15 @@ def perffncacheload(ui, repo, **opts):
     timer(d)
     fm.end()
 
-@command('perffncachewrite', formatteropts)
+@command(b'perffncachewrite', formatteropts)
 def perffncachewrite(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     s = repo.store
     lock = repo.lock()
     s.fncache._load()
-    tr = repo.transaction('perffncachewrite')
-    tr.addbackup('fncache')
+    tr = repo.transaction(b'perffncachewrite')
+    tr.addbackup(b'fncache')
     def d():
         s.fncache._dirty = True
         s.fncache.write(tr)

          
@@ 1049,8 1178,9 @@ def perffncachewrite(ui, repo, **opts):
     lock.release()
     fm.end()
 
-@command('perffncacheencode', formatteropts)
+@command(b'perffncacheencode', formatteropts)
 def perffncacheencode(ui, repo, **opts):
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     s = repo.store
     s.fncache._load()

          
@@ 1076,15 1206,25 @@ def _bdiffworker(q, blocks, xdiff, ready
         with ready:
             ready.wait()
 
-@command('perfbdiff', revlogopts + formatteropts + [
-    ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
-    ('', 'alldata', False, 'test bdiffs for all associated revisions'),
-    ('', 'threads', 0, 'number of thread to use (disable with 0)'),
-    ('', 'blocks', False, 'test computing diffs into blocks'),
-    ('', 'xdiff', False, 'use xdiff algorithm'),
+def _manifestrevision(repo, mnode):
+    ml = repo.manifestlog
+
+    if util.safehasattr(ml, b'getstorage'):
+        store = ml.getstorage(b'')
+    else:
+        store = ml._revlog
+
+    return store.revision(mnode)
+
+@command(b'perfbdiff', revlogopts + formatteropts + [
+    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
+    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
+    (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
+    (b'', b'blocks', False, b'test computing diffs into blocks'),
+    (b'', b'xdiff', False, b'use xdiff algorithm'),
     ],
 
-    '-c|-m|FILE REV')
+    b'-c|-m|FILE REV')
 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
     """benchmark a bdiff between revisions
 

          
@@ 1097,33 1237,33 @@ def perfbdiff(ui, repo, file_, rev=None,
     measure bdiffs for all changes related to that changeset (manifest
     and filelogs).
     """
-    opts = pycompat.byteskwargs(opts)
+    opts = _byteskwargs(opts)
 
-    if opts['xdiff'] and not opts['blocks']:
-        raise error.CommandError('perfbdiff', '--xdiff requires --blocks')
+    if opts[b'xdiff'] and not opts[b'blocks']:
+        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
 
-    if opts['alldata']:
-        opts['changelog'] = True
+    if opts[b'alldata']:
+        opts[b'changelog'] = True
 
-    if opts.get('changelog') or opts.get('manifest'):
+    if opts.get(b'changelog') or opts.get(b'manifest'):
         file_, rev = None, file_
     elif rev is None:
-        raise error.CommandError('perfbdiff', 'invalid arguments')
+        raise error.CommandError(b'perfbdiff', b'invalid arguments')
 
-    blocks = opts['blocks']
-    xdiff = opts['xdiff']
+    blocks = opts[b'blocks']
+    xdiff = opts[b'xdiff']
     textpairs = []
 
-    r = cmdutil.openrevlog(repo, 'perfbdiff', file_, opts)
+    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
 
     startrev = r.rev(r.lookup(rev))
     for rev in range(startrev, min(startrev + count, len(r) - 1)):
-        if opts['alldata']:
+        if opts[b'alldata']:
             # Load revisions associated with changeset.
             ctx = repo[rev]
-            mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
+            mtext = _manifestrevision(repo, ctx.manifestnode())
             for pctx in ctx.parents():
-                pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
+                pman = _manifestrevision(repo, pctx.manifestnode())
                 textpairs.append((pman, mtext))
 
             # Load filelog revisions by iterating manifest delta.

          
@@ 1150,18 1290,18 @@ def perfbdiff(ui, repo, file_, rev=None,
                     mdiff.textdiff(*pair)
     else:
         q = queue()
-        for i in xrange(threads):
+        for i in _xrange(threads):
             q.put(None)
         ready = threading.Condition()
         done = threading.Event()
-        for i in xrange(threads):
+        for i in _xrange(threads):
             threading.Thread(target=_bdiffworker,
                              args=(q, blocks, xdiff, ready, done)).start()
         q.join()
         def d():
             for pair in textpairs:
                 q.put(pair)
-            for i in xrange(threads):
+            for i in _xrange(threads):
                 q.put(None)
             with ready:
                 ready.notify_all()

          
@@ 1172,15 1312,15 @@ def perfbdiff(ui, repo, file_, rev=None,
 
     if withthreads:
         done.set()
-        for i in xrange(threads):
+        for i in _xrange(threads):
             q.put(None)
         with ready:
             ready.notify_all()
 
-@command('perfunidiff', revlogopts + formatteropts + [
-    ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
-    ('', 'alldata', False, 'test unidiffs for all associated revisions'),
-    ], '-c|-m|FILE REV')
+@command(b'perfunidiff', revlogopts + formatteropts + [
+    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
+    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
+    ], b'-c|-m|FILE REV')
 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
     """benchmark a unified diff between revisions
 

          
@@ 1196,26 1336,27 @@ def perfunidiff(ui, repo, file_, rev=Non
     measure diffs for all changes related to that changeset (manifest
     and filelogs).
     """
-    if opts['alldata']:
-        opts['changelog'] = True
+    opts = _byteskwargs(opts)
+    if opts[b'alldata']:
+        opts[b'changelog'] = True
 
-    if opts.get('changelog') or opts.get('manifest'):
+    if opts.get(b'changelog') or opts.get(b'manifest'):
         file_, rev = None, file_
     elif rev is None:
-        raise error.CommandError('perfunidiff', 'invalid arguments')
+        raise error.CommandError(b'perfunidiff', b'invalid arguments')
 
     textpairs = []
 
-    r = cmdutil.openrevlog(repo, 'perfunidiff', file_, opts)
+    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
 
     startrev = r.rev(r.lookup(rev))
     for rev in range(startrev, min(startrev + count, len(r) - 1)):
-        if opts['alldata']:
+        if opts[b'alldata']:
             # Load revisions associated with changeset.
             ctx = repo[rev]
-            mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
+            mtext = _manifestrevision(repo, ctx.manifestnode())
             for pctx in ctx.parents():
-                pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
+                pman = _manifestrevision(repo, pctx.manifestnode())
                 textpairs.append((pman, mtext))
 
             # Load filelog revisions by iterating manifest delta.

          
@@ 1234,7 1375,7 @@ def perfunidiff(ui, repo, file_, rev=Non
         for left, right in textpairs:
             # The date strings don't matter, so we pass empty strings.
             headerlines, hunks = mdiff.unidiff(
-                left, '', right, '', 'left', 'right', binary=False)
+                left, b'', right, b'', b'left', b'right', binary=False)
             # consume iterators in roughly the way patch.py does
             b'\n'.join(headerlines)
             b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

          
@@ 1242,9 1383,10 @@ def perfunidiff(ui, repo, file_, rev=Non
     timer(d)
     fm.end()
 
-@command('perfdiffwd', formatteropts)
+@command(b'perfdiffwd', formatteropts)
 def perfdiffwd(ui, repo, **opts):
     """Profile diff of working directory changes"""
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     options = {
         'w': 'ignore_all_space',

          
@@ 1253,17 1395,18 @@ def perfdiffwd(ui, repo, **opts):
         }
 
     for diffopt in ('', 'w', 'b', 'B', 'wB'):
-        opts = dict((options[c], '1') for c in diffopt)
+        opts = dict((options[c], b'1') for c in diffopt)
         def d():
             ui.pushbuffer()
             commands.diff(ui, repo, **opts)
             ui.popbuffer()
-        title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
+        diffopt = diffopt.encode('ascii')
+        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
         timer(d, title)
     fm.end()
 
-@command('perfrevlogindex', revlogopts + formatteropts,
-         '-c|-m|FILE')
+@command(b'perfrevlogindex', revlogopts + formatteropts,
+         b'-c|-m|FILE')
 def perfrevlogindex(ui, repo, file_=None, **opts):
     """Benchmark operations against a revlog index.
 

          
@@ 1272,19 1415,21 @@ def perfrevlogindex(ui, repo, file_=None
     index data.
     """
 
-    rl = cmdutil.openrevlog(repo, 'perfrevlogindex', file_, opts)
+    opts = _byteskwargs(opts)
+
+    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
 
     opener = getattr(rl, 'opener')  # trick linter
     indexfile = rl.indexfile
     data = opener.read(indexfile)
 
-    header = struct.unpack('>I', data[0:4])[0]
+    header = struct.unpack(b'>I', data[0:4])[0]
     version = header & 0xFFFF
     if version == 1:
         revlogio = revlog.revlogio()
         inline = header & (1 << 16)
     else:
-        raise error.Abort(('unsupported revlog version: %d') % version)
+        raise error.Abort((b'unsupported revlog version: %d') % version)
 
     rllen = len(rl)
 

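The index header decoding above packs the revlog flags and version into a single 32-bit big-endian word: the low 16 bits hold the version and bit 16 is the inline-data flag. A worked example with sample bytes (not real index data):

    import struct

    header = struct.unpack('>I', b'\x00\x01\x00\x01')[0]   # 0x00010001
    version = header & 0xFFFF        # -> 1 (the "RevlogNG" format)
    inline = header & (1 << 16)      # -> 65536, truthy: inline data is set
    assert (version, bool(inline)) == (1, True)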
          
@@ 1344,33 1489,33 @@ def perfrevlogindex(ui, repo, file_=None
                     pass
 
     benches = [
-        (constructor, 'revlog constructor'),
-        (read, 'read'),
-        (parseindex, 'create index object'),
-        (lambda: getentry(0), 'retrieve index entry for rev 0'),
-        (lambda: resolvenode('a' * 20), 'look up missing node'),
-        (lambda: resolvenode(node0), 'look up node at rev 0'),
-        (lambda: resolvenode(node25), 'look up node at 1/4 len'),
-        (lambda: resolvenode(node50), 'look up node at 1/2 len'),
-        (lambda: resolvenode(node75), 'look up node at 3/4 len'),
-        (lambda: resolvenode(node100), 'look up node at tip'),
+        (constructor, b'revlog constructor'),
+        (read, b'read'),
+        (parseindex, b'create index object'),
+        (lambda: getentry(0), b'retrieve index entry for rev 0'),
+        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
+        (lambda: resolvenode(node0), b'look up node at rev 0'),
+        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
+        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
+        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
+        (lambda: resolvenode(node100), b'look up node at tip'),
         # 2x variation is to measure caching impact.
         (lambda: resolvenodes(allnodes),
-         'look up all nodes (forward)'),
+         b'look up all nodes (forward)'),
         (lambda: resolvenodes(allnodes, 2),
-         'look up all nodes 2x (forward)'),
+         b'look up all nodes 2x (forward)'),
         (lambda: resolvenodes(allnodesrev),
-         'look up all nodes (reverse)'),
+         b'look up all nodes (reverse)'),
         (lambda: resolvenodes(allnodesrev, 2),
-         'look up all nodes 2x (reverse)'),
+         b'look up all nodes 2x (reverse)'),
         (lambda: getentries(allrevs),
-         'retrieve all index entries (forward)'),
+         b'retrieve all index entries (forward)'),
         (lambda: getentries(allrevs, 2),
-         'retrieve all index entries 2x (forward)'),
+         b'retrieve all index entries 2x (forward)'),
         (lambda: getentries(allrevsrev),
-         'retrieve all index entries (reverse)'),
+         b'retrieve all index entries (reverse)'),
         (lambda: getentries(allrevsrev, 2),
-         'retrieve all index entries 2x (reverse)'),
+         b'retrieve all index entries 2x (reverse)'),
     ]
 
     for fn, title in benches:

          
@@ 1378,11 1523,11 @@ def perfrevlogindex(ui, repo, file_=None
         timer(fn, title=title)
         fm.end()
 
-@command('perfrevlogrevisions', revlogopts + formatteropts +
-         [('d', 'dist', 100, 'distance between the revisions'),
-          ('s', 'startrev', 0, 'revision to start reading at'),
-          ('', 'reverse', False, 'read in reverse')],
-         '-c|-m|FILE')
+@command(b'perfrevlogrevisions', revlogopts + formatteropts +
+         [(b'd', b'dist', 100, b'distance between the revisions'),
+          (b's', b'startrev', 0, b'revision to start reading at'),
+          (b'', b'reverse', False, b'read in reverse')],
+         b'-c|-m|FILE')
 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                         **opts):
     """Benchmark reading a series of revisions from a revlog.

          
@@ 1392,21 1537,26 @@ def perfrevlogrevisions(ui, repo, file_=
 
     The start revision can be defined via ``-s/--startrev``.
     """
-    rl = cmdutil.openrevlog(repo, 'perfrevlogrevisions', file_, opts)
+    opts = _byteskwargs(opts)
+
+    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
     rllen = getlen(ui)(rl)
 
+    if startrev < 0:
+        startrev = rllen + startrev
+
     def d():
         rl.clearcaches()
 
         beginrev = startrev
         endrev = rllen
-        dist = opts['dist']
+        dist = opts[b'dist']
 
         if reverse:
             beginrev, endrev = endrev, beginrev
             dist = -1 * dist
 
-        for x in xrange(beginrev, endrev, dist):
+        for x in _xrange(beginrev, endrev, dist):
             # Old revisions don't support passing int.
             n = rl.node(x)
             rl.revision(n)

          
@@ 1415,10 1565,10 @@ def perfrevlogrevisions(ui, repo, file_=
     timer(d)
     fm.end()
 
-@command('perfrevlogchunks', revlogopts + formatteropts +
-         [('e', 'engines', '', 'compression engines to use'),
-          ('s', 'startrev', 0, 'revision to start at')],
-         '-c|-m|FILE')
+@command(b'perfrevlogchunks', revlogopts + formatteropts +
+         [(b'e', b'engines', b'', b'compression engines to use'),
+          (b's', b'startrev', 0, b'revision to start at')],
+         b'-c|-m|FILE')
 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
     """Benchmark operations on revlog chunks.
 

          
@@ 1431,7 1581,9 @@ def perfrevlogchunks(ui, repo, file_=Non
     For measurements of higher-level operations like resolving revisions,
     see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
     """
-    rl = cmdutil.openrevlog(repo, 'perfrevlogchunks', file_, opts)
+    opts = _byteskwargs(opts)
+
+    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
 
     # _chunkraw was renamed to _getsegmentforrevs.
     try:

          
@@ 1441,19 1593,19 @@ def perfrevlogchunks(ui, repo, file_=Non
 
     # Verify engines argument.
     if engines:
-        engines = set(e.strip() for e in engines.split(','))
+        engines = set(e.strip() for e in engines.split(b','))
         for engine in engines:
             try:
                 util.compressionengines[engine]
             except KeyError:
-                raise error.Abort('unknown compression engine: %s' % engine)
+                raise error.Abort(b'unknown compression engine: %s' % engine)
     else:
         engines = []
         for e in util.compengines:
             engine = util.compengines[e]
             try:
                 if engine.available():
-                    engine.revlogcompressor().compress('dummy')
+                    engine.revlogcompressor().compress(b'dummy')
                     engines.append(e)
             except NotImplementedError:
                 pass

          
@@ 1513,27 1665,27 @@ def perfrevlogchunks(ui, repo, file_=Non
             rl._compressor = oldcompressor
 
     benches = [
-        (lambda: doread(), 'read'),
-        (lambda: doreadcachedfh(), 'read w/ reused fd'),
-        (lambda: doreadbatch(), 'read batch'),
-        (lambda: doreadbatchcachedfh(), 'read batch w/ reused fd'),
-        (lambda: dochunk(), 'chunk'),
-        (lambda: dochunkbatch(), 'chunk batch'),
+        (lambda: doread(), b'read'),
+        (lambda: doreadcachedfh(), b'read w/ reused fd'),
+        (lambda: doreadbatch(), b'read batch'),
+        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
+        (lambda: dochunk(), b'chunk'),
+        (lambda: dochunkbatch(), b'chunk batch'),
     ]
 
     for engine in sorted(engines):
         compressor = util.compengines[engine].revlogcompressor()
         benches.append((functools.partial(docompress, compressor),
-                        'compress w/ %s' % engine))
+                        b'compress w/ %s' % engine))
 
     for fn, title in benches:
         timer, fm = gettimer(ui, opts)
         timer(fn, title=title)
         fm.end()
 
-@command('perfrevlogrevision', revlogopts + formatteropts +
-         [('', 'cache', False, 'use caches instead of clearing')],
-         '-c|-m|FILE REV')
+@command(b'perfrevlogrevision', revlogopts + formatteropts +
+         [(b'', b'cache', False, b'use caches instead of clearing')],
+         b'-c|-m|FILE REV')
 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
     """Benchmark obtaining a revlog revision.
 

          
@@ 1547,12 1699,14 @@ def perfrevlogrevision(ui, repo, file_, 
 
     This command measures the time spent in each of these phases.
     """
-    if opts.get('changelog') or opts.get('manifest'):
+    opts = _byteskwargs(opts)
+
+    if opts.get(b'changelog') or opts.get(b'manifest'):
         file_, rev = None, file_
     elif rev is None:
-        raise error.CommandError('perfrevlogrevision', 'invalid arguments')
+        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
 
-    r = cmdutil.openrevlog(repo, 'perfrevlogrevision', file_, opts)
+    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
 
     # _chunkraw was renamed to _getsegmentforrevs.
     try:

          
@@ 1622,18 1776,18 @@ def perfrevlogrevision(ui, repo, file_, 
     data = segmentforrevs(chain[0], chain[-1])[1]
     rawchunks = getrawchunks(data, chain)
     bins = r._chunks(chain)
-    text = str(bins[0])
+    text = bytes(bins[0])
     bins = bins[1:]
     text = mdiff.patches(text, bins)
 
     benches = [
-        (lambda: dorevision(), 'full'),
-        (lambda: dodeltachain(rev), 'deltachain'),
-        (lambda: doread(chain), 'read'),
-        (lambda: dorawchunks(data, chain), 'rawchunks'),
-        (lambda: dodecompress(rawchunks), 'decompress'),
-        (lambda: dopatch(text, bins), 'patch'),
-        (lambda: dohash(text), 'hash'),
+        (lambda: dorevision(), b'full'),
+        (lambda: dodeltachain(rev), b'deltachain'),
+        (lambda: doread(chain), b'read'),
+        (lambda: dorawchunks(data, chain), b'rawchunks'),
+        (lambda: dodecompress(rawchunks), b'decompress'),
+        (lambda: dopatch(text, bins), b'patch'),
+        (lambda: dohash(text), b'hash'),
     ]
 
     for fn, title in benches:

          
@@ 1641,16 1795,18 @@ def perfrevlogrevision(ui, repo, file_, 
         timer(fn, title=title)
         fm.end()
 
-@command('perfrevset',
-         [('C', 'clear', False, 'clear volatile cache between each call.'),
-          ('', 'contexts', False, 'obtain changectx for each revision')]
-         + formatteropts, "REVSET")
+@command(b'perfrevset',
+         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
+          (b'', b'contexts', False, b'obtain changectx for each revision')]
+         + formatteropts, b"REVSET")
 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
     """benchmark the execution time of a revset
 
     Use the --clear option if you need to evaluate the impact of building the
     volatile revision set caches on revset execution. The volatile caches hold
     filtered and obsolescence-related data."""
+    opts = _byteskwargs(opts)
+
     timer, fm = gettimer(ui, opts)
     def d():
         if clear:

          
@@ 1662,21 1818,22 @@ def perfrevset(ui, repo, expr, clear=Fal
     timer(d)
     fm.end()
 
-@command('perfvolatilesets',
-         [('', 'clear-obsstore', False, 'drop obsstore between each call.'),
-         ] + formatteropts)
+@command(b'perfvolatilesets',
+         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
+          ] + formatteropts)
 def perfvolatilesets(ui, repo, *names, **opts):
     """benchmark the computation of various volatile set
 
     Volatile sets compute elements related to filtering and obsolescence."""
+    opts = _byteskwargs(opts)
     timer, fm = gettimer(ui, opts)
     repo = repo.unfiltered()
 
     def getobs(name):
         def d():
             repo.invalidatevolatilesets()
-            if opts['clear_obsstore']:
-                clearfilecache(repo, 'obsstore')
+            if opts[b'clear_obsstore']:
+                clearfilecache(repo, b'obsstore')
             obsolete.getrevs(repo, name)
         return d
 

          
@@ 1690,8 1847,8 @@ def perfvolatilesets(ui, repo, *names, *
     def getfiltered(name):
         def d():
             repo.invalidatevolatilesets()
-            if opts['clear_obsstore']:
-                clearfilecache(repo, 'obsstore')
+            if opts[b'clear_obsstore']:
+                clearfilecache(repo, b'obsstore')
             repoview.filterrevs(repo, name)
         return d
 

          
@@ 1703,19 1860,20 @@ def perfvolatilesets(ui, repo, *names, *
         timer(getfiltered(name), title=name)
     fm.end()
 
-@command('perfbranchmap',
-         [('f', 'full', False,
-           'Includes build time of subset'),
-          ('', 'clear-revbranch', False,
-           'purge the revbranch cache between computation'),
-         ] + formatteropts)
+@command(b'perfbranchmap',
+         [(b'f', b'full', False,
+           b'Includes build time of subset'),
+          (b'', b'clear-revbranch', False,
+           b'purge the revbranch cache between computation'),
+          ] + formatteropts)
 def perfbranchmap(ui, repo, *filternames, **opts):
     """benchmark the update of a branchmap
 
     This benchmarks the full repo.branchmap() call with read and write disabled
     """
-    full = opts.get("full", False)
-    clear_revbranch = opts.get("clear_revbranch", False)
+    opts = _byteskwargs(opts)
+    full = opts.get(b"full", False)
+    clear_revbranch = opts.get(b"clear_revbranch", False)
     timer, fm = gettimer(ui, opts)
     def getbranchmap(filtername):
         """generate a benchmark function for the filtername"""

          
@@ 1744,7 1902,7 @@ def perfbranchmap(ui, repo, *filternames
             if subset not in possiblefilters:
                 break
         else:
-            assert False, 'subset cycle %s!' % possiblefilters
+            assert False, b'subset cycle %s!' % possiblefilters
         allfilters.append(name)
         possiblefilters.remove(name)
 

          
@@ 1752,26 1910,53 @@ def perfbranchmap(ui, repo, *filternames
     if not full:
         for name in allfilters:
             repo.filtered(name).branchmap()
-    if not filternames or 'unfiltered' in filternames:
+    if not filternames or b'unfiltered' in filternames:
         # add unfiltered
         allfilters.append(None)
 
-    branchcacheread = safeattrsetter(branchmap, 'read')
-    branchcachewrite = safeattrsetter(branchmap.branchcache, 'write')
+    branchcacheread = safeattrsetter(branchmap, b'read')
+    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
     branchcacheread.set(lambda repo: None)
     branchcachewrite.set(lambda bc, repo: None)
     try:
         for name in allfilters:
             printname = name
             if name is None:
-                printname = 'unfiltered'
+                printname = b'unfiltered'
             timer(getbranchmap(name), title=str(printname))
     finally:
         branchcacheread.restore()
         branchcachewrite.restore()
     fm.end()
 
-@command('perfloadmarkers')
+@command(b'perfbranchmapload', [
+     (b'f', b'filter', b'', b'Specify repoview filter'),
+     (b'', b'list', False, b'List branchmap filter caches'),
+    ] + formatteropts)
+def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
+    """benchmark reading the branchmap"""
+    opts = _byteskwargs(opts)
+
+    if list:
+        for name, kind, st in repo.cachevfs.readdir(stat=True):
+            if name.startswith(b'branch2'):
+                filtername = name.partition(b'-')[2] or b'unfiltered'
+                ui.status(b'%s - %s\n'
+                          % (filtername, util.bytecount(st.st_size)))
+        return
+    if filter:
+        repo = repoview.repoview(repo, filter)
+    else:
+        repo = repo.unfiltered()
+    # try once without timer, the filter may not be cached
+    if branchmap.read(repo) is None:
+        raise error.Abort(b'No branchmap cached for %s repo'
+                          % (filter or b'unfiltered'))
+    timer, fm = gettimer(ui, opts)
+    timer(lambda: branchmap.read(repo) and None)
+    fm.end()
+
+@command(b'perfloadmarkers')
 def perfloadmarkers(ui, repo):
     """benchmark the time to parse the on-disk markers for a repo
 

          
@@ 1781,27 1966,34 @@ def perfloadmarkers(ui, repo):
     timer(lambda: len(obsolete.obsstore(svfs)))
     fm.end()
 
-@command('perflrucachedict', formatteropts +
-    [('', 'size', 4, 'size of cache'),
-     ('', 'gets', 10000, 'number of key lookups'),
-     ('', 'sets', 10000, 'number of key sets'),
-     ('', 'mixed', 10000, 'number of mixed mode operations'),
-     ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')],
+@command(b'perflrucachedict', formatteropts +
+    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
+     (b'', b'mincost', 0, b'smallest cost of items in cache'),
+     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
+     (b'', b'size', 4, b'size of cache'),
+     (b'', b'gets', 10000, b'number of key lookups'),
+     (b'', b'sets', 10000, b'number of key sets'),
+     (b'', b'mixed', 10000, b'number of mixed mode operations'),
+     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
     norepo=True)
-def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000,
-                 mixedgetfreq=50, **opts):
+def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
+                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
+    opts = _byteskwargs(opts)
+
     def doinit():
-        for i in xrange(10000):
+        for i in _xrange(10000):
             util.lrucachedict(size)
 
+    costrange = list(range(mincost, maxcost + 1))
+
     values = []
-    for i in xrange(size):
-        values.append(random.randint(0, sys.maxint))
+    for i in _xrange(size):
+        values.append(random.randint(0, _maxint))
 
     # Get mode fills the cache and tests raw lookup performance with no
     # eviction.
     getseq = []
-    for i in xrange(gets):
+    for i in _xrange(gets):
         getseq.append(random.choice(values))
 
     def dogets():

          
@@ 1812,10 2004,33 @@ def perflrucache(ui, size=4, gets=10000,
             value = d[key]
             value # silence pyflakes warning
 
+    def dogetscost():
+        d = util.lrucachedict(size, maxcost=costlimit)
+        for i, v in enumerate(values):
+            d.insert(v, v, cost=costs[i])
+        for key in getseq:
+            try:
+                value = d[key]
+                value # silence pyflakes warning
+            except KeyError:
+                pass
+
     # Set mode tests insertion speed with cache eviction.
     setseq = []
-    for i in xrange(sets):
-        setseq.append(random.randint(0, sys.maxint))
+    costs = []
+    for i in _xrange(sets):
+        setseq.append(random.randint(0, _maxint))
+        costs.append(random.choice(costrange))
+
+    def doinserts():
+        d = util.lrucachedict(size)
+        for v in setseq:
+            d.insert(v, v)
+
+    def doinsertscost():
+        d = util.lrucachedict(size, maxcost=costlimit)
+        for i, v in enumerate(setseq):
+            d.insert(v, v, cost=costs[i])
 
     def dosets():
         d = util.lrucachedict(size)

          
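The cost-aware helpers above (``dogetscost``, ``doinsertscost``) exercise the new ``util.lrucachedict`` keyword ``maxcost`` together with ``insert(key, value, cost=...)``. A minimal standalone sketch of that API, assuming (not verified here) that insertion evicts entries until the combined cost of cached items fits under ``maxcost``:

    from mercurial import util

    # Cache limited both by entry count (4) and by total cost (100).
    cache = util.lrucachedict(4, maxcost=100)
    cache.insert(b'small', b'value-a', cost=10)
    cache.insert(b'large', b'value-b', cost=95)  # presumably pushes b'small' out

    try:
        cache[b'small']
    except KeyError:
        pass  # evicted to keep the total cost under the limit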
@@ 1824,19 2039,21 @@ def perflrucache(ui, size=4, gets=10000,
 
     # Mixed mode randomly performs gets and sets with eviction.
     mixedops = []
-    for i in xrange(mixed):
+    for i in _xrange(mixed):
         r = random.randint(0, 100)
         if r < mixedgetfreq:
             op = 0
         else:
             op = 1
 
-        mixedops.append((op, random.randint(0, size * 2)))
+        mixedops.append((op,
+                         random.randint(0, size * 2),
+                         random.choice(costrange)))
 
     def domixed():
         d = util.lrucachedict(size)
 
-        for op, v in mixedops:
+        for op, v, cost in mixedops:
             if op == 0:
                 try:
                     d[v]

          
@@ 1845,40 2062,65 @@ def perflrucache(ui, size=4, gets=10000,
             else:
                 d[v] = v
 
+    def domixedcost():
+        d = util.lrucachedict(size, maxcost=costlimit)
+
+        for op, v, cost in mixedops:
+            if op == 0:
+                try:
+                    d[v]
+                except KeyError:
+                    pass
+            else:
+                d.insert(v, v, cost=cost)
+
     benches = [
-        (doinit, 'init'),
-        (dogets, 'gets'),
-        (dosets, 'sets'),
-        (domixed, 'mixed')
+        (doinit, b'init'),
     ]
 
+    if costlimit:
+        benches.extend([
+            (dogetscost, b'gets w/ cost limit'),
+            (doinsertscost, b'inserts w/ cost limit'),
+            (domixedcost, b'mixed w/ cost limit'),
+        ])
+    else:
+        benches.extend([
+            (dogets, b'gets'),
+            (doinserts, b'inserts'),
+            (dosets, b'sets'),
+            (domixed, b'mixed')
+        ])
+
     for fn, title in benches:
         timer, fm = gettimer(ui, opts)
         timer(fn, title=title)
         fm.end()
 
-@command('perfwrite', formatteropts)
+@command(b'perfwrite', formatteropts)
 def perfwrite(ui, repo, **opts):
     """microbenchmark ui.write
     """
+    opts = _byteskwargs(opts)
+
     timer, fm = gettimer(ui, opts)
     def write():
         for i in range(100000):
-            ui.write(('Testing write performance\n'))
+            ui.write((b'Testing write performance\n'))
     timer(write)
     fm.end()
 
 def uisetup(ui):
-    if (util.safehasattr(cmdutil, 'openrevlog') and
-        not util.safehasattr(commands, 'debugrevlogopts')):
+    if (util.safehasattr(cmdutil, b'openrevlog') and
+        not util.safehasattr(commands, b'debugrevlogopts')):
         # for "historical portability":
         # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
         # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
         # openrevlog() should cause failure, because it has been
         # available since 3.5 (or 49c583ca48c4).
         def openrevlog(orig, repo, cmd, file_, opts):
-            if opts.get('dir') and not util.safehasattr(repo, 'dirlog'):
-                raise error.Abort("This version doesn't support --dir option",
-                                  hint="use 3.5 or later")
+            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
+                raise error.Abort(b"This version doesn't support --dir option",
+                                  hint=b"use 3.5 or later")
             return orig(repo, cmd, file_, opts)
-        extensions.wrapfunction(cmdutil, 'openrevlog', openrevlog)
+        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)

          
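Nearly every hunk of ``contrib/perf.py`` above adds ``b''`` prefixes and an ``opts = _byteskwargs(opts)`` call, because ``**kwargs`` expansion produces str keys on Python 3 while the command bodies index ``opts`` with bytes. A rough sketch of what such compatibility shims look like, assuming ``_byteskwargs`` mirrors Mercurial's ``pycompat.byteskwargs`` and that ``_xrange``/``_maxint`` simply alias the Python 3 spellings (the exact encoding and aliases used by perf.py may differ):

    import sys

    if sys.version_info[0] >= 3:
        def _byteskwargs(opts):
            # Re-encode str keyword keys as bytes so lookups such as
            # opts[b'dist'] keep working on Python 3.
            return {k.encode('latin-1'): v for k, v in opts.items()}
        _xrange = range
        _maxint = sys.maxsize  # sys.maxint is gone on Python 3
    else:
        def _byteskwargs(opts):
            return opts  # keyword keys are already bytes on Python 2
        _xrange = xrange
        _maxint = sys.maxint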
M contrib/python-hook-examples.py +1 -1
@@ 19,7 19,7 @@ def diffstat(ui, repo, **kwargs):
     node = kwargs['node']
     first = repo[node].p1().node()
     if 'url' in kwargs:
-        last = repo['tip'].node()
+        last = repo.changelog.tip()
     else:
         last = node
     diff = patch.diff(repo, first, last)

          
M contrib/python-zstandard/MANIFEST.in +3 -0
@@ 1,7 1,10 @@ 
 graft c-ext
+graft debian
 graft zstd
 graft tests
 include make_cffi.py
 include setup_zstd.py
 include zstd.c
+include zstd_cffi.py
 include LICENSE
+include NEWS.rst

          
M contrib/python-zstandard/NEWS.rst +119 -1
@@ 30,6 30,19 @@ Actions Blocking Release
 * Remove low-level compression parameters from ``ZstdCompressor.__init__`` and
   require use of ``CompressionParameters``.
 * Expose ``ZSTD_getFrameProgression()`` from more compressor types.
+* Support modifying compression parameters mid operation when supported by
+  zstd API.
+* Expose ``ZSTD_CLEVEL_DEFAULT`` constant.
+* Support ``ZSTD_p_forceAttachDict`` compression parameter.
+* Use ``ZSTD_CCtx_getParameter()``/``ZSTD_CCtxParam_getParameter()`` for retrieving
+  compression parameters.
+* Consider exposing ``ZSTDMT_toFlushNow()``.
+* Expose ``ZDICT_trainFromBuffer_fastCover()``,
+  ``ZDICT_optimizeTrainFromBuffer_fastCover()``.
+* Expose and enforce ``ZSTD_minCLevel()`` for minimum compression level.
+* Consider a ``chunker()`` API for decompression.
+* Consider stats for ``chunker()`` API, including finding the last consumed
+  offset of input data.
 
 Other Actions Not Blocking Release
 ---------------------------------------

          
@@ 38,6 51,111 @@ Other Actions Not Blocking Release
 * API for ensuring max memory ceiling isn't exceeded.
 * Move off nose for testing.
 
+0.10.1 (released 2018-10-08)
+============================
+
+Backwards Compatibility Notes
+-----------------------------
+
+* ``ZstdCompressor.stream_reader().closed`` is now a property instead of a
+  method (#58).
+* ``ZstdDecompressor.stream_reader().closed`` is now a property instead of a
+  method (#58).
+
+Changes
+-------
+
+* Stop attempting to package Python 3.6 for Miniconda. The latest version of
+  Miniconda is using Python 3.7. The Python 3.6 Miniconda packages were a lie
+  since they were built against Python 3.7.
+* ``ZstdCompressor.stream_reader()``'s and ``ZstdDecompressor.stream_reader()``'s
+  ``closed`` attribute is now a read-only property instead of a method. This now
+  properly matches the ``IOBase`` API and allows instances to be used in more
+  places that accept ``IOBase`` instances.
+
+0.10.0 (released 2018-10-08)
+============================
+
+Backwards Compatibility Notes
+-----------------------------
+
+* ``ZstdDecompressor.stream_reader().read()`` now consistently requires an
+  argument in both the C and CFFI backends. Before, the CFFI implementation
+  would assume a default value of ``-1``, which was later rejected.
+* The ``compress_literals`` argument and attribute has been removed from
+  ``zstd.ZstdCompressionParameters`` because it was removed by the zstd 1.3.5
+  API.
+* ``ZSTD_CCtx_setParametersUsingCCtxParams()`` is no longer called on every
+  operation performed against ``ZstdCompressor`` instances. The reason for this
+  change is that the zstd 1.3.5 API no longer allows this without calling
+  ``ZSTD_CCtx_resetParameters()`` first. But if we called
+  ``ZSTD_CCtx_resetParameters()`` on every operation, we'd have to redo
+  potentially expensive setup when using dictionaries. We now call
+  ``ZSTD_CCtx_reset()`` on every operation and don't attempt to change
+  compression parameters.
+* Objects returned by ``ZstdCompressor.stream_reader()`` no longer need to be
+  used as a context manager. The context manager interface still exists and its
+  behavior is unchanged.
+* Objects returned by ``ZstdDecompressor.stream_reader()`` no longer need to be
+  used as a context manager. The context manager interface still exists and its
+  behavior is unchanged.
+
+Bug Fixes
+---------
+
+* ``ZstdDecompressor.decompressobj().decompress()`` should now return all data
+  from internal buffers in more scenarios. Before, it was possible for data to
+  remain in internal buffers. This data would be emitted on a subsequent call
+  to ``decompress()``. The overall output stream would still be valid. But if
+  callers were expecting input data to exactly map to output data (say the
+  producer had used ``flush(COMPRESSOBJ_FLUSH_BLOCK)`` and was attempting to
+  map input chunks to output chunks), then the previous behavior would be
+  wrong. The new behavior is such that output from
+  ``flush(COMPRESSOBJ_FLUSH_BLOCK)`` fed into ``decompressobj().decompress()``
+  should produce all of the data represented by that compressed input.
+* ``ZstdDecompressor.stream_reader().read()`` should no longer segfault after
+  a previous context manager resulted in error (#56).
+* ``ZstdCompressor.compressobj().flush(COMPRESSOBJ_FLUSH_BLOCK)`` now returns
+  all data necessary to flush a block. Before, it was possible for the
+  ``flush()`` to not emit all data necessary to fully represent a block. This
+  would mean decompressors wouldn't be able to decompress all data that had been
+  fed into the compressor and ``flush()``ed. (#55).
+
+New Features
+------------
+
+* New module constants ``BLOCKSIZELOG_MAX``, ``BLOCKSIZE_MAX``,
+  ``TARGETLENGTH_MAX`` that expose constants from libzstd.
+* New ``ZstdCompressor.chunker()`` API for manually feeding data into a
+  compressor and emitting chunks of a fixed size. Like ``compressobj()``, the
+  API doesn't impose restrictions on the input or output types for the
+  data streams. Unlike ``compressobj()``, it ensures output chunks are of a
+  fixed size. This makes this API useful when the compressed output is being
+  fed into an I/O layer, where uniform write sizes are useful.
+* ``ZstdCompressor.stream_reader()`` no longer needs to be used as a context
+  manager (#34).
+* ``ZstdDecompressor.stream_reader()`` no longer needs to be used as a context
+  manager (#34).
+* Bundled zstandard library upgraded from 1.3.4 to 1.3.6.
+
+Changes
+-------
+
+* Added ``zstd_cffi.py`` and ``NEWS.rst`` to ``MANIFEST.in``.
+* ``zstandard.__version__`` is now defined (#50).
+* Upgrade pip, setuptools, wheel, and cibuildwheel packages to latest versions.
+* Upgrade various packages used in CI to latest versions. Notably tox (in
+  order to support Python 3.7).
+* Use relative paths in setup.py to appease Python 3.7 (#51).
+* Added CI for Python 3.7.
+
+0.9.1 (released 2018-06-04)
+===========================
+
+* Debian packaging support.
+* Fix typo in setup.py (#44).
+* Support building with mingw compiler (#46).
+
 0.9.0 (released 2018-04-08)
 ===========================
 

          
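The 0.10.0 bug-fix notes above describe producers pairing ``flush(COMPRESSOBJ_FLUSH_BLOCK)`` with ``decompressobj().decompress()`` on the consuming side. A hedged sketch of that round trip, using only the public ``zstandard`` names mentioned in the notes, to illustrate the input-to-output mapping the fix restores:

    import zstandard as zstd

    cobj = zstd.ZstdCompressor().compressobj()
    dobj = zstd.ZstdDecompressor().decompressobj()

    for record in (b'first record\n', b'second record\n'):
        block = cobj.compress(record) + cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK)
        # With the fix, each flushed block decompresses back to the exact
        # input that produced it, so producers can map inputs to outputs.
        assert dobj.decompress(block) == record

    # Close the frame; any trailing bytes can still be fed to the consumer.
    tail = cobj.flush(zstd.COMPRESSOBJ_FLUSH_FINISH)
    if tail:
        dobj.decompress(tail)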
@@ 90,7 208,7 @@ Bug Fixes
 New Features
 ------------
 
-* Bundlded zstandard library upgraded from 1.1.3 to 1.3.4. This delivers various
+* Bundled zstandard library upgraded from 1.1.3 to 1.3.4. This delivers various
   bug fixes and performance improvements. It also gives us access to newer
   features.
 * Support for negative compression levels.

          
M contrib/python-zstandard/README.rst +88 -13
@@ 196,6 196,17 @@ Stream Reader API
 
    with open(path, 'rb') as fh:
        cctx = zstd.ZstdCompressor()
+       reader = cctx.stream_reader(fh)
+       while True:
+           chunk = reader.read(16384)
+           if not chunk:
+               break
+
+           # Do something with compressed chunk.
+
+Instances can also be used as context managers::
+
+   with open(path, 'rb') as fh:
        with cctx.stream_reader(fh) as reader:
            while True:
                chunk = reader.read(16384)

          
@@ 204,9 215,9 @@ Stream Reader API
 
                # Do something with compressed chunk.
 
-The stream can only be read within a context manager. When the context
-manager exits, the stream is closed and the underlying resource is
-released and future operations against the compression stream stream will fail.
+When the context manager exits or ``close()`` is called, the stream is closed,
+underlying resources are released, and future operations against the compression
+stream will fail.
 
 The ``source`` argument to ``stream_reader()`` can be any object with a
 ``read(size)`` method or any object implementing the *buffer protocol*.

          
@@ 419,6 430,64 @@ the compressor::
    data = cobj.compress(b'foobar')
    data = cobj.flush()
 
+Chunker API
+^^^^^^^^^^^
+
+``chunker(size=None, chunk_size=COMPRESSION_RECOMMENDED_OUTPUT_SIZE)`` returns
+an object that can be used to iteratively feed chunks of data into a compressor
+and produce output chunks of a uniform size.
+
+The object returned by ``chunker()`` exposes the following methods:
+
+``compress(data)``
+   Feeds new input data into the compressor.
+
+``flush()``
+   Flushes all data currently in the compressor.
+
+``finish()``
+   Signals the end of input data. No new data can be compressed after this
+   method is called.
+
+``compress()``, ``flush()``, and ``finish()`` all return an iterator of
+``bytes`` instances holding compressed data. The iterator may be empty. Callers
+MUST iterate through all elements of the returned iterator before performing
+another operation on the object.
+
+All chunks emitted by ``compress()`` will have a length of ``chunk_size``.
+
+``flush()`` and ``finish()`` may return a final chunk smaller than
+``chunk_size``.
+
+Here is how the API should be used::
+
+   cctx = zstd.ZstdCompressor()
+   chunker = cctx.chunker(chunk_size=32768)
+
+   with open(path, 'rb') as fh:
+       while True:
+           in_chunk = fh.read(32768)
+           if not in_chunk:
+               break
+
+           for out_chunk in chunker.compress(in_chunk):
+               # Do something with output chunk of size 32768.
+
+       for out_chunk in chunker.finish():
+           # Do something with output chunks that finalize the zstd frame.
+
+The ``chunker()`` API is often a better alternative to ``compressobj()``.
+
+``compressobj()`` will emit output data as it is available. This results in a
+*stream* of output chunks of varying sizes. The consistency of the output chunk
+size with ``chunker()`` is more appropriate for many usages, such as sending
+compressed data to a socket.
+
+``compressobj()`` may also perform extra memory reallocations in order to
+dynamically adjust the sizes of the output chunks. Since ``chunker()`` output
+chunks are all the same size (except for flushed or final chunks), there is
+less memory allocation overhead.
+
 Batch Compression API
 ^^^^^^^^^^^^^^^^^^^^^
 

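The Chunker API section above documents a ``flush()`` method, but the usage example only exercises ``compress()`` and ``finish()``. A hedged sketch of interleaving ``flush()`` to push buffered data out between writes, relying only on the behavior documented in that section (the output path is illustrative)::

   import zstandard as zstd

   cctx = zstd.ZstdCompressor()
   chunker = cctx.chunker(chunk_size=32768)

   with open('output.zst', 'wb') as out:
       for in_chunk in (b'x' * 100000, b'y' * 100000):
           for out_chunk in chunker.compress(in_chunk):
               out.write(out_chunk)

           # flush() drains what the compressor is holding; the final chunk
           # it yields may be smaller than chunk_size. The iterator must be
           # consumed before calling compress() again.
           for out_chunk in chunker.flush():
               out.write(out_chunk)

       for out_chunk in chunker.finish():
           out.write(out_chunk)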
          
@@ 542,17 611,24 @@ Stream Reader API
 
    with open(path, 'rb') as fh:
        dctx = zstd.ZstdDecompressor()
-       with dctx.stream_reader(fh) as reader:
-           while True:
-               chunk = reader.read(16384)
-               if not chunk:
-                   break
+       reader = dctx.stream_reader(fh)
+       while True:
+           chunk = reader.read(16384)
+           if not chunk:
+               break
+
+           # Do something with decompressed chunk.
 
-               # Do something with decompressed chunk.
+The stream can also be used as a context manager::
 
-The stream can only be read within a context manager. When the context
-manager exits, the stream is closed and the underlying resource is
-released and future operations against the stream will fail.
+   with open(path, 'rb') as fh:
+       dctx = zstd.ZstdDecompressor()
+       with dctx.stream_reader(fh) as reader:
+           ...
+
+When used as a context manager, the stream is closed and the underlying
+resources are released when the context manager exits. Future operations against
+the stream will fail.
 
 The ``source`` argument to ``stream_reader()`` can be any object with a
 ``read(size)`` method or any object implementing the *buffer protocol*.

          
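Per the note above, ``stream_reader()`` accepts either a file-like object or any object implementing the buffer protocol. A brief sketch of the second form (the compressed payload here is produced inline purely for illustration)::

   import zstandard as zstd

   payload = zstd.ZstdCompressor().compress(b'hello world')

   # A bytes object satisfies the buffer protocol, so no file object is needed.
   reader = zstd.ZstdDecompressor().stream_reader(payload)
   assert reader.read(16384) == b'hello world'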
@@ 1077,7 1153,6 @@ follows:
 * write_dict_id
 * job_size
 * overlap_size_log
-* compress_literals
 * force_max_window
 * enable_ldm
 * ldm_hash_log

          
A => contrib/python-zstandard/c-ext/compressionchunker.c +360 -0
@@ 0,0 1,360 @@ 
+/**
+* Copyright (c) 2018-present, Gregory Szorc
+* All rights reserved.
+*
+* This software may be modified and distributed under the terms
+* of the BSD license. See the LICENSE file for details.
+*/
+
+#include "python-zstandard.h"
+
+extern PyObject* ZstdError;
+
+PyDoc_STRVAR(ZstdCompressionChunkerIterator__doc__,
+	"Iterator of output chunks from ZstdCompressionChunker.\n"
+);
+
+static void ZstdCompressionChunkerIterator_dealloc(ZstdCompressionChunkerIterator* self) {
+	Py_XDECREF(self->chunker);
+
+	PyObject_Del(self);
+}
+
+static PyObject* ZstdCompressionChunkerIterator_iter(PyObject* self) {
+	Py_INCREF(self);
+	return self;
+}
+
+static PyObject* ZstdCompressionChunkerIterator_iternext(ZstdCompressionChunkerIterator* self) {
+	size_t zresult;
+	PyObject* chunk;
+	ZstdCompressionChunker* chunker = self->chunker;
+	ZSTD_EndDirective zFlushMode;
+
+	if (self->mode != compressionchunker_mode_normal && chunker->input.pos != chunker->input.size) {
+		PyErr_SetString(ZstdError, "input should have been fully consumed before calling flush() or finish()");
+		return NULL;
+	}
+
+	if (chunker->finished) {
+		return NULL;
+	}
+
+	/* If we have data left in the input, consume it. */
+	while (chunker->input.pos < chunker->input.size) {
+		Py_BEGIN_ALLOW_THREADS
+		zresult = ZSTD_compress_generic(chunker->compressor->cctx, &chunker->output,
+			&chunker->input, ZSTD_e_continue);
+		Py_END_ALLOW_THREADS
+
+		/* Input is fully consumed. */
+		if (chunker->input.pos == chunker->input.size) {
+			chunker->input.src = NULL;
+			chunker->input.pos = 0;
+			chunker->input.size = 0;
+			PyBuffer_Release(&chunker->inBuffer);
+		}
+
+		if (ZSTD_isError(zresult)) {
+			PyErr_Format(ZstdError, "zstd compress error: %s", ZSTD_getErrorName(zresult));
+			return NULL;
+		}
+
+		/* If it produced a full output chunk, emit it. */
+		if (chunker->output.pos == chunker->output.size) {
+			chunk = PyBytes_FromStringAndSize(chunker->output.dst, chunker->output.pos);
+			if (!chunk) {
+				return NULL;
+			}
+
+			chunker->output.pos = 0;
+
+			return chunk;
+		}
+
+		/* Else continue to compress available input data. */
+	}
+
+	/* We also need this here for the special case of an empty input buffer. */
+	if (chunker->input.pos == chunker->input.size) {
+		chunker->input.src = NULL;
+		chunker->input.pos = 0;
+		chunker->input.size = 0;
+		PyBuffer_Release(&chunker->inBuffer);
+	}
+
+	/* No more input data. A partial chunk may be in chunker->output.
+	 * If we're in normal compression mode, we're done. Otherwise if we're in
+	 * flush or finish mode, we need to emit what data remains.
+	 */
+	if (self->mode == compressionchunker_mode_normal) {
+		/* We don't need to set StopIteration. */
+		return NULL;
+	}
+
+	if (self->mode == compressionchunker_mode_flush) {
+		zFlushMode = ZSTD_e_flush;
+	}
+	else if (self->mode == compressionchunker_mode_finish) {
+		zFlushMode = ZSTD_e_end;
+	}
+	else {
+		PyErr_SetString(ZstdError, "unhandled compression mode; this should never happen");
+		return NULL;
+	}
+
+	Py_BEGIN_ALLOW_THREADS
+	zresult = ZSTD_compress_generic(chunker->compressor->cctx, &chunker->output,
+		&chunker->input, zFlushMode);
+	Py_END_ALLOW_THREADS
+
+	if (ZSTD_isError(zresult)) {
+		PyErr_Format(ZstdError, "zstd compress error: %s",
+			ZSTD_getErrorName(zresult));
+		return NULL;
+	}
+
+	if (!zresult && chunker->output.pos == 0) {
+		return NULL;
+	}
+
+	chunk = PyBytes_FromStringAndSize(chunker->output.dst, chunker->output.pos);
+	if (!chunk) {
+		return NULL;
+	}
+
+	chunker->output.pos = 0;
+
+	if (!zresult && self->mode == compressionchunker_mode_finish) {
+		chunker->finished = 1;
+	}
+
+	return chunk;
+}
+
+PyTypeObject ZstdCompressionChunkerIteratorType = {
+	PyVarObject_HEAD_INIT(NULL, 0)
+	"zstd.ZstdCompressionChunkerIterator", /* tp_name */
+	sizeof(ZstdCompressionChunkerIterator), /* tp_basicsize */
+	0,                               /* tp_itemsize */
+	(destructor)ZstdCompressionChunkerIterator_dealloc, /* tp_dealloc */
+	0,                               /* tp_print */
+	0,                               /* tp_getattr */
+	0,                               /* tp_setattr */
+	0,                               /* tp_compare */
+	0,                               /* tp_repr */
+	0,