The requested changes are too big and content was truncated.
@@ -0,0 +1,64 b''
#!/bin/bash
#
# produces two repositories with different common and missing subsets
#
# $ discovery-helper.sh REPO NBHEADS DEPTH
#
# The goal is to produce two repositories with some common part and some
# exclusive part on each side. Provide a source repository REPO; the script
# will produce two repositories, REPO-left and REPO-right.
#
# Each repository will be missing the revisions exclusive to NBHEADS of the
# repository's topological heads. These heads, and the revisions exclusive to
# them (up to DEPTH deep), are stripped.
#
# The "left" repository will use the first NBHEADS heads (sorted by
# description). The "right" repository uses the last NBHEADS ones.
#
# To find out how many topological heads a repo has, use:
#
# $ hg heads -t -T '{rev}\n' | wc -l
#
# Example:
#
# The `pypy-2018-09-01` repository has 192 heads. To produce two repositories
# with 92 common heads and ~50 exclusive heads on each side:
#
# $ ./discovery-helper.sh pypy-2018-09-01 50 10

set -euo pipefail

if [ $# -lt 3 ]; then
    echo "usage: `basename $0` REPO NBHEADS DEPTH"
    exit 64
fi

repo="$1"
shift

nbheads="$1"
shift

depth="$1"
shift

leftrepo="${repo}-left"
rightrepo="${repo}-right"

# 'desc' is a revset sort key (sort by changeset description), matching the
# "sorted by description" note in the header, not a descending sort.
left="first(sort(heads(all()), 'desc'), $nbheads)"
right="last(sort(heads(all()), 'desc'), $nbheads)"

leftsubset="ancestors($left, $depth) and only($left, heads(all() - $left))"
rightsubset="ancestors($right, $depth) and only($right, heads(all() - $right))"

echo '### building left repository:' "${leftrepo}"
echo '# cloning'
hg clone --noupdate "${repo}" "${leftrepo}"
echo '# stripping:' '"'${leftsubset}'"'
hg -R "${leftrepo}" --config extensions.strip= strip --rev "$leftsubset" --no-backup

echo '### building right repository:' "${rightrepo}"
echo '# cloning'
hg clone --noupdate "${repo}" "${rightrepo}"
echo '# stripping:' '"'${rightsubset}'"'
hg -R "${rightrepo}" --config extensions.strip= strip --rev "$rightsubset" --no-backup
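
A note on the example's arithmetic: each side strips the revisions exclusive to NBHEADS heads, so the two results share roughly total - 2*NBHEADS heads (92 = 192 - 2*50 above). A trivial sketch of that bookkeeping (the helper name is ours, for illustration only):

    :::python
    # Expected head partition after discovery-helper.sh runs; assumes the
    # "first NBHEADS" and "last NBHEADS" selections do not overlap,
    # i.e. 2 * nbheads <= total_heads.
    def head_split(total_heads, nbheads):
        common = total_heads - 2 * nbheads
        exclusive_per_side = nbheads
        return common, exclusive_per_side

    print(head_split(192, 50))  # -> (92, 50), matching the pypy example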
@@ -0,0 +1,48 b''
#include <Python.h>
#include <assert.h>
#include <stdlib.h>
#include <unistd.h>

#include <string>

#include "pyutil.h"

extern "C" {

static PyCodeObject *code;

extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
{
	contrib::initpy(*argv[0]);
	code = (PyCodeObject *)Py_CompileString(R"py(
from parsers import parse_dirstate
try:
    dmap = {}
    copymap = {}
    p = parse_dirstate(dmap, copymap, data)
except Exception as e:
    pass
    # uncomment this print if you're editing this Python code
    # to debug failures.
    # print e
)py",
	                                        "fuzzer", Py_file_input);
	return 0;
}

int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
{
	PyObject *text =
	    PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
	PyObject *locals = PyDict_New();
	PyDict_SetItemString(locals, "data", text);
	PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
	if (!res) {
		PyErr_Print();
	}
	Py_XDECREF(res);
	Py_DECREF(locals);
	Py_DECREF(text);
	return 0; // Non-zero return values are reserved for future use.
}
}
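
For readers editing the embedded snippet: each fuzz iteration simply binds the raw input to `data` in the locals dict and re-runs the compiled code. A rough Python-level equivalent of one iteration (a sketch only; it assumes Mercurial's C `parsers` extension module is importable, which is what initpy() arranges via PYTHONPATH):

    :::python
    # Rough Python-level equivalent of one LLVMFuzzerTestOneInput call.
    from parsers import parse_dirstate

    def fuzz_one(data):
        dmap, copymap = {}, {}
        try:
            parse_dirstate(dmap, copymap, data)
        except Exception:
            pass  # Python exceptions are expected; crashes/ASAN reports are bugs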
@@ -0,0 +1,18 b''
from __future__ import absolute_import, print_function

import argparse
import os
import zipfile

ap = argparse.ArgumentParser()
ap.add_argument("out", metavar="some.zip", type=str, nargs=1)
args = ap.parse_args()

reporoot = os.path.normpath(os.path.join(os.path.dirname(__file__),
                                         '..', '..'))
dirstate = os.path.join(reporoot, '.hg', 'dirstate')

with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
    if os.path.exists(dirstate):
        with open(dirstate) as f:
            zf.writestr("dirstate", f.read())
@@ -0,0 +1,60 b''
#include <Python.h>
#include <assert.h>
#include <stdlib.h>
#include <unistd.h>

#include <string>

#include "pyutil.h"

extern "C" {

static PyCodeObject *code;

extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
{
	contrib::initpy(*argv[0]);
	code = (PyCodeObject *)Py_CompileString(R"py(
from parsers import fm1readmarkers
def maybeint(s, default):
    try:
        return int(s)
    except ValueError:
        return default
try:
    parts = data.split('\0', 2)
    if len(parts) == 3:
        offset, stop, data = parts
    elif len(parts) == 2:
        stop, data = parts
        offset = 0
    else:
        offset = stop = 0
    offset, stop = maybeint(offset, 0), maybeint(stop, len(data))
    fm1readmarkers(data, offset, stop)
except Exception as e:
    pass
    # uncomment this print if you're editing this Python code
    # to debug failures.
    # print e
)py",
	                                        "fuzzer", Py_file_input);
	return 0;
}

int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
{
	PyObject *text =
	    PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
	PyObject *locals = PyDict_New();
	PyDict_SetItemString(locals, "data", text);
	PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
	if (!res) {
		PyErr_Print();
	}
	Py_XDECREF(res);
	Py_DECREF(locals);
	Py_DECREF(text);
	return 0; // Non-zero return values are reserved for future use.
}
}
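
The embedded snippet above expects fuzz inputs of the form offset, NUL, stop, NUL, payload, falling back to defaults when fields are missing or non-numeric. A hypothetical helper for building seeds in that format (the function is ours, not part of the fuzzer):

    :::python
    # Hypothetical seed builder for the fm1readmarkers fuzzer; the driver
    # splits on the first two NUL bytes and parses offset/stop as integers.
    def make_seed(payload, offset=0, stop=None):
        if stop is None:
            stop = len(payload)
        return '%d\0%d\0%s' % (offset, stop, payload)

    # make_seed(raw_obsstore_bytes, offset=1, stop=597) mirrors the
    # 'smallish_obsstore' corpus entry built below.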
@@ -0,0 +1,36 b''
from __future__ import absolute_import, print_function

import argparse
import zipfile

ap = argparse.ArgumentParser()
ap.add_argument("out", metavar="some.zip", type=str, nargs=1)
args = ap.parse_args()

with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
    zf.writestr(
        'smallish_obsstore',
        (
            # header: fm1readmarkers should start at offset 1, and
            # read until byte 597.
            '1\x00597\x00'
            # body of obsstore file
            '\x01\x00\x00\x00vA\xd7\x02+C\x1a<)\x01,\x00\x00\x01\x03\x03\xe6'
            '\x92\xde)x\x16\xd1Xph\xc7\xa7[\xe5\xe2\x1a\xab\x1e6e\xaf\xc2\xae'
            '\xe7\xbc\x83\xe1\x88\xa5\xda\xce>O\xbd\x04\xe9\x03\xc4o\xeb\x03'
            '\x01\t\x05\x04\x1fef18operationamenduserAugie Fackler <raf@duri'
            'n42.com>\x00\x00\x00vA\xd7\x02-\x8aD\xaf-\x01,\x00\x00\x01\x03\x03'
            '\x17*\xca\x8f\x9e}i\xe0i\xbb\xdf\x9fb\x03\xd2XG?\xd3h\x98\x89\x1a'
            '=2\xeb\xc3\xc5<\xb3\x9e\xcc\x0e;#\xee\xc3\x10ux\x03\x01\t\x05\x04'
            '\x1fef18operationamenduserAugie Fackler <raf@durin42.com>\x00\x00'
            '\x00vA\xd7\x02Mn\xd9%\xea\x01,\x00\x00\x01\x03\x03\x98\x89\x1a='
            '2\xeb\xc3\xc5<\xb3\x9e\xcc\x0e;#\xee\xc3\x10ux\xe0*\xcaT\x86Z8J'
            '\x85)\x97\xff7\xcc)\xc1\x7f\x19\x0c\x01\x03\x01\t\x05\x04\x1fef'
            '18operationamenduserAugie Fackler <raf@durin42.com>\x00\x00\x00'
            'yA\xd7\x02MtA\xbfj\x01,\x00\x00\x01\x03\x03\xe0*\xcaT\x86Z8J\x85'
            ')\x97\xff7\xcc)\xc1\x7f\x19\x0c\x01\x00\x94\x01\xa9\n\xf80\x92\xa3'
            'j\xc5X\xb1\xc9:\xd51\xb8*\xa9\x03\x01\t\x08\x04\x1fef11operatio'
            'nhistedituserAugie Fackler <raf@durin42.com>\x00\x00\x00yA\xd7\x02'
            'MtA\xd4\xe1\x01,\x00\x00\x01\x03\x03"\xa5\xcb\x86\xb6\xf4\xbaO\xa0'
            'sH\xe7?\xcb\x9b\xc2n\xcfI\x9e\x14\xf0D\xf0!\x18DN\xcd\x97\x016\xa5'
            '\xef\xa06\xcb\x884\x8a\x03\x01\t\x08\x04\x1fef14operationhisted'))
@@ -0,0 +1,49 b''
#include "pyutil.h"

#include <string>

namespace contrib
{

static char cpypath[8192] = "\0";

static PyObject *mainmod;
static PyObject *globals;

/* TODO: use Python 3 for this fuzzing? */
PyMODINIT_FUNC initparsers(void);

void initpy(const char *cselfpath)
{
	const std::string subdir = "/sanpy/lib/python2.7";
	/* HACK ALERT: we need a full Python installation built without
	   pymalloc and with ASAN, so we dump one in
	   $OUT/sanpy/lib/python2.7. This helps us wire that up. */
	std::string selfpath(cselfpath);
	std::string pypath;
	auto pos = selfpath.rfind("/");
	if (pos == std::string::npos) {
		char wd[8192];
		getcwd(wd, 8192);
		pypath = std::string(wd) + subdir;
	} else {
		pypath = selfpath.substr(0, pos) + subdir;
	}
	strncpy(cpypath, pypath.c_str(), pypath.size());
	setenv("PYTHONPATH", cpypath, 1);
	setenv("PYTHONNOUSERSITE", "1", 1);
	/* prevent Python from looking up users in the fuzz environment */
	setenv("PYTHONUSERBASE", cpypath, 1);
	Py_SetPythonHome(cpypath);
	Py_InitializeEx(0);
	mainmod = PyImport_AddModule("__main__");
	globals = PyModule_GetDict(mainmod);
	initparsers();
}

PyObject *pyglobals()
{
	return globals;
}

} // namespace contrib
@@ -0,0 +1,9 b''
#include <Python.h>

namespace contrib
{

void initpy(const char *cselfpath);
PyObject *pyglobals();

} /* namespace contrib */
@@ -0,0 +1,47 b''
#include <Python.h>
#include <assert.h>
#include <stdlib.h>
#include <unistd.h>

#include <string>

#include "pyutil.h"

extern "C" {

static PyCodeObject *code;

extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
{
	contrib::initpy(*argv[0]);
	code = (PyCodeObject *)Py_CompileString(R"py(
from parsers import parse_index2
for inline in (True, False):
    try:
        index, cache = parse_index2(data, inline)
    except Exception as e:
        pass
        # uncomment this print if you're editing this Python code
        # to debug failures.
        # print e
)py",
	                                        "fuzzer", Py_file_input);
	return 0;
}

int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
{
	PyObject *text =
	    PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
	PyObject *locals = PyDict_New();
	PyDict_SetItemString(locals, "data", text);
	PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
	if (!res) {
		PyErr_Print();
	}
	Py_XDECREF(res);
	Py_DECREF(locals);
	Py_DECREF(text);
	return 0; // Non-zero return values are reserved for future use.
}
}
@@ -0,0 +1,28 b''
from __future__ import absolute_import, print_function

import argparse
import os
import zipfile

ap = argparse.ArgumentParser()
ap.add_argument("out", metavar="some.zip", type=str, nargs=1)
args = ap.parse_args()

reporoot = os.path.normpath(os.path.join(os.path.dirname(__file__),
                                         '..', '..'))
# typically a standalone index
changelog = os.path.join(reporoot, '.hg', 'store', '00changelog.i')
# an inline revlog with only a few revisions
contributing = os.path.join(
    reporoot, '.hg', 'store', 'data', 'contrib', 'fuzz', 'mpatch.cc.i')

print(changelog, os.path.exists(changelog))
print(contributing, os.path.exists(contributing))

with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
    if os.path.exists(changelog):
        with open(changelog) as f:
            zf.writestr("00changelog.i", f.read())
    if os.path.exists(contributing):
        with open(contributing) as f:
            zf.writestr("contributing.i", f.read())
@@ -0,0 +1,127 b''
#!/usr/bin/env python
#
# Copyright 2018 Paul Morelle <Paul.Morelle@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
#
# This script uses the output of `hg perfrevlogwrite -T json --details` to draw
# various plots related to write performance in a revlog
#
# usage: perf-revlog-write-plot.py details.json
from __future__ import absolute_import, print_function
import json
import re

import numpy as np
import scipy.signal

from matplotlib import (
    pyplot as plt,
    ticker as mticker,
)


def plot(data, title=None):
    items = {}
    re_title = re.compile(r'^revisions #\d+ of \d+, rev (\d+)$')
    for item in data:
        m = re_title.match(item['title'])
        if m is None:
            continue

        rev = int(m.group(1))
        items[rev] = item

    min_rev = min(items.keys())
    max_rev = max(items.keys())
    ary = np.empty((2, max_rev - min_rev + 1))
    for rev, item in items.items():
        ary[0][rev - min_rev] = rev
        ary[1][rev - min_rev] = item['wall']

    fig = plt.figure()
    comb_plt = fig.add_subplot(211)
    other_plt = fig.add_subplot(212)

    comb_plt.plot(ary[0],
                  np.cumsum(ary[1]),
                  color='red',
                  linewidth=1,
                  label='comb')

    plots = []
    p = other_plt.plot(ary[0],
                       ary[1],
                       color='red',
                       linewidth=1,
                       label='wall')
    plots.append(p)

    colors = {
        10: ('green', 'xkcd:grass green'),
        100: ('blue', 'xkcd:bright blue'),
        1000: ('purple', 'xkcd:dark pink'),
    }
    for n, color in colors.items():
        avg_n = np.convolve(ary[1], np.full(n, 1. / n), 'valid')
        p = other_plt.plot(ary[0][n - 1:],
                           avg_n,
                           color=color[0],
                           linewidth=1,
                           label='avg time last %d' % n)
        plots.append(p)

        med_n = scipy.signal.medfilt(ary[1], n + 1)
        p = other_plt.plot(ary[0],
                           med_n,
                           color=color[1],
                           linewidth=1,
                           label='median time last %d' % n)
        plots.append(p)

    formatter = mticker.ScalarFormatter()
    formatter.set_scientific(False)
    formatter.set_useOffset(False)

    comb_plt.grid()
    comb_plt.xaxis.set_major_formatter(formatter)
    comb_plt.legend()

    other_plt.grid()
    other_plt.xaxis.set_major_formatter(formatter)
    leg = other_plt.legend()
    leg2plot = {}
    for legline, plot in zip(leg.get_lines(), plots):
        legline.set_picker(5)
        leg2plot[legline] = plot

    def onpick(event):
        legline = event.artist
        plot = leg2plot[legline]
        visible = not plot[0].get_visible()
        for l in plot:
            l.set_visible(visible)

        if visible:
            legline.set_alpha(1.0)
        else:
            legline.set_alpha(0.2)
        fig.canvas.draw()
    if title is not None:
        fig.canvas.set_window_title(title)
    fig.canvas.mpl_connect('pick_event', onpick)

    plt.show()


if __name__ == '__main__':
    import sys

    if len(sys.argv) > 1:
        print('reading from %r' % sys.argv[1])
        with open(sys.argv[1], 'r') as fp:
            plot(json.load(fp), title=sys.argv[1])
    else:
        print('reading from stdin')
        plot(json.load(sys.stdin))
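
Note that plot() only consumes two fields from each JSON record: a 'title' matching the re_title pattern above and a 'wall' time in seconds. A minimal synthetic input (hypothetical timings, for illustration) showing the expected shape:

    :::python
    # Minimal input shape accepted by plot(); values are made up.
    sample = [
        {'title': 'revisions #0 of 3, rev 0', 'wall': 0.010},
        {'title': 'revisions #1 of 3, rev 1', 'wall': 0.012},
        {'title': 'revisions #2 of 3, rev 2', 'wall': 0.011},
    ]
    # plot(sample, title='synthetic')  # needs a matplotlib display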
@@ -0,0 +1,104 b''
"""implements bookmark-based branching (EXPERIMENTAL)

 - Disables creation of new branches (config: enable-branches=False).
 - Requires an active bookmark on commit (config: require-bookmark=True).
 - Doesn't move the active bookmark on update, only on commit.
 - Requires '--rev' for moving an existing bookmark.
 - Protects special bookmarks (config: protect=@).

flow related commands

    :hg book NAME: create a new bookmark
    :hg book NAME -r REV: move bookmark to revision (fast-forward)
    :hg up|co NAME: switch to bookmark
    :hg push -B .: push active bookmark
"""
from __future__ import absolute_import

from mercurial.i18n import _
from mercurial import (
    bookmarks,
    commands,
    error,
    extensions,
    registrar,
)

MY_NAME = 'bookflow'

configtable = {}
configitem = registrar.configitem(configtable)

configitem(MY_NAME, 'protect', ['@'])
configitem(MY_NAME, 'require-bookmark', True)
configitem(MY_NAME, 'enable-branches', False)

cmdtable = {}
command = registrar.command(cmdtable)

def commit_hook(ui, repo, **kwargs):
    active = repo._bookmarks.active
    if active:
        if active in ui.configlist(MY_NAME, 'protect'):
            raise error.Abort(
                _('cannot commit, bookmark %s is protected') % active)
        if not cwd_at_bookmark(repo, active):
            raise error.Abort(
                _('cannot commit, working directory out of sync with active bookmark'),
                hint=_("run 'hg up %s'") % active)
    elif ui.configbool(MY_NAME, 'require-bookmark', True):
        raise error.Abort(_('cannot commit without an active bookmark'))
    return 0

def bookmarks_update(orig, repo, parents, node):
    if len(parents) == 2:
        # called during commit
        return orig(repo, parents, node)
    else:
        # called during update
        return False

def bookmarks_addbookmarks(
        orig, repo, tr, names, rev=None, force=False, inactive=False):
    if not rev:
        marks = repo._bookmarks
        for name in names:
            if name in marks:
                raise error.Abort(_(
                    "bookmark %s already exists, to move use the --rev option"
                ) % name)
    return orig(repo, tr, names, rev, force, inactive)

def commands_commit(orig, ui, repo, *args, **opts):
    commit_hook(ui, repo)
    return orig(ui, repo, *args, **opts)

def commands_pull(orig, ui, repo, *args, **opts):
    rc = orig(ui, repo, *args, **opts)
    active = repo._bookmarks.active
    if active and not cwd_at_bookmark(repo, active):
        ui.warn(_(
            "working directory out of sync with active bookmark, run "
            "'hg up %s'"
        ) % active)
    return rc

def commands_branch(orig, ui, repo, label=None, **opts):
    if label and not opts.get(r'clean') and not opts.get(r'rev'):
        raise error.Abort(
            _("creating named branches is disabled and you should use bookmarks"),
            hint="see 'hg help bookflow'")
    return orig(ui, repo, label, **opts)

def cwd_at_bookmark(repo, mark):
    mark_id = repo._bookmarks[mark]
    cur_id = repo.lookup('.')
    return cur_id == mark_id

def uisetup(ui):
    extensions.wrapfunction(bookmarks, 'update', bookmarks_update)
    extensions.wrapfunction(bookmarks, 'addbookmarks', bookmarks_addbookmarks)
    extensions.wrapcommand(commands.table, 'commit', commands_commit)
    extensions.wrapcommand(commands.table, 'pull', commands_pull)
    if not ui.configbool(MY_NAME, 'enable-branches'):
        extensions.wrapcommand(commands.table, 'branch', commands_branch)
@@ -0,0 +1,109 b''
remotefilelog
=============

The remotefilelog extension allows Mercurial to clone shallow copies of a repository such that all file contents are left on the server and only downloaded on demand by the client. This greatly speeds up clone and pull performance for repositories that have long histories or that are growing quickly.

In addition, the extension allows using a caching layer (such as memcache) to serve the file contents, thus providing better scalability and reducing server load.

Installing
==========

**NOTE:** See the limitations section below to check if remotefilelog will work for your use case.

remotefilelog can be installed like any other Mercurial extension. Download the source code and add the remotefilelog subdirectory to your `hgrc`:

    :::ini
    [extensions]
    remotefilelog=path/to/remotefilelog/remotefilelog

Configuring
-----------

**Server**

* `server` (required) - Set to 'True' to indicate that the server can serve shallow clones.
* `serverexpiration` - The server keeps a local cache of recently requested file revision blobs in .hg/remotefilelogcache. This setting specifies how many days they should be kept locally. Defaults to 30.

An example server configuration:

    :::ini
    [remotefilelog]
    server = True
    serverexpiration = 14

**Client**

* `cachepath` (required) - the location to store locally cached file revisions
* `cachelimit` - the maximum size of the cachepath. By default it's 1000 GB.
* `cachegroup` - the default unix group for the cachepath. Useful on shared systems so multiple users can read and write to the same cache.
* `cacheprocess` - the external process that will handle the remote caching layer. If not set, all requests will go to the Mercurial server.
* `fallbackpath` - the Mercurial repo path to fetch file revisions from. By default it uses the paths.default repo. This setting is useful for cloning from shallow clones and still talking to the central server for file revisions.
* `includepattern` - a list of regex patterns matching files that should be kept remotely. Defaults to all files.
* `excludepattern` - a list of regex patterns matching files that should not be kept remotely and should always be downloaded.
* `pullprefetch` - a revset of commits whose file content should be prefetched after every pull. The most common value for this will be '(bookmark() + head()) & public()'. This is useful in environments where offline work is common, since it will enable offline updating to, rebasing to, and committing on every head and bookmark.

An example client configuration:

    :::ini
    [remotefilelog]
    cachepath = /dev/shm/hgcache
    cachelimit = 2 GB

Using as a largefiles replacement
---------------------------------

remotefilelog can theoretically be used as a replacement for the largefiles extension. You can use the `includepattern` setting to specify which directories or file types are considered large and they will be left on the server. Unlike the largefiles extension, this can be done without converting the server repository. Only the client configuration needs to specify the patterns.

The include/exclude settings haven't been extensively tested, so this feature is still considered experimental.

An example largefiles-style client configuration:

    :::ini
    [remotefilelog]
    cachepath = /dev/shm/hgcache
    cachelimit = 2 GB
    includepattern = *.sql3
      bin/*

Usage
=====

Once you have configured the server, you can get a shallow clone by doing:

    :::bash
    hg clone --shallow ssh://server//path/repo

After that, all normal Mercurial commands should work.

Occasionally the client or server caches may grow too big. Run `hg gc` to clean up the cache. It will remove cached files that appear to no longer be necessary, or any files that exceed the configured maximum size. This does not improve performance; it just frees up space.

Limitations
===========

1. The extension must be used with Mercurial 3.3 (commit d7d08337b3f6) or higher (earlier versions of the extension work with earlier versions of Mercurial though, up to Mercurial 2.7).

2. remotefilelog has only been tested on Linux with case-sensitive filesystems. It should work on other Unix systems but may have problems on case-insensitive filesystems.

3. remotefilelog only works with ssh-based Mercurial repos. http-based repos are currently not supported, though it shouldn't be too difficult for some motivated individual to implement.

4. Tags are not supported in completely shallow repos. If you use tags in your repo you will have to specify `excludepattern=.hgtags` in your client configuration to ensure that file is downloaded. The include/excludepattern settings are experimental at the moment and have yet to be deployed in a production environment.

5. A few commands will be slower. `hg log <filename>` will be much slower since it has to walk the entire commit history instead of just the filelog. Use `hg log -f <filename>` instead, which remains very fast.

Contributing
============

Patches are welcome as pull requests, though they will be collapsed and rebased to maintain a linear history. Tests can be run via:

    :::bash
    cd tests
    ./run-tests --with-hg=path/to/hgrepo/hg

We (Facebook) have to ask for a "Contributor License Agreement" from someone who sends in a patch or code that we want to include in the codebase. This is a legal requirement; a similar situation applies to Apache and other ASF projects.

If we ask you to fill out a CLA we'll direct you to our [online CLA page](https://developers.facebook.com/opensource/cla) where you can complete it easily. We use the same form as the Apache CLA so that friction is minimal.

License
=======

remotefilelog is made available under the terms of the GNU General Public License version 2, or any later version. See the COPYING file that accompanies this distribution for the full text of the license.
@@ -0,0 +1,1143 b'' | |||||
|
1 | # __init__.py - remotefilelog extension | |||
|
2 | # | |||
|
3 | # Copyright 2013 Facebook, Inc. | |||
|
4 | # | |||
|
5 | # This software may be used and distributed according to the terms of the | |||
|
6 | # GNU General Public License version 2 or any later version. | |||
|
7 | """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL) | |||
|
8 | ||||
|
9 | This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY | |||
|
10 | GUARANTEES. This means that repositories created with this extension may | |||
|
11 | only be usable with the exact version of this extension/Mercurial that was | |||
|
12 | used. The extension attempts to enforce this in order to prevent repository | |||
|
13 | corruption. | |||
|
14 | ||||
|
15 | remotefilelog works by fetching file contents lazily and storing them | |||
|
16 | in a cache on the client rather than in revlogs. This allows enormous | |||
|
17 | histories to be transferred only partially, making them easier to | |||
|
18 | operate on. | |||
|
19 | ||||
|
20 | Configs: | |||
|
21 | ||||
|
22 | ``packs.maxchainlen`` specifies the maximum delta chain length in pack files | |||
|
23 | ||||
|
24 | ``packs.maxpacksize`` specifies the maximum pack file size | |||
|
25 | ||||
|
26 | ``packs.maxpackfilecount`` specifies the maximum number of packs in the | |||
|
27 | shared cache (trees only for now) | |||
|
28 | ||||
|
29 | ``remotefilelog.backgroundprefetch`` runs prefetch in background when True | |||
|
30 | ||||
|
31 | ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and | |||
|
32 | update, and on other commands that use them. Different from pullprefetch. | |||
|
33 | ||||
|
34 | ``remotefilelog.gcrepack`` does garbage collection during repack when True | |||
|
35 | ||||
|
36 | ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before | |||
|
37 | it is garbage collected | |||
|
38 | ||||
|
39 | ``remotefilelog.repackonhggc`` runs repack on hg gc when True | |||
|
40 | ||||
|
41 | ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in | |||
|
42 | days after which it is no longer prefetched. | |||
|
43 | ||||
|
44 | ``remotefilelog.prefetchdelay`` specifies delay between background | |||
|
45 | prefetches in seconds after operations that change the working copy parent | |||
|
46 | ||||
|
47 | ``remotefilelog.data.gencountlimit`` constraints the minimum number of data | |||
|
48 | pack files required to be considered part of a generation. In particular, | |||
|
49 | minimum number of packs files > gencountlimit. | |||
|
50 | ||||
|
51 | ``remotefilelog.data.generations`` list for specifying the lower bound of | |||
|
52 | each generation of the data pack files. For example, list ['100MB','1MB'] | |||
|
53 | or ['1MB', '100MB'] will lead to three generations: [0, 1MB), [ | |||
|
54 | 1MB, 100MB) and [100MB, infinity). | |||
|
55 | ||||
|
56 | ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to | |||
|
57 | include in an incremental data repack. | |||
|
58 | ||||
|
59 | ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for | |||
|
60 | it to be considered for an incremental data repack. | |||
|
61 | ||||
|
62 | ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files | |||
|
63 | to include in an incremental data repack. | |||
|
64 | ||||
|
65 | ``remotefilelog.history.gencountlimit`` constraints the minimum number of | |||
|
66 | history pack files required to be considered part of a generation. In | |||
|
67 | particular, minimum number of packs files > gencountlimit. | |||
|
68 | ||||
|
69 | ``remotefilelog.history.generations`` list for specifying the lower bound of | |||
|
70 | each generation of the history pack files. For example, list [ | |||
|
71 | '100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations: [ | |||
|
72 | 0, 1MB), [1MB, 100MB) and [100MB, infinity). | |||
|
73 | ||||
|
74 | ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to | |||
|
75 | include in an incremental history repack. | |||
|
76 | ||||
|
77 | ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file | |||
|
78 | for it to be considered for an incremental history repack. | |||
|
79 | ||||
|
80 | ``remotefilelog.history.repacksizelimit`` the maximum total size of pack | |||
|
81 | files to include in an incremental history repack. | |||
|
82 | ||||
|
83 | ``remotefilelog.backgroundrepack`` automatically consolidate packs in the | |||
|
84 | background | |||
|
85 | ||||
|
86 | ``remotefilelog.cachepath`` path to cache | |||
|
87 | ||||
|
88 | ``remotefilelog.cachegroup`` if set, make cache directory sgid to this | |||
|
89 | group | |||
|
90 | ||||
|
91 | ``remotefilelog.cacheprocess`` binary to invoke for fetching file data | |||
|
92 | ||||
|
93 | ``remotefilelog.debug`` turn on remotefilelog-specific debug output | |||
|
94 | ||||
|
95 | ``remotefilelog.excludepattern`` pattern of files to exclude from pulls | |||
|
96 | ||||
|
97 | ``remotefilelog.includepattern`` pattern of files to include in pulls | |||
|
98 | ||||
|
99 | ``remotefilelog.fetchwarning``: message to print when too many | |||
|
100 | single-file fetches occur | |||
|
101 | ||||
|
102 | ``remotefilelog.getfilesstep`` number of files to request in a single RPC | |||
|
103 | ||||
|
104 | ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch | |||
|
105 | files, otherwise use optimistic fetching | |||
|
106 | ||||
|
107 | ``remotefilelog.pullprefetch`` revset for selecting files that should be | |||
|
108 | eagerly downloaded rather than lazily | |||
|
109 | ||||
|
110 | ``remotefilelog.reponame`` name of the repo. If set, used to partition | |||
|
111 | data from other repos in a shared store. | |||
|
112 | ||||
|
113 | ``remotefilelog.server`` if true, enable server-side functionality | |||
|
114 | ||||
|
115 | ``remotefilelog.servercachepath`` path for caching blobs on the server | |||
|
116 | ||||
|
117 | ``remotefilelog.serverexpiration`` number of days to keep cached server | |||
|
118 | blobs | |||
|
119 | ||||
|
120 | ``remotefilelog.validatecache`` if set, check cache entries for corruption | |||
|
121 | before returning blobs | |||
|
122 | ||||
|
123 | ``remotefilelog.validatecachelog`` if set, check cache entries for | |||
|
124 | corruption before returning metadata | |||
|
125 | ||||
|
126 | """ | |||
|
127 | from __future__ import absolute_import | |||
|
128 | ||||
|
129 | import os | |||
|
130 | import time | |||
|
131 | import traceback | |||
|
132 | ||||
|
133 | from mercurial.node import hex | |||
|
134 | from mercurial.i18n import _ | |||
|
135 | from mercurial import ( | |||
|
136 | changegroup, | |||
|
137 | changelog, | |||
|
138 | cmdutil, | |||
|
139 | commands, | |||
|
140 | configitems, | |||
|
141 | context, | |||
|
142 | copies, | |||
|
143 | debugcommands as hgdebugcommands, | |||
|
144 | dispatch, | |||
|
145 | error, | |||
|
146 | exchange, | |||
|
147 | extensions, | |||
|
148 | hg, | |||
|
149 | localrepo, | |||
|
150 | match, | |||
|
151 | merge, | |||
|
152 | node as nodemod, | |||
|
153 | patch, | |||
|
154 | pycompat, | |||
|
155 | registrar, | |||
|
156 | repair, | |||
|
157 | repoview, | |||
|
158 | revset, | |||
|
159 | scmutil, | |||
|
160 | smartset, | |||
|
161 | streamclone, | |||
|
162 | templatekw, | |||
|
163 | util, | |||
|
164 | ) | |||
|
165 | from . import ( | |||
|
166 | constants, | |||
|
167 | debugcommands, | |||
|
168 | fileserverclient, | |||
|
169 | remotefilectx, | |||
|
170 | remotefilelog, | |||
|
171 | remotefilelogserver, | |||
|
172 | repack as repackmod, | |||
|
173 | shallowbundle, | |||
|
174 | shallowrepo, | |||
|
175 | shallowstore, | |||
|
176 | shallowutil, | |||
|
177 | shallowverifier, | |||
|
178 | ) | |||
|
179 | ||||
|
180 | # ensures debug commands are registered | |||
|
181 | hgdebugcommands.command | |||
|
182 | ||||
|
183 | cmdtable = {} | |||
|
184 | command = registrar.command(cmdtable) | |||
|
185 | ||||
|
186 | configtable = {} | |||
|
187 | configitem = registrar.configitem(configtable) | |||
|
188 | ||||
|
189 | configitem('remotefilelog', 'debug', default=False) | |||
|
190 | ||||
|
191 | configitem('remotefilelog', 'reponame', default='') | |||
|
192 | configitem('remotefilelog', 'cachepath', default=None) | |||
|
193 | configitem('remotefilelog', 'cachegroup', default=None) | |||
|
194 | configitem('remotefilelog', 'cacheprocess', default=None) | |||
|
195 | configitem('remotefilelog', 'cacheprocess.includepath', default=None) | |||
|
196 | configitem("remotefilelog", "cachelimit", default="1000 GB") | |||
|
197 | ||||
|
198 | configitem('remotefilelog', 'fallbackpath', default=configitems.dynamicdefault, | |||
|
199 | alias=[('remotefilelog', 'fallbackrepo')]) | |||
|
200 | ||||
|
201 | configitem('remotefilelog', 'validatecachelog', default=None) | |||
|
202 | configitem('remotefilelog', 'validatecache', default='on') | |||
|
203 | configitem('remotefilelog', 'server', default=None) | |||
|
204 | configitem('remotefilelog', 'servercachepath', default=None) | |||
|
205 | configitem("remotefilelog", "serverexpiration", default=30) | |||
|
206 | configitem('remotefilelog', 'backgroundrepack', default=False) | |||
|
207 | configitem('remotefilelog', 'bgprefetchrevs', default=None) | |||
|
208 | configitem('remotefilelog', 'pullprefetch', default=None) | |||
|
209 | configitem('remotefilelog', 'backgroundprefetch', default=False) | |||
|
210 | configitem('remotefilelog', 'prefetchdelay', default=120) | |||
|
211 | configitem('remotefilelog', 'prefetchdays', default=14) | |||
|
212 | ||||
|
213 | configitem('remotefilelog', 'getfilesstep', default=10000) | |||
|
214 | configitem('remotefilelog', 'getfilestype', default='optimistic') | |||
|
215 | configitem('remotefilelog', 'batchsize', configitems.dynamicdefault) | |||
|
216 | configitem('remotefilelog', 'fetchwarning', default='') | |||
|
217 | ||||
|
218 | configitem('remotefilelog', 'includepattern', default=None) | |||
|
219 | configitem('remotefilelog', 'excludepattern', default=None) | |||
|
220 | ||||
|
221 | configitem('remotefilelog', 'gcrepack', default=False) | |||
|
222 | configitem('remotefilelog', 'repackonhggc', default=False) | |||
|
223 | configitem('repack', 'chainorphansbysize', default=True) | |||
|
224 | ||||
|
225 | configitem('packs', 'maxpacksize', default=0) | |||
|
226 | configitem('packs', 'maxchainlen', default=1000) | |||
|
227 | ||||
|
228 | # default TTL limit is 30 days | |||
|
229 | _defaultlimit = 60 * 60 * 24 * 30 | |||
|
230 | configitem('remotefilelog', 'nodettl', default=_defaultlimit) | |||
|
231 | ||||
|
232 | configitem('remotefilelog', 'data.gencountlimit', default=2), | |||
|
233 | configitem('remotefilelog', 'data.generations', | |||
|
234 | default=['1GB', '100MB', '1MB']) | |||
|
235 | configitem('remotefilelog', 'data.maxrepackpacks', default=50) | |||
|
236 | configitem('remotefilelog', 'data.repackmaxpacksize', default='4GB') | |||
|
237 | configitem('remotefilelog', 'data.repacksizelimit', default='100MB') | |||
|
238 | ||||
|
239 | configitem('remotefilelog', 'history.gencountlimit', default=2), | |||
|
240 | configitem('remotefilelog', 'history.generations', default=['100MB']) | |||
|
241 | configitem('remotefilelog', 'history.maxrepackpacks', default=50) | |||
|
242 | configitem('remotefilelog', 'history.repackmaxpacksize', default='400MB') | |||
|
243 | configitem('remotefilelog', 'history.repacksizelimit', default='100MB') | |||
|
244 | ||||
|
245 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |||
|
246 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | |||
|
247 | # be specifying the version(s) of Mercurial they are tested with, or | |||
|
248 | # leave the attribute unspecified. | |||
|
249 | testedwith = 'ships-with-hg-core' | |||
|
250 | ||||
|
251 | repoclass = localrepo.localrepository | |||
|
252 | repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT) | |||
|
253 | ||||
|
254 | isenabled = shallowutil.isenabled | |||
|
255 | ||||
|
256 | def uisetup(ui): | |||
|
257 | """Wraps user facing Mercurial commands to swap them out with shallow | |||
|
258 | versions. | |||
|
259 | """ | |||
|
260 | hg.wirepeersetupfuncs.append(fileserverclient.peersetup) | |||
|
261 | ||||
|
262 | entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow) | |||
|
263 | entry[1].append(('', 'shallow', None, | |||
|
264 | _("create a shallow clone which uses remote file " | |||
|
265 | "history"))) | |||
|
266 | ||||
|
267 | extensions.wrapcommand(commands.table, 'debugindex', | |||
|
268 | debugcommands.debugindex) | |||
|
269 | extensions.wrapcommand(commands.table, 'debugindexdot', | |||
|
270 | debugcommands.debugindexdot) | |||
|
271 | extensions.wrapcommand(commands.table, 'log', log) | |||
|
272 | extensions.wrapcommand(commands.table, 'pull', pull) | |||
|
273 | ||||
|
274 | # Prevent 'hg manifest --all' | |||
|
275 | def _manifest(orig, ui, repo, *args, **opts): | |||
|
276 | if (isenabled(repo) and opts.get(r'all')): | |||
|
277 | raise error.Abort(_("--all is not supported in a shallow repo")) | |||
|
278 | ||||
|
279 | return orig(ui, repo, *args, **opts) | |||
|
280 | extensions.wrapcommand(commands.table, "manifest", _manifest) | |||
|
281 | ||||
|
282 | # Wrap remotefilelog with lfs code | |||
|
283 | def _lfsloaded(loaded=False): | |||
|
284 | lfsmod = None | |||
|
285 | try: | |||
|
286 | lfsmod = extensions.find('lfs') | |||
|
287 | except KeyError: | |||
|
288 | pass | |||
|
289 | if lfsmod: | |||
|
290 | lfsmod.wrapfilelog(remotefilelog.remotefilelog) | |||
|
291 | fileserverclient._lfsmod = lfsmod | |||
|
292 | extensions.afterloaded('lfs', _lfsloaded) | |||
|
293 | ||||
|
294 | # debugdata needs remotefilelog.len to work | |||
|
295 | extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow) | |||
|
296 | ||||
|
297 | def cloneshallow(orig, ui, repo, *args, **opts): | |||
|
298 | if opts.get(r'shallow'): | |||
|
299 | repos = [] | |||
|
300 | def pull_shallow(orig, self, *args, **kwargs): | |||
|
301 | if not isenabled(self): | |||
|
302 | repos.append(self.unfiltered()) | |||
|
303 | # set up the client hooks so the post-clone update works | |||
|
304 | setupclient(self.ui, self.unfiltered()) | |||
|
305 | ||||
|
306 | # setupclient fixed the class on the repo itself | |||
|
307 | # but we also need to fix it on the repoview | |||
|
308 | if isinstance(self, repoview.repoview): | |||
|
309 | self.__class__.__bases__ = (self.__class__.__bases__[0], | |||
|
310 | self.unfiltered().__class__) | |||
|
311 | self.requirements.add(constants.SHALLOWREPO_REQUIREMENT) | |||
|
312 | self._writerequirements() | |||
|
313 | ||||
|
314 | # Since setupclient hadn't been called, exchange.pull was not | |||
|
315 | # wrapped. So we need to manually invoke our version of it. | |||
|
316 | return exchangepull(orig, self, *args, **kwargs) | |||
|
317 | else: | |||
|
318 | return orig(self, *args, **kwargs) | |||
|
319 | extensions.wrapfunction(exchange, 'pull', pull_shallow) | |||
|
320 | ||||
|
321 | # Wrap the stream logic to add requirements and to pass include/exclude | |||
|
322 | # patterns around. | |||
|
323 | def setup_streamout(repo, remote): | |||
|
324 | # Replace remote.stream_out with a version that sends file | |||
|
325 | # patterns. | |||
|
326 | def stream_out_shallow(orig): | |||
|
327 | caps = remote.capabilities() | |||
|
328 | if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps: | |||
|
329 | opts = {} | |||
|
330 | if repo.includepattern: | |||
|
331 | opts[r'includepattern'] = '\0'.join(repo.includepattern) | |||
|
332 | if repo.excludepattern: | |||
|
333 | opts[r'excludepattern'] = '\0'.join(repo.excludepattern) | |||
|
334 | return remote._callstream('stream_out_shallow', **opts) | |||
|
335 | else: | |||
|
336 | return orig() | |||
|
337 | extensions.wrapfunction(remote, 'stream_out', stream_out_shallow) | |||
|
338 | def stream_wrap(orig, op): | |||
|
339 | setup_streamout(op.repo, op.remote) | |||
|
340 | return orig(op) | |||
|
341 | extensions.wrapfunction( | |||
|
342 | streamclone, 'maybeperformlegacystreamclone', stream_wrap) | |||
|
343 | ||||
|
344 | def canperformstreamclone(orig, pullop, bundle2=False): | |||
|
345 | # remotefilelog is currently incompatible with the | |||
|
346 | # bundle2 flavor of streamclones, so force us to use | |||
|
347 | # v1 instead. | |||
|
348 | if 'v2' in pullop.remotebundle2caps.get('stream', []): | |||
|
349 | pullop.remotebundle2caps['stream'] = [ | |||
|
350 | c for c in pullop.remotebundle2caps['stream'] | |||
|
351 | if c != 'v2'] | |||
|
352 | if bundle2: | |||
|
353 | return False, None | |||
|
354 | supported, requirements = orig(pullop, bundle2=bundle2) | |||
|
355 | if requirements is not None: | |||
|
356 | requirements.add(constants.SHALLOWREPO_REQUIREMENT) | |||
|
357 | return supported, requirements | |||
|
358 | extensions.wrapfunction( | |||
|
359 | streamclone, 'canperformstreamclone', canperformstreamclone) | |||
|
360 | ||||
|
361 | try: | |||
|
362 | orig(ui, repo, *args, **opts) | |||
|
363 | finally: | |||
|
364 | if opts.get(r'shallow'): | |||
|
365 | for r in repos: | |||
|
366 | if util.safehasattr(r, 'fileservice'): | |||
|
367 | r.fileservice.close() | |||
|
368 | ||||
|
369 | def debugdatashallow(orig, *args, **kwds): | |||
|
370 | oldlen = remotefilelog.remotefilelog.__len__ | |||
|
371 | try: | |||
|
372 | remotefilelog.remotefilelog.__len__ = lambda x: 1 | |||
|
373 | return orig(*args, **kwds) | |||
|
374 | finally: | |||
|
375 | remotefilelog.remotefilelog.__len__ = oldlen | |||
|
376 | ||||
|
377 | def reposetup(ui, repo): | |||
|
378 | if not repo.local(): | |||
|
379 | return | |||
|
380 | ||||
|
381 | # put here intentionally bc doesnt work in uisetup | |||
|
382 | ui.setconfig('hooks', 'update.prefetch', wcpprefetch) | |||
|
383 | ui.setconfig('hooks', 'commit.prefetch', wcpprefetch) | |||
|
384 | ||||
|
385 | isserverenabled = ui.configbool('remotefilelog', 'server') | |||
|
386 | isshallowclient = isenabled(repo) | |||
|
387 | ||||
|
388 | if isserverenabled and isshallowclient: | |||
|
389 | raise RuntimeError("Cannot be both a server and shallow client.") | |||
|
390 | ||||
|
391 | if isshallowclient: | |||
|
392 | setupclient(ui, repo) | |||
|
393 | ||||
|
394 | if isserverenabled: | |||
|
395 | remotefilelogserver.setupserver(ui, repo) | |||
|
396 | ||||
|
397 | def setupclient(ui, repo): | |||
|
398 | if not isinstance(repo, localrepo.localrepository): | |||
|
399 | return | |||
|
400 | ||||
|
401 | # Even clients get the server setup since they need to have the | |||
|
402 | # wireprotocol endpoints registered. | |||
|
403 | remotefilelogserver.onetimesetup(ui) | |||
|
404 | onetimeclientsetup(ui) | |||
|
405 | ||||
|
406 | shallowrepo.wraprepo(repo) | |||
|
407 | repo.store = shallowstore.wrapstore(repo.store) | |||
|
408 | ||||
|
409 | clientonetime = False | |||
|
410 | def onetimeclientsetup(ui): | |||
|
411 | global clientonetime | |||
|
412 | if clientonetime: | |||
|
413 | return | |||
|
414 | clientonetime = True | |||
|
415 | ||||
|
416 | changegroup.cgpacker = shallowbundle.shallowcg1packer | |||
|
417 | ||||
|
418 | extensions.wrapfunction(changegroup, '_addchangegroupfiles', | |||
|
419 | shallowbundle.addchangegroupfiles) | |||
|
420 | extensions.wrapfunction( | |||
|
421 | changegroup, 'makechangegroup', shallowbundle.makechangegroup) | |||
|
422 | ||||
|
423 | def storewrapper(orig, requirements, path, vfstype): | |||
|
424 | s = orig(requirements, path, vfstype) | |||
|
425 | if constants.SHALLOWREPO_REQUIREMENT in requirements: | |||
|
426 | s = shallowstore.wrapstore(s) | |||
|
427 | ||||
|
428 | return s | |||
|
429 | extensions.wrapfunction(localrepo, 'makestore', storewrapper) | |||
|
430 | ||||
|
431 | extensions.wrapfunction(exchange, 'pull', exchangepull) | |||
|
432 | ||||
|
433 | # prefetch files before update | |||
|
434 | def applyupdates(orig, repo, actions, wctx, mctx, overwrite, labels=None): | |||
|
435 | if isenabled(repo): | |||
|
436 | manifest = mctx.manifest() | |||
|
437 | files = [] | |||
|
438 | for f, args, msg in actions['g']: | |||
|
439 | files.append((f, hex(manifest[f]))) | |||
|
440 | # batch fetch the needed files from the server | |||
|
441 | repo.fileservice.prefetch(files) | |||
|
442 | return orig(repo, actions, wctx, mctx, overwrite, labels=labels) | |||
|
443 | extensions.wrapfunction(merge, 'applyupdates', applyupdates) | |||
|
444 | ||||
|
445 | # Prefetch merge checkunknownfiles | |||
|
446 | def checkunknownfiles(orig, repo, wctx, mctx, force, actions, | |||
|
447 | *args, **kwargs): | |||
|
448 | if isenabled(repo): | |||
|
449 | files = [] | |||
|
450 | sparsematch = repo.maybesparsematch(mctx.rev()) | |||
|
451 | for f, (m, actionargs, msg) in actions.iteritems(): | |||
|
452 | if sparsematch and not sparsematch(f): | |||
|
453 | continue | |||
|
454 | if m in ('c', 'dc', 'cm'): | |||
|
455 | files.append((f, hex(mctx.filenode(f)))) | |||
|
456 | elif m == 'dg': | |||
|
457 | f2 = actionargs[0] | |||
|
458 | files.append((f2, hex(mctx.filenode(f2)))) | |||
|
459 | # batch fetch the needed files from the server | |||
|
460 | repo.fileservice.prefetch(files) | |||
|
461 | return orig(repo, wctx, mctx, force, actions, *args, **kwargs) | |||
|
462 | extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles) | |||
|
463 | ||||
|
464 | # Prefetch files before status attempts to look at their size and contents | |||
|
465 | def checklookup(orig, self, files): | |||
|
466 | repo = self._repo | |||
|
467 | if isenabled(repo): | |||
|
468 | prefetchfiles = [] | |||
|
469 | for parent in self._parents: | |||
|
470 | for f in files: | |||
|
471 | if f in parent: | |||
|
472 | prefetchfiles.append((f, hex(parent.filenode(f)))) | |||
|
473 | # batch fetch the needed files from the server | |||
|
474 | repo.fileservice.prefetch(prefetchfiles) | |||
|
475 | return orig(self, files) | |||
|
476 | extensions.wrapfunction(context.workingctx, '_checklookup', checklookup) | |||
|
477 | ||||
|
478 | # Prefetch the logic that compares added and removed files for renames | |||
|
479 | def findrenames(orig, repo, matcher, added, removed, *args, **kwargs): | |||
|
480 | if isenabled(repo): | |||
|
481 | files = [] | |||
|
482 | parentctx = repo['.'] | |||
|
483 | for f in removed: | |||
|
484 | files.append((f, hex(parentctx.filenode(f)))) | |||
|
485 | # batch fetch the needed files from the server | |||
|
486 | repo.fileservice.prefetch(files) | |||
|
487 | return orig(repo, matcher, added, removed, *args, **kwargs) | |||
|
488 | extensions.wrapfunction(scmutil, '_findrenames', findrenames) | |||
|
489 | ||||
|
490 | # prefetch files before mergecopies check | |||
|
491 | def computenonoverlap(orig, repo, c1, c2, *args, **kwargs): | |||
|
492 | u1, u2 = orig(repo, c1, c2, *args, **kwargs) | |||
|
493 | if isenabled(repo): | |||
|
494 | m1 = c1.manifest() | |||
|
495 | m2 = c2.manifest() | |||
|
496 | files = [] | |||
|
497 | ||||
|
498 | sparsematch1 = repo.maybesparsematch(c1.rev()) | |||
|
499 | if sparsematch1: | |||
|
500 | sparseu1 = [] | |||
|
501 | for f in u1: | |||
|
502 | if sparsematch1(f): | |||
|
503 | files.append((f, hex(m1[f]))) | |||
|
504 | sparseu1.append(f) | |||
|
505 | u1 = sparseu1 | |||
|
506 | ||||
|
507 | sparsematch2 = repo.maybesparsematch(c2.rev()) | |||
|
508 | if sparsematch2: | |||
|
509 | sparseu2 = [] | |||
|
510 | for f in u2: | |||
|
511 | if sparsematch2(f): | |||
|
512 | files.append((f, hex(m2[f]))) | |||
|
513 | sparseu2.append(f) | |||
|
514 | u2 = sparseu2 | |||
|
515 | ||||
|
516 | # batch fetch the needed files from the server | |||
|
517 | repo.fileservice.prefetch(files) | |||
|
518 | return u1, u2 | |||
|
519 | extensions.wrapfunction(copies, '_computenonoverlap', computenonoverlap) | |||
|
520 | ||||
|
521 | # prefetch files before pathcopies check | |||
|
522 | def computeforwardmissing(orig, a, b, match=None): | |||
|
523 | missing = list(orig(a, b, match=match)) | |||
|
524 | repo = a._repo | |||
|
525 | if isenabled(repo): | |||
|
526 | mb = b.manifest() | |||
|
527 | ||||
|
528 | files = [] | |||
|
529 | sparsematch = repo.maybesparsematch(b.rev()) | |||
|
530 | if sparsematch: | |||
|
531 | sparsemissing = [] | |||
|
532 | for f in missing: | |||
|
533 | if sparsematch(f): | |||
|
534 | files.append((f, hex(mb[f]))) | |||
|
535 | sparsemissing.append(f) | |||
|
536 | missing = sparsemissing | |||
|
537 | ||||
|
538 | # batch fetch the needed files from the server | |||
|
539 | repo.fileservice.prefetch(files) | |||
|
540 | return missing | |||
|
541 | extensions.wrapfunction(copies, '_computeforwardmissing', | |||
|
542 | computeforwardmissing) | |||
|
543 | ||||
|
544 | # close cache miss server connection after the command has finished | |||
|
545 | def runcommand(orig, lui, repo, *args, **kwargs): | |||
|
546 | fileservice = None | |||
|
547 | # repo can be None when running in chg: | |||
|
548 | # - at startup, reposetup was called because serve is not norepo | |||
|
549 | # - a norepo command like "help" is called | |||
|
550 | if repo and isenabled(repo): | |||
|
551 | fileservice = repo.fileservice | |||
|
552 | try: | |||
|
553 | return orig(lui, repo, *args, **kwargs) | |||
|
554 | finally: | |||
|
555 | if fileservice: | |||
|
556 | fileservice.close() | |||
|
557 | extensions.wrapfunction(dispatch, 'runcommand', runcommand) | |||
|
558 | ||||
|
559 | # disappointing hacks below | |||
|
560 | templatekw.getrenamedfn = getrenamedfn | |||
|
561 | extensions.wrapfunction(revset, 'filelog', filelogrevset) | |||
|
562 | revset.symbols['filelog'] = revset.filelog | |||
|
563 | extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs) | |||
|
564 | ||||
|
565 | # prevent strip from stripping remotefilelogs | |||
|
566 | def _collectbrokencsets(orig, repo, files, striprev): | |||
|
567 | if isenabled(repo): | |||
|
568 | files = list([f for f in files if not repo.shallowmatch(f)]) | |||
|
569 | return orig(repo, files, striprev) | |||
|
570 | extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets) | |||
|
571 | ||||
|
572 | # Don't commit filelogs until we know the commit hash, since the hash | |||
|
573 | # is present in the filelog blob. | |||
|
574 | # This violates Mercurial's filelog->manifest->changelog write order, | |||
|
575 | # but is generally fine for client repos. | |||
|
576 | pendingfilecommits = [] | |||
|
577 | def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node, | |||
|
578 | flags, cachedelta=None, _metatuple=None): | |||
|
579 | if isinstance(link, int): | |||
|
580 | pendingfilecommits.append( | |||
|
581 | (self, rawtext, transaction, link, p1, p2, node, flags, | |||
|
582 | cachedelta, _metatuple)) | |||
|
583 | return node | |||
|
584 | else: | |||
|
585 | return orig(self, rawtext, transaction, link, p1, p2, node, flags, | |||
|
586 | cachedelta, _metatuple=_metatuple) | |||
|
587 | extensions.wrapfunction( | |||
|
588 | remotefilelog.remotefilelog, 'addrawrevision', addrawrevision) | |||
|
589 | ||||
|
590 | def changelogadd(orig, self, *args): | |||
|
591 | oldlen = len(self) | |||
|
592 | node = orig(self, *args) | |||
|
593 | newlen = len(self) | |||
|
594 | if oldlen != newlen: | |||
|
595 | for oldargs in pendingfilecommits: | |||
|
596 | log, rt, tr, link, p1, p2, n, fl, c, m = oldargs | |||
|
597 | linknode = self.node(link) | |||
|
598 | if linknode == node: | |||
|
599 | log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m) | |||
|
600 | else: | |||
|
601 | raise error.ProgrammingError( | |||
|
602 | 'pending multiple integer revisions are not supported') | |||
|
603 | else: | |||
|
604 | # "link" is actually wrong here (it is set to len(changelog)) | |||
|
605 | # if changelog remains unchanged, skip writing file revisions | |||
|
606 | # but still do a sanity check about pending multiple revisions | |||
|
607 | if len(set(x[3] for x in pendingfilecommits)) > 1: | |||
|
608 | raise error.ProgrammingError( | |||
|
609 | 'pending multiple integer revisions are not supported') | |||
|
610 | del pendingfilecommits[:] | |||
|
611 | return node | |||
|
612 | extensions.wrapfunction(changelog.changelog, 'add', changelogadd) | |||
|
613 | ||||
|
614 | # changectx wrappers | |||
|
615 | def filectx(orig, self, path, fileid=None, filelog=None): | |||
|
616 | if fileid is None: | |||
|
617 | fileid = self.filenode(path) | |||
|
618 | if (isenabled(self._repo) and self._repo.shallowmatch(path)): | |||
|
619 | return remotefilectx.remotefilectx(self._repo, path, | |||
|
620 | fileid=fileid, changectx=self, filelog=filelog) | |||
|
621 | return orig(self, path, fileid=fileid, filelog=filelog) | |||
|
622 | extensions.wrapfunction(context.changectx, 'filectx', filectx) | |||
|
623 | ||||
|
624 | def workingfilectx(orig, self, path, filelog=None): | |||
|
625 | if (isenabled(self._repo) and self._repo.shallowmatch(path)): | |||
|
626 | return remotefilectx.remoteworkingfilectx(self._repo, | |||
|
627 | path, workingctx=self, filelog=filelog) | |||
|
628 | return orig(self, path, filelog=filelog) | |||
|
629 | extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx) | |||
|
630 | ||||
|
631 | # prefetch required revisions before a diff | |||
|
632 | def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed, | |||
|
633 | copy, getfilectx, *args, **kwargs): | |||
|
634 | if isenabled(repo): | |||
|
635 | prefetch = [] | |||
|
636 | mf1 = ctx1.manifest() | |||
|
637 | for fname in modified + added + removed: | |||
|
638 | if fname in mf1: | |||
|
639 | fnode = getfilectx(fname, ctx1).filenode() | |||
|
640 | # fnode can be None if it's an edited working ctx file | |||
|
641 | if fnode: | |||
|
642 | prefetch.append((fname, hex(fnode))) | |||
|
643 | if fname not in removed: | |||
|
644 | fnode = getfilectx(fname, ctx2).filenode() | |||
|
645 | if fnode: | |||
|
646 | prefetch.append((fname, hex(fnode))) | |||
|
647 | ||||
|
648 | repo.fileservice.prefetch(prefetch) | |||
|
649 | ||||
|
650 | return orig(repo, revs, ctx1, ctx2, modified, added, removed, | |||
|
651 | copy, getfilectx, *args, **kwargs) | |||
|
652 | extensions.wrapfunction(patch, 'trydiff', trydiff) | |||
|
653 | ||||
|
654 | # Prevent verify from processing files | |||
|
655 | # a stub for mercurial.hg.verify() | |||
|
656 | def _verify(orig, repo): | |||
|
657 | lock = repo.lock() | |||
|
658 | try: | |||
|
659 | return shallowverifier.shallowverifier(repo).verify() | |||
|
660 | finally: | |||
|
661 | lock.release() | |||
|
662 | ||||
|
663 | extensions.wrapfunction(hg, 'verify', _verify) | |||
|
664 | ||||
|
665 | scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook) | |||
|
666 | ||||
|
667 | def getrenamedfn(repo, endrev=None): | |||
|
668 | rcache = {} | |||
|
669 | ||||
|
670 | def getrenamed(fn, rev): | |||
|
671 | '''looks up all renames for a file (up to endrev) the first | |||
|
672 | time the file is given. It indexes on the changerev and only | |||
|
673 | parses the manifest if linkrev != changerev. | |||
|
674 | Returns rename info for fn at changerev rev.''' | |||
|
675 | if rev in rcache.setdefault(fn, {}): | |||
|
676 | return rcache[fn][rev] | |||
|
677 | ||||
|
678 | try: | |||
|
679 | fctx = repo[rev].filectx(fn) | |||
|
680 | for ancestor in fctx.ancestors(): | |||
|
681 | if ancestor.path() == fn: | |||
|
682 | renamed = ancestor.renamed() | |||
|
683 | rcache[fn][ancestor.rev()] = renamed and renamed[0] | |||
|
684 | ||||
|
685 | renamed = fctx.renamed() | |||
|
686 | return renamed and renamed[0] | |||
|
687 | except error.LookupError: | |||
|
688 | return None | |||
|
689 | ||||
|
690 | return getrenamed | |||
|
691 | ||||
|
692 | def walkfilerevs(orig, repo, match, follow, revs, fncache): | |||
|
693 | if not isenabled(repo): | |||
|
694 | return orig(repo, match, follow, revs, fncache) | |||
|
695 | ||||
|
696 | # remotefilelogs can't be walked in rev order, so throw. | |||
|
697 | # The caller will see the exception and walk the commit tree instead. | |||
|
698 | if not follow: | |||
|
699 | raise cmdutil.FileWalkError("Cannot walk via filelog") | |||
|
700 | ||||
|
701 | wanted = set() | |||
|
702 | minrev, maxrev = min(revs), max(revs) | |||
|
703 | ||||
|
704 | pctx = repo['.'] | |||
|
705 | for filename in match.files(): | |||
|
706 | if filename not in pctx: | |||
|
707 | raise error.Abort(_('cannot follow file not in parent ' | |||
|
708 | 'revision: "%s"') % filename) | |||
|
709 | fctx = pctx[filename] | |||
|
710 | ||||
|
711 | linkrev = fctx.linkrev() | |||
|
712 | if linkrev >= minrev and linkrev <= maxrev: | |||
|
713 | fncache.setdefault(linkrev, []).append(filename) | |||
|
714 | wanted.add(linkrev) | |||
|
715 | ||||
|
716 | for ancestor in fctx.ancestors(): | |||
|
717 | linkrev = ancestor.linkrev() | |||
|
718 | if linkrev >= minrev and linkrev <= maxrev: | |||
|
719 | fncache.setdefault(linkrev, []).append(ancestor.path()) | |||
|
720 | wanted.add(linkrev) | |||
|
721 | ||||
|
722 | return wanted | |||
|
723 | ||||
|
724 | def filelogrevset(orig, repo, subset, x): | |||
|
725 | """``filelog(pattern)`` | |||
|
726 | Changesets connected to the specified filelog. | |||
|
727 | ||||
|
728 | For performance reasons, ``filelog()`` does not show every changeset | |||
|
729 | that affects the requested file(s). See :hg:`help log` for details. For | |||
|
730 | a slower, more accurate result, use ``file()``. | |||
|
731 | """ | |||
|
732 | ||||
|
733 | if not isenabled(repo): | |||
|
734 | return orig(repo, subset, x) | |||
|
735 | ||||
|
736 | # i18n: "filelog" is a keyword | |||
|
737 | pat = revset.getstring(x, _("filelog requires a pattern")) | |||
|
738 | m = match.match(repo.root, repo.getcwd(), [pat], default='relpath', | |||
|
739 | ctx=repo[None]) | |||
|
740 | s = set() | |||
|
741 | ||||
|
742 | if not match.patkind(pat): | |||
|
743 | # slow | |||
|
744 | for r in subset: | |||
|
745 | ctx = repo[r] | |||
|
746 | cfiles = ctx.files() | |||
|
747 | for f in m.files(): | |||
|
748 | if f in cfiles: | |||
|
749 | s.add(ctx.rev()) | |||
|
750 | break | |||
|
751 | else: | |||
|
752 | # partial | |||
|
753 | files = (f for f in repo[None] if m(f)) | |||
|
754 | for f in files: | |||
|
755 | fctx = repo[None].filectx(f) | |||
|
756 | s.add(fctx.linkrev()) | |||
|
757 | for actx in fctx.ancestors(): | |||
|
758 | s.add(actx.linkrev()) | |||
|
759 | ||||
|
760 | return smartset.baseset([r for r in subset if r in s]) | |||
|
761 | ||||
|
762 | @command('gc', [], _('hg gc [REPO...]'), norepo=True) | |||
|
763 | def gc(ui, *args, **opts): | |||
|
764 | '''garbage collect the client and server filelog caches | |||
|
765 | ''' | |||
|
766 | cachepaths = set() | |||
|
767 | ||||
|
768 | # get the system client cache | |||
|
769 | systemcache = shallowutil.getcachepath(ui, allowempty=True) | |||
|
770 | if systemcache: | |||
|
771 | cachepaths.add(systemcache) | |||
|
772 | ||||
|
773 | # get repo client and server cache | |||
|
774 | repopaths = [] | |||
|
775 | pwd = ui.environ.get('PWD') | |||
|
776 | if pwd: | |||
|
777 | repopaths.append(pwd) | |||
|
778 | ||||
|
779 | repopaths.extend(args) | |||
|
780 | repos = [] | |||
|
781 | for repopath in repopaths: | |||
|
782 | try: | |||
|
783 | repo = hg.peer(ui, {}, repopath) | |||
|
784 | repos.append(repo) | |||
|
785 | ||||
|
786 | repocache = shallowutil.getcachepath(repo.ui, allowempty=True) | |||
|
787 | if repocache: | |||
|
788 | cachepaths.add(repocache) | |||
|
789 | except error.RepoError: | |||
|
790 | pass | |||
|
791 | ||||
|
792 | # gc client cache | |||
|
793 | for cachepath in cachepaths: | |||
|
794 | gcclient(ui, cachepath) | |||
|
795 | ||||
|
796 | # gc server cache | |||
|
797 | for repo in repos: | |||
|
798 | remotefilelogserver.gcserver(ui, repo._repo) | |||
|
799 | ||||
|
800 | def gcclient(ui, cachepath): | |||
|
801 | # get list of repos that use this cache | |||
|
802 | repospath = os.path.join(cachepath, 'repos') | |||
|
803 | if not os.path.exists(repospath): | |||
|
804 | ui.warn(_("no known cache at %s\n") % cachepath) | |||
|
805 | return | |||
|
806 | ||||
|
807 | reposfile = open(repospath, 'rb') | |||
|
808 | repos = set([r[:-1] for r in reposfile.readlines()]) | |||
|
809 | reposfile.close() | |||
|
810 | ||||
|
811 | # build list of useful files | |||
|
812 | validrepos = [] | |||
|
813 | keepkeys = set() | |||
|
814 | ||||
|
815 | sharedcache = None | |||
|
816 | filesrepacked = False | |||
|
817 | ||||
|
818 | count = 0 | |||
|
819 | progress = ui.makeprogress(_("analyzing repositories"), unit="repos", | |||
|
820 | total=len(repos)) | |||
|
821 | for path in repos: | |||
|
822 | progress.update(count) | |||
|
823 | count += 1 | |||
|
824 | try: | |||
|
825 | path = ui.expandpath(os.path.normpath(path)) | |||
|
826 | except TypeError as e: | |||
|
827 | ui.warn(_("warning: malformed path: %r:%s\n") % (path, e)) | |||
|
828 | traceback.print_exc() | |||
|
829 | continue | |||
|
830 | try: | |||
|
831 | peer = hg.peer(ui, {}, path) | |||
|
832 | repo = peer._repo | |||
|
833 | except error.RepoError: | |||
|
834 | continue | |||
|
835 | ||||
|
836 | validrepos.append(path) | |||
|
837 | ||||
|
838 | # Protect against any repo or config changes that have happened since | |||
|
839 | # this repo was added to the repos file. We'd rather have this loop | |||
|
840 | # succeed and delete too much than have it fail and delete nothing. | |||
|
841 | if not isenabled(repo): | |||
|
842 | continue | |||
|
843 | ||||
|
844 | if not util.safehasattr(repo, 'name'): | |||
|
845 | ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path) | |||
|
846 | continue | |||
|
847 | ||||
|
848 | # If garbage collection on repack and repack on hg gc are enabled | |||
|
849 | # then loose files are repacked and garbage collected. | |||
|
850 | # Otherwise regular garbage collection is performed. | |||
|
851 | repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc') | |||
|
852 | gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack') | |||
|
853 | if repackonhggc and gcrepack: | |||
|
854 | try: | |||
|
855 | repackmod.incrementalrepack(repo) | |||
|
856 | filesrepacked = True | |||
|
857 | continue | |||
|
858 | except (IOError, repackmod.RepackAlreadyRunning): | |||
|
859 | # If the repack cannot be performed (e.g. not enough disk space), | |||
|
860 | # continue doing garbage collection of loose files without repacking | |||
|
861 | pass | |||
|
862 | ||||
|
863 | reponame = repo.name | |||
|
864 | if not sharedcache: | |||
|
865 | sharedcache = repo.sharedstore | |||
|
866 | ||||
|
867 | # Compute a keepset which is not garbage collected | |||
|
868 | def keyfn(fname, fnode): | |||
|
869 | return fileserverclient.getcachekey(reponame, fname, hex(fnode)) | |||
|
870 | keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys) | |||
|
871 | ||||
|
872 | progress.complete() | |||
|
873 | ||||
|
874 | # write list of valid repos back | |||
|
875 | oldumask = os.umask(0o002) | |||
|
876 | try: | |||
|
877 | reposfile = open(repospath, 'wb') | |||
|
878 | reposfile.writelines([("%s\n" % r) for r in validrepos]) | |||
|
879 | reposfile.close() | |||
|
880 | finally: | |||
|
881 | os.umask(oldumask) | |||
|
882 | ||||
|
883 | # prune cache | |||
|
884 | if sharedcache is not None: | |||
|
885 | sharedcache.gc(keepkeys) | |||
|
886 | elif not filesrepacked: | |||
|
887 | ui.warn(_("warning: no valid repos in repofile\n")) | |||
|
888 | ||||
|
889 | def log(orig, ui, repo, *pats, **opts): | |||
|
890 | if not isenabled(repo): | |||
|
891 | return orig(ui, repo, *pats, **opts) | |||
|
892 | ||||
|
893 | follow = opts.get(r'follow') | |||
|
894 | revs = opts.get(r'rev') | |||
|
895 | if pats: | |||
|
896 | # Force slowpath for non-follow patterns and follows that start from | |||
|
897 | # non-working-copy-parent revs. | |||
|
898 | if not follow or revs: | |||
|
899 | # This forces the slowpath | |||
|
900 | opts[r'removed'] = True | |||
|
901 | ||||
|
902 | # If this is a non-follow log without any revs specified, recommend that | |||
|
903 | # the user add -f to speed it up. | |||
|
904 | if not follow and not revs: | |||
|
905 | match, pats = scmutil.matchandpats(repo['.'], pats, | |||
|
906 | pycompat.byteskwargs(opts)) | |||
|
907 | isfile = not match.anypats() | |||
|
908 | if isfile: | |||
|
909 | for file in match.files(): | |||
|
910 | if not os.path.isfile(repo.wjoin(file)): | |||
|
911 | isfile = False | |||
|
912 | break | |||
|
913 | ||||
|
914 | if isfile: | |||
|
915 | ui.warn(_("warning: file log can be slow on large repos - " + | |||
|
916 | "use -f to speed it up\n")) | |||
|
917 | ||||
|
918 | return orig(ui, repo, *pats, **opts) | |||
|
919 | ||||
|
920 | def revdatelimit(ui, revset): | |||
|
921 | """Update revset so that only changesets no older than 'prefetchdays' days | |||
|
922 | are included. The default value is set to 14 days. If 'prefetchdays' is set | |||
|
923 | to zero or a negative value then the date restriction is not applied. | |||
|
924 | """ | |||
|
925 | days = ui.configint('remotefilelog', 'prefetchdays') | |||
|
926 | if days > 0: | |||
|
927 | revset = '(%s) & date(-%s)' % (revset, days) | |||
|
928 | return revset | |||
|
929 | ||||
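For example, with the default prefetchdays value of 14 the helper wraps the incoming revset like this (illustrative, not part of the diff):

    revdatelimit(ui, 'draft()')
    # -> '(draft()) & date(-14)', i.e. draft changesets from the last 14 days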
|
930 | def readytofetch(repo): | |||
|
931 | """Check that enough time has passed since the last background prefetch. | |||
|
932 | This only relates to prefetches after operations that change the working | |||
|
933 | copy parent. Default delay between background prefetches is 2 minutes. | |||
|
934 | """ | |||
|
935 | timeout = repo.ui.configint('remotefilelog', 'prefetchdelay') | |||
|
936 | fname = repo.vfs.join('lastprefetch') | |||
|
937 | ||||
|
938 | ready = False | |||
|
939 | with open(fname, 'a'): | |||
|
940 | # the with construct above is used to avoid race conditions | |||
|
941 | modtime = os.path.getmtime(fname) | |||
|
942 | if (time.time() - modtime) > timeout: | |||
|
943 | os.utime(fname, None) | |||
|
944 | ready = True | |||
|
945 | ||||
|
946 | return ready | |||
|
947 | ||||
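The lastprefetch file serves purely as a timestamp; a stripped-down version of the same mtime-based rate limit (names are hypothetical):

    import os, time

    def ratelimited(marker, timeout):
        with open(marker, 'a'):  # create the marker if it is absent
            if time.time() - os.path.getmtime(marker) > timeout:
                os.utime(marker, None)  # reset the clock
                return False            # enough time has passed; go ahead
        return True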
|
948 | def wcpprefetch(ui, repo, **kwargs): | |||
|
949 | """Prefetches in background revisions specified by bgprefetchrevs revset. | |||
|
950 | Does background repack if backgroundrepack flag is set in config. | |||
|
951 | """ | |||
|
952 | shallow = isenabled(repo) | |||
|
953 | bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs') | |||
|
954 | isready = readytofetch(repo) | |||
|
955 | ||||
|
956 | if not (shallow and bgprefetchrevs and isready): | |||
|
957 | return | |||
|
958 | ||||
|
959 | bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack') | |||
|
960 | # update a revset with a date limit | |||
|
961 | bgprefetchrevs = revdatelimit(ui, bgprefetchrevs) | |||
|
962 | ||||
|
963 | def anon(): | |||
|
964 | if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch: | |||
|
965 | return | |||
|
966 | repo.ranprefetch = True | |||
|
967 | repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack) | |||
|
968 | ||||
|
969 | repo._afterlock(anon) | |||
|
970 | ||||
|
971 | def pull(orig, ui, repo, *pats, **opts): | |||
|
972 | result = orig(ui, repo, *pats, **opts) | |||
|
973 | ||||
|
974 | if isenabled(repo): | |||
|
975 | # prefetch if it's configured | |||
|
976 | prefetchrevset = ui.config('remotefilelog', 'pullprefetch') | |||
|
977 | bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack') | |||
|
978 | bgprefetch = repo.ui.configbool('remotefilelog', 'backgroundprefetch') | |||
|
979 | ||||
|
980 | if prefetchrevset: | |||
|
981 | ui.status(_("prefetching file contents\n")) | |||
|
982 | revs = scmutil.revrange(repo, [prefetchrevset]) | |||
|
983 | base = repo['.'].rev() | |||
|
984 | if bgprefetch: | |||
|
985 | repo.backgroundprefetch(prefetchrevset, repack=bgrepack) | |||
|
986 | else: | |||
|
987 | repo.prefetch(revs, base=base) | |||
|
988 | if bgrepack: | |||
|
989 | repackmod.backgroundrepack(repo, incremental=True) | |||
|
990 | elif bgrepack: | |||
|
991 | repackmod.backgroundrepack(repo, incremental=True) | |||
|
992 | ||||
|
993 | return result | |||
|
994 | ||||
|
995 | def exchangepull(orig, repo, remote, *args, **kwargs): | |||
|
996 | # Hook into the callstream/getbundle to insert bundle capabilities | |||
|
997 | # during a pull. | |||
|
998 | def localgetbundle(orig, source, heads=None, common=None, bundlecaps=None, | |||
|
999 | **kwargs): | |||
|
1000 | if not bundlecaps: | |||
|
1001 | bundlecaps = set() | |||
|
1002 | bundlecaps.add(constants.BUNDLE2_CAPABLITY) | |||
|
1003 | return orig(source, heads=heads, common=common, bundlecaps=bundlecaps, | |||
|
1004 | **kwargs) | |||
|
1005 | ||||
|
1006 | if util.safehasattr(remote, '_callstream'): | |||
|
1007 | remote._localrepo = repo | |||
|
1008 | elif util.safehasattr(remote, 'getbundle'): | |||
|
1009 | extensions.wrapfunction(remote, 'getbundle', localgetbundle) | |||
|
1010 | ||||
|
1011 | return orig(repo, remote, *args, **kwargs) | |||
|
1012 | ||||
|
1013 | def _fileprefetchhook(repo, revs, match): | |||
|
1014 | if isenabled(repo): | |||
|
1015 | allfiles = [] | |||
|
1016 | for rev in revs: | |||
|
1017 | if rev == nodemod.wdirrev or rev is None: | |||
|
1018 | continue | |||
|
1019 | ctx = repo[rev] | |||
|
1020 | mf = ctx.manifest() | |||
|
1021 | sparsematch = repo.maybesparsematch(ctx.rev()) | |||
|
1022 | for path in ctx.walk(match): | |||
|
1023 | if path.endswith('/'): | |||
|
1024 | # Tree manifest that's being excluded as part of narrow | |||
|
1025 | continue | |||
|
1026 | if (not sparsematch or sparsematch(path)) and path in mf: | |||
|
1027 | allfiles.append((path, hex(mf[path]))) | |||
|
1028 | repo.fileservice.prefetch(allfiles) | |||
|
1029 | ||||
|
1030 | @command('debugremotefilelog', [ | |||
|
1031 | ('d', 'decompress', None, _('decompress the filelog first')), | |||
|
1032 | ], _('hg debugremotefilelog <path>'), norepo=True) | |||
|
1033 | def debugremotefilelog(ui, path, **opts): | |||
|
1034 | return debugcommands.debugremotefilelog(ui, path, **opts) | |||
|
1035 | ||||
|
1036 | @command('verifyremotefilelog', [ | |||
|
1037 | ('d', 'decompress', None, _('decompress the filelogs first')), | |||
|
1038 | ], _('hg verifyremotefilelog <directory>'), norepo=True) | |||
|
1039 | def verifyremotefilelog(ui, path, **opts): | |||
|
1040 | return debugcommands.verifyremotefilelog(ui, path, **opts) | |||
|
1041 | ||||
|
1042 | @command('debugdatapack', [ | |||
|
1043 | ('', 'long', None, _('print the long hashes')), | |||
|
1044 | ('', 'node', '', _('dump the contents of node'), 'NODE'), | |||
|
1045 | ], _('hg debugdatapack <paths>'), norepo=True) | |||
|
1046 | def debugdatapack(ui, *paths, **opts): | |||
|
1047 | return debugcommands.debugdatapack(ui, *paths, **opts) | |||
|
1048 | ||||
|
1049 | @command('debughistorypack', [ | |||
|
1050 | ], _('hg debughistorypack <path>'), norepo=True) | |||
|
1051 | def debughistorypack(ui, path, **opts): | |||
|
1052 | return debugcommands.debughistorypack(ui, path) | |||
|
1053 | ||||
|
1054 | @command('debugkeepset', [ | |||
|
1055 | ], _('hg debugkeepset')) | |||
|
1056 | def debugkeepset(ui, repo, **opts): | |||
|
1057 | # The command is used to measure keepset computation time | |||
|
1058 | def keyfn(fname, fnode): | |||
|
1059 | return fileserverclient.getcachekey(repo.name, fname, hex(fnode)) | |||
|
1060 | repackmod.keepset(repo, keyfn) | |||
|
1061 | return | |||
|
1062 | ||||
|
1063 | @command('debugwaitonrepack', [ | |||
|
1064 | ], _('hg debugwaitonrepack')) | |||
|
1065 | def debugwaitonrepack(ui, repo, **opts): | |||
|
1066 | return debugcommands.debugwaitonrepack(repo) | |||
|
1067 | ||||
|
1068 | @command('debugwaitonprefetch', [ | |||
|
1069 | ], _('hg debugwaitonprefetch')) | |||
|
1070 | def debugwaitonprefetch(ui, repo, **opts): | |||
|
1071 | return debugcommands.debugwaitonprefetch(repo) | |||
|
1072 | ||||
|
1073 | def resolveprefetchopts(ui, opts): | |||
|
1074 | if not opts.get('rev'): | |||
|
1075 | revset = ['.', 'draft()'] | |||
|
1076 | ||||
|
1077 | prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None) | |||
|
1078 | if prefetchrevset: | |||
|
1079 | revset.append('(%s)' % prefetchrevset) | |||
|
1080 | bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None) | |||
|
1081 | if bgprefetchrevs: | |||
|
1082 | revset.append('(%s)' % bgprefetchrevs) | |||
|
1083 | revset = '+'.join(revset) | |||
|
1084 | ||||
|
1085 | # update a revset with a date limit | |||
|
1086 | revset = revdatelimit(ui, revset) | |||
|
1087 | ||||
|
1088 | opts['rev'] = [revset] | |||
|
1089 | ||||
|
1090 | if not opts.get('base'): | |||
|
1091 | opts['base'] = None | |||
|
1092 | ||||
|
1093 | return opts | |||
|
1094 | ||||
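With no --rev given, no pullprefetch and no bgprefetchrevs configured, and the default prefetchdays of 14, the resolved options end up as (illustrative):

    opts['rev'] == ['(.+draft()) & date(-14)']
    opts['base'] == None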
|
1095 | @command('prefetch', [ | |||
|
1096 | ('r', 'rev', [], _('prefetch the specified revisions'), _('REV')), | |||
|
1097 | ('', 'repack', False, _('run repack after prefetch')), | |||
|
1098 | ('b', 'base', '', _("rev that is assumed to already be local")), | |||
|
1099 | ] + commands.walkopts, _('hg prefetch [OPTIONS] [FILE...]')) | |||
|
1100 | def prefetch(ui, repo, *pats, **opts): | |||
|
1101 | """prefetch file revisions from the server | |||
|
1102 | ||||
|
1103 | Prefetches file revisions for the specified revs and stores them in the | |||
|
1104 | local remotefilelog cache. If no rev is specified, the default rev is | |||
|
1105 | used, which is the union of dot, draft, pullprefetch and bgprefetchrevs. | |||
|
1106 | File names or patterns can be used to limit which files are downloaded. | |||
|
1107 | ||||
|
1108 | Return 0 on success. | |||
|
1109 | """ | |||
|
1110 | opts = pycompat.byteskwargs(opts) | |||
|
1111 | if not isenabled(repo): | |||
|
1112 | raise error.Abort(_("repo is not shallow")) | |||
|
1113 | ||||
|
1114 | opts = resolveprefetchopts(ui, opts) | |||
|
1115 | revs = scmutil.revrange(repo, opts.get('rev')) | |||
|
1116 | repo.prefetch(revs, opts.get('base'), pats, opts) | |||
|
1117 | ||||
|
1118 | # Run repack in background | |||
|
1119 | if opts.get('repack'): | |||
|
1120 | repackmod.backgroundrepack(repo, incremental=True) | |||
|
1121 | ||||
|
1122 | @command('repack', [ | |||
|
1123 | ('', 'background', None, _('run in a background process'), None), | |||
|
1124 | ('', 'incremental', None, _('do an incremental repack'), None), | |||
|
1125 | ('', 'packsonly', None, _('only repack packs (skip loose objects)'), None), | |||
|
1126 | ], _('hg repack [OPTIONS]')) | |||
|
1127 | def repack_(ui, repo, *pats, **opts): | |||
|
1128 | if opts.get(r'background'): | |||
|
1129 | repackmod.backgroundrepack(repo, incremental=opts.get(r'incremental'), | |||
|
1130 | packsonly=opts.get(r'packsonly', False)) | |||
|
1131 | return | |||
|
1132 | ||||
|
1133 | options = {'packsonly': opts.get(r'packsonly')} | |||
|
1134 | ||||
|
1135 | try: | |||
|
1136 | if opts.get(r'incremental'): | |||
|
1137 | repackmod.incrementalrepack(repo, options=options) | |||
|
1138 | else: | |||
|
1139 | repackmod.fullrepack(repo, options=options) | |||
|
1140 | except repackmod.RepackAlreadyRunning as ex: | |||
|
1141 | # Don't propagate the exception if the repack is already in | |||
|
1142 | # progress, since we want the command to exit 0. | |||
|
1143 | repo.ui.warn('%s\n' % ex) |
This diff has been collapsed as it changes many lines (541 lines changed).
@@ -0,0 +1,541 b'' | |||||
|
1 | from __future__ import absolute_import | |||
|
2 | ||||
|
3 | import collections | |||
|
4 | import errno | |||
|
5 | import hashlib | |||
|
6 | import mmap | |||
|
7 | import os | |||
|
8 | import struct | |||
|
9 | import time | |||
|
10 | ||||
|
11 | from mercurial.i18n import _ | |||
|
12 | from mercurial import ( | |||
|
13 | node as nodemod, | |||
|
14 | policy, | |||
|
15 | pycompat, | |||
|
16 | util, | |||
|
17 | vfs as vfsmod, | |||
|
18 | ) | |||
|
19 | from . import shallowutil | |||
|
20 | ||||
|
21 | osutil = policy.importmod(r'osutil') | |||
|
22 | ||||
|
23 | # The pack version supported by this implementation. This will need to be | |||
|
24 | # rev'd whenever the byte format changes. Ex: changing the fanout prefix, | |||
|
25 | # changing any of the int sizes, changing the delta algorithm, etc. | |||
|
26 | PACKVERSIONSIZE = 1 | |||
|
27 | INDEXVERSIONSIZE = 2 | |||
|
28 | ||||
|
29 | FANOUTSTART = INDEXVERSIONSIZE | |||
|
30 | ||||
|
31 | # Constant that indicates a fanout table entry hasn't been filled in. (This does | |||
|
32 | # not get serialized) | |||
|
33 | EMPTYFANOUT = -1 | |||
|
34 | ||||
|
35 | # The fanout prefix is the number of bytes that can be addressed by the fanout | |||
|
36 | # table. Example: a fanout prefix of 1 means we use the first byte of a hash to | |||
|
37 | # look in the fanout table (which will be 2^8 entries long). | |||
|
38 | SMALLFANOUTPREFIX = 1 | |||
|
39 | LARGEFANOUTPREFIX = 2 | |||
|
40 | ||||
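A worked example of the two prefix sizes (illustrative, not part of the diff), for a node whose first two bytes are 0x1a 0x85:

    >>> import struct
    >>> node = b'\x1a\x85' + b'\x00' * 18
    >>> struct.unpack('!B', node[:1])[0]   # SMALLFANOUTPREFIX -> 2^8 table
    26
    >>> struct.unpack('!H', node[:2])[0]   # LARGEFANOUTPREFIX -> 2^16 table
    6789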
|
41 | # The number of entries in the index at which point we switch to a large fanout. | |||
|
42 | # It is chosen to balance the linear scan through a sparse fanout, with the | |||
|
43 | # size of the bisect in actual index. | |||
|
44 | # 2^16 / 8 was chosen because it trades off (1 step fanout scan + 5 step | |||
|
45 | # bisect) with (8 step fanout scan + 1 step bisect) | |||
|
46 | # 5 step bisect = log(2^16 / 8 / 255) # fanout | |||
|
47 | # 8 step fanout scan = 2^16 / (2^16 / 8) # fanout space divided by entries | |||
|
48 | SMALLFANOUTCUTOFF = 2**16 // 8 | |||
|
49 | ||||
|
50 | # The amount of time to wait between checking for new packs. This prevents an | |||
|
51 | # exception when data is moved to a new pack after the process has already | |||
|
52 | # loaded the pack list. | |||
|
53 | REFRESHRATE = 0.1 | |||
|
54 | ||||
|
55 | if pycompat.isposix and not pycompat.ispy3: | |||
|
56 | # With glibc 2.7+ the 'e' flag uses O_CLOEXEC when opening. | |||
|
57 | # The 'e' flag will be ignored on older versions of glibc. | |||
|
58 | # Python 3 can't handle the 'e' flag. | |||
|
59 | PACKOPENMODE = 'rbe' | |||
|
60 | else: | |||
|
61 | PACKOPENMODE = 'rb' | |||
|
62 | ||||
|
63 | class _cachebackedpacks(object): | |||
|
64 | def __init__(self, packs, cachesize): | |||
|
65 | self._packs = set(packs) | |||
|
66 | self._lrucache = util.lrucachedict(cachesize) | |||
|
67 | self._lastpack = None | |||
|
68 | ||||
|
69 | # Avoid cold start of the cache by populating the most recent packs | |||
|
70 | # in the cache. | |||
|
71 | for i in reversed(range(min(cachesize, len(packs)))): | |||
|
72 | self._movetofront(packs[i]) | |||
|
73 | ||||
|
74 | def _movetofront(self, pack): | |||
|
75 | # This effectively makes pack the first entry in the cache. | |||
|
76 | self._lrucache[pack] = True | |||
|
77 | ||||
|
78 | def _registerlastpackusage(self): | |||
|
79 | if self._lastpack is not None: | |||
|
80 | self._movetofront(self._lastpack) | |||
|
81 | self._lastpack = None | |||
|
82 | ||||
|
83 | def add(self, pack): | |||
|
84 | self._registerlastpackusage() | |||
|
85 | ||||
|
86 | # This method will mostly be called when packs are not in cache. | |||
|
87 | # Therefore, add the pack to the cache. | |||
|
88 | self._movetofront(pack) | |||
|
89 | self._packs.add(pack) | |||
|
90 | ||||
|
91 | def __iter__(self): | |||
|
92 | self._registerlastpackusage() | |||
|
93 | ||||
|
94 | # Cache iteration is based on LRU. | |||
|
95 | for pack in self._lrucache: | |||
|
96 | self._lastpack = pack | |||
|
97 | yield pack | |||
|
98 | ||||
|
99 | cachedpacks = set(pack for pack in self._lrucache) | |||
|
100 | # Yield the packs that are not in the cache. | |||
|
101 | for pack in self._packs - cachedpacks: | |||
|
102 | self._lastpack = pack | |||
|
103 | yield pack | |||
|
104 | ||||
|
105 | # Data not found in any pack. | |||
|
106 | self._lastpack = None | |||
|
107 | ||||
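The iteration contract above, sketched with toy values (hypothetical lookup predicate, not part of the diff): packs held in the LRU cache are tried first, then the remaining known packs; whichever pack was in use when iteration stopped is promoted on the next access.

    packs = _cachebackedpacks(['p1', 'p2', 'p3'], cachesize=2)
    for pack in packs:      # cached packs first, then the rest
        if lookup(pack):    # hypothetical hit test
            break           # pack becomes self._lastpack and is moved
                            # to the front on the next add()/iteration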
|
108 | class basepackstore(object): | |||
|
109 | # Default cache size limit for the pack files. | |||
|
110 | DEFAULTCACHESIZE = 100 | |||
|
111 | ||||
|
112 | def __init__(self, ui, path): | |||
|
113 | self.ui = ui | |||
|
114 | self.path = path | |||
|
115 | ||||
|
116 | # lastrefresh is 0 so we'll immediately check for new packs on the first | |||
|
117 | # failure. | |||
|
118 | self.lastrefresh = 0 | |||
|
119 | ||||
|
120 | packs = [] | |||
|
121 | for filepath, __, __ in self._getavailablepackfilessorted(): | |||
|
122 | try: | |||
|
123 | pack = self.getpack(filepath) | |||
|
124 | except Exception as ex: | |||
|
125 | # An exception may be thrown if the pack file is corrupted | |||
|
126 | # somehow. Log a warning but keep going in this case, just | |||
|
127 | # skipping this pack file. | |||
|
128 | # | |||
|
129 | # If this is an ENOENT error then don't even bother logging. | |||
|
130 | # Someone could have removed the file since we retrieved the | |||
|
131 | # list of paths. | |||
|
132 | if getattr(ex, 'errno', None) != errno.ENOENT: | |||
|
133 | ui.warn(_('unable to load pack %s: %s\n') % (filepath, ex)) | |||
|
134 | continue | |||
|
135 | packs.append(pack) | |||
|
136 | ||||
|
137 | self.packs = _cachebackedpacks(packs, self.DEFAULTCACHESIZE) | |||
|
138 | ||||
|
139 | def _getavailablepackfiles(self): | |||
|
140 | """For each pack file (a index/data file combo), yields: | |||
|
141 | (full path without extension, mtime, size) | |||
|
142 | ||||
|
143 | mtime will be the mtime of the index/data file (whichever is newer) | |||
|
144 | size is the combined size of index/data file | |||
|
145 | """ | |||
|
146 | indexsuffixlen = len(self.INDEXSUFFIX) | |||
|
147 | packsuffixlen = len(self.PACKSUFFIX) | |||
|
148 | ||||
|
149 | ids = set() | |||
|
150 | sizes = collections.defaultdict(lambda: 0) | |||
|
151 | mtimes = collections.defaultdict(lambda: []) | |||
|
152 | try: | |||
|
153 | for filename, type, stat in osutil.listdir(self.path, stat=True): | |||
|
154 | id = None | |||
|
155 | if filename[-indexsuffixlen:] == self.INDEXSUFFIX: | |||
|
156 | id = filename[:-indexsuffixlen] | |||
|
157 | elif filename[-packsuffixlen:] == self.PACKSUFFIX: | |||
|
158 | id = filename[:-packsuffixlen] | |||
|
159 | ||||
|
160 | # Since we expect to have two files corresponding to each ID | |||
|
161 | # (the index file and the pack file), we can yield once we see | |||
|
162 | # it twice. | |||
|
163 | if id: | |||
|
164 | sizes[id] += stat.st_size # Sum both files' sizes together | |||
|
165 | mtimes[id].append(stat.st_mtime) | |||
|
166 | if id in ids: | |||
|
167 | yield (os.path.join(self.path, id), max(mtimes[id]), | |||
|
168 | sizes[id]) | |||
|
169 | else: | |||
|
170 | ids.add(id) | |||
|
171 | except OSError as ex: | |||
|
172 | if ex.errno != errno.ENOENT: | |||
|
173 | raise | |||
|
174 | ||||
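Why yielding on the second sighting works: every complete pack is an index/data file pair sharing one id (INDEXSUFFIX/PACKSUFFIX, e.g. a hypothetical '.histidx'/'.histpack'), so an id is complete exactly when it has been seen twice. The same idea in miniature:

    seen = set()
    for name in ('abc.histidx', 'abc.histpack', 'def.histidx'):
        stem = name.rsplit('.', 1)[0]
        if stem in seen:
            print(stem, 'has both files')   # prints only for 'abc'
        seen.add(stem)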
|
175 | def _getavailablepackfilessorted(self): | |||
|
176 | """Like `_getavailablepackfiles`, but also sorts the files by mtime, | |||
|
177 | yielding newest files first. | |||
|
178 | ||||
|
179 | This is desirable, since it is more likely newer packfiles have more | |||
|
180 | desirable data. | |||
|
181 | """ | |||
|
182 | files = [] | |||
|
183 | for path, mtime, size in self._getavailablepackfiles(): | |||
|
184 | files.append((mtime, size, path)) | |||
|
185 | files = sorted(files, reverse=True) | |||
|
186 | for mtime, size, path in files: | |||
|
187 | yield path, mtime, size | |||
|
188 | ||||
|
189 | def gettotalsizeandcount(self): | |||
|
190 | """Returns the total disk size (in bytes) of all the pack files in | |||
|
191 | this store, and the count of pack files. | |||
|
192 | ||||
|
193 | (This might be smaller than the total size of the ``self.path`` | |||
|
194 | directory, since this only considers fully-written pack files, and not | |||
|
195 | temporary files or other detritus on the directory.) | |||
|
196 | """ | |||
|
197 | totalsize = 0 | |||
|
198 | count = 0 | |||
|
199 | for __, __, size in self._getavailablepackfiles(): | |||
|
200 | totalsize += size | |||
|
201 | count += 1 | |||
|
202 | return totalsize, count | |||
|
203 | ||||
|
204 | def getmetrics(self): | |||
|
205 | """Returns metrics on the state of this store.""" | |||
|
206 | size, count = self.gettotalsizeandcount() | |||
|
207 | return { | |||
|
208 | 'numpacks': count, | |||
|
209 | 'totalpacksize': size, | |||
|
210 | } | |||
|
211 | ||||
|
212 | def getpack(self, path): | |||
|
213 | raise NotImplementedError() | |||
|
214 | ||||
|
215 | def getmissing(self, keys): | |||
|
216 | missing = keys | |||
|
217 | for pack in self.packs: | |||
|
218 | missing = pack.getmissing(missing) | |||
|
219 | ||||
|
220 | # Ensures better performance of the cache by keeping the most | |||
|
221 | # recently accessed pack at the beginning in subsequent iterations. | |||
|
222 | if not missing: | |||
|
223 | return missing | |||
|
224 | ||||
|
225 | # some keys are still missing; check disk for new packs | |||
|
226 | for pack in self.refresh(): | |||
|
227 | missing = pack.getmissing(missing) | |||
|
228 | ||||
|
229 | return missing | |||
|
230 | ||||
|
231 | def markledger(self, ledger, options=None): | |||
|
232 | for pack in self.packs: | |||
|
233 | pack.markledger(ledger) | |||
|
234 | ||||
|
235 | def markforrefresh(self): | |||
|
236 | """Tells the store that there may be new pack files, so the next time it | |||
|
237 | has a lookup miss it should check for new files.""" | |||
|
238 | self.lastrefresh = 0 | |||
|
239 | ||||
|
240 | def refresh(self): | |||
|
241 | """Checks for any new packs on disk, adds them to the main pack list, | |||
|
242 | and returns a list of just the new packs.""" | |||
|
243 | now = time.time() | |||
|
244 | ||||
|
245 | # If we experience a lot of misses (like in the case of getmissing() on | |||
|
246 | # new objects), let's only actually check disk for new stuff every once | |||
|
247 | # in a while. Generally this code path should only ever matter when a | |||
|
249 | # repack is going on in the background, and it should be pretty rare | |||
|
250 | # for that to happen twice in quick succession. | |||
|
250 | newpacks = [] | |||
|
251 | if now > self.lastrefresh + REFRESHRATE: | |||
|
252 | self.lastrefresh = now | |||
|
253 | previous = set(p.path for p in self.packs) | |||
|
254 | for filepath, __, __ in self._getavailablepackfilessorted(): | |||
|
255 | if filepath not in previous: | |||
|
256 | newpack = self.getpack(filepath) | |||
|
257 | newpacks.append(newpack) | |||
|
258 | self.packs.add(newpack) | |||
|
259 | ||||
|
260 | return newpacks | |||
|
261 | ||||
|
262 | class versionmixin(object): | |||
|
263 | # Mix-in for classes with multiple supported versions | |||
|
264 | VERSION = None | |||
|
265 | SUPPORTED_VERSIONS = [2] | |||
|
266 | ||||
|
267 | def _checkversion(self, version): | |||
|
268 | if version in self.SUPPORTED_VERSIONS: | |||
|
269 | if self.VERSION is None: | |||
|
270 | # only affect this instance | |||
|
271 | self.VERSION = version | |||
|
272 | elif self.VERSION != version: | |||
|
273 | raise RuntimeError('inconsistent version: %s' % version) | |||
|
274 | else: | |||
|
275 | raise RuntimeError('unsupported version: %s' % version) | |||
|
276 | ||||
|
277 | class basepack(versionmixin): | |||
|
278 | # The maximum amount we should read via mmap before remapping so the old | |||
|
279 | # pages can be released (100MB) | |||
|
280 | MAXPAGEDIN = 100 * 1024**2 | |||
|
281 | ||||
|
282 | SUPPORTED_VERSIONS = [2] | |||
|
283 | ||||
|
284 | def __init__(self, path): | |||
|
285 | self.path = path | |||
|
286 | self.packpath = path + self.PACKSUFFIX | |||
|
287 | self.indexpath = path + self.INDEXSUFFIX | |||
|
288 | ||||
|
289 | self.indexsize = os.stat(self.indexpath).st_size | |||
|
290 | self.datasize = os.stat(self.packpath).st_size | |||
|
291 | ||||
|
292 | self._index = None | |||
|
293 | self._data = None | |||
|
294 | self.freememory() # initialize the mmap | |||
|
295 | ||||
|
296 | version = struct.unpack('!B', self._data[:PACKVERSIONSIZE])[0] | |||
|
297 | self._checkversion(version) | |||
|
298 | ||||
|
299 | version, config = struct.unpack('!BB', self._index[:INDEXVERSIONSIZE]) | |||
|
300 | self._checkversion(version) | |||
|
301 | ||||
|
302 | if 0b10000000 & config: | |||
|
303 | self.params = indexparams(LARGEFANOUTPREFIX, version) | |||
|
304 | else: | |||
|
305 | self.params = indexparams(SMALLFANOUTPREFIX, version) | |||
|
306 | ||||
|
307 | @util.propertycache | |||
|
308 | def _fanouttable(self): | |||
|
309 | params = self.params | |||
|
310 | rawfanout = self._index[FANOUTSTART:FANOUTSTART + params.fanoutsize] | |||
|
311 | fanouttable = [] | |||
|
312 | for i in pycompat.xrange(0, params.fanoutcount): | |||
|
313 | loc = i * 4 | |||
|
314 | fanoutentry = struct.unpack('!I', rawfanout[loc:loc + 4])[0] | |||
|
315 | fanouttable.append(fanoutentry) | |||
|
316 | return fanouttable | |||
|
317 | ||||
|
318 | @util.propertycache | |||
|
319 | def _indexend(self): | |||
|
320 | nodecount = struct.unpack_from('!Q', self._index, | |||
|
321 | self.params.indexstart - 8)[0] | |||
|
322 | return self.params.indexstart + nodecount * self.INDEXENTRYLENGTH | |||
|
323 | ||||
|
324 | def freememory(self): | |||
|
325 | """Unmap and remap the memory to free it up after known expensive | |||
|
326 | operations. Return True if self._data and self._index were reloaded. | |||
|
327 | """ | |||
|
328 | if self._index: | |||
|
329 | if self._pagedin < self.MAXPAGEDIN: | |||
|
330 | return False | |||
|
331 | ||||
|
332 | self._index.close() | |||
|
333 | self._data.close() | |||
|
334 | ||||
|
335 | # TODO: use an opener/vfs to access these paths | |||
|
336 | with open(self.indexpath, PACKOPENMODE) as indexfp: | |||
|
337 | # memory-map the file, size 0 means whole file | |||
|
338 | self._index = mmap.mmap(indexfp.fileno(), 0, | |||
|
339 | access=mmap.ACCESS_READ) | |||
|
340 | with open(self.packpath, PACKOPENMODE) as datafp: | |||
|
341 | self._data = mmap.mmap(datafp.fileno(), 0, access=mmap.ACCESS_READ) | |||
|
342 | ||||
|
343 | self._pagedin = 0 | |||
|
344 | return True | |||
|
345 | ||||
|
346 | def getmissing(self, keys): | |||
|
347 | raise NotImplementedError() | |||
|
348 | ||||
|
349 | def markledger(self, ledger, options=None): | |||
|
350 | raise NotImplementedError() | |||
|
351 | ||||
|
352 | def cleanup(self, ledger): | |||
|
353 | raise NotImplementedError() | |||
|
354 | ||||
|
355 | def __iter__(self): | |||
|
356 | raise NotImplementedError() | |||
|
357 | ||||
|
358 | def iterentries(self): | |||
|
359 | raise NotImplementedError() | |||
|
360 | ||||
|
361 | class mutablebasepack(versionmixin): | |||
|
362 | ||||
|
363 | def __init__(self, ui, packdir, version=2): | |||
|
364 | self._checkversion(version) | |||
|
365 | # TODO(augie): make this configurable | |||
|
366 | self._compressor = 'GZ' | |||
|
367 | opener = vfsmod.vfs(packdir) | |||
|
368 | opener.createmode = 0o444 | |||
|
369 | self.opener = opener | |||
|
370 | ||||
|
371 | self.entries = {} | |||
|
372 | ||||
|
373 | shallowutil.mkstickygroupdir(ui, packdir) | |||
|
374 | self.packfp, self.packpath = opener.mkstemp( | |||
|
375 | suffix=self.PACKSUFFIX + '-tmp') | |||
|
376 | self.idxfp, self.idxpath = opener.mkstemp( | |||
|
377 | suffix=self.INDEXSUFFIX + '-tmp') | |||
|
378 | self.packfp = os.fdopen(self.packfp, r'wb+') | |||
|
379 | self.idxfp = os.fdopen(self.idxfp, r'wb+') | |||
|
380 | self.sha = hashlib.sha1() | |||
|
381 | self._closed = False | |||
|
382 | ||||
|
383 | # The opener provides no way of doing permission fixup on files created | |||
|
384 | # via mkstemp, so we must fix it ourselves. We can probably fix this | |||
|
385 | # upstream in vfs.mkstemp so we don't need to use the private method. | |||
|
386 | opener._fixfilemode(opener.join(self.packpath)) | |||
|
387 | opener._fixfilemode(opener.join(self.idxpath)) | |||
|
388 | ||||
|
389 | # Write header | |||
|
390 | # TODO: make it extensible (ex: allow specifying compression algorithm, | |||
|
391 | # a flexible key/value header, delta algorithm, fanout size, etc) | |||
|
392 | versionbuf = struct.pack('!B', self.VERSION) # unsigned 1 byte int | |||
|
393 | self.writeraw(versionbuf) | |||
|
394 | ||||
|
395 | def __enter__(self): | |||
|
396 | return self | |||
|
397 | ||||
|
398 | def __exit__(self, exc_type, exc_value, traceback): | |||
|
399 | if exc_type is None: | |||
|
400 | self.close() | |||
|
401 | else: | |||
|
402 | self.abort() | |||
|
403 | ||||
|
404 | def abort(self): | |||
|
405 | # Unclean exit | |||
|
406 | self._cleantemppacks() | |||
|
407 | ||||
|
408 | def writeraw(self, data): | |||
|
409 | self.packfp.write(data) | |||
|
410 | self.sha.update(data) | |||
|
411 | ||||
|
412 | def close(self, ledger=None): | |||
|
413 | if self._closed: | |||
|
414 | return | |||
|
415 | ||||
|
416 | try: | |||
|
417 | sha = nodemod.hex(self.sha.digest()) | |||
|
418 | self.packfp.close() | |||
|
419 | self.writeindex() | |||
|
420 | ||||
|
421 | if len(self.entries) == 0: | |||
|
422 | # Empty pack | |||
|
423 | self._cleantemppacks() | |||
|
424 | self._closed = True | |||
|
425 | return None | |||
|
426 | ||||
|
427 | self.opener.rename(self.packpath, sha + self.PACKSUFFIX) | |||
|
428 | try: | |||
|
429 | self.opener.rename(self.idxpath, sha + self.INDEXSUFFIX) | |||
|
430 | except Exception as ex: | |||
|
431 | try: | |||
|
432 | self.opener.unlink(sha + self.PACKSUFFIX) | |||
|
433 | except Exception: | |||
|
434 | pass | |||
|
435 | # Throw exception 'ex' explicitly since a normal 'raise' would | |||
|
436 | # potentially throw an exception from the unlink cleanup. | |||
|
437 | raise ex | |||
|
438 | except Exception: | |||
|
439 | # Clean up temp packs in all exception cases | |||
|
440 | self._cleantemppacks() | |||
|
441 | raise | |||
|
442 | ||||
|
443 | self._closed = True | |||
|
444 | result = self.opener.join(sha) | |||
|
445 | if ledger: | |||
|
446 | ledger.addcreated(result) | |||
|
447 | return result | |||
|
448 | ||||
|
449 | def _cleantemppacks(self): | |||
|
450 | try: | |||
|
451 | self.opener.unlink(self.packpath) | |||
|
452 | except Exception: | |||
|
453 | pass | |||
|
454 | try: | |||
|
455 | self.opener.unlink(self.idxpath) | |||
|
456 | except Exception: | |||
|
457 | pass | |||
|
458 | ||||
|
459 | def writeindex(self): | |||
|
460 | rawindex = '' | |||
|
461 | ||||
|
462 | largefanout = len(self.entries) > SMALLFANOUTCUTOFF | |||
|
463 | if largefanout: | |||
|
464 | params = indexparams(LARGEFANOUTPREFIX, self.VERSION) | |||
|
465 | else: | |||
|
466 | params = indexparams(SMALLFANOUTPREFIX, self.VERSION) | |||
|
467 | ||||
|
468 | fanouttable = [EMPTYFANOUT] * params.fanoutcount | |||
|
469 | ||||
|
470 | # Precompute the location of each entry | |||
|
471 | locations = {} | |||
|
472 | count = 0 | |||
|
473 | for node in sorted(self.entries): | |||
|
474 | location = count * self.INDEXENTRYLENGTH | |||
|
475 | locations[node] = location | |||
|
476 | count += 1 | |||
|
477 | ||||
|
478 | # Must use [0] on the unpack result since it's always a tuple. | |||
|
479 | fanoutkey = struct.unpack(params.fanoutstruct, | |||
|
480 | node[:params.fanoutprefix])[0] | |||
|
481 | if fanouttable[fanoutkey] == EMPTYFANOUT: | |||
|
482 | fanouttable[fanoutkey] = location | |||
|
483 | ||||
|
484 | rawfanouttable = '' | |||
|
485 | last = 0 | |||
|
486 | for offset in fanouttable: | |||
|
487 | offset = offset if offset != EMPTYFANOUT else last | |||
|
488 | last = offset | |||
|
489 | rawfanouttable += struct.pack('!I', offset) | |||
|
490 | ||||
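Empty slots inherit the previous offset, so every fanout entry remains a valid lower bound for the bisect: offsets [0, EMPTYFANOUT, 40, EMPTYFANOUT] serialize as [0, 0, 40, 40].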
|
491 | rawentrieslength = struct.pack('!Q', len(self.entries)) | |||
|
492 | ||||
|
493 | # The index offset is its location in the file: after the 2 byte | |||
|
494 | # header and the fanouttable. | |||
|
495 | rawindex = self.createindex(locations, 2 + len(rawfanouttable)) | |||
|
496 | ||||
|
497 | self._writeheader(params) | |||
|
498 | self.idxfp.write(rawfanouttable) | |||
|
499 | self.idxfp.write(rawentrieslength) | |||
|
500 | self.idxfp.write(rawindex) | |||
|
501 | self.idxfp.close() | |||
|
502 | ||||
|
503 | def createindex(self, nodelocations): | |||
|
504 | raise NotImplementedError() | |||
|
505 | ||||
|
506 | def _writeheader(self, indexparams): | |||
|
507 | # Index header | |||
|
508 | # <version: 1 byte> | |||
|
509 | # <large fanout: 1 bit> # 1 means 2^16, 0 means 2^8 | |||
|
510 | # <unused: 7 bit> # future use (compression, delta format, etc) | |||
|
511 | config = 0 | |||
|
512 | if indexparams.fanoutprefix == LARGEFANOUTPREFIX: | |||
|
513 | config = 0b10000000 | |||
|
514 | self.idxfp.write(struct.pack('!BB', self.VERSION, config)) | |||
|
515 | ||||
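Round-tripping the two-byte index header described above (illustrative snippet, mirroring the check done when a pack is opened):

    >>> import struct
    >>> hdr = struct.pack('!BB', 2, 0b10000000)   # version 2, large fanout
    >>> version, config = struct.unpack('!BB', hdr)
    >>> (version, bool(config & 0b10000000))
    (2, True)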
|
516 | class indexparams(object): | |||
|
517 | __slots__ = (r'fanoutprefix', r'fanoutstruct', r'fanoutcount', | |||
|
518 | r'fanoutsize', r'indexstart') | |||
|
519 | ||||
|
520 | def __init__(self, prefixsize, version): | |||
|
521 | self.fanoutprefix = prefixsize | |||
|
522 | ||||
|
523 | # The struct pack format for fanout table location (i.e. the format that | |||
|
524 | # converts the node prefix into an integer location in the fanout | |||
|
525 | # table). | |||
|
526 | if prefixsize == SMALLFANOUTPREFIX: | |||
|
527 | self.fanoutstruct = '!B' | |||
|
528 | elif prefixsize == LARGEFANOUTPREFIX: | |||
|
529 | self.fanoutstruct = '!H' | |||
|
530 | else: | |||
|
531 | raise ValueError("invalid fanout prefix size: %s" % prefixsize) | |||
|
532 | ||||
|
533 | # The number of fanout table entries | |||
|
534 | self.fanoutcount = 2**(prefixsize * 8) | |||
|
535 | ||||
|
536 | # The total bytes used by the fanout table | |||
|
537 | self.fanoutsize = self.fanoutcount * 4 | |||
|
538 | ||||
|
539 | self.indexstart = FANOUTSTART + self.fanoutsize | |||
|
540 | # Skip the index length | |||
|
541 | self.indexstart += 8 |
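Plugging the two prefix sizes into the formulas above gives the concrete layout constants:

    prefixsize=1: fanoutcount=2^8=256,    fanoutsize=1024,   indexstart=2+1024+8=1034
    prefixsize=2: fanoutcount=2^16=65536, fanoutsize=262144, indexstart=2+262144+8=262154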
@@ -0,0 +1,425 b'' | |||||
|
1 | from __future__ import absolute_import | |||
|
2 | ||||
|
3 | import errno | |||
|
4 | import hashlib | |||
|
5 | import os | |||
|
6 | import shutil | |||
|
7 | import stat | |||
|
8 | import time | |||
|
9 | ||||
|
10 | from mercurial.i18n import _ | |||
|
11 | from mercurial.node import bin, hex | |||
|
12 | from mercurial import ( | |||
|
13 | error, | |||
|
14 | pycompat, | |||
|
15 | util, | |||
|
16 | ) | |||
|
17 | from . import ( | |||
|
18 | constants, | |||
|
19 | shallowutil, | |||
|
20 | ) | |||
|
21 | ||||
|
22 | class basestore(object): | |||
|
23 | def __init__(self, repo, path, reponame, shared=False): | |||
|
24 | """Creates a remotefilelog store object for the given repo name. | |||
|
25 | ||||
|
26 | `path` - The file path where this store keeps its data | |||
|
27 | `reponame` - The name of the repo. This is used to partition data from | |||
|
28 | many repos. | |||
|
29 | `shared` - True if this store is a shared cache of data from the central | |||
|
30 | server, for many repos on this machine. False means this store is for | |||
|
31 | the local data for one repo. | |||
|
32 | """ | |||
|
33 | self.repo = repo | |||
|
34 | self.ui = repo.ui | |||
|
35 | self._path = path | |||
|
36 | self._reponame = reponame | |||
|
37 | self._shared = shared | |||
|
38 | self._uid = os.getuid() if not pycompat.iswindows else None | |||
|
39 | ||||
|
40 | self._validatecachelog = self.ui.config("remotefilelog", | |||
|
41 | "validatecachelog") | |||
|
42 | self._validatecache = self.ui.config("remotefilelog", "validatecache", | |||
|
43 | 'on') | |||
|
44 | if self._validatecache not in ('on', 'strict', 'off'): | |||
|
45 | self._validatecache = 'on' | |||
|
46 | if self._validatecache == 'off': | |||
|
47 | self._validatecache = False | |||
|
48 | ||||
|
49 | if shared: | |||
|
50 | shallowutil.mkstickygroupdir(self.ui, path) | |||
|
51 | ||||
|
52 | def getmissing(self, keys): | |||
|
53 | missing = [] | |||
|
54 | for name, node in keys: | |||
|
55 | filepath = self._getfilepath(name, node) | |||
|
56 | exists = os.path.exists(filepath) | |||
|
57 | if (exists and self._validatecache == 'strict' and | |||
|
58 | not self._validatekey(filepath, 'contains')): | |||
|
59 | exists = False | |||
|
60 | if not exists: | |||
|
61 | missing.append((name, node)) | |||
|
62 | ||||
|
63 | return missing | |||
|
64 | ||||
|
65 | # BELOW THIS ARE IMPLEMENTATIONS OF REPACK SOURCE | |||
|
66 | ||||
|
67 | def markledger(self, ledger, options=None): | |||
|
68 | if options and options.get(constants.OPTION_PACKSONLY): | |||
|
69 | return | |||
|
70 | if self._shared: | |||
|
71 | for filename, nodes in self._getfiles(): | |||
|
72 | for node in nodes: | |||
|
73 | ledger.markdataentry(self, filename, node) | |||
|
74 | ledger.markhistoryentry(self, filename, node) | |||
|
75 | ||||
|
76 | def cleanup(self, ledger): | |||
|
77 | ui = self.ui | |||
|
78 | entries = ledger.sources.get(self, []) | |||
|
79 | count = 0 | |||
|
80 | progress = ui.makeprogress(_("cleaning up"), unit="files", | |||
|
81 | total=len(entries)) | |||
|
82 | for entry in entries: | |||
|
83 | if entry.gced or (entry.datarepacked and entry.historyrepacked): | |||
|
84 | progress.update(count) | |||
|
85 | path = self._getfilepath(entry.filename, entry.node) | |||
|
86 | util.tryunlink(path) | |||
|
87 | count += 1 | |||
|
88 | progress.complete() | |||
|
89 | ||||
|
90 | # Clean up the repo cache directory. | |||
|
91 | self._cleanupdirectory(self._getrepocachepath()) | |||
|
92 | ||||
|
93 | # BELOW THIS ARE NON-STANDARD APIS | |||
|
94 | ||||
|
95 | def _cleanupdirectory(self, rootdir): | |||
|
96 | """Removes the empty directories and unnecessary files within the root | |||
|
97 | directory recursively. Note that this method does not remove the root | |||
|
98 | directory itself. """ | |||
|
99 | ||||
|
100 | oldfiles = set() | |||
|
101 | otherfiles = set() | |||
|
102 | # osutil.listdir returns stat information which saves some rmdir/listdir | |||
|
103 | # syscalls. | |||
|
104 | for name, mode in util.osutil.listdir(rootdir): | |||
|
105 | if stat.S_ISDIR(mode): | |||
|
106 | dirpath = os.path.join(rootdir, name) | |||
|
107 | self._cleanupdirectory(dirpath) | |||
|
108 | ||||
|
109 | # Now that the directory specified by dirpath is potentially | |||
|
110 | # empty, try and remove it. | |||
|
111 | try: | |||
|
112 | os.rmdir(dirpath) | |||
|
113 | except OSError: | |||
|
114 | pass | |||
|
115 | ||||
|
116 | elif stat.S_ISREG(mode): | |||
|
117 | if name.endswith('_old'): | |||
|
118 | oldfiles.add(name[:-4]) | |||
|
119 | else: | |||
|
120 | otherfiles.add(name) | |||
|
121 | ||||
|
122 | # Remove the files which end with suffix '_old' and have no | |||
|
123 | # corresponding file without the suffix '_old'. See addremotefilelognode | |||
|
124 | # method for the generation/purpose of files with '_old' suffix. | |||
|
125 | for filename in oldfiles - otherfiles: | |||
|
126 | filepath = os.path.join(rootdir, filename + '_old') | |||
|
127 | util.tryunlink(filepath) | |||
|
128 | ||||
|
129 | def _getfiles(self): | |||
|
130 | """Return a list of (filename, [node,...]) for all the revisions that | |||
|
131 | exist in the store. | |||
|
132 | ||||
|
133 | This is useful for obtaining a list of all the contents of the store | |||
|
134 | when performing a repack to another store, since the store API requires | |||
|
135 | name+node keys and not namehash+node keys. | |||
|
136 | """ | |||
|
137 | existing = {} | |||
|
138 | for filenamehash, node in self._listkeys(): | |||
|
139 | existing.setdefault(filenamehash, []).append(node) | |||
|
140 | ||||
|
141 | filenamemap = self._resolvefilenames(existing.keys()) | |||
|
142 | ||||
|
143 | for filename, sha in filenamemap.iteritems(): | |||
|
144 | yield (filename, existing[sha]) | |||
|
145 | ||||
|
146 | def _resolvefilenames(self, hashes): | |||
|
147 | """Given a list of filename hashes that are present in the | |||
|
148 | remotefilelog store, return a mapping from filename->hash. | |||
|
149 | ||||
|
150 | This is useful when converting remotefilelog blobs into other storage | |||
|
151 | formats. | |||
|
152 | """ | |||
|
153 | if not hashes: | |||
|
154 | return {} | |||
|
155 | ||||
|
156 | filenames = {} | |||
|
157 | missingfilename = set(hashes) | |||
|
158 | ||||
|
159 | # Start with a full manifest, since it'll cover the majority of files | |||
|
160 | for filename in self.repo['tip'].manifest(): | |||
|
161 | sha = hashlib.sha1(filename).digest() | |||
|
162 | if sha in missingfilename: | |||
|
163 | filenames[filename] = sha | |||
|
164 | missingfilename.discard(sha) | |||
|
165 | ||||
|
166 | # Scan the changelog until we've found every file name | |||
|
167 | cl = self.repo.unfiltered().changelog | |||
|
168 | for rev in pycompat.xrange(len(cl) - 1, -1, -1): | |||
|
169 | if not missingfilename: | |||
|
170 | break | |||
|
171 | files = cl.readfiles(cl.node(rev)) | |||
|
172 | for filename in files: | |||
|
173 | sha = hashlib.sha1(filename).digest() | |||
|
174 | if sha in missingfilename: | |||
|
175 | filenames[filename] = sha | |||
|
176 | missingfilename.discard(sha) | |||
|
177 | ||||
|
178 | return filenames | |||
|
179 | ||||
|
180 | def _getrepocachepath(self): | |||
|
181 | return os.path.join( | |||
|
182 | self._path, self._reponame) if self._shared else self._path | |||
|
183 | ||||
|
184 | def _listkeys(self): | |||
|
185 | """List all the remotefilelog keys that exist in the store. | |||
|
186 | ||||
|
187 | Returns a iterator of (filename hash, filecontent hash) tuples. | |||
|
188 | """ | |||
|
189 | ||||
|
190 | for root, dirs, files in os.walk(self._getrepocachepath()): | |||
|
191 | for filename in files: | |||
|
192 | if len(filename) != 40: | |||
|
193 | continue | |||
|
194 | node = filename | |||
|
195 | if self._shared: | |||
|
196 | # .../1a/85ffda..be21 | |||
|
197 | filenamehash = root[-41:-39] + root[-38:] | |||
|
198 | else: | |||
|
199 | filenamehash = root[-40:] | |||
|
200 | yield (bin(filenamehash), bin(node)) | |||
|
201 | ||||
|
202 | def _getfilepath(self, name, node): | |||
|
203 | node = hex(node) | |||
|
204 | if self._shared: | |||
|
205 | key = shallowutil.getcachekey(self._reponame, name, node) | |||
|
206 | else: | |||
|
207 | key = shallowutil.getlocalkey(name, node) | |||
|
208 | ||||
|
209 | return os.path.join(self._path, key) | |||
|
210 | ||||
|
211 | def _getdata(self, name, node): | |||
|
212 | filepath = self._getfilepath(name, node) | |||
|
213 | try: | |||
|
214 | data = shallowutil.readfile(filepath) | |||
|
215 | if self._validatecache and not self._validatedata(data, filepath): | |||
|
216 | if self._validatecachelog: | |||
|
217 | with open(self._validatecachelog, 'a+') as f: | |||
|
218 | f.write("corrupt %s during read\n" % filepath) | |||
|
219 | os.rename(filepath, filepath + ".corrupt") | |||
|
220 | raise KeyError("corrupt local cache file %s" % filepath) | |||
|
221 | except IOError: | |||
|
222 | raise KeyError("no file found at %s for %s:%s" % (filepath, name, | |||
|
223 | hex(node))) | |||
|
224 | ||||
|
225 | return data | |||
|
226 | ||||
|
227 | def addremotefilelognode(self, name, node, data): | |||
|
228 | filepath = self._getfilepath(name, node) | |||
|
229 | ||||
|
230 | oldumask = os.umask(0o002) | |||
|
231 | try: | |||
|
232 | # if this node already exists, save the old version for | |||
|
233 | # recovery/debugging purposes. | |||
|
234 | if os.path.exists(filepath): | |||
|
235 | newfilename = filepath + '_old' | |||
|
236 | # newfilename can be read-only and shutil.copy will fail. | |||
|
237 | # Delete newfilename to avoid it | |||
|
238 | if os.path.exists(newfilename): | |||
|
239 | shallowutil.unlinkfile(newfilename) | |||
|
240 | shutil.copy(filepath, newfilename) | |||
|
241 | ||||
|
242 | shallowutil.mkstickygroupdir(self.ui, os.path.dirname(filepath)) | |||
|
243 | shallowutil.writefile(filepath, data, readonly=True) | |||
|
244 | ||||
|
245 | if self._validatecache: | |||
|
246 | if not self._validatekey(filepath, 'write'): | |||
|
247 | raise error.Abort(_("local cache write was corrupted %s") % | |||
|
248 | filepath) | |||
|
249 | finally: | |||
|
250 | os.umask(oldumask) | |||
|
251 | ||||
|
252 | def markrepo(self, path): | |||
|
253 | """Call this to add the given repo path to the store's list of | |||
|
254 | repositories that are using it. This is useful later when doing garbage | |||
|
255 | collection, since it allows us to inspect the repos to see which nodes | |||
|
256 | they want kept alive in the store. | |||
|
257 | """ | |||
|
258 | repospath = os.path.join(self._path, "repos") | |||
|
259 | with open(repospath, 'ab') as reposfile: | |||
|
260 | reposfile.write(os.path.dirname(path) + "\n") | |||
|
261 | ||||
|
262 | repospathstat = os.stat(repospath) | |||
|
263 | if repospathstat.st_uid == self._uid: | |||
|
264 | os.chmod(repospath, 0o0664) | |||
|
265 | ||||
|
266 | def _validatekey(self, path, action): | |||
|
267 | with open(path, 'rb') as f: | |||
|
268 | data = f.read() | |||
|
269 | ||||
|
270 | if self._validatedata(data, path): | |||
|
271 | return True | |||
|
272 | ||||
|
273 | if self._validatecachelog: | |||
|
274 | with open(self._validatecachelog, 'ab+') as f: | |||
|
275 | f.write("corrupt %s during %s\n" % (path, action)) | |||
|
276 | ||||
|
277 | os.rename(path, path + ".corrupt") | |||
|
278 | return False | |||
|
279 | ||||
|
280 | def _validatedata(self, data, path): | |||
|
281 | try: | |||
|
282 | if len(data) > 0: | |||
|
283 | # see remotefilelogserver.createfileblob for the format | |||
|
284 | offset, size, flags = shallowutil.parsesizeflags(data) | |||
|
285 | if len(data) <= size: | |||
|
286 | # it is truncated | |||
|
287 | return False | |||
|
288 | ||||
|
289 | # extract the node from the metadata | |||
|
290 | offset += size | |||
|
291 | datanode = data[offset:offset + 20] | |||
|
292 | ||||
|
293 | # and compare against the path | |||
|
294 | if os.path.basename(path) == hex(datanode): | |||
|
295 | # Content matches the intended path | |||
|
296 | return True | |||
|
297 | return False | |||
|
298 | except (ValueError, RuntimeError): | |||
|
299 | pass | |||
|
300 | ||||
|
301 | return False | |||
|
302 | ||||
|
303 | def gc(self, keepkeys): | |||
|
304 | ui = self.ui | |||
|
305 | cachepath = self._path | |||
|
306 | ||||
|
307 | # prune cache | |||
|
308 | queue = pycompat.queue.PriorityQueue() | |||
|
309 | originalsize = 0 | |||
|
310 | size = 0 | |||
|
311 | count = 0 | |||
|
312 | removed = 0 | |||
|
313 | ||||
|
314 | # keep files newer than a day even if they aren't needed | |||
|
315 | limit = time.time() - (60 * 60 * 24) | |||
|
316 | ||||
|
317 | progress = ui.makeprogress(_("removing unnecessary files"), | |||
|
318 | unit="files") | |||
|
319 | progress.update(0) | |||
|
320 | for root, dirs, files in os.walk(cachepath): | |||
|
321 | for file in files: | |||
|
322 | if file == 'repos': | |||
|
323 | continue | |||
|
324 | ||||
|
325 | # Don't delete pack files | |||
|
326 | if '/packs/' in root: | |||
|
327 | continue | |||
|
328 | ||||
|
329 | progress.update(count) | |||
|
330 | path = os.path.join(root, file) | |||
|
331 | key = os.path.relpath(path, cachepath) | |||
|
332 | count += 1 | |||
|
333 | try: | |||
|
334 | pathstat = os.stat(path) | |||
|
335 | except OSError as e: | |||
|
336 | # errno.ENOENT = no such file or directory | |||
|
337 | if e.errno != errno.ENOENT: | |||
|
338 | raise | |||
|
339 | msg = _("warning: file %s was removed by another process\n") | |||
|
340 | ui.warn(msg % path) | |||
|
341 | continue | |||
|
342 | ||||
|
343 | originalsize += pathstat.st_size | |||
|
344 | ||||
|
345 | if key in keepkeys or pathstat.st_atime > limit: | |||
|
346 | queue.put((pathstat.st_atime, path, pathstat)) | |||
|
347 | size += pathstat.st_size | |||
|
348 | else: | |||
|
349 | try: | |||
|
350 | shallowutil.unlinkfile(path) | |||
|
351 | except OSError as e: | |||
|
352 | # errno.ENOENT = no such file or directory | |||
|
353 | if e.errno != errno.ENOENT: | |||
|
354 | raise | |||
|
355 | msg = _("warning: file %s was removed by another " | |||
|
356 | "process\n") | |||
|
357 | ui.warn(msg % path) | |||
|
358 | continue | |||
|
359 | removed += 1 | |||
|
360 | progress.complete() | |||
|
361 | ||||
|
362 | # remove oldest files until under limit | |||
|
363 | limit = ui.configbytes("remotefilelog", "cachelimit") | |||
|
364 | if size > limit: | |||
|
365 | excess = size - limit | |||
|
366 | progress = ui.makeprogress(_("enforcing cache limit"), unit="bytes", | |||
|
367 | total=excess) | |||
|
368 | removedexcess = 0 | |||
|
369 | while queue and size > limit and size > 0: | |||
|
370 | progress.update(removedexcess) | |||
|
371 | atime, oldpath, oldpathstat = queue.get() | |||
|
372 | try: | |||
|
373 | shallowutil.unlinkfile(oldpath) | |||
|
374 | except OSError as e: | |||
|
375 | # errno.ENOENT = no such file or directory | |||
|
376 | if e.errno != errno.ENOENT: | |||
|
377 | raise | |||
|
378 | msg = _("warning: file %s was removed by another process\n") | |||
|
379 | ui.warn(msg % oldpath) | |||
|
380 | size -= oldpathstat.st_size | |||
|
381 | removed += 1 | |||
|
382 | removedexcess += oldpathstat.st_size | |||
|
383 | progress.complete() | |||
|
384 | ||||
|
385 | ui.status(_("finished: removed %d of %d files (%0.2f GB to %0.2f GB)\n") | |||
|
386 | % (removed, count, | |||
|
387 | float(originalsize) / 1024.0 / 1024.0 / 1024.0, | |||
|
388 | float(size) / 1024.0 / 1024.0 / 1024.0)) | |||
|
389 | ||||
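The second half of gc() above enforces a size-capped LRU: every file the scan kept goes into a priority queue keyed by atime, and the least recently used entries are unlinked until the total size fits under remotefilelog.cachelimit. A minimal in-memory sketch of the same eviction policy follows; enforcelimit and its tuples are illustrative, standing in for os.stat results and shallowutil.unlinkfile.

import heapq

def enforcelimit(entries, limit):
    # entries: (atime, name, size) tuples for everything the first pass
    # kept; mirrors the PriorityQueue loop in gc() above.
    heap = list(entries)
    heapq.heapify(heap)          # smallest atime (least recently used) first
    size = sum(e[2] for e in entries)
    evicted = []
    while heap and size > limit:
        atime, name, entrysize = heapq.heappop(heap)
        evicted.append(name)     # gc() unlinks the file at this point
        size -= entrysize
    return evicted

entries = [(100, 'old', 6), (200, 'mid', 5), (300, 'new', 4)]
assert enforcelimit(entries, 10) == ['old']  # drop oldest until under limit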
|
390 | class baseunionstore(object): | |||
|
391 | def __init__(self, *args, **kwargs): | |||
|
392 | # If one of the functions that iterates all of the stores is about to | |||
|
393 | # throw a KeyError, try this many times with a full refresh between | |||
|
394 | # attempts. A repack operation may have moved data from one store to | |||
|
395 | # another while we were running. | |||
|
396 | self.numattempts = kwargs.get(r'numretries', 0) + 1 | |||
|
397 | # If not None, call this function on every retry and when the attempts | |||
|
398 | # are exhausted. | |||
|
399 | self.retrylog = kwargs.get(r'retrylog', None) | |||
|
400 | ||||
|
401 | def markforrefresh(self): | |||
|
402 | for store in self.stores: | |||
|
403 | if util.safehasattr(store, 'markforrefresh'): | |||
|
404 | store.markforrefresh() | |||
|
405 | ||||
|
406 | @staticmethod | |||
|
407 | def retriable(fn): | |||
|
408 | def noop(*args): | |||
|
409 | pass | |||
|
410 | def wrapped(self, *args, **kwargs): | |||
|
411 | retrylog = self.retrylog or noop | |||
|
412 | funcname = fn.__name__ | |||
|
413 | for i in pycompat.xrange(self.numattempts): | |||
|
414 | if i > 0: | |||
|
415 | retrylog('re-attempting (n=%d) %s\n' % (i, funcname)) | |||
|
416 | self.markforrefresh() | |||
|
417 | try: | |||
|
418 | return fn(self, *args, **kwargs) | |||
|
419 | except KeyError: | |||
|
420 | pass | |||
|
421 | # retries exhausted | |||
|
422 | retrylog('retries exhausted in %s, raising KeyError\n' % | |||
|
423 | pycompat.sysbytes(funcname)) | |||
|
424 | raise | |||
|
425 | return wrapped |
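The retriable decorator above is what makes the union stores robust against concurrent repacks. Here is a self-contained sketch of the same retry-on-KeyError pattern; it keeps a reference to the caught exception so the final re-raise also works on Python 3, and FlakyStore is a hypothetical stand-in for a union store.

import functools

def retriable(fn):
    @functools.wraps(fn)
    def wrapped(self, *args, **kwargs):
        last = None
        for i in range(self.numattempts):
            if i > 0:
                self.markforrefresh()  # pick up data a repack may have moved
            try:
                return fn(self, *args, **kwargs)
            except KeyError as e:
                last = e
        raise last                     # retries exhausted
    return wrapped

class FlakyStore(object):
    numattempts = 3
    calls = 0
    def markforrefresh(self):
        pass                           # a real store would rescan its packs
    @retriable
    def get(self, key):
        self.calls += 1
        if self.calls < 3:
            raise KeyError(key)        # simulate data moving mid-repack
        return b'data'

assert FlakyStore().get('x') == b'data'  # succeeds on the third attempt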
@@ -0,0 +1,84 b'' | |||||
|
1 | # connectionpool.py - class for pooling peer connections for reuse | |||
|
2 | # | |||
|
3 | # Copyright 2017 Facebook, Inc. | |||
|
4 | # | |||
|
5 | # This software may be used and distributed according to the terms of the | |||
|
6 | # GNU General Public License version 2 or any later version. | |||
|
7 | ||||
|
8 | from __future__ import absolute_import | |||
|
9 | ||||
|
10 | from mercurial import ( | |||
|
11 | extensions, | |||
|
12 | hg, | |||
|
13 | sshpeer, | |||
|
14 | util, | |||
|
15 | ) | |||
|
16 | ||||
|
17 | _sshv1peer = sshpeer.sshv1peer | |||
|
18 | ||||
|
19 | class connectionpool(object): | |||
|
20 | def __init__(self, repo): | |||
|
21 | self._repo = repo | |||
|
22 | self._pool = dict() | |||
|
23 | ||||
|
24 | def get(self, path): | |||
|
25 | pathpool = self._pool.get(path) | |||
|
26 | if pathpool is None: | |||
|
27 | pathpool = list() | |||
|
28 | self._pool[path] = pathpool | |||
|
29 | ||||
|
30 | conn = None | |||
|
31 | if len(pathpool) > 0: | |||
|
32 | try: | |||
|
33 | conn = pathpool.pop() | |||
|
34 | peer = conn.peer | |||
|
35 | # If the connection has died, drop it | |||
|
36 | if isinstance(peer, _sshv1peer): | |||
|
37 | if peer._subprocess.poll() is not None: | |||
|
38 | conn = None | |||
|
39 | except IndexError: | |||
|
40 | pass | |||
|
41 | ||||
|
42 | if conn is None: | |||
|
43 | def _cleanup(orig): | |||
|
44 | # close pipee first so peer.cleanup reading it won't deadlock, | |||
|
45 | # if there are other processes with pipeo open (i.e. us). | |||
|
46 | peer = orig.im_self | |||
|
47 | if util.safehasattr(peer, 'pipee'): | |||
|
48 | peer.pipee.close() | |||
|
49 | return orig() | |||
|
50 | ||||
|
51 | peer = hg.peer(self._repo.ui, {}, path) | |||
|
52 | if util.safehasattr(peer, 'cleanup'): | |||
|
53 | extensions.wrapfunction(peer, 'cleanup', _cleanup) | |||
|
54 | ||||
|
55 | conn = connection(pathpool, peer) | |||
|
56 | ||||
|
57 | return conn | |||
|
58 | ||||
|
59 | def close(self): | |||
|
60 | for pathpool in self._pool.itervalues(): | |||
|
61 | for conn in pathpool: | |||
|
62 | conn.close() | |||
|
63 | del pathpool[:] | |||
|
64 | ||||
|
65 | class connection(object): | |||
|
66 | def __init__(self, pool, peer): | |||
|
67 | self._pool = pool | |||
|
68 | self.peer = peer | |||
|
69 | ||||
|
70 | def __enter__(self): | |||
|
71 | return self | |||
|
72 | ||||
|
73 | def __exit__(self, type, value, traceback): | |||
|
74 | # Only add the connection back to the pool if there was no exception, | |||
|
75 | # since an exception could mean the connection is not in a reusable | |||
|
76 | # state. | |||
|
77 | if type is None: | |||
|
78 | self._pool.append(self) | |||
|
79 | else: | |||
|
80 | self.close() | |||
|
81 | ||||
|
82 | def close(self): | |||
|
83 | if util.safehasattr(self.peer, 'cleanup'): | |||
|
84 | self.peer.cleanup() |
@@ -0,0 +1,41 b'' | |||||
|
1 | from __future__ import absolute_import | |||
|
2 | ||||
|
3 | import struct | |||
|
4 | ||||
|
5 | from mercurial.i18n import _ | |||
|
6 | ||||
|
7 | NETWORK_CAP_LEGACY_SSH_GETFILES = 'exp-remotefilelog-ssh-getfiles-1' | |||
|
8 | ||||
|
9 | SHALLOWREPO_REQUIREMENT = "exp-remotefilelog-repo-req-1" | |||
|
10 | ||||
|
11 | BUNDLE2_CAPABLITY = "exp-remotefilelog-b2cap-1" | |||
|
12 | ||||
|
13 | FILENAMESTRUCT = '!H' | |||
|
14 | FILENAMESIZE = struct.calcsize(FILENAMESTRUCT) | |||
|
15 | ||||
|
16 | NODESIZE = 20 | |||
|
17 | PACKREQUESTCOUNTSTRUCT = '!I' | |||
|
18 | ||||
|
19 | NODECOUNTSTRUCT = '!I' | |||
|
20 | NODECOUNTSIZE = struct.calcsize(NODECOUNTSTRUCT) | |||
|
21 | ||||
|
22 | PATHCOUNTSTRUCT = '!I' | |||
|
23 | PATHCOUNTSIZE = struct.calcsize(PATHCOUNTSTRUCT) | |||
|
24 | ||||
|
25 | FILEPACK_CATEGORY="" | |||
|
26 | TREEPACK_CATEGORY="manifests" | |||
|
27 | ||||
|
28 | ALL_CATEGORIES = [FILEPACK_CATEGORY, TREEPACK_CATEGORY] | |||
|
29 | ||||
|
30 | # revision metadata keys. must be a single character. | |||
|
31 | METAKEYFLAG = 'f' # revlog flag | |||
|
32 | METAKEYSIZE = 's' # full rawtext size | |||
|
33 | ||||
|
34 | def getunits(category): | |||
|
35 | if category == FILEPACK_CATEGORY: | |||
|
36 | return _("files") | |||
|
37 | if category == TREEPACK_CATEGORY: | |||
|
38 | return _("trees") | |||
|
39 | ||||
|
40 | # Repack options passed to ``markledger``. | |||
|
41 | OPTION_PACKSONLY = 'packsonly' |
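The '!H' and '!I' format strings above are the big-endian framing primitives the pack and wire formats are built from. As a quick illustration, here is a filename framed with the 2-byte length prefix of FILENAMESTRUCT; packname and unpackname are examples, not remotefilelog APIs.

import struct

FILENAMESTRUCT = '!H'                 # 2-byte big-endian length prefix
FILENAMESIZE = struct.calcsize(FILENAMESTRUCT)

def packname(name):
    return struct.pack(FILENAMESTRUCT, len(name)) + name

def unpackname(data, offset=0):
    (length,) = struct.unpack_from(FILENAMESTRUCT, data, offset)
    offset += FILENAMESIZE
    return data[offset:offset + length], offset + length

wire = packname(b'foo/bar.py')
name, end = unpackname(wire)
assert name == b'foo/bar.py' and end == len(wire)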
@@ -0,0 +1,376 b'' | |||||
|
1 | from __future__ import absolute_import | |||
|
2 | ||||
|
3 | import threading | |||
|
4 | ||||
|
5 | from mercurial.node import hex, nullid | |||
|
6 | from mercurial import ( | |||
|
7 | mdiff, | |||
|
8 | pycompat, | |||
|
9 | revlog, | |||
|
10 | ) | |||
|
11 | from . import ( | |||
|
12 | basestore, | |||
|
13 | constants, | |||
|
14 | shallowutil, | |||
|
15 | ) | |||
|
16 | ||||
|
17 | class ChainIndicies(object): | |||
|
18 | """A static class for easy reference to the delta chain indicies. | |||
|
19 | """ | |||
|
20 | # The filename of this revision delta | |||
|
21 | NAME = 0 | |||
|
22 | # The mercurial file node for this revision delta | |||
|
23 | NODE = 1 | |||
|
24 | # The filename of the delta base's revision. This is useful when delta | |||
|
25 | # between different files (like in the case of a move or copy, we can delta | |||
|
26 | # against the original file content). | |||
|
27 | BASENAME = 2 | |||
|
28 | # The mercurial file node for the delta base revision. This is the nullid if | |||
|
29 | # this delta is a full text. | |||
|
30 | BASENODE = 3 | |||
|
31 | # The actual delta or full text data. | |||
|
32 | DATA = 4 | |||
|
33 | ||||
|
34 | class unioncontentstore(basestore.baseunionstore): | |||
|
35 | def __init__(self, *args, **kwargs): | |||
|
36 | super(unioncontentstore, self).__init__(*args, **kwargs) | |||
|
37 | ||||
|
38 | self.stores = args | |||
|
39 | self.writestore = kwargs.get(r'writestore') | |||
|
40 | ||||
|
41 | # If allowincomplete==True then the union store can return partial | |||
|
42 | # delta chains, otherwise it will throw a KeyError if a full | |||
|
43 | # deltachain can't be found. | |||
|
44 | self.allowincomplete = kwargs.get(r'allowincomplete', False) | |||
|
45 | ||||
|
46 | def get(self, name, node): | |||
|
47 | """Fetches the full text revision contents of the given name+node pair. | |||
|
48 | If the full text doesn't exist, throws a KeyError. | |||
|
49 | ||||
|
50 | Under the hood, this uses getdeltachain() across all the stores to build | |||
|
51 | up a full chain to produce the full text. | |||
|
52 | """ | |||
|
53 | chain = self.getdeltachain(name, node) | |||
|
54 | ||||
|
55 | if chain[-1][ChainIndicies.BASENODE] != nullid: | |||
|
56 | # If we didn't receive a full chain, throw | |||
|
57 | raise KeyError((name, hex(node))) | |||
|
58 | ||||
|
59 | # The last entry in the chain is a full text, so we start our delta | |||
|
60 | # applies with that. | |||
|
61 | fulltext = chain.pop()[ChainIndicies.DATA] | |||
|
62 | ||||
|
63 | text = fulltext | |||
|
64 | while chain: | |||
|
65 | delta = chain.pop()[ChainIndicies.DATA] | |||
|
66 | text = mdiff.patches(text, [delta]) | |||
|
67 | ||||
|
68 | return text | |||
|
69 | ||||
|
70 | @basestore.baseunionstore.retriable | |||
|
71 | def getdelta(self, name, node): | |||
|
72 | """Return the single delta entry for the given name/node pair. | |||
|
73 | """ | |||
|
74 | for store in self.stores: | |||
|
75 | try: | |||
|
76 | return store.getdelta(name, node) | |||
|
77 | except KeyError: | |||
|
78 | pass | |||
|
79 | ||||
|
80 | raise KeyError((name, hex(node))) | |||
|
81 | ||||
|
82 | def getdeltachain(self, name, node): | |||
|
83 | """Returns the deltachain for the given name/node pair. | |||
|
84 | ||||
|
85 | Returns an ordered list of: | |||
|
86 | ||||
|
87 | [(name, node, deltabasename, deltabasenode, deltacontent),...] | |||
|
88 | ||||
|
89 | where the chain is terminated by a full text entry with a nullid | |||
|
90 | deltabasenode. | |||
|
91 | """ | |||
|
92 | chain = self._getpartialchain(name, node) | |||
|
93 | while chain[-1][ChainIndicies.BASENODE] != nullid: | |||
|
94 | x, x, deltabasename, deltabasenode, x = chain[-1] | |||
|
95 | try: | |||
|
96 | morechain = self._getpartialchain(deltabasename, deltabasenode) | |||
|
97 | chain.extend(morechain) | |||
|
98 | except KeyError: | |||
|
99 | # If we allow incomplete chains, don't throw. | |||
|
100 | if not self.allowincomplete: | |||
|
101 | raise | |||
|
102 | break | |||
|
103 | ||||
|
104 | return chain | |||
|
105 | ||||
|
106 | @basestore.baseunionstore.retriable | |||
|
107 | def getmeta(self, name, node): | |||
|
108 | """Returns the metadata dict for given node.""" | |||
|
109 | for store in self.stores: | |||
|
110 | try: | |||
|
111 | return store.getmeta(name, node) | |||
|
112 | except KeyError: | |||
|
113 | pass | |||
|
114 | raise KeyError((name, hex(node))) | |||
|
115 | ||||
|
116 | def getmetrics(self): | |||
|
117 | metrics = [s.getmetrics() for s in self.stores] | |||
|
118 | return shallowutil.sumdicts(*metrics) | |||
|
119 | ||||
|
120 | @basestore.baseunionstore.retriable | |||
|
121 | def _getpartialchain(self, name, node): | |||
|
122 | """Returns a partial delta chain for the given name/node pair. | |||
|
123 | ||||
|
124 | A partial chain is a chain that may not be terminated in a full-text. | |||
|
125 | """ | |||
|
126 | for store in self.stores: | |||
|
127 | try: | |||
|
128 | return store.getdeltachain(name, node) | |||
|
129 | except KeyError: | |||
|
130 | pass | |||
|
131 | ||||
|
132 | raise KeyError((name, hex(node))) | |||
|
133 | ||||
|
134 | def add(self, name, node, data): | |||
|
135 | raise RuntimeError("cannot add content only to remotefilelog " | |||
|
136 | "contentstore") | |||
|
137 | ||||
|
138 | def getmissing(self, keys): | |||
|
139 | missing = keys | |||
|
140 | for store in self.stores: | |||
|
141 | if missing: | |||
|
142 | missing = store.getmissing(missing) | |||
|
143 | return missing | |||
|
144 | ||||
|
145 | def addremotefilelognode(self, name, node, data): | |||
|
146 | if self.writestore: | |||
|
147 | self.writestore.addremotefilelognode(name, node, data) | |||
|
148 | else: | |||
|
149 | raise RuntimeError("no writable store configured") | |||
|
150 | ||||
|
151 | def markledger(self, ledger, options=None): | |||
|
152 | for store in self.stores: | |||
|
153 | store.markledger(ledger, options) | |||
|
154 | ||||
|
155 | class remotefilelogcontentstore(basestore.basestore): | |||
|
156 | def __init__(self, *args, **kwargs): | |||
|
157 | super(remotefilelogcontentstore, self).__init__(*args, **kwargs) | |||
|
158 | self._threaddata = threading.local() | |||
|
159 | ||||
|
160 | def get(self, name, node): | |||
|
161 | # return raw revision text | |||
|
162 | data = self._getdata(name, node) | |||
|
163 | ||||
|
164 | offset, size, flags = shallowutil.parsesizeflags(data) | |||
|
165 | content = data[offset:offset + size] | |||
|
166 | ||||
|
167 | ancestormap = shallowutil.ancestormap(data) | |||
|
168 | p1, p2, linknode, copyfrom = ancestormap[node] | |||
|
169 | copyrev = None | |||
|
170 | if copyfrom: | |||
|
171 | copyrev = hex(p1) | |||
|
172 | ||||
|
173 | self._updatemetacache(node, size, flags) | |||
|
174 | ||||
|
175 | # lfs tracks renames in its own metadata, remove hg copy metadata, | |||
|
176 | # because copy metadata will be re-added by lfs flag processor. | |||
|
177 | if flags & revlog.REVIDX_EXTSTORED: | |||
|
178 | copyrev = copyfrom = None | |||
|
179 | revision = shallowutil.createrevlogtext(content, copyfrom, copyrev) | |||
|
180 | return revision | |||
|
181 | ||||
|
182 | def getdelta(self, name, node): | |||
|
183 | # Since remotefilelog content stores only contain full texts, just | |||
|
184 | # return that. | |||
|
185 | revision = self.get(name, node) | |||
|
186 | return revision, name, nullid, self.getmeta(name, node) | |||
|
187 | ||||
|
188 | def getdeltachain(self, name, node): | |||
|
189 | # Since remotefilelog content stores just contain full texts, we return | |||
|
190 | # a fake delta chain that just consists of a single full text revision. | |||
|
191 | # The nullid in the deltabasenode slot indicates that the revision is a | |||
|
192 | # fulltext. | |||
|
193 | revision = self.get(name, node) | |||
|
194 | return [(name, node, None, nullid, revision)] | |||
|
195 | ||||
|
196 | def getmeta(self, name, node): | |||
|
197 | self._sanitizemetacache() | |||
|
198 | if node != self._threaddata.metacache[0]: | |||
|
199 | data = self._getdata(name, node) | |||
|
200 | offset, size, flags = shallowutil.parsesizeflags(data) | |||
|
201 | self._updatemetacache(node, size, flags) | |||
|
202 | return self._threaddata.metacache[1] | |||
|
203 | ||||
|
204 | def add(self, name, node, data): | |||
|
205 | raise RuntimeError("cannot add content only to remotefilelog " | |||
|
206 | "contentstore") | |||
|
207 | ||||
|
208 | def _sanitizemetacache(self): | |||
|
209 | metacache = getattr(self._threaddata, 'metacache', None) | |||
|
210 | if metacache is None: | |||
|
211 | self._threaddata.metacache = (None, None) # (node, meta) | |||
|
212 | ||||
|
213 | def _updatemetacache(self, node, size, flags): | |||
|
214 | self._sanitizemetacache() | |||
|
215 | if node == self._threaddata.metacache[0]: | |||
|
216 | return | |||
|
217 | meta = {constants.METAKEYFLAG: flags, | |||
|
218 | constants.METAKEYSIZE: size} | |||
|
219 | self._threaddata.metacache = (node, meta) | |||
|
220 | ||||
|
221 | class remotecontentstore(object): | |||
|
222 | def __init__(self, ui, fileservice, shared): | |||
|
223 | self._fileservice = fileservice | |||
|
224 | # type(shared) is usually remotefilelogcontentstore | |||
|
225 | self._shared = shared | |||
|
226 | ||||
|
227 | def get(self, name, node): | |||
|
228 | self._fileservice.prefetch([(name, hex(node))], force=True, | |||
|
229 | fetchdata=True) | |||
|
230 | return self._shared.get(name, node) | |||
|
231 | ||||
|
232 | def getdelta(self, name, node): | |||
|
233 | revision = self.get(name, node) | |||
|
234 | return revision, name, nullid, self._shared.getmeta(name, node) | |||
|
235 | ||||
|
236 | def getdeltachain(self, name, node): | |||
|
237 | # Since our remote content stores just contain full texts, we return a | |||
|
238 | # fake delta chain that just consists of a single full text revision. | |||
|
239 | # The nullid in the deltabasenode slot indicates that the revision is a | |||
|
240 | # fulltext. | |||
|
241 | revision = self.get(name, node) | |||
|
242 | return [(name, node, None, nullid, revision)] | |||
|
243 | ||||
|
244 | def getmeta(self, name, node): | |||
|
245 | self._fileservice.prefetch([(name, hex(node))], force=True, | |||
|
246 | fetchdata=True) | |||
|
247 | return self._shared.getmeta(name, node) | |||
|
248 | ||||
|
249 | def add(self, name, node, data): | |||
|
250 | raise RuntimeError("cannot add to a remote store") | |||
|
251 | ||||
|
252 | def getmissing(self, keys): | |||
|
253 | return keys | |||
|
254 | ||||
|
255 | def markledger(self, ledger, options=None): | |||
|
256 | pass | |||
|
257 | ||||
|
258 | class manifestrevlogstore(object): | |||
|
259 | def __init__(self, repo): | |||
|
260 | self._store = repo.store | |||
|
261 | self._svfs = repo.svfs | |||
|
262 | self._revlogs = dict() | |||
|
263 | self._cl = revlog.revlog(self._svfs, '00changelog.i') | |||
|
264 | self._repackstartlinkrev = 0 | |||
|
265 | ||||
|
266 | def get(self, name, node): | |||
|
267 | return self._revlog(name).revision(node, raw=True) | |||
|
268 | ||||
|
269 | def getdelta(self, name, node): | |||
|
270 | revision = self.get(name, node) | |||
|
271 | return revision, name, nullid, self.getmeta(name, node) | |||
|
272 | ||||
|
273 | def getdeltachain(self, name, node): | |||
|
274 | revision = self.get(name, node) | |||
|
275 | return [(name, node, None, nullid, revision)] | |||
|
276 | ||||
|
277 | def getmeta(self, name, node): | |||
|
278 | rl = self._revlog(name) | |||
|
279 | rev = rl.rev(node) | |||
|
280 | return {constants.METAKEYFLAG: rl.flags(rev), | |||
|
281 | constants.METAKEYSIZE: rl.rawsize(rev)} | |||
|
282 | ||||
|
283 | def getancestors(self, name, node, known=None): | |||
|
284 | if known is None: | |||
|
285 | known = set() | |||
|
286 | if node in known: | |||
|
287 | return [] | |||
|
288 | ||||
|
289 | rl = self._revlog(name) | |||
|
290 | ancestors = {} | |||
|
291 | missing = set((node,)) | |||
|
292 | for ancrev in rl.ancestors([rl.rev(node)], inclusive=True): | |||
|
293 | ancnode = rl.node(ancrev) | |||
|
294 | missing.discard(ancnode) | |||
|
295 | ||||
|
296 | p1, p2 = rl.parents(ancnode) | |||
|
297 | if p1 != nullid and p1 not in known: | |||
|
298 | missing.add(p1) | |||
|
299 | if p2 != nullid and p2 not in known: | |||
|
300 | missing.add(p2) | |||
|
301 | ||||
|
302 | linknode = self._cl.node(rl.linkrev(ancrev)) | |||
|
303 | ancestors[rl.node(ancrev)] = (p1, p2, linknode, '') | |||
|
304 | if not missing: | |||
|
305 | break | |||
|
306 | return ancestors | |||
|
307 | ||||
|
308 | def getnodeinfo(self, name, node): | |||
|
309 | cl = self._cl | |||
|
310 | rl = self._revlog(name) | |||
|
311 | parents = rl.parents(node) | |||
|
312 | linkrev = rl.linkrev(rl.rev(node)) | |||
|
313 | return (parents[0], parents[1], cl.node(linkrev), None) | |||
|
314 | ||||
|
315 | def add(self, *args): | |||
|
316 | raise RuntimeError("cannot add to a revlog store") | |||
|
317 | ||||
|
318 | def _revlog(self, name): | |||
|
319 | rl = self._revlogs.get(name) | |||
|
320 | if rl is None: | |||
|
321 | revlogname = '00manifesttree.i' | |||
|
322 | if name != '': | |||
|
323 | revlogname = 'meta/%s/00manifest.i' % name | |||
|
324 | rl = revlog.revlog(self._svfs, revlogname) | |||
|
325 | self._revlogs[name] = rl | |||
|
326 | return rl | |||
|
327 | ||||
|
328 | def getmissing(self, keys): | |||
|
329 | missing = [] | |||
|
330 | for name, node in keys: | |||
|
331 | mfrevlog = self._revlog(name) | |||
|
332 | if node not in mfrevlog.nodemap: | |||
|
333 | missing.append((name, node)) | |||
|
334 | ||||
|
335 | return missing | |||
|
336 | ||||
|
337 | def setrepacklinkrevrange(self, startrev, endrev): | |||
|
338 | self._repackstartlinkrev = startrev | |||
|
339 | self._repackendlinkrev = endrev | |||
|
340 | ||||
|
341 | def markledger(self, ledger, options=None): | |||
|
342 | if options and options.get(constants.OPTION_PACKSONLY): | |||
|
343 | return | |||
|
344 | treename = '' | |||
|
345 | rl = revlog.revlog(self._svfs, '00manifesttree.i') | |||
|
346 | startlinkrev = self._repackstartlinkrev | |||
|
347 | endlinkrev = self._repackendlinkrev | |||
|
348 | for rev in pycompat.xrange(len(rl) - 1, -1, -1): | |||
|
349 | linkrev = rl.linkrev(rev) | |||
|
350 | if linkrev < startlinkrev: | |||
|
351 | break | |||
|
352 | if linkrev > endlinkrev: | |||
|
353 | continue | |||
|
354 | node = rl.node(rev) | |||
|
355 | ledger.markdataentry(self, treename, node) | |||
|
356 | ledger.markhistoryentry(self, treename, node) | |||
|
357 | ||||
|
358 | for path, encoded, size in self._store.datafiles(): | |||
|
359 | if path[:5] != 'meta/' or path[-2:] != '.i': | |||
|
360 | continue | |||
|
361 | ||||
|
362 | treename = path[5:-len('/00manifest.i')] | |||
|
363 | ||||
|
364 | rl = revlog.revlog(self._svfs, path) | |||
|
365 | for rev in pycompat.xrange(len(rl) - 1, -1, -1): | |||
|
366 | linkrev = rl.linkrev(rev) | |||
|
367 | if linkrev < startlinkrev: | |||
|
368 | break | |||
|
369 | if linkrev > endlinkrev: | |||
|
370 | continue | |||
|
371 | node = rl.node(rev) | |||
|
372 | ledger.markdataentry(self, treename, node) | |||
|
373 | ledger.markhistoryentry(self, treename, node) | |||
|
374 | ||||
|
375 | def cleanup(self, ledger): | |||
|
376 | pass |
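unioncontentstore.get at the top of this file is the consumer of these chains: it pops the terminal fulltext off the end and folds the remaining deltas back in, oldest first. The same control flow is shown below with a toy delta format (byte append instead of mdiff.patches, so the sketch stays dependency-free and runnable).

NULLID = b'\0' * 20

def applydelta(base, delta):
    # stand-in for mdiff.patches(): our toy "delta" just appends bytes
    return base + delta

def resolvechain(chain):
    # chain is [(name, node, basename, basenode, data), ...] exactly as
    # getdeltachain() returns it, terminated by a fulltext entry.
    if chain[-1][3] != NULLID:
        raise KeyError('incomplete chain')
    text = chain.pop()[4]             # start from the fulltext
    while chain:
        text = applydelta(text, chain.pop()[4])
    return text

chain = [('f', b'n2', 'f', b'n1', b' world'),
         ('f', b'n1', 'f', NULLID, b'hello')]
assert resolvechain(chain) == b'hello world'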
@@ -0,0 +1,460 b'' | |||||
|
1 | from __future__ import absolute_import | |||
|
2 | ||||
|
3 | import struct | |||
|
4 | import zlib | |||
|
5 | ||||
|
6 | from mercurial.node import hex, nullid | |||
|
7 | from mercurial.i18n import _ | |||
|
8 | from mercurial import ( | |||
|
9 | pycompat, | |||
|
10 | util, | |||
|
11 | ) | |||
|
12 | from . import ( | |||
|
13 | basepack, | |||
|
14 | constants, | |||
|
15 | shallowutil, | |||
|
16 | ) | |||
|
17 | ||||
|
18 | NODELENGTH = 20 | |||
|
19 | ||||
|
20 | # The indicator value in the index for a fulltext entry. | |||
|
21 | FULLTEXTINDEXMARK = -1 | |||
|
22 | NOBASEINDEXMARK = -2 | |||
|
23 | ||||
|
24 | INDEXSUFFIX = '.dataidx' | |||
|
25 | PACKSUFFIX = '.datapack' | |||
|
26 | ||||
|
27 | class datapackstore(basepack.basepackstore): | |||
|
28 | INDEXSUFFIX = INDEXSUFFIX | |||
|
29 | PACKSUFFIX = PACKSUFFIX | |||
|
30 | ||||
|
31 | def __init__(self, ui, path): | |||
|
32 | super(datapackstore, self).__init__(ui, path) | |||
|
33 | ||||
|
34 | def getpack(self, path): | |||
|
35 | return datapack(path) | |||
|
36 | ||||
|
37 | def get(self, name, node): | |||
|
38 | raise RuntimeError("must use getdeltachain with datapackstore") | |||
|
39 | ||||
|
40 | def getmeta(self, name, node): | |||
|
41 | for pack in self.packs: | |||
|
42 | try: | |||
|
43 | return pack.getmeta(name, node) | |||
|
44 | except KeyError: | |||
|
45 | pass | |||
|
46 | ||||
|
47 | for pack in self.refresh(): | |||
|
48 | try: | |||
|
49 | return pack.getmeta(name, node) | |||
|
50 | except KeyError: | |||
|
51 | pass | |||
|
52 | ||||
|
53 | raise KeyError((name, hex(node))) | |||
|
54 | ||||
|
55 | def getdelta(self, name, node): | |||
|
56 | for pack in self.packs: | |||
|
57 | try: | |||
|
58 | return pack.getdelta(name, node) | |||
|
59 | except KeyError: | |||
|
60 | pass | |||
|
61 | ||||
|
62 | for pack in self.refresh(): | |||
|
63 | try: | |||
|
64 | return pack.getdelta(name, node) | |||
|
65 | except KeyError: | |||
|
66 | pass | |||
|
67 | ||||
|
68 | raise KeyError((name, hex(node))) | |||
|
69 | ||||
|
70 | def getdeltachain(self, name, node): | |||
|
71 | for pack in self.packs: | |||
|
72 | try: | |||
|
73 | return pack.getdeltachain(name, node) | |||
|
74 | except KeyError: | |||
|
75 | pass | |||
|
76 | ||||
|
77 | for pack in self.refresh(): | |||
|
78 | try: | |||
|
79 | return pack.getdeltachain(name, node) | |||
|
80 | except KeyError: | |||
|
81 | pass | |||
|
82 | ||||
|
83 | raise KeyError((name, hex(node))) | |||
|
84 | ||||
|
85 | def add(self, name, node, data): | |||
|
86 | raise RuntimeError("cannot add to datapackstore") | |||
|
87 | ||||
|
88 | class datapack(basepack.basepack): | |||
|
89 | INDEXSUFFIX = INDEXSUFFIX | |||
|
90 | PACKSUFFIX = PACKSUFFIX | |||
|
91 | ||||
|
92 | # Format is <node><delta offset><pack data offset><pack data size> | |||
|
93 | # See the mutabledatapack doccomment for more details. | |||
|
94 | INDEXFORMAT = '!20siQQ' | |||
|
95 | INDEXENTRYLENGTH = 40 | |||
|
96 | ||||
|
97 | SUPPORTED_VERSIONS = [2] | |||
|
98 | ||||
|
99 | def getmissing(self, keys): | |||
|
100 | missing = [] | |||
|
101 | for name, node in keys: | |||
|
102 | value = self._find(node) | |||
|
103 | if not value: | |||
|
104 | missing.append((name, node)) | |||
|
105 | ||||
|
106 | return missing | |||
|
107 | ||||
|
108 | def get(self, name, node): | |||
|
109 | raise RuntimeError("must use getdeltachain with datapack (%s:%s)" | |||
|
110 | % (name, hex(node))) | |||
|
111 | ||||
|
112 | def getmeta(self, name, node): | |||
|
113 | value = self._find(node) | |||
|
114 | if value is None: | |||
|
115 | raise KeyError((name, hex(node))) | |||
|
116 | ||||
|
117 | node, deltabaseoffset, offset, size = value | |||
|
118 | rawentry = self._data[offset:offset + size] | |||
|
119 | ||||
|
120 | # see docstring of mutabledatapack for the format | |||
|
121 | offset = 0 | |||
|
122 | offset += struct.unpack_from('!H', rawentry, offset)[0] + 2 # filename | |||
|
123 | offset += 40 # node, deltabase node | |||
|
124 | offset += struct.unpack_from('!Q', rawentry, offset)[0] + 8 # delta | |||
|
125 | ||||
|
126 | metalen = struct.unpack_from('!I', rawentry, offset)[0] | |||
|
127 | offset += 4 | |||
|
128 | ||||
|
129 | meta = shallowutil.parsepackmeta(rawentry[offset:offset + metalen]) | |||
|
130 | ||||
|
131 | return meta | |||
|
132 | ||||
|
133 | def getdelta(self, name, node): | |||
|
134 | value = self._find(node) | |||
|
135 | if value is None: | |||
|
136 | raise KeyError((name, hex(node))) | |||
|
137 | ||||
|
138 | node, deltabaseoffset, offset, size = value | |||
|
139 | entry = self._readentry(offset, size, getmeta=True) | |||
|
140 | filename, node, deltabasenode, delta, meta = entry | |||
|
141 | ||||
|
142 | # If we've read a lot of data from the mmap, free some memory. | |||
|
143 | self.freememory() | |||
|
144 | ||||
|
145 | return delta, filename, deltabasenode, meta | |||
|
146 | ||||
|
147 | def getdeltachain(self, name, node): | |||
|
148 | value = self._find(node) | |||
|
149 | if value is None: | |||
|
150 | raise KeyError((name, hex(node))) | |||
|
151 | ||||
|
152 | params = self.params | |||
|
153 | ||||
|
154 | # Precompute chains | |||
|
155 | chain = [value] | |||
|
156 | deltabaseoffset = value[1] | |||
|
157 | entrylen = self.INDEXENTRYLENGTH | |||
|
158 | while (deltabaseoffset != FULLTEXTINDEXMARK | |||
|
159 | and deltabaseoffset != NOBASEINDEXMARK): | |||
|
160 | loc = params.indexstart + deltabaseoffset | |||
|
161 | value = struct.unpack(self.INDEXFORMAT, | |||
|
162 | self._index[loc:loc + entrylen]) | |||
|
163 | deltabaseoffset = value[1] | |||
|
164 | chain.append(value) | |||
|
165 | ||||
|
166 | # Read chain data | |||
|
167 | deltachain = [] | |||
|
168 | for node, deltabaseoffset, offset, size in chain: | |||
|
169 | filename, node, deltabasenode, delta = self._readentry(offset, size) | |||
|
170 | deltachain.append((filename, node, filename, deltabasenode, delta)) | |||
|
171 | ||||
|
172 | # If we've read a lot of data from the mmap, free some memory. | |||
|
173 | self.freememory() | |||
|
174 | ||||
|
175 | return deltachain | |||
|
176 | ||||
|
177 | def _readentry(self, offset, size, getmeta=False): | |||
|
178 | rawentry = self._data[offset:offset + size] | |||
|
179 | self._pagedin += len(rawentry) | |||
|
180 | ||||
|
181 | # <2 byte len> + <filename> | |||
|
182 | lengthsize = 2 | |||
|
183 | filenamelen = struct.unpack('!H', rawentry[:2])[0] | |||
|
184 | filename = rawentry[lengthsize:lengthsize + filenamelen] | |||
|
185 | ||||
|
186 | # <20 byte node> + <20 byte deltabase> | |||
|
187 | nodestart = lengthsize + filenamelen | |||
|
188 | deltabasestart = nodestart + NODELENGTH | |||
|
189 | node = rawentry[nodestart:deltabasestart] | |||
|
190 | deltabasenode = rawentry[deltabasestart:deltabasestart + NODELENGTH] | |||
|
191 | ||||
|
192 | # <8 byte len> + <delta> | |||
|
193 | deltastart = deltabasestart + NODELENGTH | |||
|
194 | rawdeltalen = rawentry[deltastart:deltastart + 8] | |||
|
195 | deltalen = struct.unpack('!Q', rawdeltalen)[0] | |||
|
196 | ||||
|
197 | delta = rawentry[deltastart + 8:deltastart + 8 + deltalen] | |||
|
198 | delta = self._decompress(delta) | |||
|
199 | ||||
|
200 | if getmeta: | |||
|
201 | metastart = deltastart + 8 + deltalen | |||
|
202 | metalen = struct.unpack_from('!I', rawentry, metastart)[0] | |||
|
203 | ||||
|
204 | rawmeta = rawentry[metastart + 4:metastart + 4 + metalen] | |||
|
205 | meta = shallowutil.parsepackmeta(rawmeta) | |||
|
206 | return filename, node, deltabasenode, delta, meta | |||
|
207 | else: | |||
|
208 | return filename, node, deltabasenode, delta | |||
|
209 | ||||
|
210 | def _decompress(self, data): | |||
|
211 | return zlib.decompress(data) | |||
|
212 | ||||
|
213 | def add(self, name, node, data): | |||
|
214 | raise RuntimeError("cannot add to datapack (%s:%s)" % (name, node)) | |||
|
215 | ||||
|
216 | def _find(self, node): | |||
|
217 | params = self.params | |||
|
218 | fanoutkey = struct.unpack(params.fanoutstruct, | |||
|
219 | node[:params.fanoutprefix])[0] | |||
|
220 | fanout = self._fanouttable | |||
|
221 | ||||
|
222 | start = fanout[fanoutkey] + params.indexstart | |||
|
223 | indexend = self._indexend | |||
|
224 | ||||
|
225 | # Scan forward to find the first non-same entry, which is the upper | |||
|
226 | # bound. | |||
|
227 | for i in pycompat.xrange(fanoutkey + 1, params.fanoutcount): | |||
|
228 | end = fanout[i] + params.indexstart | |||
|
229 | if end != start: | |||
|
230 | break | |||
|
231 | else: | |||
|
232 | end = indexend | |||
|
233 | ||||
|
234 | # Bisect between start and end to find node | |||
|
235 | index = self._index | |||
|
236 | startnode = index[start:start + NODELENGTH] | |||
|
237 | endnode = index[end:end + NODELENGTH] | |||
|
238 | entrylen = self.INDEXENTRYLENGTH | |||
|
239 | if startnode == node: | |||
|
240 | entry = index[start:start + entrylen] | |||
|
241 | elif endnode == node: | |||
|
242 | entry = index[end:end + entrylen] | |||
|
243 | else: | |||
|
244 | while start < end - entrylen: | |||
|
245 | mid = start + (end - start) // 2 | |||
|
246 | mid = mid - ((mid - params.indexstart) % entrylen) | |||
|
247 | midnode = index[mid:mid + NODELENGTH] | |||
|
248 | if midnode == node: | |||
|
249 | entry = index[mid:mid + entrylen] | |||
|
250 | break | |||
|
251 | if node > midnode: | |||
|
252 | start = mid | |||
|
253 | startnode = midnode | |||
|
254 | elif node < midnode: | |||
|
255 | end = mid | |||
|
256 | endnode = midnode | |||
|
257 | else: | |||
|
258 | return None | |||
|
259 | ||||
|
260 | return struct.unpack(self.INDEXFORMAT, entry) | |||
|
261 | ||||
|
262 | def markledger(self, ledger, options=None): | |||
|
263 | for filename, node in self: | |||
|
264 | ledger.markdataentry(self, filename, node) | |||
|
265 | ||||
|
266 | def cleanup(self, ledger): | |||
|
267 | entries = ledger.sources.get(self, []) | |||
|
268 | allkeys = set(self) | |||
|
269 | repackedkeys = set((e.filename, e.node) for e in entries if | |||
|
270 | e.datarepacked or e.gced) | |||
|
271 | ||||
|
272 | if len(allkeys - repackedkeys) == 0: | |||
|
273 | if self.path not in ledger.created: | |||
|
274 | util.unlinkpath(self.indexpath, ignoremissing=True) | |||
|
275 | util.unlinkpath(self.packpath, ignoremissing=True) | |||
|
276 | ||||
|
277 | def __iter__(self): | |||
|
278 | for f, n, deltabase, deltalen in self.iterentries(): | |||
|
279 | yield f, n | |||
|
280 | ||||
|
281 | def iterentries(self): | |||
|
282 | # Start at 1 to skip the header | |||
|
283 | offset = 1 | |||
|
284 | data = self._data | |||
|
285 | while offset < self.datasize: | |||
|
286 | oldoffset = offset | |||
|
287 | ||||
|
288 | # <2 byte len> + <filename> | |||
|
289 | filenamelen = struct.unpack('!H', data[offset:offset + 2])[0] | |||
|
290 | offset += 2 | |||
|
291 | filename = data[offset:offset + filenamelen] | |||
|
292 | offset += filenamelen | |||
|
293 | ||||
|
294 | # <20 byte node> | |||
|
295 | node = data[offset:offset + constants.NODESIZE] | |||
|
296 | offset += constants.NODESIZE | |||
|
297 | # <20 byte deltabase> | |||
|
298 | deltabase = data[offset:offset + constants.NODESIZE] | |||
|
299 | offset += constants.NODESIZE | |||
|
300 | ||||
|
301 | # <8 byte len> + <delta> | |||
|
302 | rawdeltalen = data[offset:offset + 8] | |||
|
303 | deltalen = struct.unpack('!Q', rawdeltalen)[0] | |||
|
304 | offset += 8 | |||
|
305 | ||||
|
306 | # TODO(augie): we should store a header that is the | |||
|
307 | # uncompressed size. | |||
|
308 | uncompressedlen = len(self._decompress( | |||
|
309 | data[offset:offset + deltalen])) | |||
|
310 | offset += deltalen | |||
|
311 | ||||
|
312 | # <4 byte len> + <metadata-list> | |||
|
313 | metalen = struct.unpack_from('!I', data, offset)[0] | |||
|
314 | offset += 4 + metalen | |||
|
315 | ||||
|
316 | yield (filename, node, deltabase, uncompressedlen) | |||
|
317 | ||||
|
318 | # If we've read a lot of data from the mmap, free some memory. | |||
|
319 | self._pagedin += offset - oldoffset | |||
|
320 | if self.freememory(): | |||
|
321 | data = self._data | |||
|
322 | ||||
|
323 | class mutabledatapack(basepack.mutablebasepack): | |||
|
324 | """A class for constructing and serializing a datapack file and index. | |||
|
325 | ||||
|
326 | A datapack is a pair of files that contain the revision contents for various | |||
|
327 | file revisions in Mercurial. It contains only revision contents (like file | |||
|
328 | contents), not any history information. | |||
|
329 | ||||
|
330 | It consists of two files, with the following format. All bytes are in | |||
|
331 | network byte order (big endian). | |||
|
332 | ||||
|
333 | .datapack | |||
|
334 | The pack itself is a series of revision deltas with some basic header | |||
|
335 | information on each. A revision delta may be a fulltext, represented by | |||
|
336 | a deltabasenode equal to the nullid. | |||
|
337 | ||||
|
338 | datapack = <version: 1 byte> | |||
|
339 | [<revision>,...] | |||
|
340 | revision = <filename len: 2 byte unsigned int> | |||
|
341 | <filename> | |||
|
342 | <node: 20 byte> | |||
|
343 | <deltabasenode: 20 byte> | |||
|
344 | <delta len: 8 byte unsigned int> | |||
|
345 | <delta> | |||
|
346 | <metadata-list len: 4 byte unsigned int> [1] | |||
|
347 | <metadata-list> [1] | |||
|
348 | metadata-list = [<metadata-item>, ...] | |||
|
349 | metadata-item = <metadata-key: 1 byte> | |||
|
350 | <metadata-value len: 2 byte unsigned> | |||
|
351 | <metadata-value> | |||
|
352 | ||||
|
353 | metadata-key could be METAKEYFLAG or METAKEYSIZE, or another single byte | |||
|
354 | value in the future. | |||
|
355 | ||||
|
356 | .dataidx | |||
|
357 | The index file consists of two parts, the fanout and the index. | |||
|
358 | ||||
|
359 | The index is a list of index entries, sorted by node (one per revision | |||
|
360 | in the pack). Each entry has: | |||
|
361 | ||||
|
362 | - node (The 20 byte node of the entry; i.e. the commit hash, file node | |||
|
363 | hash, etc) | |||
|
364 | - deltabase index offset (The location in the index of the deltabase for | |||
|
365 | this entry. The deltabase is the next delta in | |||
|
366 | the chain, with the chain eventually | |||
|
367 | terminating in a full-text, represented by a | |||
|
368 | deltabase offset of -1. This lets us compute | |||
|
369 | delta chains from the index, then do | |||
|
370 | sequential reads from the pack if the revisions | |||
|
371 | are nearby on disk.) | |||
|
372 | - pack entry offset (The location of this entry in the datapack) | |||
|
373 | - pack content size (The on-disk length of this entry's pack data) | |||
|
374 | ||||
|
375 | The fanout is a quick lookup table to reduce the number of steps for | |||
|
376 | bisecting the index. It is a series of 4 byte pointers to positions | |||
|
377 | within the index. It has 2^16 entries, which corresponds to hash | |||
|
378 | prefixes [0000, 0001,..., FFFE, FFFF]. Example: the pointer in slot | |||
|
379 | 4F0A points to the index position of the first revision whose node | |||
|
380 | starts with 4F0A. This saves log(2^16)=16 bisect steps. | |||
|
381 | ||||
|
382 | dataidx = <fanouttable> | |||
|
383 | <index> | |||
|
384 | fanouttable = [<index offset: 4 byte unsigned int>,...] (2^16 entries) | |||
|
385 | index = [<index entry>,...] | |||
|
386 | indexentry = <node: 20 byte> | |||
|
387 | <deltabase location: 4 byte signed int> | |||
|
388 | <pack entry offset: 8 byte unsigned int> | |||
|
389 | <pack entry size: 8 byte unsigned int> | |||
|
390 | ||||
|
391 | [1]: new in version 1. | |||
|
392 | """ | |||
|
393 | INDEXSUFFIX = INDEXSUFFIX | |||
|
394 | PACKSUFFIX = PACKSUFFIX | |||
|
395 | ||||
|
396 | # v[01] index format: <node><delta offset><pack data offset><pack data size> | |||
|
397 | INDEXFORMAT = datapack.INDEXFORMAT | |||
|
398 | INDEXENTRYLENGTH = datapack.INDEXENTRYLENGTH | |||
|
399 | ||||
|
400 | # v1 has metadata support | |||
|
401 | SUPPORTED_VERSIONS = [2] | |||
|
402 | ||||
|
403 | def _compress(self, data): | |||
|
404 | return zlib.compress(data) | |||
|
405 | ||||
|
406 | def add(self, name, node, deltabasenode, delta, metadata=None): | |||
|
407 | # metadata is a dict, ex. {METAKEYFLAG: flag} | |||
|
408 | if len(name) > 2**16: | |||
|
409 | raise RuntimeError(_("name too long %s") % name) | |||
|
410 | if len(node) != 20: | |||
|
411 | raise RuntimeError(_("node should be 20 bytes %s") % node) | |||
|
412 | ||||
|
413 | if node in self.entries: | |||
|
414 | # The revision has already been added | |||
|
415 | return | |||
|
416 | ||||
|
417 | # TODO: allow configurable compression | |||
|
418 | delta = self._compress(delta) | |||
|
419 | ||||
|
420 | rawdata = ''.join(( | |||
|
421 | struct.pack('!H', len(name)), # unsigned 2 byte int | |||
|
422 | name, | |||
|
423 | node, | |||
|
424 | deltabasenode, | |||
|
425 | struct.pack('!Q', len(delta)), # unsigned 8 byte int | |||
|
426 | delta, | |||
|
427 | )) | |||
|
428 | ||||
|
429 | # v1 supports metadata | |||
|
430 | rawmeta = shallowutil.buildpackmeta(metadata) | |||
|
431 | rawdata += struct.pack('!I', len(rawmeta)) # unsigned 4 byte | |||
|
432 | rawdata += rawmeta | |||
|
433 | ||||
|
434 | offset = self.packfp.tell() | |||
|
435 | ||||
|
436 | size = len(rawdata) | |||
|
437 | ||||
|
438 | self.entries[node] = (deltabasenode, offset, size) | |||
|
439 | ||||
|
440 | self.writeraw(rawdata) | |||
|
441 | ||||
|
442 | def createindex(self, nodelocations, indexoffset): | |||
|
443 | entries = sorted((n, db, o, s) for n, (db, o, s) | |||
|
444 | in self.entries.iteritems()) | |||
|
445 | ||||
|
446 | rawindex = '' | |||
|
447 | fmt = self.INDEXFORMAT | |||
|
448 | for node, deltabase, offset, size in entries: | |||
|
449 | if deltabase == nullid: | |||
|
450 | deltabaselocation = FULLTEXTINDEXMARK | |||
|
451 | else: | |||
|
452 | # Instead of storing the deltabase node in the index, let's | |||
|
453 | # store a pointer directly to the index entry for the deltabase. | |||
|
454 | deltabaselocation = nodelocations.get(deltabase, | |||
|
455 | NOBASEINDEXMARK) | |||
|
456 | ||||
|
457 | entry = struct.pack(fmt, node, deltabaselocation, offset, size) | |||
|
458 | rawindex += entry | |||
|
459 | ||||
|
460 | return rawindex |
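The fanout table documented above trades a fixed 2^16-slot table for sixteen saved bisect steps: the first two bytes of a node select a slot, and bisection only has to cover that bucket. A simplified sketch of the lookup follows; the index is reduced to sorted raw nodes with no payload, and empty slots default to the end of the index instead of being forward-scanned the way _find() does.

import bisect
import struct

FANOUTCOUNT = 2 ** 16    # one slot per 2-byte node prefix
NODELENGTH = 20

def buildfanout(nodes):
    # nodes must be sorted; slot k points at the first node whose leading
    # two bytes equal k.
    fanout = [len(nodes)] * FANOUTCOUNT
    for i in reversed(range(len(nodes))):
        (key,) = struct.unpack('!H', nodes[i][:2])
        fanout[key] = i
    return fanout

def find(nodes, fanout, node):
    (key,) = struct.unpack('!H', node[:2])
    start = fanout[key]
    end = fanout[key + 1] if key + 1 < FANOUTCOUNT else len(nodes)
    i = bisect.bisect_left(nodes, node, start, end)  # bisect one bucket only
    return i if i < end and nodes[i] == node else None

nodes = sorted(bytes([b]) * NODELENGTH for b in (1, 2, 2, 7))
fanout = buildfanout(nodes)
assert find(nodes, fanout, bytes([7]) * NODELENGTH) == 3
assert find(nodes, fanout, bytes([3]) * NODELENGTH) is None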
@@ -0,0 +1,378 b'' | |||||
|
1 | # debugcommands.py - debug logic for remotefilelog | |||
|
2 | # | |||
|
3 | # Copyright 2013 Facebook, Inc. | |||
|
4 | # | |||
|
5 | # This software may be used and distributed according to the terms of the | |||
|
6 | # GNU General Public License version 2 or any later version. | |||
|
7 | from __future__ import absolute_import | |||
|
8 | ||||
|
9 | import hashlib | |||
|
10 | import os | |||
|
11 | import zlib | |||
|
12 | ||||
|
13 | from mercurial.node import bin, hex, nullid, short | |||
|
14 | from mercurial.i18n import _ | |||
|
15 | from mercurial import ( | |||
|
16 | error, | |||
|
17 | filelog, | |||
|
18 | node as nodemod, | |||
|
19 | revlog, | |||
|
20 | ) | |||
|
21 | from . import ( | |||
|
22 | constants, | |||
|
23 | datapack, | |||
|
24 | extutil, | |||
|
25 | fileserverclient, | |||
|
26 | historypack, | |||
|
27 | repack, | |||
|
28 | shallowutil, | |||
|
29 | ) | |||
|
30 | ||||
|
31 | def debugremotefilelog(ui, path, **opts): | |||
|
32 | decompress = opts.get(r'decompress') | |||
|
33 | ||||
|
34 | size, firstnode, mapping = parsefileblob(path, decompress) | |||
|
35 | ||||
|
36 | ui.status(_("size: %d bytes\n") % (size)) | |||
|
37 | ui.status(_("path: %s \n") % (path)) | |||
|
38 | ui.status(_("key: %s \n") % (short(firstnode))) | |||
|
39 | ui.status(_("\n")) | |||
|
40 | ui.status(_("%12s => %12s %13s %13s %12s\n") % | |||
|
41 | ("node", "p1", "p2", "linknode", "copyfrom")) | |||
|
42 | ||||
|
43 | queue = [firstnode] | |||
|
44 | while queue: | |||
|
45 | node = queue.pop(0) | |||
|
46 | p1, p2, linknode, copyfrom = mapping[node] | |||
|
47 | ui.status(_("%s => %s %s %s %s\n") % | |||
|
48 | (short(node), short(p1), short(p2), short(linknode), copyfrom)) | |||
|
49 | if p1 != nullid: | |||
|
50 | queue.append(p1) | |||
|
51 | if p2 != nullid: | |||
|
52 | queue.append(p2) | |||
|
53 | ||||
|
54 | def buildtemprevlog(repo, file): | |||
|
55 | # get filename key | |||
|
56 | filekey = nodemod.hex(hashlib.sha1(file).digest()) | |||
|
57 | filedir = os.path.join(repo.path, 'store/data', filekey) | |||
|
58 | ||||
|
59 | # sort all entries based on linkrev | |||
|
60 | fctxs = [] | |||
|
61 | for filenode in os.listdir(filedir): | |||
|
62 | if '_old' not in filenode: | |||
|
63 | fctxs.append(repo.filectx(file, fileid=bin(filenode))) | |||
|
64 | ||||
|
65 | fctxs = sorted(fctxs, key=lambda x: x.linkrev()) | |||
|
66 | ||||
|
67 | # add to revlog | |||
|
68 | temppath = repo.sjoin('data/temprevlog.i') | |||
|
69 | if os.path.exists(temppath): | |||
|
70 | os.remove(temppath) | |||
|
71 | r = filelog.filelog(repo.svfs, 'temprevlog') | |||
|
72 | ||||
|
73 | class faket(object): | |||
|
74 | def add(self, a, b, c): | |||
|
75 | pass | |||
|
76 | t = faket() | |||
|
77 | for fctx in fctxs: | |||
|
78 | if fctx.node() not in repo: | |||
|
79 | continue | |||
|
80 | ||||
|
81 | p = fctx.filelog().parents(fctx.filenode()) | |||
|
82 | meta = {} | |||
|
83 | if fctx.renamed(): | |||
|
84 | meta['copy'] = fctx.renamed()[0] | |||
|
85 | meta['copyrev'] = hex(fctx.renamed()[1]) | |||
|
86 | ||||
|
87 | r.add(fctx.data(), meta, t, fctx.linkrev(), p[0], p[1]) | |||
|
88 | ||||
|
89 | return r | |||
|
90 | ||||
|
91 | def debugindex(orig, ui, repo, file_=None, **opts): | |||
|
92 | """dump the contents of an index file""" | |||
|
93 | if (opts.get(r'changelog') or | |||
|
94 | opts.get(r'manifest') or | |||
|
95 | opts.get(r'dir') or | |||
|
96 | not shallowutil.isenabled(repo) or | |||
|
97 | not repo.shallowmatch(file_)): | |||
|
98 | return orig(ui, repo, file_, **opts) | |||
|
99 | ||||
|
100 | r = buildtemprevlog(repo, file_) | |||
|
101 | ||||
|
102 | # debugindex like normal | |||
|
103 | format = opts.get('format', 0) | |||
|
104 | if format not in (0, 1): | |||
|
105 | raise error.Abort(_("unknown format %d") % format) | |||
|
106 | ||||
|
107 | generaldelta = r.version & revlog.FLAG_GENERALDELTA | |||
|
108 | if generaldelta: | |||
|
109 | basehdr = ' delta' | |||
|
110 | else: | |||
|
111 | basehdr = ' base' | |||
|
112 | ||||
|
113 | if format == 0: | |||
|
114 | ui.write((" rev offset length " + basehdr + " linkrev" | |||
|
115 | " nodeid p1 p2\n")) | |||
|
116 | elif format == 1: | |||
|
117 | ui.write((" rev flag offset length" | |||
|
118 | " size " + basehdr + " link p1 p2" | |||
|
119 | " nodeid\n")) | |||
|
120 | ||||
|
121 | for i in r: | |||
|
122 | node = r.node(i) | |||
|
123 | if generaldelta: | |||
|
124 | base = r.deltaparent(i) | |||
|
125 | else: | |||
|
126 | base = r.chainbase(i) | |||
|
127 | if format == 0: | |||
|
128 | try: | |||
|
129 | pp = r.parents(node) | |||
|
130 | except Exception: | |||
|
131 | pp = [nullid, nullid] | |||
|
132 | ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % ( | |||
|
133 | i, r.start(i), r.length(i), base, r.linkrev(i), | |||
|
134 | short(node), short(pp[0]), short(pp[1]))) | |||
|
135 | elif format == 1: | |||
|
136 | pr = r.parentrevs(i) | |||
|
137 | ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % ( | |||
|
138 | i, r.flags(i), r.start(i), r.length(i), r.rawsize(i), | |||
|
139 | base, r.linkrev(i), pr[0], pr[1], short(node))) | |||
|
140 | ||||
|
141 | def debugindexdot(orig, ui, repo, file_): | |||
|
142 | """dump an index DAG as a graphviz dot file""" | |||
|
143 | if not shallowutil.isenabled(repo): | |||
|
144 | return orig(ui, repo, file_) | |||
|
145 | ||||
|
146 | r = buildtemprevlog(repo, os.path.basename(file_)[:-2]) | |||
|
147 | ||||
|
148 | ui.write(("digraph G {\n")) | |||
|
149 | for i in r: | |||
|
150 | node = r.node(i) | |||
|
151 | pp = r.parents(node) | |||
|
152 | ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i)) | |||
|
153 | if pp[1] != nullid: | |||
|
154 | ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i)) | |||
|
155 | ui.write("}\n") | |||
|
156 | ||||
|
157 | def verifyremotefilelog(ui, path, **opts): | |||
|
158 | decompress = opts.get(r'decompress') | |||
|
159 | ||||
|
160 | for root, dirs, files in os.walk(path): | |||
|
161 | for file in files: | |||
|
162 | if file == "repos": | |||
|
163 | continue | |||
|
164 | filepath = os.path.join(root, file) | |||
|
165 | size, firstnode, mapping = parsefileblob(filepath, decompress) | |||
|
166 | for p1, p2, linknode, copyfrom in mapping.itervalues(): | |||
|
167 | if linknode == nullid: | |||
|
168 | actualpath = os.path.relpath(root, path) | |||
|
169 | key = fileserverclient.getcachekey("reponame", actualpath, | |||
|
170 | file) | |||
|
171 | ui.status("%s %s\n" % (key, os.path.relpath(filepath, | |||
|
172 | path))) | |||
|
173 | ||||
|
174 | def _decompressblob(raw): | |||
|
175 | return zlib.decompress(raw) | |||
|
176 | ||||
|
177 | def parsefileblob(path, decompress): | |||
|
178 | raw = None | |||
|
179 | f = open(path, "rb") | |||
|
180 | try: | |||
|
181 | raw = f.read() | |||
|
182 | finally: | |||
|
183 | f.close() | |||
|
184 | ||||
|
185 | if decompress: | |||
|
186 | raw = _decompressblob(raw) | |||
|
187 | ||||
|
188 | offset, size, flags = shallowutil.parsesizeflags(raw) | |||
|
189 | start = offset + size | |||
|
190 | ||||
|
191 | firstnode = None | |||
|
192 | ||||
|
193 | mapping = {} | |||
|
194 | while start < len(raw): | |||
|
195 | divider = raw.index('\0', start + 80) | |||
|
196 | ||||
|
197 | currentnode = raw[start:(start + 20)] | |||
|
198 | if not firstnode: | |||
|
199 | firstnode = currentnode | |||
|
200 | ||||
|
201 | p1 = raw[(start + 20):(start + 40)] | |||
|
202 | p2 = raw[(start + 40):(start + 60)] | |||
|
203 | linknode = raw[(start + 60):(start + 80)] | |||
|
204 | copyfrom = raw[(start + 80):divider] | |||
|
205 | ||||
|
206 | mapping[currentnode] = (p1, p2, linknode, copyfrom) | |||
|
207 | start = divider + 1 | |||
|
208 | ||||
|
209 | return size, firstnode, mapping | |||
|
210 | ||||
|
211 | def debugdatapack(ui, *paths, **opts): | |||
|
212 | for path in paths: | |||
|
213 | if '.data' in path: | |||
|
214 | path = path[:path.index('.data')] | |||
|
215 | ui.write("%s:\n" % path) | |||
|
216 | dpack = datapack.datapack(path) | |||
|
217 | node = opts.get(r'node') | |||
|
218 | if node: | |||
|
219 | deltachain = dpack.getdeltachain('', bin(node)) | |||
|
220 | dumpdeltachain(ui, deltachain, **opts) | |||
|
221 | return | |||
|
222 | ||||
|
223 | if opts.get(r'long'): | |||
|
224 | hashformatter = hex | |||
|
225 | hashlen = 42 | |||
|
226 | else: | |||
|
227 | hashformatter = short | |||
|
228 | hashlen = 14 | |||
|
229 | ||||
|
230 | lastfilename = None | |||
|
231 | totaldeltasize = 0 | |||
|
232 | totalblobsize = 0 | |||
|
233 | def printtotals(): | |||
|
234 | if lastfilename is not None: | |||
|
235 | ui.write("\n") | |||
|
236 | if not totaldeltasize or not totalblobsize: | |||
|
237 | return | |||
|
238 | difference = totalblobsize - totaldeltasize | |||
|
239 | deltastr = "%0.1f%% %s" % ( | |||
|
240 | (100.0 * abs(difference) / totalblobsize), | |||
|
241 | ("smaller" if difference > 0 else "bigger")) | |||
|
242 | ||||
|
243 | ui.write(("Total:%s%s %s (%s)\n") % ( | |||
|
244 | "".ljust(2 * hashlen - len("Total:")), | |||
|
245 | ('%d' % totaldeltasize).ljust(12), | |||
|
246 | ('%d' % totalblobsize).ljust(9), | |||
|
247 | deltastr | |||
|
248 | )) | |||
|
249 | ||||
|
250 | bases = {} | |||
|
251 | nodes = set() | |||
|
252 | failures = 0 | |||
|
253 | for filename, node, deltabase, deltalen in dpack.iterentries(): | |||
|
254 | bases[node] = deltabase | |||
|
255 | if node in nodes: | |||
|
256 | ui.write(("Bad entry: %s appears twice\n" % short(node))) | |||
|
257 | failures += 1 | |||
|
258 | nodes.add(node) | |||
|
259 | if filename != lastfilename: | |||
|
260 | printtotals() | |||
|
261 | name = '(empty name)' if filename == '' else filename | |||
|
262 | ui.write("%s:\n" % name) | |||
|
263 | ui.write("%s%s%s%s\n" % ( | |||
|
264 | "Node".ljust(hashlen), | |||
|
265 | "Delta Base".ljust(hashlen), | |||
|
266 | "Delta Length".ljust(14), | |||
|
267 | "Blob Size".ljust(9))) | |||
|
268 | lastfilename = filename | |||
|
269 | totalblobsize = 0 | |||
|
270 | totaldeltasize = 0 | |||
|
271 | ||||
|
272 | # Metadata could be missing, in which case it will be an empty dict. | |||
|
273 | meta = dpack.getmeta(filename, node) | |||
|
274 | if constants.METAKEYSIZE in meta: | |||
|
275 | blobsize = meta[constants.METAKEYSIZE] | |||
|
276 | totaldeltasize += deltalen | |||
|
277 | totalblobsize += blobsize | |||
|
278 | else: | |||
|
279 | blobsize = "(missing)" | |||
|
280 | ui.write("%s %s %s%d\n" % ( | |||
|
281 | hashformatter(node), | |||
|
282 | hashformatter(deltabase), | |||
|
283 | ('%d' % deltalen).ljust(14), | |||
|
284 | blobsize)) | |||
|
285 | ||||
|
286 | if filename is not None: | |||
|
287 | printtotals() | |||
|
288 | ||||
|
289 | failures += _sanitycheck(ui, set(nodes), bases) | |||
|
290 | if failures > 1: | |||
|
291 | ui.warn(("%d failures\n" % failures)) | |||
|
292 | return 1 | |||
|
293 | ||||
|
294 | def _sanitycheck(ui, nodes, bases): | |||
|
295 | """ | |||
|
296 | Does some basic sanity checking on a packfiles with ``nodes`` ``bases`` (a | |||
|
297 | mapping of node->base): | |||
|
298 | ||||
|
299 | - Each deltabase must itself be a node elsewhere in the pack | |||
|
300 | - There must be no cycles | |||
|
301 | """ | |||
|
302 | failures = 0 | |||
|
303 | for node in nodes: | |||
|
304 | seen = set() | |||
|
305 | current = node | |||
|
306 | deltabase = bases[current] | |||
|
307 | ||||
|
308 | while deltabase != nullid: | |||
|
309 | if deltabase not in nodes: | |||
|
310 | ui.warn(("Bad entry: %s has an unknown deltabase (%s)\n" % | |||
|
311 | (short(node), short(deltabase)))) | |||
|
312 | failures += 1 | |||
|
313 | break | |||
|
314 | ||||
|
315 | if deltabase in seen: | |||
|
316 | ui.warn(("Bad entry: %s has a cycle (at %s)\n" % | |||
|
317 | (short(node), short(deltabase)))) | |||
|
318 | failures += 1 | |||
|
319 | break | |||
|
320 | ||||
|
321 | current = deltabase | |||
|
322 | seen.add(current) | |||
|
323 | deltabase = bases[current] | |||
|
324 | # Since ``node`` begins a valid chain, reset/memoize its base to nullid | |||
|
325 | # so we don't traverse it again. | |||
|
326 | bases[node] = nullid | |||
|
327 | return failures | |||
|
328 | ||||
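The walk in `_sanitycheck` above is self-contained enough to illustrate on its own. A condensed sketch of the same two checks (dangling delta bases and cycles), with a made-up `NULLID` constant standing in for `mercurial.node.nullid`:

    # Condensed sketch of the checks above; NULLID stands in for
    # mercurial.node.nullid (twenty zero bytes).
    NULLID = b'\0' * 20

    def sanitycheck(nodes, bases):
        """Count bad entries in a node -> deltabase mapping: every base
        must itself be a known node, and no chain may contain a cycle."""
        failures = 0
        for node in nodes:
            seen = set()
            current = node
            while bases[current] != NULLID:
                deltabase = bases[current]
                if deltabase not in nodes:
                    failures += 1  # dangling deltabase
                    break
                if deltabase in seen:
                    failures += 1  # cycle
                    break
                seen.add(deltabase)
                current = deltabase
            else:
                # Chain verified down to NULLID: memoize, as the code
                # above does, so later walks stop at ``node`` at once.
                bases[node] = NULLID
        return failures

    # A two-entry cycle is reported once per participating node.
    a, b = b'\x01' * 20, b'\x02' * 20
    assert sanitycheck({a, b}, {a: b, b: a}) == 2

Unlike the version above, this sketch only memoizes chains that actually verified; the behaviour is otherwise the same.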
|
329 | def dumpdeltachain(ui, deltachain, **opts): | |||
|
330 | hashformatter = hex | |||
|
331 | hashlen = 40 | |||
|
332 | ||||
|
333 | lastfilename = None | |||
|
334 | for filename, node, filename, deltabasenode, delta in deltachain: | |||
|
335 | if filename != lastfilename: | |||
|
336 | ui.write("\n%s\n" % filename) | |||
|
337 | lastfilename = filename | |||
|
338 | ui.write("%s %s %s %s\n" % ( | |||
|
339 | "Node".ljust(hashlen), | |||
|
340 | "Delta Base".ljust(hashlen), | |||
|
341 | "Delta SHA1".ljust(hashlen), | |||
|
342 | "Delta Length".ljust(6), | |||
|
343 | )) | |||
|
344 | ||||
|
345 | ui.write("%s %s %s %d\n" % ( | |||
|
346 | hashformatter(node), | |||
|
347 | hashformatter(deltabasenode), | |||
|
348 | nodemod.hex(hashlib.sha1(delta).digest()), | |||
|
349 | len(delta))) | |||
|
350 | ||||
|
351 | def debughistorypack(ui, path): | |||
|
352 | if '.hist' in path: | |||
|
353 | path = path[:path.index('.hist')] | |||
|
354 | hpack = historypack.historypack(path) | |||
|
355 | ||||
|
356 | lastfilename = None | |||
|
357 | for entry in hpack.iterentries(): | |||
|
358 | filename, node, p1node, p2node, linknode, copyfrom = entry | |||
|
359 | if filename != lastfilename: | |||
|
360 | ui.write("\n%s\n" % filename) | |||
|
361 | ui.write("%s%s%s%s%s\n" % ( | |||
|
362 | "Node".ljust(14), | |||
|
363 | "P1 Node".ljust(14), | |||
|
364 | "P2 Node".ljust(14), | |||
|
365 | "Link Node".ljust(14), | |||
|
366 | "Copy From")) | |||
|
367 | lastfilename = filename | |||
|
368 | ui.write("%s %s %s %s %s\n" % (short(node), short(p1node), | |||
|
369 | short(p2node), short(linknode), copyfrom)) | |||
|
370 | ||||
|
371 | def debugwaitonrepack(repo): | |||
|
372 | with extutil.flock(repack.repacklockvfs(repo).join('repacklock'), ''): | |||
|
373 | return | |||
|
374 | ||||
|
375 | def debugwaitonprefetch(repo): | |||
|
376 | with repo._lock(repo.svfs, "prefetchlock", True, None, | |||
|
377 | None, _('prefetching in %s') % repo.origroot): | |||
|
378 | pass |
@@ -0,0 +1,66 b'' | |||||
|
1 | # extutil.py - useful utility methods for extensions | |||
|
2 | # | |||
|
3 | # Copyright 2016 Facebook | |||
|
4 | # | |||
|
5 | # This software may be used and distributed according to the terms of the | |||
|
6 | # GNU General Public License version 2 or any later version. | |||
|
7 | ||||
|
8 | from __future__ import absolute_import | |||
|
9 | ||||
|
10 | import contextlib | |||
|
11 | import errno | |||
|
12 | import os | |||
|
13 | import time | |||
|
14 | ||||
|
15 | from mercurial import ( | |||
|
16 | error, | |||
|
17 | lock as lockmod, | |||
|
18 | util, | |||
|
19 | vfs as vfsmod, | |||
|
20 | ) | |||
|
21 | ||||
|
22 | @contextlib.contextmanager | |||
|
23 | def flock(lockpath, description, timeout=-1): | |||
|
24 | """A flock based lock object. Currently it is always non-blocking. | |||
|
25 | ||||
|
26 | Note that since it is flock based, you can accidentally take it multiple | |||
|
27 | times within one process and the first one to be released will release all | |||
|
28 | of them. So the caller needs to be careful to not create more than one | |||
|
29 | instance per lock. | |||
|
30 | """ | |||
|
31 | ||||
|
32 | # best effort lightweight lock | |||
|
33 | try: | |||
|
34 | import fcntl | |||
|
35 | fcntl.flock | |||
|
36 | except ImportError: | |||
|
37 | # fallback to Mercurial lock | |||
|
38 | vfs = vfsmod.vfs(os.path.dirname(lockpath)) | |||
|
39 | with lockmod.lock(vfs, os.path.basename(lockpath), timeout=timeout): | |||
|
40 | yield | |||
|
41 | return | |||
|
42 | # make sure lock file exists | |||
|
43 | util.makedirs(os.path.dirname(lockpath)) | |||
|
44 | with open(lockpath, 'a'): | |||
|
45 | pass | |||
|
46 | lockfd = os.open(lockpath, os.O_RDONLY, 0o664) | |||
|
47 | start = time.time() | |||
|
48 | while True: | |||
|
49 | try: | |||
|
50 | fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB) | |||
|
51 | break | |||
|
52 | except IOError as ex: | |||
|
53 | if ex.errno == errno.EAGAIN: | |||
|
54 | if timeout != -1 and time.time() - start > timeout: | |||
|
55 | raise error.LockHeld(errno.EAGAIN, lockpath, description, | |||
|
56 | '') | |||
|
57 | else: | |||
|
58 | time.sleep(0.05) | |||
|
59 | continue | |||
|
60 | raise | |||
|
61 | ||||
|
62 | try: | |||
|
63 | yield | |||
|
64 | finally: | |||
|
65 | fcntl.flock(lockfd, fcntl.LOCK_UN) | |||
|
66 | os.close(lockfd) |
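For illustration, a caller serializing access to a shared cache directory with this helper might look as follows; the lock path, description, and five-second timeout are invented for the example:

    # Hypothetical caller of the flock() helper above. Per the docstring,
    # keep at most one instance per lock path per process: releasing any
    # one of them releases them all.
    from mercurial import error
    from hgext.remotefilelog import extutil  # assumed import location

    def update_shared_cache(cachedir):
        lockpath = cachedir + '/updatelock'
        try:
            with extutil.flock(lockpath, 'cache update', timeout=5):
                pass  # ... rewrite files under cachedir ...
        except error.LockHeld:
            pass  # another process held the lock for the whole 5s

A timeout of -1 (the default) polls indefinitely instead of raising `LockHeld`.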
@@ -0,0 +1,581 b'' | |||||
|
1 | # fileserverclient.py - client for communicating with the cache process | |||
|
2 | # | |||
|
3 | # Copyright 2013 Facebook, Inc. | |||
|
4 | # | |||
|
5 | # This software may be used and distributed according to the terms of the | |||
|
6 | # GNU General Public License version 2 or any later version. | |||
|
7 | ||||
|
8 | from __future__ import absolute_import | |||
|
9 | ||||
|
10 | import hashlib | |||
|
11 | import io | |||
|
12 | import os | |||
|
13 | import threading | |||
|
14 | import time | |||
|
15 | import zlib | |||
|
16 | ||||
|
17 | from mercurial.i18n import _ | |||
|
18 | from mercurial.node import bin, hex, nullid | |||
|
19 | from mercurial import ( | |||
|
20 | error, | |||
|
21 | node, | |||
|
22 | pycompat, | |||
|
23 | revlog, | |||
|
24 | sshpeer, | |||
|
25 | util, | |||
|
26 | wireprotov1peer, | |||
|
27 | ) | |||
|
28 | from mercurial.utils import procutil | |||
|
29 | ||||
|
30 | from . import ( | |||
|
31 | constants, | |||
|
32 | contentstore, | |||
|
33 | metadatastore, | |||
|
34 | ) | |||
|
35 | ||||
|
36 | _sshv1peer = sshpeer.sshv1peer | |||
|
37 | ||||
|
38 | # Statistics for debugging | |||
|
39 | fetchcost = 0 | |||
|
40 | fetches = 0 | |||
|
41 | fetched = 0 | |||
|
42 | fetchmisses = 0 | |||
|
43 | ||||
|
44 | _lfsmod = None | |||
|
45 | ||||
|
46 | def getcachekey(reponame, file, id): | |||
|
47 | pathhash = node.hex(hashlib.sha1(file).digest()) | |||
|
48 | return os.path.join(reponame, pathhash[:2], pathhash[2:], id) | |||
|
49 | ||||
|
50 | def getlocalkey(file, id): | |||
|
51 | pathhash = node.hex(hashlib.sha1(file).digest()) | |||
|
52 | return os.path.join(pathhash, id) | |||
|
53 | ||||
|
54 | def peersetup(ui, peer): | |||
|
55 | ||||
|
56 | class remotefilepeer(peer.__class__): | |||
|
57 | @wireprotov1peer.batchable | |||
|
58 | def x_rfl_getfile(self, file, node): | |||
|
59 | if not self.capable('x_rfl_getfile'): | |||
|
60 | raise error.Abort( | |||
|
61 | 'configured remotefile server does not support getfile') | |||
|
62 | f = wireprotov1peer.future() | |||
|
63 | yield {'file': file, 'node': node}, f | |||
|
64 | code, data = f.value.split('\0', 1) | |||
|
65 | if int(code): | |||
|
66 | raise error.LookupError(file, node, data) | |||
|
67 | yield data | |||
|
68 | ||||
|
69 | @wireprotov1peer.batchable | |||
|
70 | def x_rfl_getflogheads(self, path): | |||
|
71 | if not self.capable('x_rfl_getflogheads'): | |||
|
72 | raise error.Abort('configured remotefile server does not ' | |||
|
73 | 'support getflogheads') | |||
|
74 | f = wireprotov1peer.future() | |||
|
75 | yield {'path': path}, f | |||
|
76 | heads = f.value.split('\n') if f.value else [] | |||
|
77 | yield heads | |||
|
78 | ||||
|
79 | def _updatecallstreamopts(self, command, opts): | |||
|
80 | if command != 'getbundle': | |||
|
81 | return | |||
|
82 | if (constants.NETWORK_CAP_LEGACY_SSH_GETFILES | |||
|
83 | not in self.capabilities()): | |||
|
84 | return | |||
|
85 | if not util.safehasattr(self, '_localrepo'): | |||
|
86 | return | |||
|
87 | if (constants.SHALLOWREPO_REQUIREMENT | |||
|
88 | not in self._localrepo.requirements): | |||
|
89 | return | |||
|
90 | ||||
|
91 | bundlecaps = opts.get('bundlecaps') | |||
|
92 | if bundlecaps: | |||
|
93 | bundlecaps = [bundlecaps] | |||
|
94 | else: | |||
|
95 | bundlecaps = [] | |||
|
96 | ||||
|
97 | # shallow, includepattern, and excludepattern are a hacky way of | |||
|
98 | # carrying over data from the local repo to this getbundle | |||
|
99 | # command. We need to do it this way because bundle1 getbundle | |||
|
100 | # doesn't provide any other place we can hook in to manipulate | |||
|
101 | # getbundle args before it goes across the wire. Once we get rid | |||
|
102 | # of bundle1, we can use bundle2's _pullbundle2extraprepare to | |||
|
103 | # do this more cleanly. | |||
|
104 | bundlecaps.append(constants.BUNDLE2_CAPABLITY) | |||
|
105 | if self._localrepo.includepattern: | |||
|
106 | patterns = '\0'.join(self._localrepo.includepattern) | |||
|
107 | includecap = "includepattern=" + patterns | |||
|
108 | bundlecaps.append(includecap) | |||
|
109 | if self._localrepo.excludepattern: | |||
|
110 | patterns = '\0'.join(self._localrepo.excludepattern) | |||
|
111 | excludecap = "excludepattern=" + patterns | |||
|
112 | bundlecaps.append(excludecap) | |||
|
113 | opts['bundlecaps'] = ','.join(bundlecaps) | |||
|
114 | ||||
|
115 | def _sendrequest(self, command, args, **opts): | |||
|
116 | self._updatecallstreamopts(command, args) | |||
|
117 | return super(remotefilepeer, self)._sendrequest(command, args, | |||
|
118 | **opts) | |||
|
119 | ||||
|
120 | def _callstream(self, command, **opts): | |||
|
121 | supertype = super(remotefilepeer, self) | |||
|
122 | if not util.safehasattr(supertype, '_sendrequest'): | |||
|
123 | self._updatecallstreamopts(command, pycompat.byteskwargs(opts)) | |||
|
124 | return super(remotefilepeer, self)._callstream(command, **opts) | |||
|
125 | ||||
|
126 | peer.__class__ = remotefilepeer | |||
|
127 | ||||
|
128 | class cacheconnection(object): | |||
|
129 | """The connection for communicating with the remote cache. Performs | |||
|
130 | gets and sets by communicating with an external process that has the | |||
|
131 | cache-specific implementation. | |||
|
132 | """ | |||
|
133 | def __init__(self): | |||
|
134 | self.pipeo = self.pipei = self.pipee = None | |||
|
135 | self.subprocess = None | |||
|
136 | self.connected = False | |||
|
137 | ||||
|
138 | def connect(self, cachecommand): | |||
|
139 | if self.pipeo: | |||
|
140 | raise error.Abort(_("cache connection already open")) | |||
|
141 | self.pipei, self.pipeo, self.pipee, self.subprocess = \ | |||
|
142 | procutil.popen4(cachecommand) | |||
|
143 | self.connected = True | |||
|
144 | ||||
|
145 | def close(self): | |||
|
146 | def tryclose(pipe): | |||
|
147 | try: | |||
|
148 | pipe.close() | |||
|
149 | except Exception: | |||
|
150 | pass | |||
|
151 | if self.connected: | |||
|
152 | try: | |||
|
153 | self.pipei.write("exit\n") | |||
|
154 | except Exception: | |||
|
155 | pass | |||
|
156 | tryclose(self.pipei) | |||
|
157 | self.pipei = None | |||
|
158 | tryclose(self.pipeo) | |||
|
159 | self.pipeo = None | |||
|
160 | tryclose(self.pipee) | |||
|
161 | self.pipee = None | |||
|
162 | try: | |||
|
163 | # Wait for process to terminate, making sure to avoid deadlock. | |||
|
164 | # See https://docs.python.org/2/library/subprocess.html for | |||
|
165 | # warnings about wait() and deadlocking. | |||
|
166 | self.subprocess.communicate() | |||
|
167 | except Exception: | |||
|
168 | pass | |||
|
169 | self.subprocess = None | |||
|
170 | self.connected = False | |||
|
171 | ||||
|
172 | def request(self, request, flush=True): | |||
|
173 | if self.connected: | |||
|
174 | try: | |||
|
175 | self.pipei.write(request) | |||
|
176 | if flush: | |||
|
177 | self.pipei.flush() | |||
|
178 | except IOError: | |||
|
179 | self.close() | |||
|
180 | ||||
|
181 | def receiveline(self): | |||
|
182 | if not self.connected: | |||
|
183 | return None | |||
|
184 | try: | |||
|
185 | result = self.pipeo.readline()[:-1] | |||
|
186 | if not result: | |||
|
187 | self.close() | |||
|
188 | except IOError: | |||
|
189 | self.close(); result = None | |||
|
190 | ||||
|
191 | return result | |||
|
192 | ||||
|
193 | def _getfilesbatch( | |||
|
194 | remote, receivemissing, progresstick, missed, idmap, batchsize): | |||
|
195 | # Over http(s), iterbatch is a streamy method and we can start | |||
|
196 | # looking at results early. This means we send one (potentially | |||
|
197 | # large) request, but then we show nice progress as we process | |||
|
198 | # file results, rather than showing chunks of $batchsize in | |||
|
199 | # progress. | |||
|
200 | # | |||
|
201 | # Over ssh, iterbatch isn't streamy because batch() wasn't | |||
|
202 | # explicitly designed as a streaming method. In the future we | |||
|
203 | # should probably introduce a streambatch() method upstream and | |||
|
204 | # use that for this. | |||
|
205 | with remote.commandexecutor() as e: | |||
|
206 | futures = [] | |||
|
207 | for m in missed: | |||
|
208 | futures.append(e.callcommand('x_rfl_getfile', { | |||
|
209 | 'file': idmap[m], | |||
|
210 | 'node': m[-40:] | |||
|
211 | })) | |||
|
212 | ||||
|
213 | for i, m in enumerate(missed): | |||
|
214 | r = futures[i].result() | |||
|
215 | futures[i] = None # release memory | |||
|
216 | file_ = idmap[m] | |||
|
217 | node = m[-40:] | |||
|
218 | receivemissing(io.BytesIO('%d\n%s' % (len(r), r)), file_, node) | |||
|
219 | progresstick() | |||
|
220 | ||||
|
221 | def _getfiles_optimistic( | |||
|
222 | remote, receivemissing, progresstick, missed, idmap, step): | |||
|
223 | remote._callstream("x_rfl_getfiles") | |||
|
224 | i = 0 | |||
|
225 | pipeo = remote._pipeo | |||
|
226 | pipei = remote._pipei | |||
|
227 | while i < len(missed): | |||
|
228 | # issue a batch of requests | |||
|
229 | start = i | |||
|
230 | end = min(len(missed), start + step) | |||
|
231 | i = end | |||
|
232 | for missingid in missed[start:end]: | |||
|
233 | # issue new request | |||
|
234 | versionid = missingid[-40:] | |||
|
235 | file = idmap[missingid] | |||
|
236 | sshrequest = "%s%s\n" % (versionid, file) | |||
|
237 | pipeo.write(sshrequest) | |||
|
238 | pipeo.flush() | |||
|
239 | ||||
|
240 | # receive batch results | |||
|
241 | for missingid in missed[start:end]: | |||
|
242 | versionid = missingid[-40:] | |||
|
243 | file = idmap[missingid] | |||
|
244 | receivemissing(pipei, file, versionid) | |||
|
245 | progresstick() | |||
|
246 | ||||
|
247 | # End the command | |||
|
248 | pipeo.write('\n') | |||
|
249 | pipeo.flush() | |||
|
250 | ||||
|
251 | def _getfiles_threaded( | |||
|
252 | remote, receivemissing, progresstick, missed, idmap, step): | |||
|
253 | remote._callstream("getfiles") | |||
|
254 | pipeo = remote._pipeo | |||
|
255 | pipei = remote._pipei | |||
|
256 | ||||
|
257 | def writer(): | |||
|
258 | for missingid in missed: | |||
|
259 | versionid = missingid[-40:] | |||
|
260 | file = idmap[missingid] | |||
|
261 | sshrequest = "%s%s\n" % (versionid, file) | |||
|
262 | pipeo.write(sshrequest) | |||
|
263 | pipeo.flush() | |||
|
264 | writerthread = threading.Thread(target=writer) | |||
|
265 | writerthread.daemon = True | |||
|
266 | writerthread.start() | |||
|
267 | ||||
|
268 | for missingid in missed: | |||
|
269 | versionid = missingid[-40:] | |||
|
270 | file = idmap[missingid] | |||
|
271 | receivemissing(pipei, file, versionid) | |||
|
272 | progresstick() | |||
|
273 | ||||
|
274 | writerthread.join() | |||
|
275 | # End the command | |||
|
276 | pipeo.write('\n') | |||
|
277 | pipeo.flush() | |||
|
278 | ||||
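Both fetch helpers pipeline requests so the server can start answering while the client is still writing; `_getfiles_threaded` takes this furthest by streaming every request from a background thread while the main thread consumes replies in order. The shape of that pattern, reduced to a runnable toy where a queue stands in for the ssh pipes:

    # Write-behind pipelining in miniature: one thread issues requests,
    # the caller reads same-order replies.
    import threading
    try:
        import Queue as queue  # Python 2, matching the code above
    except ImportError:
        import queue

    def pipelined(requests, send, recv):
        writer = threading.Thread(
            target=lambda: [send(r) for r in requests])
        writer.daemon = True
        writer.start()
        results = [recv() for _ in requests]  # replies arrive in order
        writer.join()
        return results

    # Demo transport: a queue acting as an echo server.
    q = queue.Queue()
    assert pipelined([1, 2, 3], q.put, q.get) == [1, 2, 3]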
|
279 | class fileserverclient(object): | |||
|
280 | """A client for requesting files from the remote file server. | |||
|
281 | """ | |||
|
282 | def __init__(self, repo): | |||
|
283 | ui = repo.ui | |||
|
284 | self.repo = repo | |||
|
285 | self.ui = ui | |||
|
286 | self.cacheprocess = ui.config("remotefilelog", "cacheprocess") | |||
|
287 | if self.cacheprocess: | |||
|
288 | self.cacheprocess = util.expandpath(self.cacheprocess) | |||
|
289 | ||||
|
290 | # This option causes remotefilelog to pass the full file path to the | |||
|
291 | # cacheprocess instead of a hashed key. | |||
|
292 | self.cacheprocesspasspath = ui.configbool( | |||
|
293 | "remotefilelog", "cacheprocess.includepath") | |||
|
294 | ||||
|
295 | self.debugoutput = ui.configbool("remotefilelog", "debug") | |||
|
296 | ||||
|
297 | self.remotecache = cacheconnection() | |||
|
298 | ||||
|
299 | def setstore(self, datastore, historystore, writedata, writehistory): | |||
|
300 | self.datastore = datastore | |||
|
301 | self.historystore = historystore | |||
|
302 | self.writedata = writedata | |||
|
303 | self.writehistory = writehistory | |||
|
304 | ||||
|
305 | def _connect(self): | |||
|
306 | return self.repo.connectionpool.get(self.repo.fallbackpath) | |||
|
307 | ||||
|
308 | def request(self, fileids): | |||
|
309 | """Takes a list of filename/node pairs and fetches them from the | |||
|
310 | server. Files are stored in the local cache. | |||
|
311 | A list of nodes that the server couldn't find is returned. | |||
|
312 | If the connection fails, an exception is raised. | |||
|
313 | """ | |||
|
314 | if not self.remotecache.connected: | |||
|
315 | self.connect() | |||
|
316 | cache = self.remotecache | |||
|
317 | writedata = self.writedata | |||
|
318 | ||||
|
319 | repo = self.repo | |||
|
320 | total = len(fileids) | |||
|
321 | request = "get\n%d\n" % total | |||
|
322 | idmap = {} | |||
|
323 | reponame = repo.name | |||
|
324 | for file, id in fileids: | |||
|
325 | fullid = getcachekey(reponame, file, id) | |||
|
326 | if self.cacheprocesspasspath: | |||
|
327 | request += file + '\0' | |||
|
328 | request += fullid + "\n" | |||
|
329 | idmap[fullid] = file | |||
|
330 | ||||
|
331 | cache.request(request) | |||
|
332 | ||||
|
333 | progress = self.ui.makeprogress(_('downloading'), total=total) | |||
|
334 | progress.update(0) | |||
|
335 | ||||
|
336 | missed = [] | |||
|
337 | while True: | |||
|
338 | missingid = cache.receiveline() | |||
|
339 | if not missingid: | |||
|
340 | missedset = set(missed) | |||
|
341 | for missingid in idmap: | |||
|
342 | if not missingid in missedset: | |||
|
343 | missed.append(missingid) | |||
|
344 | self.ui.warn(_("warning: cache connection closed early - " + | |||
|
345 | "falling back to server\n")) | |||
|
346 | break | |||
|
347 | if missingid == "0": | |||
|
348 | break | |||
|
349 | if missingid.startswith("_hits_"): | |||
|
350 | # receive progress reports | |||
|
351 | parts = missingid.split("_") | |||
|
352 | progress.increment(int(parts[2])) | |||
|
353 | continue | |||
|
354 | ||||
|
355 | missed.append(missingid) | |||
|
356 | ||||
|
357 | global fetchmisses | |||
|
358 | fetchmisses += len(missed) | |||
|
359 | ||||
|
360 | fromcache = total - len(missed) | |||
|
361 | progress.update(fromcache, total=total) | |||
|
362 | self.ui.log("remotefilelog", "remote cache hit rate is %r of %r\n", | |||
|
363 | fromcache, total, hit=fromcache, total=total) | |||
|
364 | ||||
|
365 | oldumask = os.umask(0o002) | |||
|
366 | try: | |||
|
367 | # receive cache misses from master | |||
|
368 | if missed: | |||
|
369 | # When verbose is true, sshpeer prints 'running ssh...' | |||
|
370 | # to stdout, which can interfere with some command | |||
|
371 | # outputs | |||
|
372 | verbose = self.ui.verbose | |||
|
373 | self.ui.verbose = False | |||
|
374 | try: | |||
|
375 | with self._connect() as conn: | |||
|
376 | remote = conn.peer | |||
|
377 | if remote.capable( | |||
|
378 | constants.NETWORK_CAP_LEGACY_SSH_GETFILES): | |||
|
379 | if not isinstance(remote, _sshv1peer): | |||
|
380 | raise error.Abort('remotefilelog requires ssh ' | |||
|
381 | 'servers') | |||
|
382 | step = self.ui.configint('remotefilelog', | |||
|
383 | 'getfilesstep') | |||
|
384 | getfilestype = self.ui.config('remotefilelog', | |||
|
385 | 'getfilestype') | |||
|
386 | if getfilestype == 'threaded': | |||
|
387 | _getfiles = _getfiles_threaded | |||
|
388 | else: | |||
|
389 | _getfiles = _getfiles_optimistic | |||
|
390 | _getfiles(remote, self.receivemissing, | |||
|
391 | progress.increment, missed, idmap, step) | |||
|
392 | elif remote.capable("x_rfl_getfile"): | |||
|
393 | if remote.capable('batch'): | |||
|
394 | batchdefault = 100 | |||
|
395 | else: | |||
|
396 | batchdefault = 10 | |||
|
397 | batchsize = self.ui.configint( | |||
|
398 | 'remotefilelog', 'batchsize', batchdefault) | |||
|
399 | _getfilesbatch( | |||
|
400 | remote, self.receivemissing, progress.increment, | |||
|
401 | missed, idmap, batchsize) | |||
|
402 | else: | |||
|
403 | raise error.Abort("configured remotefilelog server" | |||
|
404 | " does not support remotefilelog") | |||
|
405 | ||||
|
406 | self.ui.log("remotefilefetchlog", | |||
|
407 | "Success\n", | |||
|
408 | fetched_files = progress.pos - fromcache, | |||
|
409 | total_to_fetch = total - fromcache) | |||
|
410 | except Exception: | |||
|
411 | self.ui.log("remotefilefetchlog", | |||
|
412 | "Fail\n", | |||
|
413 | fetched_files = progress.pos - fromcache, | |||
|
414 | total_to_fetch = total - fromcache) | |||
|
415 | raise | |||
|
416 | finally: | |||
|
417 | self.ui.verbose = verbose | |||
|
418 | # send to memcache | |||
|
419 | request = "set\n%d\n%s\n" % (len(missed), "\n".join(missed)) | |||
|
420 | cache.request(request) | |||
|
421 | ||||
|
422 | progress.complete() | |||
|
423 | ||||
|
424 | # mark ourselves as a user of this cache | |||
|
425 | writedata.markrepo(self.repo.path) | |||
|
426 | finally: | |||
|
427 | os.umask(oldumask) | |||
|
428 | ||||
|
429 | def receivemissing(self, pipe, filename, node): | |||
|
430 | line = pipe.readline()[:-1] | |||
|
431 | if not line: | |||
|
432 | raise error.ResponseError(_("error downloading file contents:"), | |||
|
433 | _("connection closed early")) | |||
|
434 | size = int(line) | |||
|
435 | data = pipe.read(size) | |||
|
436 | if len(data) != size: | |||
|
437 | raise error.ResponseError(_("error downloading file contents:"), | |||
|
438 | _("only received %s of %s bytes") | |||
|
439 | % (len(data), size)) | |||
|
440 | ||||
|
441 | self.writedata.addremotefilelognode(filename, bin(node), | |||
|
442 | zlib.decompress(data)) | |||
|
443 | ||||
|
444 | def connect(self): | |||
|
445 | if self.cacheprocess: | |||
|
446 | cmd = "%s %s" % (self.cacheprocess, self.writedata._path) | |||
|
447 | self.remotecache.connect(cmd) | |||
|
448 | else: | |||
|
449 | # If no cache process is specified, we fake one that always | |||
|
450 | # returns cache misses. This enables tests to run easily | |||
|
451 | # and may eventually allow us to be a drop-in replacement | |||
|
452 | # for the largefiles extension. | |||
|
453 | class simplecache(object): | |||
|
454 | def __init__(self): | |||
|
455 | self.missingids = [] | |||
|
456 | self.connected = True | |||
|
457 | ||||
|
458 | def close(self): | |||
|
459 | pass | |||
|
460 | ||||
|
461 | def request(self, value, flush=True): | |||
|
462 | lines = value.split("\n") | |||
|
463 | if lines[0] != "get": | |||
|
464 | return | |||
|
465 | self.missingids = lines[2:-1] | |||
|
466 | self.missingids.append('0') | |||
|
467 | ||||
|
468 | def receiveline(self): | |||
|
469 | if len(self.missingids) > 0: | |||
|
470 | return self.missingids.pop(0) | |||
|
471 | return None | |||
|
472 | ||||
|
473 | self.remotecache = simplecache() | |||
|
474 | ||||
|
475 | def close(self): | |||
|
476 | if fetches: | |||
|
477 | msg = ("%d files fetched over %d fetches - " + | |||
|
478 | "(%d misses, %0.2f%% hit ratio) over %0.2fs\n") % ( | |||
|
479 | fetched, | |||
|
480 | fetches, | |||
|
481 | fetchmisses, | |||
|
482 | float(fetched - fetchmisses) / float(fetched) * 100.0, | |||
|
483 | fetchcost) | |||
|
484 | if self.debugoutput: | |||
|
485 | self.ui.warn(msg) | |||
|
486 | self.ui.log("remotefilelog.prefetch", msg.replace("%", "%%"), | |||
|
487 | remotefilelogfetched=fetched, | |||
|
488 | remotefilelogfetches=fetches, | |||
|
489 | remotefilelogfetchmisses=fetchmisses, | |||
|
490 | remotefilelogfetchtime=fetchcost * 1000) | |||
|
491 | ||||
|
492 | if self.remotecache.connected: | |||
|
493 | self.remotecache.close() | |||
|
494 | ||||
|
495 | def prefetch(self, fileids, force=False, fetchdata=True, | |||
|
496 | fetchhistory=False): | |||
|
497 | """downloads the given file versions to the cache | |||
|
498 | """ | |||
|
499 | repo = self.repo | |||
|
500 | idstocheck = [] | |||
|
501 | for file, id in fileids: | |||
|
502 | # hack | |||
|
503 | # - we don't use .hgtags | |||
|
504 | # - workingctx produces ids with length 42, | |||
|
505 | # which we skip since they aren't in any cache | |||
|
506 | if (file == '.hgtags' or len(id) == 42 | |||
|
507 | or not repo.shallowmatch(file)): | |||
|
508 | continue | |||
|
509 | ||||
|
510 | idstocheck.append((file, bin(id))) | |||
|
511 | ||||
|
512 | datastore = self.datastore | |||
|
513 | historystore = self.historystore | |||
|
514 | if force: | |||
|
515 | datastore = contentstore.unioncontentstore(*repo.shareddatastores) | |||
|
516 | historystore = metadatastore.unionmetadatastore( | |||
|
517 | *repo.sharedhistorystores) | |||
|
518 | ||||
|
519 | missingids = set() | |||
|
520 | if fetchdata: | |||
|
521 | missingids.update(datastore.getmissing(idstocheck)) | |||
|
522 | if fetchhistory: | |||
|
523 | missingids.update(historystore.getmissing(idstocheck)) | |||
|
524 | ||||
|
525 | # partition missing nodes into nullid and not-nullid so we can | |||
|
526 | # warn about this filtering potentially shadowing bugs. | |||
|
527 | nullids = len([None for unused, id in missingids if id == nullid]) | |||
|
528 | if nullids: | |||
|
529 | missingids = [(f, id) for f, id in missingids if id != nullid] | |||
|
530 | repo.ui.develwarn( | |||
|
531 | ('remotefilelog not fetching %d null revs' | |||
|
532 | ' - this is likely hiding bugs' % nullids), | |||
|
533 | config='remotefilelog-ext') | |||
|
534 | if missingids: | |||
|
535 | global fetches, fetched, fetchcost | |||
|
536 | fetches += 1 | |||
|
537 | ||||
|
538 | # We want to be able to detect excess individual file downloads, so | |||
|
539 | # let's log that information for debugging. | |||
|
540 | if fetches >= 15 and fetches < 18: | |||
|
541 | if fetches == 15: | |||
|
542 | fetchwarning = self.ui.config('remotefilelog', | |||
|
543 | 'fetchwarning') | |||
|
544 | if fetchwarning: | |||
|
545 | self.ui.warn(fetchwarning + '\n') | |||
|
546 | self.logstacktrace() | |||
|
547 | missingids = [(file, hex(id)) for file, id in missingids] | |||
|
548 | fetched += len(missingids) | |||
|
549 | start = time.time() | |||
|
550 | missingids = self.request(missingids) | |||
|
551 | if missingids: | |||
|
552 | raise error.Abort(_("unable to download %d files") % | |||
|
553 | len(missingids)) | |||
|
554 | fetchcost += time.time() - start | |||
|
555 | self._lfsprefetch(fileids) | |||
|
556 | ||||
|
557 | def _lfsprefetch(self, fileids): | |||
|
558 | if not _lfsmod or not util.safehasattr( | |||
|
559 | self.repo.svfs, 'lfslocalblobstore'): | |||
|
560 | return | |||
|
561 | if not _lfsmod.wrapper.candownload(self.repo): | |||
|
562 | return | |||
|
563 | pointers = [] | |||
|
564 | store = self.repo.svfs.lfslocalblobstore | |||
|
565 | for file, id in fileids: | |||
|
566 | node = bin(id) | |||
|
567 | rlog = self.repo.file(file) | |||
|
568 | if rlog.flags(node) & revlog.REVIDX_EXTSTORED: | |||
|
569 | text = rlog.revision(node, raw=True) | |||
|
570 | p = _lfsmod.pointer.deserialize(text) | |||
|
571 | oid = p.oid() | |||
|
572 | if not store.has(oid): | |||
|
573 | pointers.append(p) | |||
|
574 | if len(pointers) > 0: | |||
|
575 | self.repo.svfs.lfsremoteblobstore.readbatch(pointers, store) | |||
|
576 | assert all(store.has(p.oid()) for p in pointers) | |||
|
577 | ||||
|
578 | def logstacktrace(self): | |||
|
579 | import traceback | |||
|
580 | self.ui.log('remotefilelog', 'excess remotefilelog fetching:\n%s\n', | |||
|
581 | ''.join(traceback.format_stack())) |
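The cacheprocess protocol spoken by `cacheconnection` above is line oriented: the client sends `get`, a count, then one key per line; the process answers with one line per missing key, optionally interleaving `_hits_<n>_` progress lines, and finishes with a lone `0`; keys fetched from the server are then offered back via a `set` request of the same shape, and `exit` ends the session. A minimal external process that reports every key as a miss (roughly what the in-process `simplecache` fallback fakes) could look like this sketch, which assumes `cacheprocess.includepath` is off so each line is just a cache key:

    #!/usr/bin/env python
    # Sketch of an always-miss cacheprocess for the protocol above.
    import sys

    def serve(stdin, stdout):
        while True:
            verb = stdin.readline().rstrip('\n')
            if not verb or verb == 'exit':
                return
            count = int(stdin.readline())
            keys = [stdin.readline().rstrip('\n') for _ in range(count)]
            if verb == 'get':
                for key in keys:
                    stdout.write(key + '\n')  # report every key missing
                stdout.write('0\n')           # end-of-response marker
                stdout.flush()
            # 'set' carries keys the client just fetched; a real cache
            # would store them, this sketch drops them.

    if __name__ == '__main__':
        serve(sys.stdin, sys.stdout)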
@@ -0,0 +1,520 b'' | |||||
|
1 | from __future__ import absolute_import | |||
|
2 | ||||
|
3 | import hashlib | |||
|
4 | import struct | |||
|
5 | ||||
|
6 | from mercurial.node import hex, nullid | |||
|
7 | from mercurial import ( | |||
|
8 | pycompat, | |||
|
9 | util, | |||
|
10 | ) | |||
|
11 | from . import ( | |||
|
12 | basepack, | |||
|
13 | constants, | |||
|
14 | shallowutil, | |||
|
15 | ) | |||
|
16 | ||||
|
17 | # (filename hash, offset, size) | |||
|
18 | INDEXFORMAT2 = '!20sQQII' | |||
|
19 | INDEXENTRYLENGTH2 = struct.calcsize(INDEXFORMAT2) | |||
|
20 | NODELENGTH = 20 | |||
|
21 | ||||
|
22 | NODEINDEXFORMAT = '!20sQ' | |||
|
23 | NODEINDEXENTRYLENGTH = struct.calcsize(NODEINDEXFORMAT) | |||
|
24 | ||||
|
25 | # (node, p1, p2, linknode) | |||
|
26 | PACKFORMAT = "!20s20s20s20sH" | |||
|
27 | PACKENTRYLENGTH = 82 | |||
|
28 | ||||
|
29 | ENTRYCOUNTSIZE = 4 | |||
|
30 | ||||
|
31 | INDEXSUFFIX = '.histidx' | |||
|
32 | PACKSUFFIX = '.histpack' | |||
|
33 | ||||
|
34 | ANC_NODE = 0 | |||
|
35 | ANC_P1NODE = 1 | |||
|
36 | ANC_P2NODE = 2 | |||
|
37 | ANC_LINKNODE = 3 | |||
|
38 | ANC_COPYFROM = 4 | |||
|
39 | ||||
|
40 | class historypackstore(basepack.basepackstore): | |||
|
41 | INDEXSUFFIX = INDEXSUFFIX | |||
|
42 | PACKSUFFIX = PACKSUFFIX | |||
|
43 | ||||
|
44 | def getpack(self, path): | |||
|
45 | return historypack(path) | |||
|
46 | ||||
|
47 | def getancestors(self, name, node, known=None): | |||
|
48 | for pack in self.packs: | |||
|
49 | try: | |||
|
50 | return pack.getancestors(name, node, known=known) | |||
|
51 | except KeyError: | |||
|
52 | pass | |||
|
53 | ||||
|
54 | for pack in self.refresh(): | |||
|
55 | try: | |||
|
56 | return pack.getancestors(name, node, known=known) | |||
|
57 | except KeyError: | |||
|
58 | pass | |||
|
59 | ||||
|
60 | raise KeyError((name, node)) | |||
|
61 | ||||
|
62 | def getnodeinfo(self, name, node): | |||
|
63 | for pack in self.packs: | |||
|
64 | try: | |||
|
65 | return pack.getnodeinfo(name, node) | |||
|
66 | except KeyError: | |||
|
67 | pass | |||
|
68 | ||||
|
69 | for pack in self.refresh(): | |||
|
70 | try: | |||
|
71 | return pack.getnodeinfo(name, node) | |||
|
72 | except KeyError: | |||
|
73 | pass | |||
|
74 | ||||
|
75 | raise KeyError((name, node)) | |||
|
76 | ||||
|
77 | def add(self, filename, node, p1, p2, linknode, copyfrom): | |||
|
78 | raise RuntimeError("cannot add to historypackstore (%s:%s)" | |||
|
79 | % (filename, hex(node))) | |||
|
80 | ||||
|
81 | class historypack(basepack.basepack): | |||
|
82 | INDEXSUFFIX = INDEXSUFFIX | |||
|
83 | PACKSUFFIX = PACKSUFFIX | |||
|
84 | ||||
|
85 | SUPPORTED_VERSIONS = [2] | |||
|
86 | ||||
|
87 | def __init__(self, path): | |||
|
88 | super(historypack, self).__init__(path) | |||
|
89 | self.INDEXFORMAT = INDEXFORMAT2 | |||
|
90 | self.INDEXENTRYLENGTH = INDEXENTRYLENGTH2 | |||
|
91 | ||||
|
92 | def getmissing(self, keys): | |||
|
93 | missing = [] | |||
|
94 | for name, node in keys: | |||
|
95 | try: | |||
|
96 | self._findnode(name, node) | |||
|
97 | except KeyError: | |||
|
98 | missing.append((name, node)) | |||
|
99 | ||||
|
100 | return missing | |||
|
101 | ||||
|
102 | def getancestors(self, name, node, known=None): | |||
|
103 | """Returns as many ancestors as we're aware of. | |||
|
104 | ||||
|
105 | return value: { | |||
|
106 | node: (p1, p2, linknode, copyfrom), | |||
|
107 | ... | |||
|
108 | } | |||
|
109 | """ | |||
|
110 | if known and node in known: | |||
|
111 | return [] | |||
|
112 | ||||
|
113 | ancestors = self._getancestors(name, node, known=known) | |||
|
114 | results = {} | |||
|
115 | for ancnode, p1, p2, linknode, copyfrom in ancestors: | |||
|
116 | results[ancnode] = (p1, p2, linknode, copyfrom) | |||
|
117 | ||||
|
118 | if not results: | |||
|
119 | raise KeyError((name, node)) | |||
|
120 | return results | |||
|
121 | ||||
|
122 | def getnodeinfo(self, name, node): | |||
|
123 | # Drop the node from the tuple before returning, since the result should | |||
|
124 | # just be (p1, p2, linknode, copyfrom) | |||
|
125 | return self._findnode(name, node)[1:] | |||
|
126 | ||||
|
127 | def _getancestors(self, name, node, known=None): | |||
|
128 | if known is None: | |||
|
129 | known = set() | |||
|
130 | section = self._findsection(name) | |||
|
131 | filename, offset, size, nodeindexoffset, nodeindexsize = section | |||
|
132 | pending = set((node,)) | |||
|
133 | o = 0 | |||
|
134 | while o < size: | |||
|
135 | if not pending: | |||
|
136 | break | |||
|
137 | entry, copyfrom = self._readentry(offset + o) | |||
|
138 | o += PACKENTRYLENGTH | |||
|
139 | if copyfrom: | |||
|
140 | o += len(copyfrom) | |||
|
141 | ||||
|
142 | ancnode = entry[ANC_NODE] | |||
|
143 | if ancnode in pending: | |||
|
144 | pending.remove(ancnode) | |||
|
145 | p1node = entry[ANC_P1NODE] | |||
|
146 | p2node = entry[ANC_P2NODE] | |||
|
147 | if p1node != nullid and p1node not in known: | |||
|
148 | pending.add(p1node) | |||
|
149 | if p2node != nullid and p2node not in known: | |||
|
150 | pending.add(p2node) | |||
|
151 | ||||
|
152 | yield (ancnode, p1node, p2node, entry[ANC_LINKNODE], copyfrom) | |||
|
153 | ||||
|
154 | def _readentry(self, offset): | |||
|
155 | data = self._data | |||
|
156 | entry = struct.unpack(PACKFORMAT, data[offset:offset + PACKENTRYLENGTH]) | |||
|
157 | copyfrom = None | |||
|
158 | copyfromlen = entry[ANC_COPYFROM] | |||
|
159 | if copyfromlen != 0: | |||
|
160 | offset += PACKENTRYLENGTH | |||
|
161 | copyfrom = data[offset:offset + copyfromlen] | |||
|
162 | return entry, copyfrom | |||
|
163 | ||||
|
164 | def add(self, filename, node, p1, p2, linknode, copyfrom): | |||
|
165 | raise RuntimeError("cannot add to historypack (%s:%s)" % | |||
|
166 | (filename, hex(node))) | |||
|
167 | ||||
|
168 | def _findnode(self, name, node): | |||
|
169 | if self.VERSION == 0: | |||
|
170 | ancestors = self._getancestors(name, node) | |||
|
171 | for ancnode, p1node, p2node, linknode, copyfrom in ancestors: | |||
|
172 | if ancnode == node: | |||
|
173 | return (ancnode, p1node, p2node, linknode, copyfrom) | |||
|
174 | else: | |||
|
175 | section = self._findsection(name) | |||
|
176 | nodeindexoffset, nodeindexsize = section[3:] | |||
|
177 | entry = self._bisect(node, nodeindexoffset, | |||
|
178 | nodeindexoffset + nodeindexsize, | |||
|
179 | NODEINDEXENTRYLENGTH) | |||
|
180 | if entry is not None: | |||
|
181 | node, offset = struct.unpack(NODEINDEXFORMAT, entry) | |||
|
182 | entry, copyfrom = self._readentry(offset) | |||
|
183 | # Drop the copyfromlen from the end of entry, and replace it | |||
|
184 | # with the copyfrom string. | |||
|
185 | return entry[:4] + (copyfrom,) | |||
|
186 | ||||
|
187 | raise KeyError("unable to find history for %s:%s" % (name, hex(node))) | |||
|
188 | ||||
|
189 | def _findsection(self, name): | |||
|
190 | params = self.params | |||
|
191 | namehash = hashlib.sha1(name).digest() | |||
|
192 | fanoutkey = struct.unpack(params.fanoutstruct, | |||
|
193 | namehash[:params.fanoutprefix])[0] | |||
|
194 | fanout = self._fanouttable | |||
|
195 | ||||
|
196 | start = fanout[fanoutkey] + params.indexstart | |||
|
197 | indexend = self._indexend | |||
|
198 | ||||
|
199 | for i in pycompat.xrange(fanoutkey + 1, params.fanoutcount): | |||
|
200 | end = fanout[i] + params.indexstart | |||
|
201 | if end != start: | |||
|
202 | break | |||
|
203 | else: | |||
|
204 | end = indexend | |||
|
205 | ||||
|
206 | entry = self._bisect(namehash, start, end, self.INDEXENTRYLENGTH) | |||
|
207 | if not entry: | |||
|
208 | raise KeyError(name) | |||
|
209 | ||||
|
210 | rawentry = struct.unpack(self.INDEXFORMAT, entry) | |||
|
211 | x, offset, size, nodeindexoffset, nodeindexsize = rawentry | |||
|
212 | rawnamelen = self._index[nodeindexoffset:nodeindexoffset + | |||
|
213 | constants.FILENAMESIZE] | |||
|
214 | actualnamelen = struct.unpack('!H', rawnamelen)[0] | |||
|
215 | nodeindexoffset += constants.FILENAMESIZE | |||
|
216 | actualname = self._index[nodeindexoffset:nodeindexoffset + | |||
|
217 | actualnamelen] | |||
|
218 | if actualname != name: | |||
|
219 | raise KeyError("found file name %s when looking for %s" % | |||
|
220 | (actualname, name)) | |||
|
221 | nodeindexoffset += actualnamelen | |||
|
222 | ||||
|
223 | filenamelength = struct.unpack('!H', self._data[offset:offset + | |||
|
224 | constants.FILENAMESIZE])[0] | |||
|
225 | offset += constants.FILENAMESIZE | |||
|
226 | ||||
|
227 | actualname = self._data[offset:offset + filenamelength] | |||
|
228 | offset += filenamelength | |||
|
229 | ||||
|
230 | if name != actualname: | |||
|
231 | raise KeyError("found file name %s when looking for %s" % | |||
|
232 | (actualname, name)) | |||
|
233 | ||||
|
234 | # Skip entry list size | |||
|
235 | offset += ENTRYCOUNTSIZE | |||
|
236 | ||||
|
237 | nodelistoffset = offset | |||
|
238 | nodelistsize = (size - constants.FILENAMESIZE - filenamelength - | |||
|
239 | ENTRYCOUNTSIZE) | |||
|
240 | return (name, nodelistoffset, nodelistsize, | |||
|
241 | nodeindexoffset, nodeindexsize) | |||
|
242 | ||||
|
243 | def _bisect(self, node, start, end, entrylen): | |||
|
244 | # Bisect between start and end to find node | |||
|
245 | origstart = start | |||
|
246 | startnode = self._index[start:start + NODELENGTH] | |||
|
247 | endnode = self._index[end:end + NODELENGTH] | |||
|
248 | ||||
|
249 | if startnode == node: | |||
|
250 | return self._index[start:start + entrylen] | |||
|
251 | elif endnode == node: | |||
|
252 | return self._index[end:end + entrylen] | |||
|
253 | else: | |||
|
254 | while start < end - entrylen: | |||
|
255 | mid = start + (end - start) // 2 | |||
|
256 | mid = mid - ((mid - origstart) % entrylen) | |||
|
257 | midnode = self._index[mid:mid + NODELENGTH] | |||
|
258 | if midnode == node: | |||
|
259 | return self._index[mid:mid + entrylen] | |||
|
260 | if node > midnode: | |||
|
261 | start = mid | |||
|
262 | startnode = midnode | |||
|
263 | elif node < midnode: | |||
|
264 | end = mid | |||
|
265 | endnode = midnode | |||
|
266 | return None | |||
|
267 | ||||
|
268 | def markledger(self, ledger, options=None): | |||
|
269 | for filename, node in self: | |||
|
270 | ledger.markhistoryentry(self, filename, node) | |||
|
271 | ||||
|
272 | def cleanup(self, ledger): | |||
|
273 | entries = ledger.sources.get(self, []) | |||
|
274 | allkeys = set(self) | |||
|
275 | repackedkeys = set((e.filename, e.node) for e in entries if | |||
|
276 | e.historyrepacked) | |||
|
277 | ||||
|
278 | if len(allkeys - repackedkeys) == 0: | |||
|
279 | if self.path not in ledger.created: | |||
|
280 | util.unlinkpath(self.indexpath, ignoremissing=True) | |||
|
281 | util.unlinkpath(self.packpath, ignoremissing=True) | |||
|
282 | ||||
|
283 | def __iter__(self): | |||
|
284 | for f, n, x, x, x, x in self.iterentries(): | |||
|
285 | yield f, n | |||
|
286 | ||||
|
287 | def iterentries(self): | |||
|
288 | # Start at 1 to skip the header | |||
|
289 | offset = 1 | |||
|
290 | while offset < self.datasize: | |||
|
291 | data = self._data | |||
|
292 | # <2 byte len> + <filename> | |||
|
293 | filenamelen = struct.unpack('!H', data[offset:offset + | |||
|
294 | constants.FILENAMESIZE])[0] | |||
|
295 | offset += constants.FILENAMESIZE | |||
|
296 | filename = data[offset:offset + filenamelen] | |||
|
297 | offset += filenamelen | |||
|
298 | ||||
|
299 | revcount = struct.unpack('!I', data[offset:offset + | |||
|
300 | ENTRYCOUNTSIZE])[0] | |||
|
301 | offset += ENTRYCOUNTSIZE | |||
|
302 | ||||
|
303 | for i in pycompat.xrange(revcount): | |||
|
304 | entry = struct.unpack(PACKFORMAT, data[offset:offset + | |||
|
305 | PACKENTRYLENGTH]) | |||
|
306 | offset += PACKENTRYLENGTH | |||
|
307 | ||||
|
308 | copyfrom = data[offset:offset + entry[ANC_COPYFROM]] | |||
|
309 | offset += entry[ANC_COPYFROM] | |||
|
310 | ||||
|
311 | yield (filename, entry[ANC_NODE], entry[ANC_P1NODE], | |||
|
312 | entry[ANC_P2NODE], entry[ANC_LINKNODE], copyfrom) | |||
|
313 | ||||
|
314 | self._pagedin += PACKENTRYLENGTH | |||
|
315 | ||||
|
316 | # If we've read a lot of data from the mmap, free some memory. | |||
|
317 | self.freememory() | |||
|
318 | ||||
|
319 | class mutablehistorypack(basepack.mutablebasepack): | |||
|
320 | """A class for constructing and serializing a histpack file and index. | |||
|
321 | ||||
|
322 | A history pack is a pair of files that contain the revision history for | |||
|
323 | various file revisions in Mercurial. It contains only revision history (like | |||
|
324 | parent pointers and linknodes), not any revision content information. | |||
|
325 | ||||
|
326 | It consists of two files, with the following format: | |||
|
327 | ||||
|
328 | .histpack | |||
|
329 | The pack itself is a series of file revisions with some basic header | |||
|
330 | information on each. | |||
|
331 | ||||
|
332 | histpack = <version: 1 byte> | |||
|
333 | [<filesection>,...] | |||
|
334 | filesection = <filename len: 2 byte unsigned int> | |||
|
335 | <filename> | |||
|
336 | <revision count: 4 byte unsigned int> | |||
|
337 | [<revision>,...] | |||
|
338 | revision = <node: 20 byte> | |||
|
339 | <p1node: 20 byte> | |||
|
340 | <p2node: 20 byte> | |||
|
341 | <linknode: 20 byte> | |||
|
342 | <copyfromlen: 2 byte> | |||
|
343 | <copyfrom> | |||
|
344 | ||||
|
345 | The revisions within each filesection are stored in topological order | |||
|
346 | (newest first). If a given entry has a parent from another file (a copy) | |||
|
347 | then p1node is the node from the other file, and copyfrom is the | |||
|
348 | filepath of the other file. | |||
|
349 | ||||
|
350 | .histidx | |||
|
351 | The index file provides a mapping from filename to the file section in | |||
|
352 | the histpack. In V1 it also contains sub-indexes for specific nodes | |||
|
353 | within each file. It consists of three parts, the fanout, the file index | |||
|
354 | and the node indexes. | |||
|
355 | ||||
|
356 | The file index is a list of index entries, sorted by filename hash (one | |||
|
357 | per file section in the pack). Each entry has: | |||
|
358 | ||||
|
359 | - node (The 20 byte hash of the filename) | |||
|
360 | - pack entry offset (The location of this file section in the histpack) | |||
|
361 | - pack content size (The on-disk length of this file section's pack | |||
|
362 | data) | |||
|
363 | - node index offset (The location of the file's node index in the index | |||
|
364 | file) [1] | |||
|
365 | - node index size (the on-disk length of this file's node index) [1] | |||
|
366 | ||||
|
367 | The fanout is a quick lookup table to reduce the number of steps for | |||
|
368 | bisecting the index. It is a series of 4 byte pointers to positions | |||
|
369 | within the index. It has 2^16 entries, which corresponds to hash | |||
|
370 | prefixes [00, 01, 02,..., FD, FE, FF]. Example: the pointer in slot 4F | |||
|
371 | points to the index position of the first revision whose node starts | |||
|
372 | with 4F. This saves log(2^16) bisect steps. | |||
|
373 | ||||
|
374 | histidx = <fanouttable> | |||
|
375 | <file count: 8 byte unsigned> [1] | |||
|
376 | <fileindex> | |||
|
377 | <node count: 8 byte unsigned> [1] | |||
|
378 | [<nodeindex>,...] [1] | |||
|
379 | fanouttable = [<index offset: 4 byte unsigned int>,...] (2^16 entries) | |||
|
380 | ||||
|
381 | fileindex = [<file index entry>,...] | |||
|
382 | fileindexentry = <node: 20 byte> | |||
|
383 | <pack file section offset: 8 byte unsigned int> | |||
|
384 | <pack file section size: 8 byte unsigned int> | |||
|
385 | <node index offset: 4 byte unsigned int> [1] | |||
|
386 | <node index size: 4 byte unsigned int> [1] | |||
|
387 | nodeindex = <filename>[<node index entry>,...] [1] | |||
|
388 | filename = <filename len : 2 byte unsigned int><filename value> [1] | |||
|
389 | nodeindexentry = <node: 20 byte> [1] | |||
|
390 | <pack file node offset: 8 byte unsigned int> [1] | |||
|
391 | ||||
|
392 | [1]: new in version 1. | |||
|
393 | """ | |||
|
394 | INDEXSUFFIX = INDEXSUFFIX | |||
|
395 | PACKSUFFIX = PACKSUFFIX | |||
|
396 | ||||
|
397 | SUPPORTED_VERSIONS = [2] | |||
|
398 | ||||
|
399 | def __init__(self, ui, packpath, version=2): | |||
|
400 | super(mutablehistorypack, self).__init__(ui, packpath, version=version) | |||
|
401 | self.files = {} | |||
|
402 | self.entrylocations = {} | |||
|
403 | self.fileentries = {} | |||
|
404 | ||||
|
405 | self.INDEXFORMAT = INDEXFORMAT2 | |||
|
406 | self.INDEXENTRYLENGTH = INDEXENTRYLENGTH2 | |||
|
407 | ||||
|
408 | self.NODEINDEXFORMAT = NODEINDEXFORMAT | |||
|
409 | self.NODEINDEXENTRYLENGTH = NODEINDEXENTRYLENGTH | |||
|
410 | ||||
|
411 | def add(self, filename, node, p1, p2, linknode, copyfrom): | |||
|
412 | copyfrom = copyfrom or '' | |||
|
413 | copyfromlen = struct.pack('!H', len(copyfrom)) | |||
|
414 | self.fileentries.setdefault(filename, []).append((node, p1, p2, | |||
|
415 | linknode, | |||
|
416 | copyfromlen, | |||
|
417 | copyfrom)) | |||
|
418 | ||||
|
419 | def _write(self): | |||
|
420 | for filename in sorted(self.fileentries): | |||
|
421 | entries = self.fileentries[filename] | |||
|
422 | sectionstart = self.packfp.tell() | |||
|
423 | ||||
|
424 | # Write the file section content | |||
|
425 | entrymap = dict((e[0], e) for e in entries) | |||
|
426 | def parentfunc(node): | |||
|
427 | x, p1, p2, x, x, x = entrymap[node] | |||
|
428 | parents = [] | |||
|
429 | if p1 != nullid: | |||
|
430 | parents.append(p1) | |||
|
431 | if p2 != nullid: | |||
|
432 | parents.append(p2) | |||
|
433 | return parents | |||
|
434 | ||||
|
435 | sortednodes = list(reversed(shallowutil.sortnodes( | |||
|
436 | (e[0] for e in entries), | |||
|
437 | parentfunc))) | |||
|
438 | ||||
|
439 | # Write the file section header | |||
|
440 | self.writeraw("%s%s%s" % ( | |||
|
441 | struct.pack('!H', len(filename)), | |||
|
442 | filename, | |||
|
443 | struct.pack('!I', len(sortednodes)), | |||
|
444 | )) | |||
|
445 | ||||
|
446 | sectionlen = constants.FILENAMESIZE + len(filename) + 4 | |||
|
447 | ||||
|
448 | rawstrings = [] | |||
|
449 | ||||
|
450 | # Record the node locations for the index | |||
|
451 | locations = self.entrylocations.setdefault(filename, {}) | |||
|
452 | offset = sectionstart + sectionlen | |||
|
453 | for node in sortednodes: | |||
|
454 | locations[node] = offset | |||
|
455 | raw = '%s%s%s%s%s%s' % entrymap[node] | |||
|
456 | rawstrings.append(raw) | |||
|
457 | offset += len(raw) | |||
|
458 | ||||
|
459 | rawdata = ''.join(rawstrings) | |||
|
460 | sectionlen += len(rawdata) | |||
|
461 | ||||
|
462 | self.writeraw(rawdata) | |||
|
463 | ||||
|
464 | # Record metadata for the index | |||
|
465 | self.files[filename] = (sectionstart, sectionlen) | |||
|
466 | node = hashlib.sha1(filename).digest() | |||
|
467 | self.entries[node] = node | |||
|
468 | ||||
|
469 | def close(self, ledger=None): | |||
|
470 | if self._closed: | |||
|
471 | return | |||
|
472 | ||||
|
473 | self._write() | |||
|
474 | ||||
|
475 | return super(mutablehistorypack, self).close(ledger=ledger) | |||
|
476 | ||||
|
477 | def createindex(self, nodelocations, indexoffset): | |||
|
478 | fileindexformat = self.INDEXFORMAT | |||
|
479 | fileindexlength = self.INDEXENTRYLENGTH | |||
|
480 | nodeindexformat = self.NODEINDEXFORMAT | |||
|
481 | nodeindexlength = self.NODEINDEXENTRYLENGTH | |||
|
482 | ||||
|
483 | files = ((hashlib.sha1(filename).digest(), filename, offset, size) | |||
|
484 | for filename, (offset, size) in self.files.iteritems()) | |||
|
485 | files = sorted(files) | |||
|
486 | ||||
|
487 | # node index is after file index size, file index, and node index size | |||
|
488 | indexlensize = struct.calcsize('!Q') | |||
|
489 | nodeindexoffset = (indexoffset + indexlensize + | |||
|
490 | (len(files) * fileindexlength) + indexlensize) | |||
|
491 | ||||
|
492 | fileindexentries = [] | |||
|
493 | nodeindexentries = [] | |||
|
494 | nodecount = 0 | |||
|
495 | for namehash, filename, offset, size in files: | |||
|
496 | # File section index | |||
|
497 | nodelocations = self.entrylocations[filename] | |||
|
498 | ||||
|
499 | nodeindexsize = len(nodelocations) * nodeindexlength | |||
|
500 | ||||
|
501 | rawentry = struct.pack(fileindexformat, namehash, offset, size, | |||
|
502 | nodeindexoffset, nodeindexsize) | |||
|
503 | # Node index | |||
|
504 | nodeindexentries.append(struct.pack(constants.FILENAMESTRUCT, | |||
|
505 | len(filename)) + filename) | |||
|
506 | nodeindexoffset += constants.FILENAMESIZE + len(filename) | |||
|
507 | ||||
|
508 | for node, location in sorted(nodelocations.iteritems()): | |||
|
509 | nodeindexentries.append(struct.pack(nodeindexformat, node, | |||
|
510 | location)) | |||
|
511 | nodecount += 1 | |||
|
512 | ||||
|
513 | nodeindexoffset += len(nodelocations) * nodeindexlength | |||
|
514 | ||||
|
515 | fileindexentries.append(rawentry) | |||
|
516 | ||||
|
517 | nodecountraw = '' | |||
|
518 | nodecountraw = struct.pack('!Q', nodecount) | |||
|
519 | return (''.join(fileindexentries) + nodecountraw + | |||
|
520 | ''.join(nodeindexentries)) |
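To make the `.histpack` layout from the docstring concrete, the following sketch packs a single-revision file section by hand and walks it back the same way `iterentries` does (version 2, no copy source; constants mirror the ones defined above):

    import struct

    PACKFORMAT = '!20s20s20s20sH'  # node, p1, p2, linknode, copyfrom len
    NULLID = b'\x00' * 20          # stands in for mercurial.node.nullid

    filename = b'foo.txt'
    node, p1, linknode = b'\x01' * 20, b'\x02' * 20, b'\x03' * 20

    data = (b'\x02' +                                  # version byte
            struct.pack('!H', len(filename)) + filename +
            struct.pack('!I', 1) +                     # revision count
            struct.pack(PACKFORMAT, node, p1, NULLID, linknode, 0))

    offset = 1                                         # skip the version
    namelen = struct.unpack('!H', data[offset:offset + 2])[0]
    offset += 2
    name = data[offset:offset + namelen]
    offset += namelen
    revcount = struct.unpack('!I', data[offset:offset + 4])[0]
    offset += 4
    entry = struct.unpack(PACKFORMAT, data[offset:offset + 82])
    assert (name, revcount, entry[0], entry[3]) == (
        filename, 1, node, linknode)

A non-zero `copyfromlen` (the trailing `H` field) would be followed by that many bytes of copy-source path before the next entry, exactly as `_readentry` reads it.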
@@ -0,0 +1,156 b'' | |||||
|
1 | from __future__ import absolute_import | |||
|
2 | ||||
|
3 | from mercurial.node import hex, nullid | |||
|
4 | from . import ( | |||
|
5 | basestore, | |||
|
6 | shallowutil, | |||
|
7 | ) | |||
|
8 | ||||
|
9 | class unionmetadatastore(basestore.baseunionstore): | |||
|
10 | def __init__(self, *args, **kwargs): | |||
|
11 | super(unionmetadatastore, self).__init__(*args, **kwargs) | |||
|
12 | ||||
|
13 | self.stores = args | |||
|
14 | self.writestore = kwargs.get(r'writestore') | |||
|
15 | ||||
|
16 | # If allowincomplete==True then the union store can return partial | |||
|
17 | # ancestor lists, otherwise it will throw a KeyError if a full | |||
|
18 | # history can't be found. | |||
|
19 | self.allowincomplete = kwargs.get(r'allowincomplete', False) | |||
|
20 | ||||
|
21 | def getancestors(self, name, node, known=None): | |||
|
22 | """Returns as many ancestors as we're aware of. | |||
|
23 | ||||
|
24 | return value: { | |||
|
25 | node: (p1, p2, linknode, copyfrom), | |||
|
26 | ... | |||
|
27 | } | |||
|
28 | """ | |||
|
29 | if known is None: | |||
|
30 | known = set() | |||
|
31 | if node in known: | |||
|
32 | return [] | |||
|
33 | ||||
|
34 | ancestors = {} | |||
|
35 | def traverse(curname, curnode): | |||
|
36 | # TODO: this algorithm has the potential to traverse parts of | |||
|
37 | # history twice. Ex: with A->B->C->F and A->B->D->F, both D and C | |||
|
38 | # may be queued as missing, then B and A are traversed for both. | |||
|
39 | queue = [(curname, curnode)] | |||
|
40 | missing = [] | |||
|
41 | seen = set() | |||
|
42 | while queue: | |||
|
43 | name, node = queue.pop() | |||
|
44 | if (name, node) in seen: | |||
|
45 | continue | |||
|
46 | seen.add((name, node)) | |||
|
47 | value = ancestors.get(node) | |||
|
48 | if not value: | |||
|
49 | missing.append((name, node)) | |||
|
50 | continue | |||
|
51 | p1, p2, linknode, copyfrom = value | |||
|
52 | if p1 != nullid and p1 not in known: | |||
|
53 | queue.append((copyfrom or curname, p1)) | |||
|
54 | if p2 != nullid and p2 not in known: | |||
|
55 | queue.append((curname, p2)) | |||
|
56 | return missing | |||
|
57 | ||||
|
58 | missing = [(name, node)] | |||
|
59 | while missing: | |||
|
60 | curname, curnode = missing.pop() | |||
|
61 | try: | |||
|
62 | ancestors.update(self._getpartialancestors(curname, curnode, | |||
|
63 | known=known)) | |||
|
64 | newmissing = traverse(curname, curnode) | |||
|
65 | missing.extend(newmissing) | |||
|
66 | except KeyError: | |||
|
67 | # If we allow incomplete histories, don't throw. | |||
|
68 | if not self.allowincomplete: | |||
|
69 | raise | |||
|
70 | # If the requested name+node doesn't exist, always throw. | |||
|
71 | if (curname, curnode) == (name, node): | |||
|
72 | raise | |||
|
73 | ||||
|
74 | # TODO: ancestors should probably be (name, node) -> (value) | |||
|
75 | return ancestors | |||
|
76 | ||||
|
77 | @basestore.baseunionstore.retriable | |||
|
78 | def _getpartialancestors(self, name, node, known=None): | |||
|
79 | for store in self.stores: | |||
|
80 | try: | |||
|
81 | return store.getancestors(name, node, known=known) | |||
|
82 | except KeyError: | |||
|
83 | pass | |||
|
84 | ||||
|
85 | raise KeyError((name, hex(node))) | |||
|
86 | ||||
|
87 | @basestore.baseunionstore.retriable | |||
|
88 | def getnodeinfo(self, name, node): | |||
|
89 | for store in self.stores: | |||
|
90 | try: | |||
|
91 | return store.getnodeinfo(name, node) | |||
|
92 | except KeyError: | |||
|
93 | pass | |||
|
94 | ||||
|
95 | raise KeyError((name, hex(node))) | |||
|
96 | ||||
|
97 | def add(self, name, node, data): | |||
|
98 | raise RuntimeError("cannot add metadata only to remotefilelog " | |||
|
99 | "metadatastore") | |||
|
100 | ||||
|
101 | def getmissing(self, keys): | |||
|
102 | missing = keys | |||
|
103 | for store in self.stores: | |||
|
104 | if missing: | |||
|
105 | missing = store.getmissing(missing) | |||
|
106 | return missing | |||
|
107 | ||||
|
108 | def markledger(self, ledger, options=None): | |||
|
109 | for store in self.stores: | |||
|
110 | store.markledger(ledger, options) | |||
|
111 | ||||
|
112 | def getmetrics(self): | |||
|
113 | metrics = [s.getmetrics() for s in self.stores] | |||
|
114 | return shallowutil.sumdicts(*metrics) | |||
|
115 | ||||
|
116 | class remotefilelogmetadatastore(basestore.basestore): | |||
|
117 | def getancestors(self, name, node, known=None): | |||
|
118 | """Returns as many ancestors as we're aware of. | |||
|
119 | ||||
|
120 | return value: { | |||
|
121 | node: (p1, p2, linknode, copyfrom), | |||
|
122 | ... | |||
|
123 | } | |||
|
124 | """ | |||
|
125 | data = self._getdata(name, node) | |||
|
126 | ancestors = shallowutil.ancestormap(data) | |||
|
127 | return ancestors | |||
|
128 | ||||
|
129 | def getnodeinfo(self, name, node): | |||
|
130 | return self.getancestors(name, node)[node] | |||
|
131 | ||||
|
132 | def add(self, name, node, parents, linknode): | |||
|
133 | raise RuntimeError("cannot add metadata only to remotefilelog " | |||
|
134 | "metadatastore") | |||
|
135 | ||||
|
136 | class remotemetadatastore(object): | |||
|
137 | def __init__(self, ui, fileservice, shared): | |||
|
138 | self._fileservice = fileservice | |||
|
139 | self._shared = shared | |||
|
140 | ||||
|
141 | def getancestors(self, name, node, known=None): | |||
|
142 | self._fileservice.prefetch([(name, hex(node))], force=True, | |||
|
143 | fetchdata=False, fetchhistory=True) | |||
|
144 | return self._shared.getancestors(name, node, known=known) | |||
|
145 | ||||
|
146 | def getnodeinfo(self, name, node): | |||
|
147 | return self.getancestors(name, node)[node] | |||
|
148 | ||||
|
149 | def add(self, name, node, data): | |||
|
150 | raise RuntimeError("cannot add to a remote store") | |||
|
151 | ||||
|
152 | def getmissing(self, keys): | |||
|
153 | return keys | |||
|
154 | ||||
|
155 | def markledger(self, ledger, options=None): | |||
|
156 | pass |
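Every lookup method on `unionmetadatastore` follows the same idiom: try each backing store in order, let `KeyError` fall through to the next, and raise only when all of them miss. Stripped to its essence with plain dictionaries:

    # The fall-through lookup pattern used by the union stores above.
    class unionstore(object):
        def __init__(self, *stores):
            self.stores = stores

        def get(self, key):
            for store in self.stores:
                try:
                    return store[key]
                except KeyError:
                    pass  # miss here; try the next store
            raise KeyError(key)

    # The local store shadows the shared one; misses fall back to it.
    local, shared = {'a': 1}, {'a': 99, 'b': 2}
    union = unionstore(local, shared)
    assert (union.get('a'), union.get('b')) == (1, 2)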
@@ -0,0 +1,491 b'' | |||||
|
1 | # remotefilectx.py - filectx/workingfilectx implementations for remotefilelog | |||
|
2 | # | |||
|
3 | # Copyright 2013 Facebook, Inc. | |||
|
4 | # | |||
|
5 | # This software may be used and distributed according to the terms of the | |||
|
6 | # GNU General Public License version 2 or any later version. | |||
|
7 | from __future__ import absolute_import | |||
|
8 | ||||
|
9 | import collections | |||
|
10 | import time | |||
|
11 | ||||
|
12 | from mercurial.node import bin, hex, nullid, nullrev | |||
|
13 | from mercurial import ( | |||
|
14 | ancestor, | |||
|
15 | context, | |||
|
16 | error, | |||
|
17 | phases, | |||
|
18 | pycompat, | |||
|
19 | util, | |||
|
20 | ) | |||
|
21 | from . import shallowutil | |||
|
22 | ||||
|
23 | propertycache = util.propertycache | |||
|
24 | FASTLOG_TIMEOUT_IN_SECS = 0.5 | |||
|
25 | ||||
|
26 | class remotefilectx(context.filectx): | |||
|
27 | def __init__(self, repo, path, changeid=None, fileid=None, | |||
|
28 | filelog=None, changectx=None, ancestormap=None): | |||
|
29 | if fileid == nullrev: | |||
|
30 | fileid = nullid | |||
|
31 | if fileid and len(fileid) == 40: | |||
|
32 | fileid = bin(fileid) | |||
|
33 | super(remotefilectx, self).__init__(repo, path, changeid, | |||
|
34 | fileid, filelog, changectx) | |||
|
35 | self._ancestormap = ancestormap | |||
|
36 | ||||
|
37 | def size(self): | |||
|
38 | return self._filelog.size(self._filenode) | |||
|
39 | ||||
|
40 | @propertycache | |||
|
41 | def _changeid(self): | |||
|
42 | if '_changeid' in self.__dict__: | |||
|
43 | return self._changeid | |||
|
44 | elif '_changectx' in self.__dict__: | |||
|
45 | return self._changectx.rev() | |||
|
46 | elif '_descendantrev' in self.__dict__: | |||
|
47 | # this file context was created from a revision with a known | |||
|
48 | # descendant, we can (lazily) correct for linkrev aliases | |||
|
49 | linknode = self._adjustlinknode(self._path, self._filelog, | |||
|
50 | self._filenode, self._descendantrev) | |||
|
51 | return self._repo.unfiltered().changelog.rev(linknode) | |||
|
52 | else: | |||
|
53 | return self.linkrev() | |||
|
54 | ||||
|
55 | def filectx(self, fileid, changeid=None): | |||
|
56 | '''opens an arbitrary revision of the file without | |||
|
57 | opening a new filelog''' | |||
|
58 | return remotefilectx(self._repo, self._path, fileid=fileid, | |||
|
59 | filelog=self._filelog, changeid=changeid) | |||
|
60 | ||||
|
61 | def linkrev(self): | |||
|
62 | return self._linkrev | |||
|
63 | ||||
|
64 | @propertycache | |||
|
65 | def _linkrev(self): | |||
|
66 | if self._filenode == nullid: | |||
|
67 | return nullrev | |||
|
68 | ||||
|
69 | ancestormap = self.ancestormap() | |||
|
70 | p1, p2, linknode, copyfrom = ancestormap[self._filenode] | |||
|
71 | rev = self._repo.changelog.nodemap.get(linknode) | |||
|
72 | if rev is not None: | |||
|
73 | return rev | |||
|
74 | ||||
|
75 | # Search all commits for the appropriate linkrev (slow, but uncommon) | |||
|
76 | path = self._path | |||
|
77 | fileid = self._filenode | |||
|
78 | cl = self._repo.unfiltered().changelog | |||
|
79 | mfl = self._repo.manifestlog | |||
|
80 | ||||
|
81 | for rev in range(len(cl) - 1, 0, -1): | |||
|
82 | node = cl.node(rev) | |||
|
83 | data = cl.read(node) # get changeset data (we avoid object creation) | |||
|
84 | if path in data[3]: # checking the 'files' field. | |||
|
85 | # The file has been touched, check if the hash is what we're | |||
|
86 | # looking for. | |||
|
87 | if fileid == mfl[data[0]].readfast().get(path): | |||
|
88 | return rev | |||
|
89 | ||||
|
90 | # Couldn't find the linkrev. This should generally not happen, and will | |||
|
91 | # likely cause a crash. | |||
|
92 | return None | |||
|
93 | ||||
|
94 | def introrev(self): | |||
|
95 | """return the rev of the changeset which introduced this file revision | |||
|
96 | ||||
|
97 | This method is different from linkrev because it takes into account the | |||
|
98 | changeset the filectx was created from. It ensures the returned | |||
|
99 | revision is one of its ancestors. This prevents bugs from | |||
|
100 | 'linkrev-shadowing' when a file revision is used by multiple | |||
|
101 | changesets. | |||
|
102 | """ | |||
|
103 | lkr = self.linkrev() | |||
|
104 | attrs = vars(self) | |||
|
105 | noctx = not ('_changeid' in attrs or '_changectx' in attrs) | |||
|
106 | if noctx or self.rev() == lkr: | |||
|
107 | return lkr | |||
|
108 | linknode = self._adjustlinknode(self._path, self._filelog, | |||
|
109 | self._filenode, self.rev(), | |||
|
110 | inclusive=True) | |||
|
111 | return self._repo.changelog.rev(linknode) | |||
|
112 | ||||
|
113 | def renamed(self): | |||
|
114 | """check if file was actually renamed in this changeset revision | |||
|
115 | ||||
|
116 | If rename logged in file revision, we report copy for changeset only | |||
|
117 | if file revisions linkrev points back to the changeset in question | |||
|
118 | or both changeset parents contain different file revisions. | |||
|
119 | """ | |||
|
120 | ancestormap = self.ancestormap() | |||
|
121 | ||||
|
122 | p1, p2, linknode, copyfrom = ancestormap[self._filenode] | |||
|
123 | if not copyfrom: | |||
|
124 | return None | |||
|
125 | ||||
|
126 | renamed = (copyfrom, p1) | |||
|
127 | if self.rev() == self.linkrev(): | |||
|
128 | return renamed | |||
|
129 | ||||
|
130 | name = self.path() | |||
|
131 | fnode = self._filenode | |||
|
132 | for p in self._changectx.parents(): | |||
|
133 | try: | |||
|
134 | if fnode == p.filenode(name): | |||
|
135 | return None | |||
|
136 | except error.LookupError: | |||
|
137 | pass | |||
|
138 | return renamed | |||
|
139 | ||||
|
140 | def ancestormap(self): | |||
|
141 | if not self._ancestormap: | |||
|
142 | self._ancestormap = self.filelog().ancestormap(self._filenode) | |||
|
143 | ||||
|
144 | return self._ancestormap | |||
|
145 | ||||
|
146 | def parents(self): | |||
|
147 | repo = self._repo | |||
|
148 | ancestormap = self.ancestormap() | |||
|
149 | ||||
|
150 | p1, p2, linknode, copyfrom = ancestormap[self._filenode] | |||
|
151 | results = [] | |||
|
152 | if p1 != nullid: | |||
|
153 | path = copyfrom or self._path | |||
|
154 | flog = repo.file(path) | |||
|
155 | p1ctx = remotefilectx(repo, path, fileid=p1, filelog=flog, | |||
|
156 | ancestormap=ancestormap) | |||
|
157 | p1ctx._descendantrev = self.rev() | |||
|
158 | results.append(p1ctx) | |||
|
159 | ||||
|
160 | if p2 != nullid: | |||
|
161 | path = self._path | |||
|
162 | flog = repo.file(path) | |||
|
163 | p2ctx = remotefilectx(repo, path, fileid=p2, filelog=flog, | |||
|
164 | ancestormap=ancestormap) | |||
|
165 | p2ctx._descendantrev = self.rev() | |||
|
166 | results.append(p2ctx) | |||
|
167 | ||||
|
168 | return results | |||
|
169 | ||||
|
170 | def _nodefromancrev(self, ancrev, cl, mfl, path, fnode): | |||
|
171 | """returns the node for <path> in <ancrev> if content matches <fnode>""" | |||
|
172 | ancctx = cl.read(ancrev) # This avoids object creation. | |||
|
173 | manifestnode, files = ancctx[0], ancctx[3] | |||
|
174 | # If the file was touched in this ancestor and its manifest entry matches | |||
|
175 | # the file node we are searching for, this ancestor introduced it. | |||
|
176 | if path in files and fnode == mfl[manifestnode].readfast().get(path): | |||
|
177 | return cl.node(ancrev) | |||
|
178 | return None | |||
|
179 | ||||
|
180 | def _adjustlinknode(self, path, filelog, fnode, srcrev, inclusive=False): | |||
|
181 | """return the first ancestor of <srcrev> introducing <fnode> | |||
|
182 | ||||
|
183 | If the linkrev of the file revision does not point to an ancestor of | |||
|
184 | srcrev, we'll walk down the ancestors until we find one introducing | |||
|
185 | this file revision. | |||
|
186 | ||||
|
187 | :repo: a localrepository object (used to access changelog and manifest) | |||
|
188 | :path: the file path | |||
|
189 | :fnode: the nodeid of the file revision | |||
|
190 | :filelog: the filelog of this path | |||
|
191 | :srcrev: the changeset revision we search ancestors from | |||
|
192 | :inclusive: if true, the src revision will also be checked | |||
|
193 | ||||
|
194 | Note: This is based on adjustlinkrev in core, but it's quite different. | |||
|
195 | ||||
|
196 | adjustlinkrev depends on the fact that the linkrev is the bottommost | |||
|
197 | node, and uses that as a stopping point for the ancestor traversal. We | |||
|
198 | can't do that here because the linknode is not guaranteed to be the | |||
|
199 | bottommost one. | |||
|
200 | ||||
|
201 | In our code here, we actually know what a bunch of potential ancestor | |||
|
202 | linknodes are, so instead of stopping the cheap-ancestor-traversal when | |||
|
203 | we get to a linkrev, we stop when we see any of the known linknodes. | |||
|
204 | """ | |||
|
205 | repo = self._repo | |||
|
206 | cl = repo.unfiltered().changelog | |||
|
207 | mfl = repo.manifestlog | |||
|
208 | ancestormap = self.ancestormap() | |||
|
209 | linknode = ancestormap[fnode][2] | |||
|
210 | ||||
|
211 | if srcrev is None: | |||
|
212 | # wctx case, used by workingfilectx during mergecopy | |||
|
213 | revs = [p.rev() for p in self._repo[None].parents()] | |||
|
214 | inclusive = True # we skipped the real (revless) source | |||
|
215 | else: | |||
|
216 | revs = [srcrev] | |||
|
217 | ||||
|
218 | if self._verifylinknode(revs, linknode): | |||
|
219 | return linknode | |||
|
220 | ||||
|
221 | commonlogkwargs = { | |||
|
222 | r'revs': ' '.join([hex(cl.node(rev)) for rev in revs]), | |||
|
223 | r'fnode': hex(fnode), | |||
|
224 | r'filepath': path, | |||
|
225 | r'user': shallowutil.getusername(repo.ui), | |||
|
226 | r'reponame': shallowutil.getreponame(repo.ui), | |||
|
227 | } | |||
|
228 | ||||
|
229 | repo.ui.log('linkrevfixup', 'adjusting linknode\n', **commonlogkwargs) | |||
|
230 | ||||
|
231 | pc = repo._phasecache | |||
|
232 | seenpublic = False | |||
|
233 | iteranc = cl.ancestors(revs, inclusive=inclusive) | |||
|
234 | for ancrev in iteranc: | |||
|
235 | # First, check locally-available history. | |||
|
236 | lnode = self._nodefromancrev(ancrev, cl, mfl, path, fnode) | |||
|
237 | if lnode is not None: | |||
|
238 | return lnode | |||
|
239 | ||||
|
240 | # adjusting linknode can be super-slow. To mitigate the issue | |||
|
241 | # we use two heuristics: calling fastlog and forcing remotefilelog | |||
|
242 | # prefetch | |||
|
243 | if not seenpublic and pc.phase(repo, ancrev) == phases.public: | |||
|
244 | # TODO: there used to be a codepath to fetch linknodes | |||
|
245 | # from a server as a fast path, but it appeared to | |||
|
246 | # depend on an API FB added to their phabricator. | |||
|
247 | lnode = self._forceprefetch(repo, path, fnode, revs, | |||
|
248 | commonlogkwargs) | |||
|
249 | if lnode: | |||
|
250 | return lnode | |||
|
251 | seenpublic = True | |||
|
252 | ||||
|
253 | return linknode | |||
|
254 | ||||
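Stripped of the prefetch and logging machinery, the traversal above reduces to scanning candidate ancestors for the first one whose manifest records the wanted file node; a simplified sketch under that reading (`manifestof` is an illustrative stand-in for the manifest lookup):

    def adjustlinknode_sketch(ancestorrevs, manifestof, path, fnode, fallback):
        """Return the first candidate rev introducing ``fnode`` for ``path``.

        ancestorrevs: candidate revisions, as cl.ancestors() would yield them
        manifestof(rev): dict-like manifest for that revision (illustrative)
        fallback: the rev derived from the stored linknode, used when
        nothing matches
        """
        for rev in ancestorrevs:
            if manifestof(rev).get(path) == fnode:
                return rev
        return fallback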
|
255 | def _forceprefetch(self, repo, path, fnode, revs, | |||
|
256 | commonlogkwargs): | |||
|
257 | # This next part is super non-obvious, so big comment block time! | |||
|
258 | # | |||
|
259 | # It is possible to get extremely bad performance here when a fairly | |||
|
260 | # common set of circumstances occur when this extension is combined | |||
|
261 | # with a server-side commit rewriting extension like pushrebase. | |||
|
262 | # | |||
|
263 | # First, an engineer creates Commit A and pushes it to the server. | |||
|
264 | # While the server's data structure will have the correct linkrev | |||
|
265 | # for the files touched in Commit A, the client will have the | |||
|
266 | # linkrev of the local commit, which is "invalid" because it's not | |||
|
267 | # an ancestor of the main line of development. | |||
|
268 | # | |||
|
269 | # The client will never download the remotefilelog with the correct | |||
|
270 | # linkrev as long as nobody else touches that file, since the file | |||
|
271 | # data and history hasn't changed since Commit A. | |||
|
272 | # | |||
|
273 | # After a long time (or a short time in a heavily used repo), if the | |||
|
274 | # same engineer returns to change the same file, some commands -- | |||
|
275 | # such as amends of commits with file moves, logs, diffs, etc -- | |||
|
276 | # can trigger this _adjustlinknode code. In those cases, finding | |||
|
277 | # the correct rev can become quite expensive, as the correct | |||
|
278 | # revision is far back in history and we need to walk back through | |||
|
279 | # history to find it. | |||
|
280 | # | |||
|
281 | # In order to improve this situation, we force a prefetch of the | |||
|
282 | # remotefilelog data blob for the file we were called on. We do this | |||
|
283 | # at most once, when we first see a public commit in the history we | |||
|
284 | # are traversing. | |||
|
285 | # | |||
|
286 | # Forcing the prefetch means we will download the remote blob even | |||
|
287 | # if we have the "correct" blob in the local store. Since the union | |||
|
288 | # store checks the remote store first, this means we are much more | |||
|
289 | # likely to get the correct linkrev at this point. | |||
|
290 | # | |||
|
291 | # In rare circumstances (such as the server having a suboptimal | |||
|
292 | # linkrev for our use case), we will fall back to the old slow path. | |||
|
293 | # | |||
|
294 | # We may want to add additional heuristics here in the future if | |||
|
295 | # the slow path is used too much. One promising possibility is using | |||
|
296 | # obsolescence markers to find a more-likely-correct linkrev. | |||
|
297 | ||||
|
298 | logmsg = '' | |||
|
299 | start = time.time() | |||
|
300 | try: | |||
|
301 | repo.fileservice.prefetch([(path, hex(fnode))], force=True) | |||
|
302 | ||||
|
303 | # Now that we've downloaded a new blob from the server, | |||
|
304 | # we need to rebuild the ancestor map to recompute the | |||
|
305 | # linknodes. | |||
|
306 | self._ancestormap = None | |||
|
307 | linknode = self.ancestormap()[fnode][2] # 2 is linknode | |||
|
308 | if self._verifylinknode(revs, linknode): | |||
|
309 | logmsg = 'remotefilelog prefetching succeeded' | |||
|
310 | return linknode | |||
|
311 | logmsg = 'remotefilelog prefetching not found' | |||
|
312 | return None | |||
|
313 | except Exception as e: | |||
|
314 | logmsg = 'remotefilelog prefetching failed (%s)' % e | |||
|
315 | return None | |||
|
316 | finally: | |||
|
317 | elapsed = time.time() - start | |||
|
318 | repo.ui.log('linkrevfixup', logmsg + '\n', elapsed=elapsed * 1000, | |||
|
319 | **pycompat.strkwargs(commonlogkwargs)) | |||
|
320 | ||||
|
321 | def _verifylinknode(self, revs, linknode): | |||
|
322 | """ | |||
|
323 | Check if a linknode is the correct one for the current history. | |||
|
324 | ||||
|
325 | That is, return True if the linkrev is the ancestor of any of the | |||
|
326 | passed in revs, otherwise return False. | |||
|
327 | ||||
|
328 | `revs` is a list that usually has one element -- the wdir parent | |||
|
329 | or the user-passed rev we're looking back from. It may contain two revs | |||
|
330 | when there is a merge going on, or zero revs when a root node with no | |||
|
331 | parents is being created. | |||
|
332 | """ | |||
|
333 | if not revs: | |||
|
334 | return False | |||
|
335 | try: | |||
|
336 | # Use the C fastpath to check if the given linknode is correct. | |||
|
337 | cl = self._repo.unfiltered().changelog | |||
|
338 | return any(cl.isancestor(linknode, cl.node(r)) for r in revs) | |||
|
339 | except error.LookupError: | |||
|
340 | # The linknode read from the blob may have been stripped or | |||
|
341 | # otherwise not present in the repository anymore. Do not fail hard | |||
|
342 | # in this case. Instead, return false and continue the search for | |||
|
343 | # the correct linknode. | |||
|
344 | return False | |||
|
345 | ||||
|
346 | def ancestors(self, followfirst=False): | |||
|
347 | ancestors = [] | |||
|
348 | queue = collections.deque((self,)) | |||
|
349 | seen = set() | |||
|
350 | while queue: | |||
|
351 | current = queue.pop() | |||
|
352 | if current.filenode() in seen: | |||
|
353 | continue | |||
|
354 | seen.add(current.filenode()) | |||
|
355 | ||||
|
356 | ancestors.append(current) | |||
|
357 | ||||
|
358 | parents = current.parents() | |||
|
359 | first = True | |||
|
360 | for p in parents: | |||
|
361 | if first or not followfirst: | |||
|
362 | queue.append(p) | |||
|
363 | first = False | |||
|
364 | ||||
|
365 | # Remove self | |||
|
366 | ancestors.pop(0) | |||
|
367 | ||||
|
368 | # Sort by linkrev | |||
|
369 | # The copy tracing algorithm depends on these coming out in order | |||
|
370 | ancestors = sorted(ancestors, reverse=True, key=lambda x:x.linkrev()) | |||
|
371 | ||||
|
372 | for ancestor in ancestors: | |||
|
373 | yield ancestor | |||
|
374 | ||||
|
375 | def ancestor(self, fc2, actx): | |||
|
376 | # the easy case: no (relevant) renames | |||
|
377 | if fc2.path() == self.path() and self.path() in actx: | |||
|
378 | return actx[self.path()] | |||
|
379 | ||||
|
380 | # the next easiest cases: unambiguous predecessor (name trumps | |||
|
381 | # history) | |||
|
382 | if self.path() in actx and fc2.path() not in actx: | |||
|
383 | return actx[self.path()] | |||
|
384 | if fc2.path() in actx and self.path() not in actx: | |||
|
385 | return actx[fc2.path()] | |||
|
386 | ||||
|
387 | # do a full traversal | |||
|
388 | amap = self.ancestormap() | |||
|
389 | bmap = fc2.ancestormap() | |||
|
390 | ||||
|
391 | def parents(x): | |||
|
392 | f, n = x | |||
|
393 | p = amap.get(n) or bmap.get(n) | |||
|
394 | if not p: | |||
|
395 | return [] | |||
|
396 | ||||
|
397 | return [(p[3] or f, p[0]), (f, p[1])] | |||
|
398 | ||||
|
399 | a = (self.path(), self.filenode()) | |||
|
400 | b = (fc2.path(), fc2.filenode()) | |||
|
401 | result = ancestor.genericancestor(a, b, parents) | |||
|
402 | if result: | |||
|
403 | f, n = result | |||
|
404 | r = remotefilectx(self._repo, f, fileid=n, | |||
|
405 | ancestormap=amap) | |||
|
406 | return r | |||
|
407 | ||||
|
408 | return None | |||
|
409 | ||||
|
410 | def annotate(self, *args, **kwargs): | |||
|
411 | introctx = self | |||
|
412 | prefetchskip = kwargs.pop(r'prefetchskip', None) | |||
|
413 | if prefetchskip: | |||
|
414 | # use introrev so prefetchskip can be accurately tested | |||
|
415 | introrev = self.introrev() | |||
|
416 | if self.rev() != introrev: | |||
|
417 | introctx = remotefilectx(self._repo, self._path, | |||
|
418 | changeid=introrev, | |||
|
419 | fileid=self._filenode, | |||
|
420 | filelog=self._filelog, | |||
|
421 | ancestormap=self._ancestormap) | |||
|
422 | ||||
|
423 | # like self.ancestors, but append to "fetch" and skip visiting parents | |||
|
424 | # of nodes in "prefetchskip". | |||
|
425 | fetch = [] | |||
|
426 | seen = set() | |||
|
427 | queue = collections.deque((introctx,)) | |||
|
428 | seen.add(introctx.node()) | |||
|
429 | while queue: | |||
|
430 | current = queue.pop() | |||
|
431 | if current.filenode() != self.filenode(): | |||
|
432 | # this is a "joint point". fastannotate needs contents of | |||
|
433 | # "joint point"s to calculate diffs for side branches. | |||
|
434 | fetch.append((current.path(), hex(current.filenode()))) | |||
|
435 | if prefetchskip and current in prefetchskip: | |||
|
436 | continue | |||
|
437 | for parent in current.parents(): | |||
|
438 | if parent.node() not in seen: | |||
|
439 | seen.add(parent.node()) | |||
|
440 | queue.append(parent) | |||
|
441 | ||||
|
442 | self._repo.ui.debug('remotefilelog: prefetching %d files ' | |||
|
443 | 'for annotate\n' % len(fetch)) | |||
|
444 | if fetch: | |||
|
445 | self._repo.fileservice.prefetch(fetch) | |||
|
446 | return super(remotefilectx, self).annotate(*args, **kwargs) | |||
|
447 | ||||
|
448 | # Return an empty list so that hg serve and thg don't stack trace | |||
|
449 | def children(self): | |||
|
450 | return [] | |||
|
451 | ||||
|
452 | class remoteworkingfilectx(context.workingfilectx, remotefilectx): | |||
|
453 | def __init__(self, repo, path, filelog=None, workingctx=None): | |||
|
454 | self._ancestormap = None | |||
|
455 | return super(remoteworkingfilectx, self).__init__(repo, path, | |||
|
456 | filelog, workingctx) | |||
|
457 | ||||
|
458 | def parents(self): | |||
|
459 | return remotefilectx.parents(self) | |||
|
460 | ||||
|
461 | def ancestormap(self): | |||
|
462 | if not self._ancestormap: | |||
|
463 | path = self._path | |||
|
464 | pcl = self._changectx._parents | |||
|
465 | renamed = self.renamed() | |||
|
466 | ||||
|
467 | if renamed: | |||
|
468 | p1 = renamed | |||
|
469 | else: | |||
|
470 | p1 = (path, pcl[0]._manifest.get(path, nullid)) | |||
|
471 | ||||
|
472 | p2 = (path, nullid) | |||
|
473 | if len(pcl) > 1: | |||
|
474 | p2 = (path, pcl[1]._manifest.get(path, nullid)) | |||
|
475 | ||||
|
476 | m = {} | |||
|
477 | if p1[1] != nullid: | |||
|
478 | p1ctx = self._repo.filectx(p1[0], fileid=p1[1]) | |||
|
479 | m.update(p1ctx.filelog().ancestormap(p1[1])) | |||
|
480 | ||||
|
481 | if p2[1] != nullid: | |||
|
482 | p2ctx = self._repo.filectx(p2[0], fileid=p2[1]) | |||
|
483 | m.update(p2ctx.filelog().ancestormap(p2[1])) | |||
|
484 | ||||
|
485 | copyfrom = '' | |||
|
486 | if renamed: | |||
|
487 | copyfrom = renamed[0] | |||
|
488 | m[None] = (p1[1], p2[1], nullid, copyfrom) | |||
|
489 | self._ancestormap = m | |||
|
490 | ||||
|
491 | return self._ancestormap |
@@ -0,0 +1,454 b'' | |||||
|
1 | # remotefilelog.py - filelog implementation where filelog history is stored | |||
|
2 | # remotely | |||
|
3 | # | |||
|
4 | # Copyright 2013 Facebook, Inc. | |||
|
5 | # | |||
|
6 | # This software may be used and distributed according to the terms of the | |||
|
7 | # GNU General Public License version 2 or any later version. | |||
|
8 | from __future__ import absolute_import | |||
|
9 | ||||
|
10 | import collections | |||
|
11 | import os | |||
|
12 | ||||
|
13 | from mercurial.node import bin, nullid | |||
|
14 | from mercurial.i18n import _ | |||
|
15 | from mercurial import ( | |||
|
16 | ancestor, | |||
|
17 | error, | |||
|
18 | mdiff, | |||
|
19 | revlog, | |||
|
20 | ) | |||
|
21 | from mercurial.utils import storageutil | |||
|
22 | ||||
|
23 | from . import ( | |||
|
24 | constants, | |||
|
25 | fileserverclient, | |||
|
26 | shallowutil, | |||
|
27 | ) | |||
|
28 | ||||
|
29 | class remotefilelognodemap(object): | |||
|
30 | def __init__(self, filename, store): | |||
|
31 | self._filename = filename | |||
|
32 | self._store = store | |||
|
33 | ||||
|
34 | def __contains__(self, node): | |||
|
35 | missing = self._store.getmissing([(self._filename, node)]) | |||
|
36 | return not bool(missing) | |||
|
37 | ||||
|
38 | def __get__(self, node): | |||
|
39 | if node not in self: | |||
|
40 | raise KeyError(node) | |||
|
41 | return node | |||
|
42 | ||||
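The shim above means a plain membership test is really a content-store query; roughly (a sketch with an illustrative helper name):

    def nodemapcontains(repo, path, node):
        """What ``node in repo.file(path).nodemap`` boils down to (sketch)."""
        return not repo.contentstore.getmissing([(path, node)])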
|
43 | class remotefilelog(object): | |||
|
44 | ||||
|
45 | _generaldelta = True | |||
|
46 | ||||
|
47 | def __init__(self, opener, path, repo): | |||
|
48 | self.opener = opener | |||
|
49 | self.filename = path | |||
|
50 | self.repo = repo | |||
|
51 | self.nodemap = remotefilelognodemap(self.filename, repo.contentstore) | |||
|
52 | ||||
|
53 | self.version = 1 | |||
|
54 | ||||
|
55 | def read(self, node): | |||
|
56 | """returns the file contents at this node""" | |||
|
57 | t = self.revision(node) | |||
|
58 | if not t.startswith('\1\n'): | |||
|
59 | return t | |||
|
60 | s = t.index('\1\n', 2) | |||
|
61 | return t[s + 2:] | |||
|
62 | ||||
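`read` is undoing the historical revlog metadata envelope: text beginning with `\1\n` carries `key: value` metadata lines up to the next `\1\n`. A self-contained sketch of that framing (hedged; the real parsing lives in `storageutil.parsemeta`):

    def splitmeta(t):
        """Split revlog-style file text into (meta dict, data) -- sketch."""
        if not t.startswith('\1\n'):
            return {}, t
        end = t.index('\1\n', 2)
        meta = dict(line.split(': ', 1) for line in t[2:end].split('\n'))
        return meta, t[end + 2:]

    meta, data = splitmeta('\1\ncopy: old.txt\ncopyrev: ' + '0' * 40 + '\1\nhi')
    # meta == {'copy': 'old.txt', 'copyrev': '00...0'}; data == 'hi'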
|
63 | def add(self, text, meta, transaction, linknode, p1=None, p2=None): | |||
|
64 | hashtext = text | |||
|
65 | ||||
|
66 | # hash with the metadata, like in vanilla filelogs | |||
|
67 | hashtext = shallowutil.createrevlogtext(text, meta.get('copy'), | |||
|
68 | meta.get('copyrev')) | |||
|
69 | node = storageutil.hashrevisionsha1(hashtext, p1, p2) | |||
|
70 | return self.addrevision(hashtext, transaction, linknode, p1, p2, | |||
|
71 | node=node) | |||
|
72 | ||||
|
73 | def _createfileblob(self, text, meta, flags, p1, p2, node, linknode): | |||
|
74 | # text passed to "_createfileblob" does not include filelog metadata | |||
|
75 | header = shallowutil.buildfileblobheader(len(text), flags) | |||
|
76 | data = "%s\0%s" % (header, text) | |||
|
77 | ||||
|
78 | realp1 = p1 | |||
|
79 | copyfrom = "" | |||
|
80 | if meta and 'copy' in meta: | |||
|
81 | copyfrom = meta['copy'] | |||
|
82 | realp1 = bin(meta['copyrev']) | |||
|
83 | ||||
|
84 | data += "%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom) | |||
|
85 | ||||
|
86 | visited = set() | |||
|
87 | ||||
|
88 | pancestors = {} | |||
|
89 | queue = [] | |||
|
90 | if realp1 != nullid: | |||
|
91 | p1flog = self | |||
|
92 | if copyfrom: | |||
|
93 | p1flog = remotefilelog(self.opener, copyfrom, self.repo) | |||
|
94 | ||||
|
95 | pancestors.update(p1flog.ancestormap(realp1)) | |||
|
96 | queue.append(realp1) | |||
|
97 | visited.add(realp1) | |||
|
98 | if p2 != nullid: | |||
|
99 | pancestors.update(self.ancestormap(p2)) | |||
|
100 | queue.append(p2) | |||
|
101 | visited.add(p2) | |||
|
102 | ||||
|
103 | ancestortext = "" | |||
|
104 | ||||
|
105 | # add the ancestors in topological order | |||
|
106 | while queue: | |||
|
107 | c = queue.pop(0) | |||
|
108 | pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c] | |||
|
109 | ||||
|
110 | pacopyfrom = pacopyfrom or '' | |||
|
111 | ancestortext += "%s%s%s%s%s\0" % ( | |||
|
112 | c, pa1, pa2, ancestorlinknode, pacopyfrom) | |||
|
113 | ||||
|
114 | if pa1 != nullid and pa1 not in visited: | |||
|
115 | queue.append(pa1) | |||
|
116 | visited.add(pa1) | |||
|
117 | if pa2 != nullid and pa2 not in visited: | |||
|
118 | queue.append(pa2) | |||
|
119 | visited.add(pa2) | |||
|
120 | ||||
|
121 | data += ancestortext | |||
|
122 | ||||
|
123 | return data | |||
|
124 | ||||
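Each record emitted above is four fixed-width 20-byte nodes followed by a NUL-terminated copy source, so the blob can be parsed back with plain offsets; a hedged sketch of the reader side (the real one is `shallowutil.ancestormap`):

    def unpackancestors(data, offset):
        """Parse '%s%s%s%s%s\0'-style records starting at ``offset`` -- sketch."""
        ancestors = {}
        while offset < len(data):
            node = data[offset:offset + 20]
            p1 = data[offset + 20:offset + 40]
            p2 = data[offset + 40:offset + 60]
            linknode = data[offset + 60:offset + 80]
            end = data.index('\0', offset + 80)
            copyfrom = data[offset + 80:end]
            ancestors[node] = (p1, p2, linknode, copyfrom)
            offset = end + 1
        return ancestors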
|
125 | def addrevision(self, text, transaction, linknode, p1, p2, cachedelta=None, | |||
|
126 | node=None, flags=revlog.REVIDX_DEFAULT_FLAGS): | |||
|
127 | # text passed to "addrevision" includes hg filelog metadata header | |||
|
128 | if node is None: | |||
|
129 | node = storageutil.hashrevisionsha1(text, p1, p2) | |||
|
130 | ||||
|
131 | meta, metaoffset = storageutil.parsemeta(text) | |||
|
132 | rawtext, validatehash = self._processflags(text, flags, 'write') | |||
|
133 | return self.addrawrevision(rawtext, transaction, linknode, p1, p2, | |||
|
134 | node, flags, cachedelta, | |||
|
135 | _metatuple=(meta, metaoffset)) | |||
|
136 | ||||
|
137 | def addrawrevision(self, rawtext, transaction, linknode, p1, p2, node, | |||
|
138 | flags, cachedelta=None, _metatuple=None): | |||
|
139 | if _metatuple: | |||
|
140 | # _metatuple: used by "addrevision" internally by remotefilelog | |||
|
141 | # meta was parsed confidently | |||
|
142 | meta, metaoffset = _metatuple | |||
|
143 | else: | |||
|
144 | # not from self.addrevision, but something else (repo._filecommit) | |||
|
145 | # calls addrawrevision directly. remotefilelog needs to get and | |||
|
146 | # strip filelog metadata. | |||
|
147 | # we don't have confidence about whether rawtext contains filelog | |||
|
148 | # metadata or not (flag processor could replace it), so we just | |||
|
149 | # parse it as best-effort. | |||
|
150 | # in LFS (flags != 0)'s case, the best way is to call LFS code to | |||
|
151 | # get the meta information, instead of storageutil.parsemeta. | |||
|
152 | meta, metaoffset = storageutil.parsemeta(rawtext) | |||
|
153 | if flags != 0: | |||
|
154 | # when flags != 0, be conservative and do not mangle rawtext, since | |||
|
155 | # a read flag processor expects the text not being mangled at all. | |||
|
156 | metaoffset = 0 | |||
|
157 | if metaoffset: | |||
|
158 | # remotefilelog fileblob stores copy metadata in its ancestortext, | |||
|
159 | # not its main blob. so we need to remove filelog metadata | |||
|
160 | # (containing copy information) from text. | |||
|
161 | blobtext = rawtext[metaoffset:] | |||
|
162 | else: | |||
|
163 | blobtext = rawtext | |||
|
164 | data = self._createfileblob(blobtext, meta, flags, p1, p2, node, | |||
|
165 | linknode) | |||
|
166 | self.repo.contentstore.addremotefilelognode(self.filename, node, data) | |||
|
167 | ||||
|
168 | return node | |||
|
169 | ||||
|
170 | def renamed(self, node): | |||
|
171 | ancestors = self.repo.metadatastore.getancestors(self.filename, node) | |||
|
172 | p1, p2, linknode, copyfrom = ancestors[node] | |||
|
173 | if copyfrom: | |||
|
174 | return (copyfrom, p1) | |||
|
175 | ||||
|
176 | return False | |||
|
177 | ||||
|
178 | def size(self, node): | |||
|
179 | """return the size of a given revision""" | |||
|
180 | return len(self.read(node)) | |||
|
181 | ||||
|
182 | rawsize = size | |||
|
183 | ||||
|
184 | def cmp(self, node, text): | |||
|
185 | """compare text with a given file revision | |||
|
186 | ||||
|
187 | returns True if text is different from what is stored. | |||
|
188 | """ | |||
|
189 | ||||
|
190 | if node == nullid: | |||
|
191 | return True | |||
|
192 | ||||
|
193 | nodetext = self.read(node) | |||
|
194 | return nodetext != text | |||
|
195 | ||||
|
196 | def __nonzero__(self): | |||
|
197 | return True | |||
|
198 | ||||
|
199 | __bool__ = __nonzero__ | |||
|
200 | ||||
|
201 | def __len__(self): | |||
|
202 | if self.filename == '.hgtags': | |||
|
203 | # The length of .hgtags is used to fast path tag checking. | |||
|
204 | # remotefilelog doesn't support .hgtags since the entire .hgtags | |||
|
205 | # history is needed. Use the excludepattern setting to make | |||
|
206 | # .hgtags a normal filelog. | |||
|
207 | return 0 | |||
|
208 | ||||
|
209 | raise RuntimeError("len not supported") | |||
|
210 | ||||
|
211 | def empty(self): | |||
|
212 | return False | |||
|
213 | ||||
|
214 | def flags(self, node): | |||
|
215 | if isinstance(node, int): | |||
|
216 | raise error.ProgrammingError( | |||
|
217 | 'remotefilelog does not accept integer rev for flags') | |||
|
218 | store = self.repo.contentstore | |||
|
219 | return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0) | |||
|
220 | ||||
|
221 | def parents(self, node): | |||
|
222 | if node == nullid: | |||
|
223 | return nullid, nullid | |||
|
224 | ||||
|
225 | ancestormap = self.repo.metadatastore.getancestors(self.filename, node) | |||
|
226 | p1, p2, linknode, copyfrom = ancestormap[node] | |||
|
227 | if copyfrom: | |||
|
228 | p1 = nullid | |||
|
229 | ||||
|
230 | return p1, p2 | |||
|
231 | ||||
|
232 | def parentrevs(self, rev): | |||
|
233 | # TODO(augie): this is a node and should be a rev, but for now | |||
|
234 | # nothing in core seems to actually break. | |||
|
235 | return self.parents(rev) | |||
|
236 | ||||
|
237 | def linknode(self, node): | |||
|
238 | ancestormap = self.repo.metadatastore.getancestors(self.filename, node) | |||
|
239 | p1, p2, linknode, copyfrom = ancestormap[node] | |||
|
240 | return linknode | |||
|
241 | ||||
|
242 | def linkrev(self, node): | |||
|
243 | return self.repo.unfiltered().changelog.rev(self.linknode(node)) | |||
|
244 | ||||
|
245 | def emitrevisions(self, nodes, nodesorder=None, revisiondata=False, | |||
|
246 | assumehaveparentrevisions=False, deltaprevious=False, | |||
|
247 | deltamode=None): | |||
|
248 | # we don't use any of these parameters here | |||
|
249 | del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious | |||
|
250 | del deltamode | |||
|
251 | prevnode = None | |||
|
252 | for node in nodes: | |||
|
253 | p1, p2 = self.parents(node) | |||
|
254 | if prevnode is None: | |||
|
255 | basenode = prevnode = p1 | |||
|
256 | if basenode == node: | |||
|
257 | basenode = nullid | |||
|
258 | if basenode != nullid: | |||
|
259 | revision = None | |||
|
260 | delta = self.revdiff(basenode, node) | |||
|
261 | else: | |||
|
262 | revision = self.revision(node, raw=True) | |||
|
263 | delta = None | |||
|
264 | yield revlog.revlogrevisiondelta( | |||
|
265 | node=node, | |||
|
266 | p1node=p1, | |||
|
267 | p2node=p2, | |||
|
268 | linknode=self.linknode(node), | |||
|
269 | basenode=basenode, | |||
|
270 | flags=self.flags(node), | |||
|
271 | baserevisionsize=None, | |||
|
272 | revision=revision, | |||
|
273 | delta=delta, | |||
|
274 | ) | |||
|
275 | ||||
|
276 | def revdiff(self, node1, node2): | |||
|
277 | return mdiff.textdiff(self.revision(node1, raw=True), | |||
|
278 | self.revision(node2, raw=True)) | |||
|
279 | ||||
|
280 | def lookup(self, node): | |||
|
281 | if len(node) == 40: | |||
|
282 | node = bin(node) | |||
|
283 | if len(node) != 20: | |||
|
284 | raise error.LookupError(node, self.filename, | |||
|
285 | _('invalid lookup input')) | |||
|
286 | ||||
|
287 | return node | |||
|
288 | ||||
|
289 | def rev(self, node): | |||
|
290 | # This is a hack to make TortoiseHG work. | |||
|
291 | return node | |||
|
292 | ||||
|
293 | def node(self, rev): | |||
|
294 | # This is a hack. | |||
|
295 | if isinstance(rev, int): | |||
|
296 | raise error.ProgrammingError( | |||
|
297 | 'remotefilelog does not convert integer rev to node') | |||
|
298 | return rev | |||
|
299 | ||||
|
300 | def revision(self, node, raw=False): | |||
|
301 | """returns the revlog contents at this node. | |||
|
302 | this includes the meta data traditionally included in file revlogs. | |||
|
303 | this is generally only used for bundling and communicating with vanilla | |||
|
304 | hg clients. | |||
|
305 | """ | |||
|
306 | if node == nullid: | |||
|
307 | return "" | |||
|
308 | if len(node) != 20: | |||
|
309 | raise error.LookupError(node, self.filename, | |||
|
310 | _('invalid revision input')) | |||
|
311 | ||||
|
312 | store = self.repo.contentstore | |||
|
313 | rawtext = store.get(self.filename, node) | |||
|
314 | if raw: | |||
|
315 | return rawtext | |||
|
316 | flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0) | |||
|
317 | if flags == 0: | |||
|
318 | return rawtext | |||
|
319 | text, verifyhash = self._processflags(rawtext, flags, 'read') | |||
|
320 | return text | |||
|
321 | ||||
|
322 | def _processflags(self, text, flags, operation, raw=False): | |||
|
323 | # mostly copied from hg/mercurial/revlog.py | |||
|
324 | validatehash = True | |||
|
325 | orderedflags = revlog.REVIDX_FLAGS_ORDER | |||
|
326 | if operation == 'write': | |||
|
327 | orderedflags = reversed(orderedflags) | |||
|
328 | for flag in orderedflags: | |||
|
329 | if flag & flags: | |||
|
330 | vhash = True | |||
|
331 | if flag not in revlog._flagprocessors: | |||
|
332 | message = _("missing processor for flag '%#x'") % (flag) | |||
|
333 | raise revlog.RevlogError(message) | |||
|
334 | readfunc, writefunc, rawfunc = revlog._flagprocessors[flag] | |||
|
335 | if raw: | |||
|
336 | vhash = rawfunc(self, text) | |||
|
337 | elif operation == 'read': | |||
|
338 | text, vhash = readfunc(self, text) | |||
|
339 | elif operation == 'write': | |||
|
340 | text, vhash = writefunc(self, text) | |||
|
341 | validatehash = validatehash and vhash | |||
|
342 | return text, validatehash | |||
|
343 | ||||
|
344 | def _read(self, id): | |||
|
345 | """reads the raw file blob from disk, cache, or server""" | |||
|
346 | fileservice = self.repo.fileservice | |||
|
347 | localcache = fileservice.localcache | |||
|
348 | cachekey = fileserverclient.getcachekey(self.repo.name, self.filename, | |||
|
349 | id) | |||
|
350 | try: | |||
|
351 | return localcache.read(cachekey) | |||
|
352 | except KeyError: | |||
|
353 | pass | |||
|
354 | ||||
|
355 | localkey = fileserverclient.getlocalkey(self.filename, id) | |||
|
356 | localpath = os.path.join(self.localpath, localkey) | |||
|
357 | try: | |||
|
358 | return shallowutil.readfile(localpath) | |||
|
359 | except IOError: | |||
|
360 | pass | |||
|
361 | ||||
|
362 | fileservice.prefetch([(self.filename, id)]) | |||
|
363 | try: | |||
|
364 | return localcache.read(cachekey) | |||
|
365 | except KeyError: | |||
|
366 | pass | |||
|
367 | ||||
|
368 | raise error.LookupError(id, self.filename, _('no node')) | |||
|
369 | ||||
|
370 | def ancestormap(self, node): | |||
|
371 | return self.repo.metadatastore.getancestors(self.filename, node) | |||
|
372 | ||||
|
373 | def ancestor(self, a, b): | |||
|
374 | if a == nullid or b == nullid: | |||
|
375 | return nullid | |||
|
376 | ||||
|
377 | revmap, parentfunc = self._buildrevgraph(a, b) | |||
|
378 | nodemap = dict(((v, k) for (k, v) in revmap.iteritems())) | |||
|
379 | ||||
|
380 | ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b]) | |||
|
381 | if ancs: | |||
|
382 | # choose a consistent winner when there's a tie | |||
|
383 | return min(map(nodemap.__getitem__, ancs)) | |||
|
384 | return nullid | |||
|
385 | ||||
|
386 | def commonancestorsheads(self, a, b): | |||
|
387 | """calculate all the heads of the common ancestors of nodes a and b""" | |||
|
388 | ||||
|
389 | if a == nullid or b == nullid: | |||
|
390 | return nullid | |||
|
391 | ||||
|
392 | revmap, parentfunc = self._buildrevgraph(a, b) | |||
|
393 | nodemap = dict(((v, k) for (k, v) in revmap.iteritems())) | |||
|
394 | ||||
|
395 | ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b]) | |||
|
396 | return map(nodemap.__getitem__, ancs) | |||
|
397 | ||||
|
398 | def _buildrevgraph(self, a, b): | |||
|
399 | """Builds a numeric revision graph for the given two nodes. | |||
|
400 | Returns a node->rev map and a rev->[revs] parent function. | |||
|
401 | """ | |||
|
402 | amap = self.ancestormap(a) | |||
|
403 | bmap = self.ancestormap(b) | |||
|
404 | ||||
|
405 | # Union the two maps | |||
|
406 | parentsmap = collections.defaultdict(list) | |||
|
407 | allparents = set() | |||
|
408 | for mapping in (amap, bmap): | |||
|
409 | for node, pdata in mapping.iteritems(): | |||
|
410 | parents = parentsmap[node] | |||
|
411 | p1, p2, linknode, copyfrom = pdata | |||
|
412 | # Don't follow renames (copyfrom). | |||
|
413 | # remotefilectx.ancestor does that. | |||
|
414 | if p1 != nullid and not copyfrom: | |||
|
415 | parents.append(p1) | |||
|
416 | allparents.add(p1) | |||
|
417 | if p2 != nullid: | |||
|
418 | parents.append(p2) | |||
|
419 | allparents.add(p2) | |||
|
420 | ||||
|
421 | # Breadth first traversal to build linkrev graph | |||
|
422 | parentrevs = collections.defaultdict(list) | |||
|
423 | revmap = {} | |||
|
424 | queue = collections.deque(((None, n) for n in parentsmap | |||
|
425 | if n not in allparents)) | |||
|
426 | while queue: | |||
|
427 | prevrev, current = queue.pop() | |||
|
428 | if current in revmap: | |||
|
429 | if prevrev: | |||
|
430 | parentrevs[prevrev].append(revmap[current]) | |||
|
431 | continue | |||
|
432 | ||||
|
433 | # Assign linkrevs in reverse order, so start at | |||
|
434 | # len(parentsmap) and work backwards. | |||
|
435 | currentrev = len(parentsmap) - len(revmap) - 1 | |||
|
436 | revmap[current] = currentrev | |||
|
437 | ||||
|
438 | if prevrev: | |||
|
439 | parentrevs[prevrev].append(currentrev) | |||
|
440 | ||||
|
441 | for parent in parentsmap.get(current): | |||
|
442 | queue.appendleft((currentrev, parent)) | |||
|
443 | ||||
|
444 | return revmap, parentrevs.__getitem__ | |||
|
445 | ||||
|
446 | def strip(self, minlink, transaction): | |||
|
447 | pass | |||
|
448 | ||||
|
449 | # misc unused things | |||
|
450 | def files(self): | |||
|
451 | return [] | |||
|
452 | ||||
|
453 | def checksize(self): | |||
|
454 | return 0, 0 |
@@ -0,0 +1,404 b'' | |||||
|
1 | # remotefilelogserver.py - server logic for a remotefilelog server | |||
|
2 | # | |||
|
3 | # Copyright 2013 Facebook, Inc. | |||
|
4 | # | |||
|
5 | # This software may be used and distributed according to the terms of the | |||
|
6 | # GNU General Public License version 2 or any later version. | |||
|
7 | from __future__ import absolute_import | |||
|
8 | ||||
|
9 | import errno | |||
|
10 | import os | |||
|
11 | import stat | |||
|
12 | import time | |||
|
13 | import zlib | |||
|
14 | ||||
|
15 | from mercurial.i18n import _ | |||
|
16 | from mercurial.node import bin, hex, nullid | |||
|
17 | from mercurial import ( | |||
|
18 | changegroup, | |||
|
19 | changelog, | |||
|
20 | context, | |||
|
21 | error, | |||
|
22 | extensions, | |||
|
23 | match, | |||
|
24 | store, | |||
|
25 | streamclone, | |||
|
26 | util, | |||
|
27 | wireprotoserver, | |||
|
28 | wireprototypes, | |||
|
29 | wireprotov1server, | |||
|
30 | ) | |||
|
31 | from . import ( | |||
|
32 | constants, | |||
|
33 | shallowutil, | |||
|
34 | ) | |||
|
35 | ||||
|
36 | _sshv1server = wireprotoserver.sshv1protocolhandler | |||
|
37 | ||||
|
38 | def setupserver(ui, repo): | |||
|
39 | """Sets up a normal Mercurial repo so it can serve files to shallow repos. | |||
|
40 | """ | |||
|
41 | onetimesetup(ui) | |||
|
42 | ||||
|
43 | # don't send files to shallow clients during pulls | |||
|
44 | def generatefiles(orig, self, changedfiles, linknodes, commonrevs, source, | |||
|
45 | *args, **kwargs): | |||
|
46 | caps = self._bundlecaps or [] | |||
|
47 | if constants.BUNDLE2_CAPABLITY in caps: | |||
|
48 | # only send files that don't match the specified patterns | |||
|
49 | includepattern = None | |||
|
50 | excludepattern = None | |||
|
51 | for cap in (self._bundlecaps or []): | |||
|
52 | if cap.startswith("includepattern="): | |||
|
53 | includepattern = cap[len("includepattern="):].split('\0') | |||
|
54 | elif cap.startswith("excludepattern="): | |||
|
55 | excludepattern = cap[len("excludepattern="):].split('\0') | |||
|
56 | ||||
|
57 | m = match.always(repo.root, '') | |||
|
58 | if includepattern or excludepattern: | |||
|
59 | m = match.match(repo.root, '', None, | |||
|
60 | includepattern, excludepattern) | |||
|
61 | ||||
|
62 | changedfiles = list([f for f in changedfiles if not m(f)]) | |||
|
63 | return orig(self, changedfiles, linknodes, commonrevs, source, | |||
|
64 | *args, **kwargs) | |||
|
65 | ||||
|
66 | extensions.wrapfunction( | |||
|
67 | changegroup.cgpacker, 'generatefiles', generatefiles) | |||
|
68 | ||||
|
69 | onetime = False | |||
|
70 | def onetimesetup(ui): | |||
|
71 | """Configures the wireprotocol for both clients and servers. | |||
|
72 | """ | |||
|
73 | global onetime | |||
|
74 | if onetime: | |||
|
75 | return | |||
|
76 | onetime = True | |||
|
77 | ||||
|
78 | # support file content requests | |||
|
79 | wireprotov1server.wireprotocommand( | |||
|
80 | 'x_rfl_getflogheads', 'path', permission='pull')(getflogheads) | |||
|
81 | wireprotov1server.wireprotocommand( | |||
|
82 | 'x_rfl_getfiles', '', permission='pull')(getfiles) | |||
|
83 | wireprotov1server.wireprotocommand( | |||
|
84 | 'x_rfl_getfile', 'file node', permission='pull')(getfile) | |||
|
85 | ||||
|
86 | class streamstate(object): | |||
|
87 | match = None | |||
|
88 | shallowremote = False | |||
|
89 | noflatmf = False | |||
|
90 | state = streamstate() | |||
|
91 | ||||
|
92 | def stream_out_shallow(repo, proto, other): | |||
|
93 | includepattern = None | |||
|
94 | excludepattern = None | |||
|
95 | raw = other.get('includepattern') | |||
|
96 | if raw: | |||
|
97 | includepattern = raw.split('\0') | |||
|
98 | raw = other.get('excludepattern') | |||
|
99 | if raw: | |||
|
100 | excludepattern = raw.split('\0') | |||
|
101 | ||||
|
102 | oldshallow = state.shallowremote | |||
|
103 | oldmatch = state.match | |||
|
104 | oldnoflatmf = state.noflatmf | |||
|
105 | try: | |||
|
106 | state.shallowremote = True | |||
|
107 | state.match = match.always(repo.root, '') | |||
|
108 | state.noflatmf = other.get('noflatmanifest') == 'True' | |||
|
109 | if includepattern or excludepattern: | |||
|
110 | state.match = match.match(repo.root, '', None, | |||
|
111 | includepattern, excludepattern) | |||
|
112 | streamres = wireprotov1server.stream(repo, proto) | |||
|
113 | ||||
|
114 | # Force the first value to execute, so the file list is computed | |||
|
115 | # within the try/finally scope | |||
|
116 | first = next(streamres.gen) | |||
|
117 | second = next(streamres.gen) | |||
|
118 | def gen(): | |||
|
119 | yield first | |||
|
120 | yield second | |||
|
121 | for value in streamres.gen: | |||
|
122 | yield value | |||
|
123 | return wireprototypes.streamres(gen()) | |||
|
124 | finally: | |||
|
125 | state.shallowremote = oldshallow | |||
|
126 | state.match = oldmatch | |||
|
127 | state.noflatmf = oldnoflatmf | |||
|
128 | ||||
|
129 | wireprotov1server.commands['stream_out_shallow'] = (stream_out_shallow, '*') | |||
|
130 | ||||
|
131 | # don't clone filelogs to shallow clients | |||
|
132 | def _walkstreamfiles(orig, repo, matcher=None): | |||
|
133 | if state.shallowremote: | |||
|
134 | # if we are shallow ourselves, stream our local commits | |||
|
135 | if shallowutil.isenabled(repo): | |||
|
136 | striplen = len(repo.store.path) + 1 | |||
|
137 | readdir = repo.store.rawvfs.readdir | |||
|
138 | visit = [os.path.join(repo.store.path, 'data')] | |||
|
139 | while visit: | |||
|
140 | p = visit.pop() | |||
|
141 | for f, kind, st in readdir(p, stat=True): | |||
|
142 | fp = p + '/' + f | |||
|
143 | if kind == stat.S_IFREG: | |||
|
144 | if not fp.endswith('.i') and not fp.endswith('.d'): | |||
|
145 | n = util.pconvert(fp[striplen:]) | |||
|
146 | yield (store.decodedir(n), n, st.st_size) | |||
|
147 | if kind == stat.S_IFDIR: | |||
|
148 | visit.append(fp) | |||
|
149 | ||||
|
150 | if 'treemanifest' in repo.requirements: | |||
|
151 | for (u, e, s) in repo.store.datafiles(): | |||
|
152 | if (u.startswith('meta/') and | |||
|
153 | (u.endswith('.i') or u.endswith('.d'))): | |||
|
154 | yield (u, e, s) | |||
|
155 | ||||
|
156 | # Return .d and .i files that do not match the shallow pattern | |||
|
157 | match = state.match | |||
|
158 | if match and not match.always(): | |||
|
159 | for (u, e, s) in repo.store.datafiles(): | |||
|
160 | f = u[5:-2] # trim data/... and .i/.d | |||
|
161 | if not state.match(f): | |||
|
162 | yield (u, e, s) | |||
|
163 | ||||
|
164 | for x in repo.store.topfiles(): | |||
|
165 | if state.noflatmf and x[0][:11] == '00manifest.': | |||
|
166 | continue | |||
|
167 | yield x | |||
|
168 | ||||
|
169 | elif shallowutil.isenabled(repo): | |||
|
170 | # don't allow cloning from a shallow repo to a full repo | |||
|
171 | # since it would require fetching every version of every | |||
|
172 | # file in order to create the revlogs. | |||
|
173 | raise error.Abort(_("Cannot clone from a shallow repo " | |||
|
174 | "to a full repo.")) | |||
|
175 | else: | |||
|
176 | for x in orig(repo, matcher): | |||
|
177 | yield x | |||
|
178 | ||||
|
179 | extensions.wrapfunction(streamclone, '_walkstreamfiles', _walkstreamfiles) | |||
|
180 | ||||
|
181 | # expose remotefilelog capabilities | |||
|
182 | def _capabilities(orig, repo, proto): | |||
|
183 | caps = orig(repo, proto) | |||
|
184 | if (shallowutil.isenabled(repo) or ui.configbool('remotefilelog', | |||
|
185 | 'server')): | |||
|
186 | if isinstance(proto, _sshv1server): | |||
|
187 | # legacy getfiles method which only works over ssh | |||
|
188 | caps.append(constants.NETWORK_CAP_LEGACY_SSH_GETFILES) | |||
|
189 | caps.append('x_rfl_getflogheads') | |||
|
190 | caps.append('x_rfl_getfile') | |||
|
191 | return caps | |||
|
192 | extensions.wrapfunction(wireprotov1server, '_capabilities', _capabilities) | |||
|
193 | ||||
|
194 | def _adjustlinkrev(orig, self, *args, **kwargs): | |||
|
195 | # When generating file blobs, taking the real path is too slow on large | |||
|
196 | # repos, so force it to just return the linkrev directly. | |||
|
197 | repo = self._repo | |||
|
198 | if util.safehasattr(repo, 'forcelinkrev') and repo.forcelinkrev: | |||
|
199 | return self._filelog.linkrev(self._filelog.rev(self._filenode)) | |||
|
200 | return orig(self, *args, **kwargs) | |||
|
201 | ||||
|
202 | extensions.wrapfunction( | |||
|
203 | context.basefilectx, '_adjustlinkrev', _adjustlinkrev) | |||
|
204 | ||||
|
205 | def _iscmd(orig, cmd): | |||
|
206 | if cmd == 'x_rfl_getfiles': | |||
|
207 | return False | |||
|
208 | return orig(cmd) | |||
|
209 | ||||
|
210 | extensions.wrapfunction(wireprotoserver, 'iscmd', _iscmd) | |||
|
211 | ||||
|
212 | def _loadfileblob(repo, cachepath, path, node): | |||
|
213 | filecachepath = os.path.join(cachepath, path, hex(node)) | |||
|
214 | if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0: | |||
|
215 | filectx = repo.filectx(path, fileid=node) | |||
|
216 | if filectx.node() == nullid: | |||
|
217 | repo.changelog = changelog.changelog(repo.svfs) | |||
|
218 | filectx = repo.filectx(path, fileid=node) | |||
|
219 | ||||
|
220 | text = createfileblob(filectx) | |||
|
221 | # TODO configurable compression engines | |||
|
222 | text = zlib.compress(text) | |||
|
223 | ||||
|
224 | # everything should be user & group read/writable | |||
|
225 | oldumask = os.umask(0o002) | |||
|
226 | try: | |||
|
227 | dirname = os.path.dirname(filecachepath) | |||
|
228 | if not os.path.exists(dirname): | |||
|
229 | try: | |||
|
230 | os.makedirs(dirname) | |||
|
231 | except OSError as ex: | |||
|
232 | if ex.errno != errno.EEXIST: | |||
|
233 | raise | |||
|
234 | ||||
|
235 | f = None | |||
|
236 | try: | |||
|
237 | f = util.atomictempfile(filecachepath, "wb") | |||
|
238 | f.write(text) | |||
|
239 | except (IOError, OSError): | |||
|
240 | # Don't abort if the user only has permission to read, | |||
|
241 | # and not write. | |||
|
242 | pass | |||
|
243 | finally: | |||
|
244 | if f: | |||
|
245 | f.close() | |||
|
246 | finally: | |||
|
247 | os.umask(oldumask) | |||
|
248 | else: | |||
|
249 | with open(filecachepath, "rb") as f: | |||
|
250 | text = f.read() | |||
|
251 | return text | |||
|
252 | ||||
|
253 | def getflogheads(repo, proto, path): | |||
|
254 | """A server api for requesting a filelog's heads | |||
|
255 | """ | |||
|
256 | flog = repo.file(path) | |||
|
257 | heads = flog.heads() | |||
|
258 | return '\n'.join((hex(head) for head in heads if head != nullid)) | |||
|
259 | ||||
|
260 | def getfile(repo, proto, file, node): | |||
|
261 | """A server api for requesting a particular version of a file. Can be used | |||
|
262 | in batches to request many files at once. The return protocol is: | |||
|
263 | <errorcode>\0<data/errormsg> where <errorcode> is 0 for success or | |||
|
264 | non-zero for an error. | |||
|
265 | ||||
|
266 | data is a compressed blob with revlog flag and ancestors information. See | |||
|
267 | createfileblob for its content. | |||
|
268 | """ | |||
|
269 | if shallowutil.isenabled(repo): | |||
|
270 | return '1\0' + _('cannot fetch remote files from shallow repo') | |||
|
271 | cachepath = repo.ui.config("remotefilelog", "servercachepath") | |||
|
272 | if not cachepath: | |||
|
273 | cachepath = os.path.join(repo.path, "remotefilelogcache") | |||
|
274 | node = bin(node.strip()) | |||
|
275 | if node == nullid: | |||
|
276 | return '0\0' | |||
|
277 | return '0\0' + _loadfileblob(repo, cachepath, file, node) | |||
|
278 | ||||
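A client only needs to split the reply on the first NUL to recover either the error message or the zlib-compressed fileblob produced by `_loadfileblob`; a hedged sketch:

    import zlib

    def parsegetfilereply(raw):
        """Decode a getfile reply into a raw fileblob -- illustrative sketch."""
        code, payload = raw.split('\0', 1)
        if code != '0':
            raise Exception('getfile error: %s' % payload)
        # a nullid request is answered with an empty payload
        return zlib.decompress(payload) if payload else ''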
|
279 | def getfiles(repo, proto): | |||
|
280 | """A server api for requesting particular versions of particular files. | |||
|
281 | """ | |||
|
282 | if shallowutil.isenabled(repo): | |||
|
283 | raise error.Abort(_('cannot fetch remote files from shallow repo')) | |||
|
284 | if not isinstance(proto, _sshv1server): | |||
|
285 | raise error.Abort(_('cannot fetch remote files over non-ssh protocol')) | |||
|
286 | ||||
|
287 | def streamer(): | |||
|
288 | fin = proto._fin | |||
|
289 | ||||
|
290 | cachepath = repo.ui.config("remotefilelog", "servercachepath") | |||
|
291 | if not cachepath: | |||
|
292 | cachepath = os.path.join(repo.path, "remotefilelogcache") | |||
|
293 | ||||
|
294 | while True: | |||
|
295 | request = fin.readline()[:-1] | |||
|
296 | if not request: | |||
|
297 | break | |||
|
298 | ||||
|
299 | node = bin(request[:40]) | |||
|
300 | if node == nullid: | |||
|
301 | yield '0\n' | |||
|
302 | continue | |||
|
303 | ||||
|
304 | path = request[40:] | |||
|
305 | ||||
|
306 | text = _loadfileblob(repo, cachepath, path, node) | |||
|
307 | ||||
|
308 | yield '%d\n%s' % (len(text), text) | |||
|
309 | ||||
|
310 | # it would be better to only flush after processing a whole batch | |||
|
311 | # but currently we don't know if there are more requests coming | |||
|
312 | proto._fout.flush() | |||
|
313 | return wireprototypes.streamres(streamer()) | |||
|
314 | ||||
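On the wire, each request line is the 40-character hex node immediately followed by the path, a blank line ends the batch, and each reply is a decimal length line followed by that many bytes. A hedged client-side sketch over an ssh pipe pair (`fin`/`fout` are illustrative file objects):

    def fetchfiles(fin, fout, keys):
        """keys: iterable of (path, hexnode) pairs -- illustrative sketch."""
        keys = list(keys)
        for path, hexnode in keys:
            fout.write('%s%s\n' % (hexnode, path))
        fout.write('\n')  # empty request line ends the batch on the server
        fout.flush()
        for _ in keys:
            size = int(fin.readline()[:-1])
            yield fin.read(size)  # zlib-compressed fileblob (empty for nullid)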
|
315 | def createfileblob(filectx): | |||
|
316 | """ | |||
|
317 | format: | |||
|
318 | v0: | |||
|
319 | str(len(rawtext)) + '\0' + rawtext + ancestortext | |||
|
320 | v1: | |||
|
321 | 'v1' + '\n' + metalist + '\0' + rawtext + ancestortext | |||
|
322 | metalist := metalist + '\n' + meta | meta | |||
|
323 | meta := sizemeta | flagmeta | |||
|
324 | sizemeta := METAKEYSIZE + str(len(rawtext)) | |||
|
325 | flagmeta := METAKEYFLAG + str(flag) | |||
|
326 | ||||
|
327 | note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a | |||
|
328 | length of 1. | |||
|
329 | """ | |||
|
330 | flog = filectx.filelog() | |||
|
331 | frev = filectx.filerev() | |||
|
332 | revlogflags = flog._revlog.flags(frev) | |||
|
333 | if revlogflags == 0: | |||
|
334 | # normal files | |||
|
335 | text = filectx.data() | |||
|
336 | else: | |||
|
337 | # lfs, read raw revision data | |||
|
338 | text = flog.revision(frev, raw=True) | |||
|
339 | ||||
|
340 | repo = filectx._repo | |||
|
341 | ||||
|
342 | ancestors = [filectx] | |||
|
343 | ||||
|
344 | try: | |||
|
345 | repo.forcelinkrev = True | |||
|
346 | ancestors.extend([f for f in filectx.ancestors()]) | |||
|
347 | ||||
|
348 | ancestortext = "" | |||
|
349 | for ancestorctx in ancestors: | |||
|
350 | parents = ancestorctx.parents() | |||
|
351 | p1 = nullid | |||
|
352 | p2 = nullid | |||
|
353 | if len(parents) > 0: | |||
|
354 | p1 = parents[0].filenode() | |||
|
355 | if len(parents) > 1: | |||
|
356 | p2 = parents[1].filenode() | |||
|
357 | ||||
|
358 | copyname = "" | |||
|
359 | rename = ancestorctx.renamed() | |||
|
360 | if rename: | |||
|
361 | copyname = rename[0] | |||
|
362 | linknode = ancestorctx.node() | |||
|
363 | ancestortext += "%s%s%s%s%s\0" % ( | |||
|
364 | ancestorctx.filenode(), p1, p2, linknode, | |||
|
365 | copyname) | |||
|
366 | finally: | |||
|
367 | repo.forcelinkrev = False | |||
|
368 | ||||
|
369 | header = shallowutil.buildfileblobheader(len(text), revlogflags) | |||
|
370 | ||||
|
371 | return "%s\0%s%s" % (header, text, ancestortext) | |||
|
372 | ||||
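Following the v1 grammar in the docstring, a header is `'v1'` plus newline-separated one-character-key metadata entries, NUL-terminated before the raw text; a hedged sketch (the real producer is `shallowutil.buildfileblobheader`, and `constants.METAKEYSIZE`/`constants.METAKEYFLAG` are the one-character keys):

    from . import constants  # METAKEYSIZE and METAKEYFLAG are 1-char keys

    def buildv1header_sketch(rawtextlen, flags):
        """Build a v1 fileblob header per the grammar above -- sketch only."""
        metalist = ['%s%d' % (constants.METAKEYSIZE, rawtextlen)]
        if flags:
            metalist.append('%s%d' % (constants.METAKEYFLAG, flags))
        return 'v1\n' + '\n'.join(metalist)

    # full blob: buildv1header_sketch(len(text), flags) + '\0' + text + ancestortext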
|
373 | def gcserver(ui, repo): | |||
|
374 | if not repo.ui.configbool("remotefilelog", "server"): | |||
|
375 | return | |||
|
376 | ||||
|
377 | neededfiles = set() | |||
|
378 | heads = repo.revs("heads(tip~25000:) - null") | |||
|
379 | ||||
|
380 | cachepath = repo.vfs.join("remotefilelogcache") | |||
|
381 | for head in heads: | |||
|
382 | mf = repo[head].manifest() | |||
|
383 | for filename, filenode in mf.iteritems(): | |||
|
384 | filecachepath = os.path.join(cachepath, filename, hex(filenode)) | |||
|
385 | neededfiles.add(filecachepath) | |||
|
386 | ||||
|
387 | # delete unneeded older files | |||
|
388 | days = repo.ui.configint("remotefilelog", "serverexpiration") | |||
|
389 | expiration = time.time() - (days * 24 * 60 * 60) | |||
|
390 | ||||
|
391 | progress = ui.makeprogress(_("removing old server cache"), unit="files") | |||
|
392 | progress.update(0) | |||
|
393 | for root, dirs, files in os.walk(cachepath): | |||
|
394 | for file in files: | |||
|
395 | filepath = os.path.join(root, file) | |||
|
396 | progress.increment() | |||
|
397 | if filepath in neededfiles: | |||
|
398 | continue | |||
|
399 | ||||
|
400 | stat = os.stat(filepath) | |||
|
401 | if stat.st_mtime < expiration: | |||
|
402 | os.remove(filepath) | |||
|
403 | ||||
|
404 | progress.complete() |
This diff has been collapsed as it changes many lines (778 lines changed).
@@ -0,0 +1,778 b'' | |||||
|
1 | from __future__ import absolute_import | |||
|
2 | ||||
|
3 | import os | |||
|
4 | import time | |||
|
5 | ||||
|
6 | from mercurial.i18n import _ | |||
|
7 | from mercurial.node import ( | |||
|
8 | nullid, | |||
|
9 | short, | |||
|
10 | ) | |||
|
11 | from mercurial import ( | |||
|
12 | encoding, | |||
|
13 | error, | |||
|
14 | mdiff, | |||
|
15 | policy, | |||
|
16 | pycompat, | |||
|
17 | scmutil, | |||
|
18 | util, | |||
|
19 | vfs, | |||
|
20 | ) | |||
|
21 | from mercurial.utils import procutil | |||
|
22 | from . import ( | |||
|
23 | constants, | |||
|
24 | contentstore, | |||
|
25 | datapack, | |||
|
26 | extutil, | |||
|
27 | historypack, | |||
|
28 | metadatastore, | |||
|
29 | shallowutil, | |||
|
30 | ) | |||
|
31 | ||||
|
32 | osutil = policy.importmod(r'osutil') | |||
|
33 | ||||
|
34 | class RepackAlreadyRunning(error.Abort): | |||
|
35 | pass | |||
|
36 | ||||
|
37 | def backgroundrepack(repo, incremental=True, packsonly=False): | |||
|
38 | cmd = [procutil.hgexecutable(), '-R', repo.origroot, 'repack'] | |||
|
39 | msg = _("(running background repack)\n") | |||
|
40 | if incremental: | |||
|
41 | cmd.append('--incremental') | |||
|
42 | msg = _("(running background incremental repack)\n") | |||
|
43 | if packsonly: | |||
|
44 | cmd.append('--packsonly') | |||
|
45 | repo.ui.warn(msg) | |||
|
46 | procutil.runbgcommand(cmd, encoding.environ) | |||
|
47 | ||||
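With both options set, the detached child reruns the extension's own command; concretely (read directly from the `cmd` list built above):

    # incremental=True, packsonly=True builds:
    #   [hg, '-R', repo.origroot, 'repack', '--incremental', '--packsonly']
    # i.e. the shell equivalent of:
    #   hg -R <repo root> repack --incremental --packsonly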
|
48 | def fullrepack(repo, options=None): | |||
|
49 | """If ``packsonly`` is True, stores creating only loose objects are skipped. | |||
|
50 | """ | |||
|
51 | if util.safehasattr(repo, 'shareddatastores'): | |||
|
52 | datasource = contentstore.unioncontentstore( | |||
|
53 | *repo.shareddatastores) | |||
|
54 | historysource = metadatastore.unionmetadatastore( | |||
|
55 | *repo.sharedhistorystores, | |||
|
56 | allowincomplete=True) | |||
|
57 | ||||
|
58 | packpath = shallowutil.getcachepackpath( | |||
|
59 | repo, | |||
|
60 | constants.FILEPACK_CATEGORY) | |||
|
61 | _runrepack(repo, datasource, historysource, packpath, | |||
|
62 | constants.FILEPACK_CATEGORY, options=options) | |||
|
63 | ||||
|
64 | if util.safehasattr(repo.manifestlog, 'datastore'): | |||
|
65 | localdata, shareddata = _getmanifeststores(repo) | |||
|
66 | lpackpath, ldstores, lhstores = localdata | |||
|
67 | spackpath, sdstores, shstores = shareddata | |||
|
68 | ||||
|
69 | # Repack the shared manifest store | |||
|
70 | datasource = contentstore.unioncontentstore(*sdstores) | |||
|
71 | historysource = metadatastore.unionmetadatastore( | |||
|
72 | *shstores, | |||
|
73 | allowincomplete=True) | |||
|
74 | _runrepack(repo, datasource, historysource, spackpath, | |||
|
75 | constants.TREEPACK_CATEGORY, options=options) | |||
|
76 | ||||
|
77 | # Repack the local manifest store | |||
|
78 | datasource = contentstore.unioncontentstore( | |||
|
79 | *ldstores, | |||
|
80 | allowincomplete=True) | |||
|
81 | historysource = metadatastore.unionmetadatastore( | |||
|
82 | *lhstores, | |||
|
83 | allowincomplete=True) | |||
|
84 | _runrepack(repo, datasource, historysource, lpackpath, | |||
|
85 | constants.TREEPACK_CATEGORY, options=options) | |||
|
86 | ||||
|
87 | def incrementalrepack(repo, options=None): | |||
|
88 | """This repacks the repo by looking at the distribution of pack files in the | |||
|
89 | repo and performing the most minimal repack to keep the repo in good shape. | |||
|
90 | """ | |||
|
91 | if util.safehasattr(repo, 'shareddatastores'): | |||
|
92 | packpath = shallowutil.getcachepackpath( | |||
|
93 | repo, | |||
|
94 | constants.FILEPACK_CATEGORY) | |||
|
95 | _incrementalrepack(repo, | |||
|
96 | repo.shareddatastores, | |||
|
97 | repo.sharedhistorystores, | |||
|
98 | packpath, | |||
|
99 | constants.FILEPACK_CATEGORY, | |||
|
100 | options=options) | |||
|
101 | ||||
|
102 | if util.safehasattr(repo.manifestlog, 'datastore'): | |||
|
103 | localdata, shareddata = _getmanifeststores(repo) | |||
|
104 | lpackpath, ldstores, lhstores = localdata | |||
|
105 | spackpath, sdstores, shstores = shareddata | |||
|
106 | ||||
|
107 | # Repack the shared manifest store | |||
|
108 | _incrementalrepack(repo, | |||
|
109 | sdstores, | |||
|
110 | shstores, | |||
|
111 | spackpath, | |||
|
112 | constants.TREEPACK_CATEGORY, | |||
|
113 | options=options) | |||
|
114 | ||||
|
115 | # Repack the local manifest store | |||
|
116 | _incrementalrepack(repo, | |||
|
117 | ldstores, | |||
|
118 | lhstores, | |||
|
119 | lpackpath, | |||
|
120 | constants.TREEPACK_CATEGORY, | |||
|
121 | allowincompletedata=True, | |||
|
122 | options=options) | |||
|
123 | ||||
|
124 | def _getmanifeststores(repo): | |||
|
125 | shareddatastores = repo.manifestlog.shareddatastores | |||
|
126 | localdatastores = repo.manifestlog.localdatastores | |||
|
127 | sharedhistorystores = repo.manifestlog.sharedhistorystores | |||
|
128 | localhistorystores = repo.manifestlog.localhistorystores | |||
|
129 | ||||
|
130 | sharedpackpath = shallowutil.getcachepackpath(repo, | |||
|
131 | constants.TREEPACK_CATEGORY) | |||
|
132 | localpackpath = shallowutil.getlocalpackpath(repo.svfs.vfs.base, | |||
|
133 | constants.TREEPACK_CATEGORY) | |||
|
134 | ||||
|
135 | return ((localpackpath, localdatastores, localhistorystores), | |||
|
136 | (sharedpackpath, shareddatastores, sharedhistorystores)) | |||
|
137 | ||||
|
138 | def _topacks(packpath, files, constructor): | |||
|
139 | paths = list(os.path.join(packpath, p) for p in files) | |||
|
140 | packs = list(constructor(p) for p in paths) | |||
|
141 | return packs | |||
|
142 | ||||
|
143 | def _deletebigpacks(repo, folder, files): | |||
|
144 | """Deletes packfiles that are bigger than ``packs.maxpacksize``. | |||
|
145 | ||||
|
146 | Returns ``files`` with the removed files omitted.""" | |||
|
147 | maxsize = repo.ui.configbytes("packs", "maxpacksize") | |||
|
148 | if maxsize <= 0: | |||
|
149 | return files | |||
|
150 | ||||
|
151 | # This only considers datapacks today, but we could broaden it to include | |||
|
152 | # historypacks. | |||
|
153 | VALIDEXTS = [".datapack", ".dataidx"] | |||
|
154 | ||||
|
155 | # Either an oversize index or datapack will trigger cleanup of the whole | |||
|
156 | # pack: | |||
|
157 | oversized = set([os.path.splitext(path)[0] for path, ftype, stat in files | |||
|
158 | if (stat.st_size > maxsize and (os.path.splitext(path)[1] | |||
|
159 | in VALIDEXTS))]) | |||
|
160 | ||||
|
161 | for rootfname in oversized: | |||
|
162 | rootpath = os.path.join(folder, rootfname) | |||
|
163 | for ext in VALIDEXTS: | |||
|
164 | path = rootpath + ext | |||
|
165 | repo.ui.debug('removing oversize packfile %s (%s)\n' % | |||
|
166 | (path, util.bytecount(os.stat(path).st_size))) | |||
|
167 | os.unlink(path) | |||
|
168 | return [row for row in files if os.path.basename(row[0]) not in oversized] | |||
|
169 | ||||
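The ``oversized`` comprehension above is dense: it strips the extension from every candidate, keeps the prefixes whose .datapack or .dataidx member exceeds the limit, and then unlinks both members of each condemned pair. A minimal standalone sketch of that grouping step (the Entry rows and sizes are invented for illustration; the real entries come from osutil.listdir):

    import os
    from collections import namedtuple

    # Invented stand-in for the (path, ftype, stat) rows osutil.listdir returns.
    Entry = namedtuple('Entry', 'path size')

    VALIDEXTS = ['.datapack', '.dataidx']
    maxsize = 100

    files = [Entry('aa.datapack', 250), Entry('aa.dataidx', 10),
             Entry('bb.datapack', 50), Entry('bb.dataidx', 5)]

    # Either member of a pack pair being oversized condemns the whole pack.
    oversized = set(os.path.splitext(e.path)[0] for e in files
                    if e.size > maxsize and os.path.splitext(e.path)[1] in VALIDEXTS)
    assert oversized == {'aa'}

    # Survivors are the rows whose pack prefix was not condemned.
    kept = [e for e in files if os.path.splitext(e.path)[0] not in oversized]
    assert [e.path for e in kept] == ['bb.datapack', 'bb.dataidx']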
|
170 | def _incrementalrepack(repo, datastore, historystore, packpath, category, | |||
|
171 | allowincompletedata=False, options=None): | |||
|
172 | shallowutil.mkstickygroupdir(repo.ui, packpath) | |||
|
173 | ||||
|
174 | files = osutil.listdir(packpath, stat=True) | |||
|
175 | files = _deletebigpacks(repo, packpath, files) | |||
|
176 | datapacks = _topacks(packpath, | |||
|
177 | _computeincrementaldatapack(repo.ui, files), | |||
|
178 | datapack.datapack) | |||
|
179 | datapacks.extend(s for s in datastore | |||
|
180 | if not isinstance(s, datapack.datapackstore)) | |||
|
181 | ||||
|
182 | historypacks = _topacks(packpath, | |||
|
183 | _computeincrementalhistorypack(repo.ui, files), | |||
|
184 | historypack.historypack) | |||
|
185 | historypacks.extend(s for s in historystore | |||
|
186 | if not isinstance(s, historypack.historypackstore)) | |||
|
187 | ||||
|
188 | # ``allhistory{files,packs}`` contains all known history packs, even ones we | |||
|
189 | # don't plan to repack. They are used during the datapack repack to ensure | |||
|
190 | # good ordering of nodes. | |||
|
191 | allhistoryfiles = _allpackfileswithsuffix(files, historypack.PACKSUFFIX, | |||
|
192 | historypack.INDEXSUFFIX) | |||
|
193 | allhistorypacks = _topacks(packpath, | |||
|
194 | (f for f, mode, stat in allhistoryfiles), | |||
|
195 | historypack.historypack) | |||
|
196 | allhistorypacks.extend(s for s in historystore | |||
|
197 | if not isinstance(s, historypack.historypackstore)) | |||
|
198 | _runrepack(repo, | |||
|
199 | contentstore.unioncontentstore( | |||
|
200 | *datapacks, | |||
|
201 | allowincomplete=allowincompletedata), | |||
|
202 | metadatastore.unionmetadatastore( | |||
|
203 | *historypacks, | |||
|
204 | allowincomplete=True), | |||
|
205 | packpath, category, | |||
|
206 | fullhistory=metadatastore.unionmetadatastore( | |||
|
207 | *allhistorypacks, | |||
|
208 | allowincomplete=True), | |||
|
209 | options=options) | |||
|
210 | ||||
|
211 | def _computeincrementaldatapack(ui, files): | |||
|
212 | opts = { | |||
|
213 | 'gencountlimit' : ui.configint( | |||
|
214 | 'remotefilelog', 'data.gencountlimit'), | |||
|
215 | 'generations' : ui.configlist( | |||
|
216 | 'remotefilelog', 'data.generations'), | |||
|
217 | 'maxrepackpacks' : ui.configint( | |||
|
218 | 'remotefilelog', 'data.maxrepackpacks'), | |||
|
219 | 'repackmaxpacksize' : ui.configbytes( | |||
|
220 | 'remotefilelog', 'data.repackmaxpacksize'), | |||
|
221 | 'repacksizelimit' : ui.configbytes( | |||
|
222 | 'remotefilelog', 'data.repacksizelimit'), | |||
|
223 | } | |||
|
224 | ||||
|
225 | packfiles = _allpackfileswithsuffix( | |||
|
226 | files, datapack.PACKSUFFIX, datapack.INDEXSUFFIX) | |||
|
227 | return _computeincrementalpack(packfiles, opts) | |||
|
228 | ||||
|
229 | def _computeincrementalhistorypack(ui, files): | |||
|
230 | opts = { | |||
|
231 | 'gencountlimit' : ui.configint( | |||
|
232 | 'remotefilelog', 'history.gencountlimit'), | |||
|
233 | 'generations' : ui.configlist( | |||
|
234 | 'remotefilelog', 'history.generations', ['100MB']), | |||
|
235 | 'maxrepackpacks' : ui.configint( | |||
|
236 | 'remotefilelog', 'history.maxrepackpacks'), | |||
|
237 | 'repackmaxpacksize' : ui.configbytes( | |||
|
238 | 'remotefilelog', 'history.repackmaxpacksize', '400MB'), | |||
|
239 | 'repacksizelimit' : ui.configbytes( | |||
|
240 | 'remotefilelog', 'history.repacksizelimit'), | |||
|
241 | } | |||
|
242 | ||||
|
243 | packfiles = _allpackfileswithsuffix( | |||
|
244 | files, historypack.PACKSUFFIX, historypack.INDEXSUFFIX) | |||
|
245 | return _computeincrementalpack(packfiles, opts) | |||
|
246 | ||||
|
247 | def _allpackfileswithsuffix(files, packsuffix, indexsuffix): | |||
|
248 | result = [] | |||
|
249 | fileset = set(fn for fn, mode, stat in files) | |||
|
250 | for filename, mode, stat in files: | |||
|
251 | if not filename.endswith(packsuffix): | |||
|
252 | continue | |||
|
253 | ||||
|
254 | prefix = filename[:-len(packsuffix)] | |||
|
255 | ||||
|
256 | # Don't process a pack if it doesn't have an index. | |||
|
257 | if (prefix + indexsuffix) not in fileset: | |||
|
258 | continue | |||
|
259 | result.append((prefix, mode, stat)) | |||
|
260 | ||||
|
261 | return result | |||
|
262 | ||||
|
263 | def _computeincrementalpack(files, opts): | |||
|
264 | """Given a set of pack files along with the configuration options, this | |||
|
265 | function computes the list of files that should be packed as part of an | |||
|
266 | incremental repack. | |||
|
267 | ||||
|
268 | It tries to strike a balance between keeping incremental repacks cheap (i.e. | |||
|
269 | packing small things when possible) and rolling the packs up to the big ones | |||
|
270 | over time. | |||
|
271 | """ | |||
|
272 | ||||
|
273 | limits = list(sorted((util.sizetoint(s) for s in opts['generations']), | |||
|
274 | reverse=True)) | |||
|
275 | limits.append(0) | |||
|
276 | ||||
|
277 | # Group the packs by generation (i.e. by size) | |||
|
278 | generations = [] | |||
|
279 | for i in pycompat.xrange(len(limits)): | |||
|
280 | generations.append([]) | |||
|
281 | ||||
|
282 | sizes = {} | |||
|
283 | for prefix, mode, stat in files: | |||
|
284 | size = stat.st_size | |||
|
285 | if size > opts['repackmaxpacksize']: | |||
|
286 | continue | |||
|
287 | ||||
|
288 | sizes[prefix] = size | |||
|
289 | for i, limit in enumerate(limits): | |||
|
290 | if size > limit: | |||
|
291 | generations[i].append(prefix) | |||
|
292 | break | |||
|
293 | ||||
|
294 | # Steps for picking what packs to repack: | |||
|
295 | # 1. Pick the largest generation with > gencountlimit pack files. | |||
|
296 | # 2. Take the smallest three packs. | |||
|
297 | # 3. While total-size-of-packs < repacksizelimit: add another pack | |||
|
298 | ||||
|
299 | # Find the largest generation with more than gencountlimit packs | |||
|
300 | genpacks = [] | |||
|
301 | for i, limit in enumerate(limits): | |||
|
302 | if len(generations[i]) > opts['gencountlimit']: | |||
|
303 | # Sort to be smallest last, for easy popping later | |||
|
304 | genpacks.extend(sorted(generations[i], reverse=True, | |||
|
305 | key=lambda x: sizes[x])) | |||
|
306 | break | |||
|
307 | ||||
|
308 | # Take as many packs from the generation as we can | |||
|
309 | chosenpacks = genpacks[-3:] | |||
|
310 | genpacks = genpacks[:-3] | |||
|
311 | repacksize = sum(sizes[n] for n in chosenpacks) | |||
|
312 | while (repacksize < opts['repacksizelimit'] and genpacks and | |||
|
313 | len(chosenpacks) < opts['maxrepackpacks']): | |||
|
314 | chosenpacks.append(genpacks.pop()) | |||
|
315 | repacksize += sizes[chosenpacks[-1]] | |||
|
316 | ||||
|
317 | return chosenpacks | |||
|
318 | ||||
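Read literally, steps 1-3 above amount to a small pure function. Here is a hedged, self-contained restatement with invented sizes and limits (the real values come from the remotefilelog data.*/history.* config knobs read a few functions up):

    def choose_packs(sizes, generation_limits, gencountlimit,
                     repacksizelimit, maxrepackpacks):
        """Pick pack prefixes to repack, mirroring the commented steps."""
        limits = sorted(generation_limits, reverse=True) + [0]
        # Bucket packs by generation: a pack lands in the first bucket
        # whose size floor it exceeds.
        buckets = [[] for _ in limits]
        for prefix, size in sizes.items():
            for i, limit in enumerate(limits):
                if size > limit:
                    buckets[i].append(prefix)
                    break
        # Step 1: largest generation with more than gencountlimit packs,
        # sorted so the smallest packs sit at the end for easy popping.
        genpacks = []
        for bucket in buckets:
            if len(bucket) > gencountlimit:
                genpacks = sorted(bucket, reverse=True, key=lambda p: sizes[p])
                break
        # Step 2: start from the three smallest packs in that generation.
        chosen = genpacks[-3:]
        genpacks = genpacks[:-3]
        # Step 3: keep adding small packs while under the size/count budget.
        total = sum(sizes[p] for p in chosen)
        while total < repacksizelimit and genpacks and len(chosen) < maxrepackpacks:
            chosen.append(genpacks.pop())
            total += sizes[chosen[-1]]
        return chosen

    # Four small packs trip the (invented) gencountlimit of 3, so they get
    # rolled together; the big 900-byte pack is left alone.
    sizes = {'p1': 900, 'p2': 40, 'p3': 30, 'p4': 20, 'p5': 10}
    assert choose_packs(sizes, [100], 3, 1000, 10) == ['p3', 'p4', 'p5', 'p2']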
|
319 | def _runrepack(repo, data, history, packpath, category, fullhistory=None, | |||
|
320 | options=None): | |||
|
321 | shallowutil.mkstickygroupdir(repo.ui, packpath) | |||
|
322 | ||||
|
323 | def isold(repo, filename, node): | |||
|
324 | """Check if the file node is older than a limit. | |||
|
325 | Unless a limit is specified in the config the default limit is taken. | |||
|
326 | """ | |||
|
327 | filectx = repo.filectx(filename, fileid=node) | |||
|
328 | filetime = repo[filectx.linkrev()].date() | |||
|
329 | ||||
|
330 | ttl = repo.ui.configint('remotefilelog', 'nodettl') | |||
|
331 | ||||
|
332 | limit = time.time() - ttl | |||
|
333 | return filetime[0] < limit | |||
|
334 | ||||
|
335 | garbagecollect = repo.ui.configbool('remotefilelog', 'gcrepack') | |||
|
336 | if not fullhistory: | |||
|
337 | fullhistory = history | |||
|
338 | packer = repacker(repo, data, history, fullhistory, category, | |||
|
339 | gc=garbagecollect, isold=isold, options=options) | |||
|
340 | ||||
|
341 | with datapack.mutabledatapack(repo.ui, packpath, version=2) as dpack: | |||
|
342 | with historypack.mutablehistorypack(repo.ui, packpath) as hpack: | |||
|
343 | try: | |||
|
344 | packer.run(dpack, hpack) | |||
|
345 | except error.LockHeld: | |||
|
346 | raise RepackAlreadyRunning(_("skipping repack - another repack " | |||
|
347 | "is already running")) | |||
|
348 | ||||
|
349 | def keepset(repo, keyfn, lastkeepkeys=None): | |||
|
350 | """Computes a keepset which is not garbage collected. | |||
|
351 | 'keyfn' is a function that maps filename, node to a unique key. | |||
|
352 | 'lastkeepkeys' is an optional argument and if provided the keepset | |||
|
353 | function updates lastkeepkeys with more keys and returns the result. | |||
|
354 | """ | |||
|
355 | if not lastkeepkeys: | |||
|
356 | keepkeys = set() | |||
|
357 | else: | |||
|
358 | keepkeys = lastkeepkeys | |||
|
359 | ||||
|
360 | # We want to keep: | |||
|
361 | # 1. Working copy parent | |||
|
362 | # 2. Draft commits | |||
|
363 | # 3. Parents of draft commits | |||
|
364 | # 4. Pullprefetch and bgprefetchrevs revsets if specified | |||
|
365 | revs = ['.', 'draft()', 'parents(draft())'] | |||
|
366 | prefetchrevs = repo.ui.config('remotefilelog', 'pullprefetch', None) | |||
|
367 | if prefetchrevs: | |||
|
368 | revs.append('(%s)' % prefetchrevs) | |||
|
369 | prefetchrevs = repo.ui.config('remotefilelog', 'bgprefetchrevs', None) | |||
|
370 | if prefetchrevs: | |||
|
371 | revs.append('(%s)' % prefetchrevs) | |||
|
372 | revs = '+'.join(revs) | |||
|
373 | ||||
|
374 | revs = ['sort((%s), "topo")' % revs] | |||
|
375 | keep = scmutil.revrange(repo, revs) | |||
|
376 | ||||
|
377 | processed = set() | |||
|
378 | lastmanifest = None | |||
|
379 | ||||
|
380 | # process the commits in toposorted order starting from the oldest | |||
|
381 | for r in reversed(keep._list): | |||
|
382 | if repo[r].p1().rev() in processed: | |||
|
383 | # if the direct parent has already been processed | |||
|
384 | # then we only need to process the delta | |||
|
385 | m = repo[r].manifestctx().readdelta() | |||
|
386 | else: | |||
|
387 | # otherwise take the manifest and diff it | |||
|
388 | # with the previous manifest if one exists | |||
|
389 | if lastmanifest: | |||
|
390 | m = repo[r].manifest().diff(lastmanifest) | |||
|
391 | else: | |||
|
392 | m = repo[r].manifest() | |||
|
393 | lastmanifest = repo[r].manifest() | |||
|
394 | processed.add(r) | |||
|
395 | ||||
|
396 | # populate keepkeys with keys from the current manifest | |||
|
397 | if type(m) is dict: | |||
|
398 | # m is a result of diff of two manifests and is a dictionary that | |||
|
399 | # maps filename to ((newnode, newflag), (oldnode, oldflag)) tuple | |||
|
400 | for filename, diff in m.iteritems(): | |||
|
401 | if diff[0][0] is not None: | |||
|
402 | keepkeys.add(keyfn(filename, diff[0][0])) | |||
|
403 | else: | |||
|
404 | # m is a manifest object | |||
|
405 | for filename, filenode in m.iteritems(): | |||
|
406 | keepkeys.add(keyfn(filename, filenode)) | |||
|
407 | ||||
|
408 | return keepkeys | |||
|
409 | ||||
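The two branches of the ``type(m) is dict`` check above handle a manifest diff and a full manifest respectively. A toy sketch of the key collection, with plain dicts standing in for manifest objects and keyfn mirroring the ``lambda f, n: (f, n)`` that repacker passes in:

    # Toy stand-ins: a "manifest" is {filename: filenode}; a "diff" maps
    # filename -> ((newnode, newflag), (oldnode, oldflag)).
    def keys_from_manifest(m, keyfn):
        return set(keyfn(f, n) for f, n in m.items())

    def keys_from_diff(diff, keyfn):
        # Only the new side contributes keys; deletions (newnode None) skip.
        return set(keyfn(f, d[0][0]) for f, d in diff.items()
                   if d[0][0] is not None)

    keyfn = lambda f, n: (f, n)
    base = {'a.txt': 'n1', 'b.txt': 'n2'}
    child_diff = {'a.txt': (('n3', ''), ('n1', '')),   # a.txt changed
                  'b.txt': ((None, ''), ('n2', ''))}   # b.txt deleted

    keepkeys = keys_from_manifest(base, keyfn)
    keepkeys |= keys_from_diff(child_diff, keyfn)
    assert keepkeys == {('a.txt', 'n1'), ('b.txt', 'n2'), ('a.txt', 'n3')}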
|
410 | class repacker(object): | |||
|
411 | """Class for orchestrating the repack of data and history information into a | |||
|
412 | new format. | |||
|
413 | """ | |||
|
414 | def __init__(self, repo, data, history, fullhistory, category, gc=False, | |||
|
415 | isold=None, options=None): | |||
|
416 | self.repo = repo | |||
|
417 | self.data = data | |||
|
418 | self.history = history | |||
|
419 | self.fullhistory = fullhistory | |||
|
420 | self.unit = constants.getunits(category) | |||
|
421 | self.garbagecollect = gc | |||
|
422 | self.options = options | |||
|
423 | if self.garbagecollect: | |||
|
424 | if not isold: | |||
|
425 | raise ValueError("Function 'isold' is not properly specified") | |||
|
426 | # use (filename, node) tuple as a keepset key | |||
|
427 | self.keepkeys = keepset(repo, lambda f, n : (f, n)) | |||
|
428 | self.isold = isold | |||
|
429 | ||||
|
430 | def run(self, targetdata, targethistory): | |||
|
431 | ledger = repackledger() | |||
|
432 | ||||
|
433 | with extutil.flock(repacklockvfs(self.repo).join("repacklock"), | |||
|
434 | _('repacking %s') % self.repo.origroot, timeout=0): | |||
|
435 | self.repo.hook('prerepack') | |||
|
436 | ||||
|
437 | # Populate ledger from source | |||
|
438 | self.data.markledger(ledger, options=self.options) | |||
|
439 | self.history.markledger(ledger, options=self.options) | |||
|
440 | ||||
|
441 | # Run repack | |||
|
442 | self.repackdata(ledger, targetdata) | |||
|
443 | self.repackhistory(ledger, targethistory) | |||
|
444 | ||||
|
445 | # Call cleanup on each source | |||
|
446 | for source in ledger.sources: | |||
|
447 | source.cleanup(ledger) | |||
|
448 | ||||
|
449 | def _chainorphans(self, ui, filename, nodes, orphans, deltabases): | |||
|
450 | """Reorderes ``orphans`` into a single chain inside ``nodes`` and | |||
|
451 | ``deltabases``. | |||
|
452 | ||||
|
453 | We often have orphan entries (nodes without a base that aren't | |||
|
454 | referenced by other nodes -- i.e., part of a chain) due to gaps in | |||
|
455 | history. Rather than store them as individual fulltexts, we prefer to | |||
|
456 | insert them as one chain sorted by size. | |||
|
457 | """ | |||
|
458 | if not orphans: | |||
|
459 | return nodes | |||
|
460 | ||||
|
461 | def getsize(node, default=0): | |||
|
462 | meta = self.data.getmeta(filename, node) | |||
|
463 | if constants.METAKEYSIZE in meta: | |||
|
464 | return meta[constants.METAKEYSIZE] | |||
|
465 | else: | |||
|
466 | return default | |||
|
467 | ||||
|
468 | # Sort orphans by size; biggest first is preferred, since it's more | |||
|
469 | # likely to be the newest version assuming files grow over time. | |||
|
470 | # (Sort by node first to ensure the sort is stable.) | |||
|
471 | orphans = sorted(orphans) | |||
|
472 | orphans = list(sorted(orphans, key=getsize, reverse=True)) | |||
|
473 | if ui.debugflag: | |||
|
474 | ui.debug("%s: orphan chain: %s\n" % (filename, | |||
|
475 | ", ".join([short(s) for s in orphans]))) | |||
|
476 | ||||
|
477 | # Create one contiguous chain and reassign deltabases. | |||
|
478 | for i, node in enumerate(orphans): | |||
|
479 | if i == 0: | |||
|
480 | deltabases[node] = (nullid, 0) | |||
|
481 | else: | |||
|
482 | parent = orphans[i - 1] | |||
|
483 | deltabases[node] = (parent, deltabases[parent][1] + 1) | |||
|
484 | nodes = [n for n in nodes if n not in orphans] | |||
|
485 | nodes += orphans | |||
|
486 | return nodes | |||
|
487 | ||||
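In miniature, the chaining above sorts the orphans biggest-first and threads each one onto its predecessor. A self-contained sketch with invented nodes and sizes:

    nullid = b'\0' * 20  # the same sentinel mercurial.node.nullid provides

    # Invented 20-byte orphan nodes with invented fulltext sizes.
    sizes = {b'n1' * 10: 10, b'n2' * 10: 300, b'n3' * 10: 40}
    orphans = sorted(sizes)                                  # stable pre-sort by node
    orphans = sorted(orphans, key=sizes.get, reverse=True)   # then biggest first

    deltabases = {}
    for i, node in enumerate(orphans):
        if i == 0:
            deltabases[node] = (nullid, 0)   # chain head is stored as a fulltext
        else:
            parent = orphans[i - 1]
            deltabases[node] = (parent, deltabases[parent][1] + 1)

    # The 300-byte orphan heads the chain; the rest delta off it in turn.
    assert [sizes[n] for n in orphans] == [300, 40, 10]
    assert deltabases[orphans[1]][1] == 1 and deltabases[orphans[2]][1] == 2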
|
488 | def repackdata(self, ledger, target): | |||
|
489 | ui = self.repo.ui | |||
|
490 | maxchainlen = ui.configint('packs', 'maxchainlen', 1000) | |||
|
491 | ||||
|
492 | byfile = {} | |||
|
493 | for entry in ledger.entries.itervalues(): | |||
|
494 | if entry.datasource: | |||
|
495 | byfile.setdefault(entry.filename, {})[entry.node] = entry | |||
|
496 | ||||
|
497 | count = 0 | |||
|
498 | repackprogress = ui.makeprogress(_("repacking data"), unit=self.unit, | |||
|
499 | total=len(byfile)) | |||
|
500 | for filename, entries in sorted(byfile.iteritems()): | |||
|
501 | repackprogress.update(count) | |||
|
502 | ||||
|
503 | ancestors = {} | |||
|
504 | nodes = list(node for node in entries) | |||
|
505 | nohistory = [] | |||
|
506 | buildprogress = ui.makeprogress(_("building history"), unit='nodes', | |||
|
507 | total=len(nodes)) | |||
|
508 | for i, node in enumerate(nodes): | |||
|
509 | if node in ancestors: | |||
|
510 | continue | |||
|
511 | buildprogress.update(i) | |||
|
512 | try: | |||
|
513 | ancestors.update(self.fullhistory.getancestors(filename, | |||
|
514 | node, known=ancestors)) | |||
|
515 | except KeyError: | |||
|
516 | # Since we're packing data entries, we may not have the | |||
|
517 | # corresponding history entries for them. It's not a big | |||
|
518 | # deal, but the entries won't be delta'd perfectly. | |||
|
519 | nohistory.append(node) | |||
|
520 | buildprogress.complete() | |||
|
521 | ||||
|
522 | # Order the nodes children first, so we can produce reverse deltas | |||
|
523 | orderednodes = list(reversed(self._toposort(ancestors))) | |||
|
524 | if len(nohistory) > 0: | |||
|
525 | ui.debug('repackdata: %d nodes without history\n' % | |||
|
526 | len(nohistory)) | |||
|
527 | orderednodes.extend(sorted(nohistory)) | |||
|
528 | ||||
|
529 | # Filter orderednodes to just the nodes we want to serialize (it | |||
|
530 | # currently also has the edge nodes' ancestors). | |||
|
531 | orderednodes = list(filter(lambda node: node in nodes, | |||
|
532 | orderednodes)) | |||
|
533 | ||||
|
534 | # Garbage collect old nodes: | |||
|
535 | if self.garbagecollect: | |||
|
536 | neworderednodes = [] | |||
|
537 | for node in orderednodes: | |||
|
538 | # If the node is old and is not in the keepset, we skip it, | |||
|
539 | # and mark as garbage collected | |||
|
540 | if ((filename, node) not in self.keepkeys and | |||
|
541 | self.isold(self.repo, filename, node)): | |||
|
542 | entries[node].gced = True | |||
|
543 | continue | |||
|
544 | neworderednodes.append(node) | |||
|
545 | orderednodes = neworderednodes | |||
|
546 | ||||
|
547 | # Compute delta bases for nodes: | |||
|
548 | deltabases = {} | |||
|
549 | nobase = set() | |||
|
550 | referenced = set() | |||
|
551 | nodes = set(nodes) | |||
|
552 | processprogress = ui.makeprogress(_("processing nodes"), | |||
|
553 | unit='nodes', | |||
|
554 | total=len(orderednodes)) | |||
|
555 | for i, node in enumerate(orderednodes): | |||
|
556 | processprogress.update(i) | |||
|
557 | # Find delta base | |||
|
558 | # TODO: allow delta'ing against most recent descendant instead | |||
|
559 | # of immediate child | |||
|
560 | deltatuple = deltabases.get(node, None) | |||
|
561 | if deltatuple is None: | |||
|
562 | deltabase, chainlen = nullid, 0 | |||
|
563 | deltabases[node] = (nullid, 0) | |||
|
564 | nobase.add(node) | |||
|
565 | else: | |||
|
566 | deltabase, chainlen = deltatuple | |||
|
567 | referenced.add(deltabase) | |||
|
568 | ||||
|
569 | # Use available ancestor information to inform our delta choices | |||
|
570 | ancestorinfo = ancestors.get(node) | |||
|
571 | if ancestorinfo: | |||
|
572 | p1, p2, linknode, copyfrom = ancestorinfo | |||
|
573 | ||||
|
574 | # The presence of copyfrom means we're at a point where the | |||
|
575 | # file was copied from elsewhere. So don't attempt to do any | |||
|
576 | # deltas with the other file. | |||
|
577 | if copyfrom: | |||
|
578 | p1 = nullid | |||
|
579 | ||||
|
580 | if chainlen < maxchainlen: | |||
|
581 | # Record this child as the delta base for its parents. | |||
|
583 | # This may be non-optimal, since the parents may have | |||
|
583 | # many children, and this will only choose the last one. | |||
|
584 | # TODO: record all children and try all deltas to find | |||
|
585 | # best | |||
|
586 | if p1 != nullid: | |||
|
587 | deltabases[p1] = (node, chainlen + 1) | |||
|
588 | if p2 != nullid: | |||
|
589 | deltabases[p2] = (node, chainlen + 1) | |||
|
590 | ||||
|
591 | # experimental config: repack.chainorphansbysize | |||
|
592 | if ui.configbool('repack', 'chainorphansbysize'): | |||
|
593 | orphans = nobase - referenced | |||
|
594 | orderednodes = self._chainorphans(ui, filename, orderednodes, | |||
|
595 | orphans, deltabases) | |||
|
596 | ||||
|
597 | # Compute deltas and write to the pack | |||
|
598 | for i, node in enumerate(orderednodes): | |||
|
599 | deltabase, chainlen = deltabases[node] | |||
|
600 | # Compute delta | |||
|
601 | # TODO: Optimize the deltachain fetching. Since we're | |||
|
602 | # iterating over the different version of the file, we may | |||
|
603 | # be fetching the same deltachain over and over again. | |||
|
604 | meta = None | |||
|
605 | if deltabase != nullid: | |||
|
606 | deltaentry = self.data.getdelta(filename, node) | |||
|
607 | delta, deltabasename, origdeltabase, meta = deltaentry | |||
|
608 | size = meta.get(constants.METAKEYSIZE) | |||
|
609 | if (deltabasename != filename or origdeltabase != deltabase | |||
|
610 | or size is None): | |||
|
611 | deltabasetext = self.data.get(filename, deltabase) | |||
|
612 | original = self.data.get(filename, node) | |||
|
613 | size = len(original) | |||
|
614 | delta = mdiff.textdiff(deltabasetext, original) | |||
|
615 | else: | |||
|
616 | delta = self.data.get(filename, node) | |||
|
617 | size = len(delta) | |||
|
618 | meta = self.data.getmeta(filename, node) | |||
|
619 | ||||
|
620 | # TODO: don't use the delta if it's larger than the fulltext | |||
|
621 | if constants.METAKEYSIZE not in meta: | |||
|
622 | meta[constants.METAKEYSIZE] = size | |||
|
623 | target.add(filename, node, deltabase, delta, meta) | |||
|
624 | ||||
|
625 | entries[node].datarepacked = True | |||
|
626 | ||||
|
627 | processprogress.complete() | |||
|
628 | count += 1 | |||
|
629 | ||||
|
630 | repackprogress.complete() | |||
|
631 | target.close(ledger=ledger) | |||
|
632 | ||||
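Because ``orderednodes`` is children-first, a node's recorded delta base is always a child that has already been written, so the pack ends up holding reverse deltas. The bookkeeping in miniature, over an invented three-node chain (root <- mid <- tip):

    nullid = b'\0' * 20

    # Invented linear history, child -> parent.
    parents = {b'tip': b'mid', b'mid': b'root', b'root': nullid}
    orderednodes = [b'tip', b'mid', b'root']   # children first

    deltabases = {}
    for node in orderednodes:
        base, chainlen = deltabases.get(node, (nullid, 0))
        # Record this node as the delta base for its parent, as repackdata does.
        p = parents[node]
        if p != nullid:
            deltabases[p] = (node, chainlen + 1)

    # tip is stored as a fulltext; mid deltas against tip; root against mid.
    assert deltabases.get(b'tip', (nullid, 0))[0] == nullid
    assert deltabases[b'mid'] == (b'tip', 1)
    assert deltabases[b'root'] == (b'mid', 2)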
|
633 | def repackhistory(self, ledger, target): | |||
|
634 | ui = self.repo.ui | |||
|
635 | ||||
|
636 | byfile = {} | |||
|
637 | for entry in ledger.entries.itervalues(): | |||
|
638 | if entry.historysource: | |||
|
639 | byfile.setdefault(entry.filename, {})[entry.node] = entry | |||
|
640 | ||||
|
641 | progress = ui.makeprogress(_("repacking history"), unit=self.unit, | |||
|
642 | total=len(byfile)) | |||
|
643 | for filename, entries in sorted(byfile.iteritems()): | |||
|
644 | ancestors = {} | |||
|
645 | nodes = list(node for node in entries) | |||
|
646 | ||||
|
647 | for node in nodes: | |||
|
648 | if node in ancestors: | |||
|
649 | continue | |||
|
650 | ancestors.update(self.history.getancestors(filename, node, | |||
|
651 | known=ancestors)) | |||
|
652 | ||||
|
653 | # Order the nodes children first | |||
|
654 | orderednodes = reversed(self._toposort(ancestors)) | |||
|
655 | ||||
|
656 | # Write to the pack | |||
|
657 | dontprocess = set() | |||
|
658 | for node in orderednodes: | |||
|
659 | p1, p2, linknode, copyfrom = ancestors[node] | |||
|
660 | ||||
|
661 | # If the node is marked dontprocess, but it's also in the | |||
|
662 | # explicit entries set, that means the node exists both in this | |||
|
663 | # file and in another file that was copied to this file. | |||
|
664 | # Usually this happens if the file was copied to another file, | |||
|
665 | # then the copy was deleted, then reintroduced without copy | |||
|
666 | # metadata. The original add and the new add have the same hash | |||
|
667 | # since the content is identical and the parents are null. | |||
|
668 | if node in dontprocess and node not in entries: | |||
|
669 | # If copyfrom == filename, it means the copy history | |||
|
670 | # went to some other file, then came back to this one, so we | |||
|
671 | # should continue processing it. | |||
|
672 | if p1 != nullid and copyfrom != filename: | |||
|
673 | dontprocess.add(p1) | |||
|
674 | if p2 != nullid: | |||
|
675 | dontprocess.add(p2) | |||
|
676 | continue | |||
|
677 | ||||
|
678 | if copyfrom: | |||
|
679 | dontprocess.add(p1) | |||
|
680 | ||||
|
681 | target.add(filename, node, p1, p2, linknode, copyfrom) | |||
|
682 | ||||
|
683 | if node in entries: | |||
|
684 | entries[node].historyrepacked = True | |||
|
685 | ||||
|
686 | progress.increment() | |||
|
687 | ||||
|
688 | progress.complete() | |||
|
689 | target.close(ledger=ledger) | |||
|
690 | ||||
|
691 | def _toposort(self, ancestors): | |||
|
692 | def parentfunc(node): | |||
|
693 | p1, p2, linknode, copyfrom = ancestors[node] | |||
|
694 | parents = [] | |||
|
695 | if p1 != nullid: | |||
|
696 | parents.append(p1) | |||
|
697 | if p2 != nullid: | |||
|
698 | parents.append(p2) | |||
|
699 | return parents | |||
|
700 | ||||
|
701 | sortednodes = shallowutil.sortnodes(ancestors.keys(), parentfunc) | |||
|
702 | return sortednodes | |||
|
703 | ||||
|
704 | class repackledger(object): | |||
|
705 | """Storage for all the bookkeeping that happens during a repack. It contains | |||
|
706 | the list of revisions being repacked, what happened to each revision, and | |||
|
707 | which source store contained which revision originally (for later cleanup). | |||
|
708 | """ | |||
|
709 | def __init__(self): | |||
|
710 | self.entries = {} | |||
|
711 | self.sources = {} | |||
|
712 | self.created = set() | |||
|
713 | ||||
|
714 | def markdataentry(self, source, filename, node): | |||
|
715 | """Mark the given filename+node revision as having a data rev in the | |||
|
716 | given source. | |||
|
717 | """ | |||
|
718 | entry = self._getorcreateentry(filename, node) | |||
|
719 | entry.datasource = True | |||
|
720 | entries = self.sources.get(source) | |||
|
721 | if not entries: | |||
|
722 | entries = set() | |||
|
723 | self.sources[source] = entries | |||
|
724 | entries.add(entry) | |||
|
725 | ||||
|
726 | def markhistoryentry(self, source, filename, node): | |||
|
727 | """Mark the given filename+node revision as having a history rev in the | |||
|
728 | given source. | |||
|
729 | """ | |||
|
730 | entry = self._getorcreateentry(filename, node) | |||
|
731 | entry.historysource = True | |||
|
732 | entries = self.sources.get(source) | |||
|
733 | if not entries: | |||
|
734 | entries = set() | |||
|
735 | self.sources[source] = entries | |||
|
736 | entries.add(entry) | |||
|
737 | ||||
|
738 | def _getorcreateentry(self, filename, node): | |||
|
739 | key = (filename, node) | |||
|
740 | value = self.entries.get(key) | |||
|
741 | if not value: | |||
|
742 | value = repackentry(filename, node) | |||
|
743 | self.entries[key] = value | |||
|
744 | ||||
|
745 | return value | |||
|
746 | ||||
|
747 | def addcreated(self, value): | |||
|
748 | self.created.add(value) | |||
|
749 | ||||
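For orientation, a small usage sketch of the ledger API defined above. The import path assumes the remotefilelog package as vendored by this change, and the store is a bare stand-in object; this only runs inside a Mercurial environment that includes this code:

    from hgext.remotefilelog.repack import repackledger

    ledger = repackledger()
    fakestore = object()            # stand-in for a real data/history store
    node = b'\x01' * 20             # invented filenode

    ledger.markdataentry(fakestore, 'a.txt', node)
    ledger.markhistoryentry(fakestore, 'a.txt', node)

    entry = ledger.entries[('a.txt', node)]
    assert entry.datasource and entry.historysource
    assert ledger.sources[fakestore] == {entry}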
|
750 | class repackentry(object): | |||
|
751 | """Simple class representing a single revision entry in the repackledger. | |||
|
752 | """ | |||
|
753 | __slots__ = (r'filename', r'node', r'datasource', r'historysource', | |||
|
754 | r'datarepacked', r'historyrepacked', r'gced') | |||
|
755 | def __init__(self, filename, node): | |||
|
756 | self.filename = filename | |||
|
757 | self.node = node | |||
|
758 | # If the revision has a data entry in the source | |||
|
759 | self.datasource = False | |||
|
760 | # If the revision has a history entry in the source | |||
|
761 | self.historysource = False | |||
|
762 | # If the revision's data entry was repacked into the repack target | |||
|
763 | self.datarepacked = False | |||
|
764 | # If the revision's history entry was repacked into the repack target | |||
|
765 | self.historyrepacked = False | |||
|
766 | # If garbage collected | |||
|
767 | self.gced = False | |||
|
768 | ||||
|
769 | def repacklockvfs(repo): | |||
|
770 | if util.safehasattr(repo, 'name'): | |||
|
771 | # Lock in the shared cache so repacks across multiple copies of the same | |||
|
772 | # repo are coordinated. | |||
|
773 | sharedcachepath = shallowutil.getcachepackpath( | |||
|
774 | repo, | |||
|
775 | constants.FILEPACK_CATEGORY) | |||
|
776 | return vfs.vfs(sharedcachepath) | |||
|
777 | else: | |||
|
778 | return repo.svfs |
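repacklockvfs above picks where the lock lives (the shared cache for shared stores, the store vfs otherwise); run() then takes it with timeout=0 so a concurrent repack aborts instead of queueing. A POSIX-only sketch of that take-or-fail pattern, with fcntl.flock standing in for extutil.flock:

    import errno
    import fcntl
    import os
    import tempfile

    def try_repack_lock(lockpath):
        """Take a non-blocking exclusive lock, mirroring flock(..., timeout=0)."""
        fd = os.open(lockpath, os.O_CREAT | os.O_RDWR)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            return fd                # caller holds the lock; close it to release
        except OSError as err:
            os.close(fd)
            if err.errno in (errno.EAGAIN, errno.EACCES):
                return None          # another repack already holds the lock
            raise

    lockpath = os.path.join(tempfile.mkdtemp(), 'repacklock')
    fd = try_repack_lock(lockpath)
    assert fd is not None
    # A second taker loses immediately: that is what RepackAlreadyRunning models.
    assert try_repack_lock(lockpath) is None
    os.close(fd)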
@@ -0,0 +1,293 b'' | |||||
|
1 | # shallowbundle.py - bundle10 implementation for use with shallow repositories | |||
|
2 | # | |||
|
3 | # Copyright 2013 Facebook, Inc. | |||
|
4 | # | |||
|
5 | # This software may be used and distributed according to the terms of the | |||
|
6 | # GNU General Public License version 2 or any later version. | |||
|
7 | from __future__ import absolute_import | |||
|
8 | ||||
|
9 | from mercurial.i18n import _ | |||
|
10 | from mercurial.node import bin, hex, nullid | |||
|
11 | from mercurial import ( | |||
|
12 | bundlerepo, | |||
|
13 | changegroup, | |||
|
14 | error, | |||
|
15 | match, | |||
|
16 | mdiff, | |||
|
17 | pycompat, | |||
|
18 | ) | |||
|
19 | from . import ( | |||
|
20 | constants, | |||
|
21 | remotefilelog, | |||
|
22 | shallowutil, | |||
|
23 | ) | |||
|
24 | ||||
|
25 | NoFiles = 0 | |||
|
26 | LocalFiles = 1 | |||
|
27 | AllFiles = 2 | |||
|
28 | ||||
|
29 | def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None): | |||
|
30 | if not isinstance(rlog, remotefilelog.remotefilelog): | |||
|
31 | for c in super(cls, self).group(nodelist, rlog, lookup, | |||
|
32 | units=units): | |||
|
33 | yield c | |||
|
34 | return | |||
|
35 | ||||
|
36 | if len(nodelist) == 0: | |||
|
37 | yield self.close() | |||
|
38 | return | |||
|
39 | ||||
|
40 | nodelist = shallowutil.sortnodes(nodelist, rlog.parents) | |||
|
41 | ||||
|
42 | # add the parent of the first rev | |||
|
43 | p = rlog.parents(nodelist[0])[0] | |||
|
44 | nodelist.insert(0, p) | |||
|
45 | ||||
|
46 | # build deltas | |||
|
47 | for i in pycompat.xrange(len(nodelist) - 1): | |||
|
48 | prev, curr = nodelist[i], nodelist[i + 1] | |||
|
49 | linknode = lookup(curr) | |||
|
50 | for c in self.nodechunk(rlog, curr, prev, linknode): | |||
|
51 | yield c | |||
|
52 | ||||
|
53 | yield self.close() | |||
|
54 | ||||
|
55 | class shallowcg1packer(changegroup.cgpacker): | |||
|
56 | def generate(self, commonrevs, clnodes, fastpathlinkrev, source): | |||
|
57 | if shallowutil.isenabled(self._repo): | |||
|
58 | fastpathlinkrev = False | |||
|
59 | ||||
|
60 | return super(shallowcg1packer, self).generate(commonrevs, clnodes, | |||
|
61 | fastpathlinkrev, source) | |||
|
62 | ||||
|
63 | def group(self, nodelist, rlog, lookup, units=None, reorder=None): | |||
|
64 | return shallowgroup(shallowcg1packer, self, nodelist, rlog, lookup, | |||
|
65 | units=units) | |||
|
66 | ||||
|
67 | def generatefiles(self, changedfiles, *args): | |||
|
68 | try: | |||
|
69 | linknodes, commonrevs, source = args | |||
|
70 | except ValueError: | |||
|
71 | commonrevs, source, mfdicts, fastpathlinkrev, fnodes, clrevs = args | |||
|
72 | if shallowutil.isenabled(self._repo): | |||
|
73 | repo = self._repo | |||
|
74 | if isinstance(repo, bundlerepo.bundlerepository): | |||
|
75 | # If the bundle contains filelogs, we can't pull from it, since | |||
|
76 | # bundlerepo is heavily tied to revlogs. Instead require that | |||
|
77 | # the user use unbundle instead. | |||
|
78 | # Force load the filelog data. | |||
|
79 | bundlerepo.bundlerepository.file(repo, 'foo') | |||
|
80 | if repo._cgfilespos: | |||
|
81 | raise error.Abort("cannot pull from full bundles", | |||
|
82 | hint="use `hg unbundle` instead") | |||
|
83 | return [] | |||
|
84 | filestosend = self.shouldaddfilegroups(source) | |||
|
85 | if filestosend == NoFiles: | |||
|
86 | changedfiles = list([f for f in changedfiles | |||
|
87 | if not repo.shallowmatch(f)]) | |||
|
88 | ||||
|
89 | return super(shallowcg1packer, self).generatefiles( | |||
|
90 | changedfiles, *args) | |||
|
91 | ||||
|
92 | def shouldaddfilegroups(self, source): | |||
|
93 | repo = self._repo | |||
|
94 | if not shallowutil.isenabled(repo): | |||
|
95 | return AllFiles | |||
|
96 | ||||
|
97 | if source == "push" or source == "bundle": | |||
|
98 | return AllFiles | |||
|
99 | ||||
|
100 | caps = self._bundlecaps or [] | |||
|
101 | if source == "serve" or source == "pull": | |||
|
102 | if constants.BUNDLE2_CAPABLITY in caps: | |||
|
103 | return LocalFiles | |||
|
104 | else: | |||
|
105 | # Serving to a full repo requires us to serve everything | |||
|
106 | repo.ui.warn(_("pulling from a shallow repo\n")) | |||
|
107 | return AllFiles | |||
|
108 | ||||
|
109 | return NoFiles | |||
|
110 | ||||
|
111 | def prune(self, rlog, missing, commonrevs): | |||
|
112 | if not isinstance(rlog, remotefilelog.remotefilelog): | |||
|
113 | return super(shallowcg1packer, self).prune(rlog, missing, | |||
|
114 | commonrevs) | |||
|
115 | ||||
|
116 | repo = self._repo | |||
|
117 | results = [] | |||
|
118 | for fnode in missing: | |||
|
119 | fctx = repo.filectx(rlog.filename, fileid=fnode) | |||
|
120 | if fctx.linkrev() not in commonrevs: | |||
|
121 | results.append(fnode) | |||
|
122 | return results | |||
|
123 | ||||
|
124 | def nodechunk(self, revlog, node, prevnode, linknode): | |||
|
125 | prefix = '' | |||
|
126 | if prevnode == nullid: | |||
|
127 | delta = revlog.revision(node, raw=True) | |||
|
128 | prefix = mdiff.trivialdiffheader(len(delta)) | |||
|
129 | else: | |||
|
130 | # Actually uses remotefilelog.revdiff which works on nodes, not revs | |||
|
131 | delta = revlog.revdiff(prevnode, node) | |||
|
132 | p1, p2 = revlog.parents(node) | |||
|
133 | flags = revlog.flags(node) | |||
|
134 | meta = self.builddeltaheader(node, p1, p2, prevnode, linknode, flags) | |||
|
135 | meta += prefix | |||
|
136 | l = len(meta) + len(delta) | |||
|
137 | yield changegroup.chunkheader(l) | |||
|
138 | yield meta | |||
|
139 | yield delta | |||
|
140 | ||||
|
141 | def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs): | |||
|
142 | if not shallowutil.isenabled(repo): | |||
|
143 | return orig(repo, outgoing, version, source, *args, **kwargs) | |||
|
144 | ||||
|
145 | original = repo.shallowmatch | |||
|
146 | try: | |||
|
147 | # if serving, only send files the clients has patterns for | |||
|
148 | if source == 'serve': | |||
|
149 | bundlecaps = kwargs.get(r'bundlecaps') | |||
|
150 | includepattern = None | |||
|
151 | excludepattern = None | |||
|
152 | for cap in (bundlecaps or []): | |||
|
153 | if cap.startswith("includepattern="): | |||
|
154 | raw = cap[len("includepattern="):] | |||
|
155 | if raw: | |||
|
156 | includepattern = raw.split('\0') | |||
|
157 | elif cap.startswith("excludepattern="): | |||
|
158 | raw = cap[len("excludepattern="):] | |||
|
159 | if raw: | |||
|
160 | excludepattern = raw.split('\0') | |||
|
161 | if includepattern or excludepattern: | |||
|
162 | repo.shallowmatch = match.match(repo.root, '', None, | |||
|
163 | includepattern, excludepattern) | |||
|
164 | else: | |||
|
165 | repo.shallowmatch = match.always(repo.root, '') | |||
|
166 | return orig(repo, outgoing, version, source, *args, **kwargs) | |||
|
167 | finally: | |||
|
168 | repo.shallowmatch = original | |||
|
169 | ||||
|
170 | def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args): | |||
|
171 | if not shallowutil.isenabled(repo): | |||
|
172 | return orig(repo, source, revmap, trp, expectedfiles, *args) | |||
|
173 | ||||
|
174 | newfiles = 0 | |||
|
175 | visited = set() | |||
|
176 | revisiondatas = {} | |||
|
177 | queue = [] | |||
|
178 | ||||
|
179 | # Normal Mercurial processes each file one at a time, adding all | |||
|
180 | # the new revisions for that file at once. In remotefilelog a file | |||
|
181 | # revision may depend on a different file's revision (in the case | |||
|
182 | # of a rename/copy), so we must lay all revisions down across all | |||
|
183 | # files in topological order. | |||
|
184 | ||||
|
185 | # read all the file chunks but don't add them | |||
|
186 | progress = repo.ui.makeprogress(_('files'), total=expectedfiles) | |||
|
187 | while True: | |||
|
188 | chunkdata = source.filelogheader() | |||
|
189 | if not chunkdata: | |||
|
190 | break | |||
|
191 | f = chunkdata["filename"] | |||
|
192 | repo.ui.debug("adding %s revisions\n" % f) | |||
|
193 | progress.increment() | |||
|
194 | ||||
|
195 | if not repo.shallowmatch(f): | |||
|
196 | fl = repo.file(f) | |||
|
197 | deltas = source.deltaiter() | |||
|
198 | fl.addgroup(deltas, revmap, trp) | |||
|
199 | continue | |||
|
200 | ||||
|
201 | chain = None | |||
|
202 | while True: | |||
|
203 | # returns: (node, p1, p2, cs, deltabase, delta, flags) or None | |||
|
204 | revisiondata = source.deltachunk(chain) | |||
|
205 | if not revisiondata: | |||
|
206 | break | |||
|
207 | ||||
|
208 | chain = revisiondata[0] | |||
|
209 | ||||
|
210 | revisiondatas[(f, chain)] = revisiondata | |||
|
211 | queue.append((f, chain)) | |||
|
212 | ||||
|
213 | if f not in visited: | |||
|
214 | newfiles += 1 | |||
|
215 | visited.add(f) | |||
|
216 | ||||
|
217 | if chain is None: | |||
|
218 | raise error.Abort(_("received file revlog group is empty")) | |||
|
219 | ||||
|
220 | processed = set() | |||
|
221 | def available(f, node, depf, depnode): | |||
|
222 | if depnode != nullid and (depf, depnode) not in processed: | |||
|
223 | if not (depf, depnode) in revisiondatas: | |||
|
224 | # It's not in the changegroup, assume it's already | |||
|
225 | # in the repo | |||
|
226 | return True | |||
|
227 | # re-add self to queue | |||
|
228 | queue.insert(0, (f, node)) | |||
|
229 | # add dependency in front | |||
|
230 | queue.insert(0, (depf, depnode)) | |||
|
231 | return False | |||
|
232 | return True | |||
|
233 | ||||
|
234 | skipcount = 0 | |||
|
235 | ||||
|
236 | # Prefetch the non-bundled revisions that we will need | |||
|
237 | prefetchfiles = [] | |||
|
238 | for f, node in queue: | |||
|
239 | revisiondata = revisiondatas[(f, node)] | |||
|
240 | # revisiondata: (node, p1, p2, cs, deltabase, delta, flags) | |||
|
241 | dependents = [revisiondata[1], revisiondata[2], revisiondata[4]] | |||
|
242 | ||||
|
243 | for dependent in dependents: | |||
|
244 | if dependent == nullid or (f, dependent) in revisiondatas: | |||
|
245 | continue | |||
|
246 | prefetchfiles.append((f, hex(dependent))) | |||
|
247 | ||||
|
248 | repo.fileservice.prefetch(prefetchfiles) | |||
|
249 | ||||
|
250 | # Apply the revisions in topological order such that a revision | |||
|
251 | # is only written once it's deltabase and parents have been written. | |||
|
252 | while queue: | |||
|
253 | f, node = queue.pop(0) | |||
|
254 | if (f, node) in processed: | |||
|
255 | continue | |||
|
256 | ||||
|
257 | skipcount += 1 | |||
|
258 | if skipcount > len(queue) + 1: | |||
|
259 | raise error.Abort(_("circular node dependency")) | |||
|
260 | ||||
|
261 | fl = repo.file(f) | |||
|
262 | ||||
|
263 | revisiondata = revisiondatas[(f, node)] | |||
|
264 | # revisiondata: (node, p1, p2, cs, deltabase, delta, flags) | |||
|
265 | node, p1, p2, linknode, deltabase, delta, flags = revisiondata | |||
|
266 | ||||
|
267 | if not available(f, node, f, deltabase): | |||
|
268 | continue | |||
|
269 | ||||
|
270 | base = fl.revision(deltabase, raw=True) | |||
|
271 | text = mdiff.patch(base, delta) | |||
|
272 | if not isinstance(text, bytes): | |||
|
273 | text = bytes(text) | |||
|
274 | ||||
|
275 | meta, text = shallowutil.parsemeta(text) | |||
|
276 | if 'copy' in meta: | |||
|
277 | copyfrom = meta['copy'] | |||
|
278 | copynode = bin(meta['copyrev']) | |||
|
279 | if not available(f, node, copyfrom, copynode): | |||
|
280 | continue | |||
|
281 | ||||
|
282 | for p in [p1, p2]: | |||
|
283 | if p != nullid: | |||
|
284 | if not available(f, node, f, p): | |||
|
285 | continue | |||
|
286 | ||||
|
287 | fl.add(text, meta, trp, linknode, p1, p2) | |||
|
288 | processed.add((f, node)) | |||
|
289 | skipcount = 0 | |||
|
290 | ||||
|
291 | progress.complete() | |||
|
292 | ||||
|
293 | return len(revisiondatas), newfiles |
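The queue discipline above, reduced to its core: pop a revision, and if a dependency from the same changegroup has not been applied yet, push the dependency in front and retry. A self-contained toy (``deps`` maps each incoming key to the keys it must follow; the stuck-queue bound is simplified from the original's queue-length check):

    def apply_in_dependency_order(incoming, deps):
        """incoming: list of keys; deps: key -> keys it must follow."""
        queue = list(incoming)
        ingroup = set(incoming)
        processed = []
        done = set()
        skipcount = 0
        while queue:
            key = queue.pop(0)
            if key in done:
                continue
            skipcount += 1
            if skipcount > len(ingroup) + 1:
                raise RuntimeError('circular node dependency')
            pending = [d for d in deps.get(key, [])
                       if d in ingroup and d not in done]
            if pending:
                queue.insert(0, key)          # retry after the dependency
                queue.insert(0, pending[0])   # move the dependency up front
                continue
            processed.append(key)
            done.add(key)
            skipcount = 0
        return processed

    # b's delta base is a; a copy in c depends on b. Any input order works out.
    order = apply_in_dependency_order(['c', 'b', 'a'], {'b': ['a'], 'c': ['b']})
    assert order == ['a', 'b', 'c']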
[About sixty additional new files in this changeset were truncated by the review tool; their contents are not shown here.]
@@ -72,6 +72,8 b' cleanbutpackages:' | |||||
72 | rm -rf build mercurial/locale |
|
72 | rm -rf build mercurial/locale | |
73 | $(MAKE) -C doc clean |
|
73 | $(MAKE) -C doc clean | |
74 | $(MAKE) -C contrib/chg distclean |
|
74 | $(MAKE) -C contrib/chg distclean | |
|
75 | rm -rf rust/target | |||
|
76 | rm -f mercurial/rustext.so | |||
75 |
|
77 | |||
76 | clean: cleanbutpackages |
|
78 | clean: cleanbutpackages | |
77 | rm -rf packages |
|
79 | rm -rf packages | |
@@ -178,6 +180,7 b' packaging_targets := \\' | |||||
178 | docker-fedora20 \ |
|
180 | docker-fedora20 \ | |
179 | docker-fedora21 \ |
|
181 | docker-fedora21 \ | |
180 | docker-fedora28 \ |
|
182 | docker-fedora28 \ | |
|
183 | docker-fedora29 \ | |||
181 | docker-ubuntu-trusty \ |
|
184 | docker-ubuntu-trusty \ | |
182 | docker-ubuntu-trusty-ppa \ |
|
185 | docker-ubuntu-trusty-ppa \ | |
183 | docker-ubuntu-xenial \ |
|
186 | docker-ubuntu-xenial \ | |
@@ -189,6 +192,7 b' packaging_targets := \\' | |||||
189 | fedora20 \ |
|
192 | fedora20 \ | |
190 | fedora21 \ |
|
193 | fedora21 \ | |
191 | fedora28 \ |
|
194 | fedora28 \ | |
|
195 | fedora29 \ | |||
192 | linux-wheels \ |
|
196 | linux-wheels \ | |
193 | linux-wheels-x86_64 \ |
|
197 | linux-wheels-x86_64 \ | |
194 | linux-wheels-i686 \ |
|
198 | linux-wheels-i686 \ |
@@ -139,3 +139,18 b' secret()' | |||||
139 | # test finding common ancestors |
|
139 | # test finding common ancestors | |
140 | heads(commonancestors(last(head(), 2))) |
|
140 | heads(commonancestors(last(head(), 2))) | |
141 | heads(commonancestors(head())) |
|
141 | heads(commonancestors(head())) | |
|
142 | ||||
|
143 | # more heads testing | |||
|
144 | heads(all()) | |||
|
145 | heads(-10000:-1) | |||
|
146 | (-5000:-1000) and heads(-10000:-1) | |||
|
147 | heads(matching(tip, "author")) | |||
|
148 | heads(matching(tip, "author")) and -10000:-1 | |||
|
149 | (-10000:-1) and heads(matching(tip, "author")) | |||
|
150 | # more roots testing | |||
|
151 | roots(all()) | |||
|
152 | roots(-10000:-1) | |||
|
153 | (-5000:-1000) and roots(-10000:-1) | |||
|
154 | roots(matching(tip, "author")) | |||
|
155 | roots(matching(tip, "author")) and -10000:-1 | |||
|
156 | (-10000:-1) and roots(matching(tip, "author")) |
@@ -6,6 +6,14 b'' | |||||
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 | """Tool read primitive events from a pipe to produce a catapult trace. |
|
7 | """Tool read primitive events from a pipe to produce a catapult trace. | |
8 |
|
8 | |||
|
9 | Usage: | |||
|
10 | Terminal 1: $ catapipe.py /tmp/mypipe /tmp/trace.json | |||
|
11 | Terminal 2: $ HGCATAPULTSERVERPIPE=/tmp/mypipe hg root | |||
|
12 | <ctrl-c catapipe.py in Terminal 1> | |||
|
13 | $ catapult/tracing/bin/trace2html /tmp/trace.json # produce /tmp/trace.html | |||
|
14 | <open trace.html in your browser of choice; the WASD keys are very useful> | |||
|
15 | (catapult is located at https://github.com/catapult-project/catapult) | |||
|
16 | ||||
9 | For now the event stream supports |
|
17 | For now the event stream supports | |
10 |
|
18 | |||
11 | START $SESSIONID ... |
|
19 | START $SESSIONID ... | |
@@ -24,7 +32,7 b' run-tests.py.' | |||||
24 |
|
32 | |||
25 | Typically you'll want to place the path to the named pipe in the |
|
33 | Typically you'll want to place the path to the named pipe in the | |
26 | HGCATAPULTSERVERPIPE environment variable, which both run-tests and hg |
|
34 | HGCATAPULTSERVERPIPE environment variable, which both run-tests and hg | |
27 | understand. |
|
35 | understand. To trace *only* run-tests, use HGTESTCATAPULTSERVERPIPE instead. | |
28 | """ |
|
36 | """ | |
29 | from __future__ import absolute_import, print_function |
|
37 | from __future__ import absolute_import, print_function | |
30 |
|
38 |
@@ -34,7 +34,7 b' errors = [' | |||||
34 | (commitheader + r"(?!merge with )[^#]\S+[^:] ", |
|
34 | (commitheader + r"(?!merge with )[^#]\S+[^:] ", | |
35 | "summary line doesn't start with 'topic: '"), |
|
35 | "summary line doesn't start with 'topic: '"), | |
36 | (afterheader + r"[A-Z][a-z]\S+", "don't capitalize summary lines"), |
|
36 | (afterheader + r"[A-Z][a-z]\S+", "don't capitalize summary lines"), | |
37 | (afterheader + r"\S+: *[A-Z][a-z]\S+", "don't capitalize summary lines"), |
|
37 | (afterheader + r"^\S+: *[A-Z][a-z]\S+", "don't capitalize summary lines"), | |
38 | (afterheader + r"\S*[^A-Za-z0-9-_]\S*: ", |
|
38 | (afterheader + r"\S*[^A-Za-z0-9-_]\S*: ", | |
39 | "summary keyword should be most user-relevant one-word command or topic"), |
|
39 | "summary keyword should be most user-relevant one-word command or topic"), | |
40 | (afterheader + r".*\.\s*\n", "don't add trailing period on summary line"), |
|
40 | (afterheader + r".*\.\s*\n", "don't add trailing period on summary line"), |
@@ -3,7 +3,6 b'' | |||||
3 | mercurial/cext/dirs.c |
|
3 | mercurial/cext/dirs.c | |
4 | mercurial/cext/manifest.c |
|
4 | mercurial/cext/manifest.c | |
5 | mercurial/cext/osutil.c |
|
5 | mercurial/cext/osutil.c | |
6 | mercurial/cext/revlog.c |
|
|||
7 | # Vendored code that we should never format: |
|
6 | # Vendored code that we should never format: | |
8 | contrib/python-zstandard/c-ext/bufferutil.c |
|
7 | contrib/python-zstandard/c-ext/bufferutil.c | |
9 | contrib/python-zstandard/c-ext/compressionchunker.c |
|
8 | contrib/python-zstandard/c-ext/compressionchunker.c |
@@ -4,7 +4,7 b' CXX = clang++' | |||||
4 | all: bdiff mpatch xdiff |
|
4 | all: bdiff mpatch xdiff | |
5 |
|
5 | |||
6 | fuzzutil.o: fuzzutil.cc fuzzutil.h |
|
6 | fuzzutil.o: fuzzutil.cc fuzzutil.h | |
7 | $(CXX) $(CXXFLAGS) -g -O1 -fsanitize=fuzzer-no-link,address \ |
|
7 | $(CXX) $(CXXFLAGS) -g -O1 \ |
8 | -std=c++17 \ |
|
8 | -std=c++17 \ | |
9 | -I../../mercurial -c -o fuzzutil.o fuzzutil.cc |
|
9 | -I../../mercurial -c -o fuzzutil.o fuzzutil.cc | |
10 |
|
10 | |||
@@ -12,6 +12,11 b' fuzzutil-oss-fuzz.o: fuzzutil.cc fuzzuti' | |||||
12 | $(CXX) $(CXXFLAGS) -std=c++17 \ |
|
12 | $(CXX) $(CXXFLAGS) -std=c++17 \ | |
13 | -I../../mercurial -c -o fuzzutil-oss-fuzz.o fuzzutil.cc |
|
13 | -I../../mercurial -c -o fuzzutil-oss-fuzz.o fuzzutil.cc | |
14 |
|
14 | |||
|
15 | pyutil.o: pyutil.cc pyutil.h | |||
|
16 | $(CXX) $(CXXFLAGS) -g -O1 \ | |||
|
17 | `$$OUT/sanpy/bin/python-config --cflags` \ | |||
|
18 | -I../../mercurial -c -o pyutil.o pyutil.cc | |||
|
19 | ||||
15 | bdiff.o: ../../mercurial/bdiff.c |
|
20 | bdiff.o: ../../mercurial/bdiff.c | |
16 | $(CC) $(CFLAGS) -fsanitize=fuzzer-no-link,address -c -o bdiff.o \ |
|
21 | $(CC) $(CFLAGS) -fsanitize=fuzzer-no-link,address -c -o bdiff.o \ | |
17 | ../../mercurial/bdiff.c |
|
22 | ../../mercurial/bdiff.c | |
@@ -70,59 +75,86 b' xdiff_fuzzer: xdiff.cc fuzz-xdiffi.o fuz' | |||||
70 | fuzz-xdiffi.o fuzz-xprepare.o fuzz-xutils.o fuzzutil-oss-fuzz.o \ |
|
75 | fuzz-xdiffi.o fuzz-xprepare.o fuzz-xutils.o fuzzutil-oss-fuzz.o \ | |
71 | -lFuzzingEngine -o $$OUT/xdiff_fuzzer |
|
76 | -lFuzzingEngine -o $$OUT/xdiff_fuzzer | |
72 |
|
77 | |||
73 | # TODO use the $OUT env var instead of hardcoding /out |
|
78 | manifest.o: ../../mercurial/cext/manifest.c | |
74 | /out/sanpy/bin/python: |
|
|||
75 | cd /Python-2.7.15/ && ./configure --without-pymalloc --prefix=$$OUT/sanpy CFLAGS='-O1 -fno-omit-frame-pointer -g -fwrapv -fstack-protector-strong' LDFLAGS=-lasan && ASAN_OPTIONS=detect_leaks=0 make && make install |
|
|||
76 |
|
||||
77 | sanpy: /out/sanpy/bin/python |
|
|||
78 |
|
||||
79 | manifest.o: sanpy ../../mercurial/cext/manifest.c |
|
|||
80 | $(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ |
|
79 | $(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ | |
81 | -I../../mercurial \ |
|
80 | -I../../mercurial \ | |
82 | -c -o manifest.o ../../mercurial/cext/manifest.c |
|
81 | -c -o manifest.o ../../mercurial/cext/manifest.c | |
83 |
|
82 | |||
84 | charencode.o: sanpy ../../mercurial/cext/charencode.c |
|
83 | charencode.o: ../../mercurial/cext/charencode.c |
85 | $(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ |
|
84 | $(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ | |
86 | -I../../mercurial \ |
|
85 | -I../../mercurial \ | |
87 | -c -o charencode.o ../../mercurial/cext/charencode.c |
|
86 | -c -o charencode.o ../../mercurial/cext/charencode.c | |
88 |
|
87 | |||
89 | parsers.o: sanpy ../../mercurial/cext/parsers.c |
|
88 | parsers.o: ../../mercurial/cext/parsers.c |
90 | $(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ |
|
89 | $(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ | |
91 | -I../../mercurial \ |
|
90 | -I../../mercurial \ | |
92 | -c -o parsers.o ../../mercurial/cext/parsers.c |
|
91 | -c -o parsers.o ../../mercurial/cext/parsers.c | |
93 |
|
92 | |||
94 | dirs.o: sanpy ../../mercurial/cext/dirs.c |
|
93 | dirs.o: ../../mercurial/cext/dirs.c |
95 | $(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ |
|
94 | $(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ | |
96 | -I../../mercurial \ |
|
95 | -I../../mercurial \ | |
97 | -c -o dirs.o ../../mercurial/cext/dirs.c |
|
96 | -c -o dirs.o ../../mercurial/cext/dirs.c | |
98 |
|
97 | |||
99 | pathencode.o: sanpy ../../mercurial/cext/pathencode.c |
|
98 | pathencode.o: ../../mercurial/cext/pathencode.c |
100 | $(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ |
|
99 | $(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ | |
101 | -I../../mercurial \ |
|
100 | -I../../mercurial \ | |
102 | -c -o pathencode.o ../../mercurial/cext/pathencode.c |
|
101 | -c -o pathencode.o ../../mercurial/cext/pathencode.c | |
103 |
|
102 | |||
104 | revlog.o: sanpy ../../mercurial/cext/revlog.c |
|
103 | revlog.o: ../../mercurial/cext/revlog.c |
105 | $(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ |
|
104 | $(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ | |
106 | -I../../mercurial \ |
|
105 | -I../../mercurial \ | |
107 | -c -o revlog.o ../../mercurial/cext/revlog.c |
|
106 | -c -o revlog.o ../../mercurial/cext/revlog.c | |
108 |
|
107 | |||
109 | manifest_fuzzer: sanpy manifest.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o |
|
108 | manifest_fuzzer: manifest.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o |
110 | $(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ |
|
109 | $(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ | |
111 | -Wno-register -Wno-macro-redefined \ |
|
110 | -Wno-register -Wno-macro-redefined \ | |
112 | -I../../mercurial manifest.cc \ |
|
111 | -I../../mercurial manifest.cc \ | |
113 | manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o \ |
|
112 | manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \ | |
114 | -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \ |
|
113 | -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \ | |
115 | -o $$OUT/manifest_fuzzer |
|
114 | -o $$OUT/manifest_fuzzer | |
116 |
|
115 | |||
117 | manifest_corpus.zip: |
|
116 | manifest_corpus.zip: | |
118 | python manifest_corpus.py $$OUT/manifest_fuzzer_seed_corpus.zip |
|
117 | python manifest_corpus.py $$OUT/manifest_fuzzer_seed_corpus.zip | |
119 |
|
118 | |||
|
119 | revlog_fuzzer: revlog.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o | |||
|
120 | $(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ | |||
|
121 | -Wno-register -Wno-macro-redefined \ | |||
|
122 | -I../../mercurial revlog.cc \ | |||
|
123 | manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \ | |||
|
124 | -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \ | |||
|
125 | -o $$OUT/revlog_fuzzer | |||
|
126 | ||||
|
127 | revlog_corpus.zip: | |||
|
128 | python revlog_corpus.py $$OUT/revlog_fuzzer_seed_corpus.zip | |||
|
129 | ||||
|
130 | dirstate_fuzzer: dirstate.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o | |||
|
131 | $(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ | |||
|
132 | -Wno-register -Wno-macro-redefined \ | |||
|
133 | -I../../mercurial dirstate.cc \ | |||
|
134 | manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \ | |||
|
135 | -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \ | |||
|
136 | -o $$OUT/dirstate_fuzzer | |||
|
137 | ||||
|
138 | dirstate_corpus.zip: | |||
|
139 | python dirstate_corpus.py $$OUT/dirstate_fuzzer_seed_corpus.zip | |||
|
140 | ||||
|
141 | fm1readmarkers_fuzzer: fm1readmarkers.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o | |||
|
142 | $(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \ | |||
|
143 | -Wno-register -Wno-macro-redefined \ | |||
|
144 | -I../../mercurial fm1readmarkers.cc \ | |||
|
145 | manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \ | |||
|
146 | -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \ | |||
|
147 | -o $$OUT/fm1readmarkers_fuzzer | |||
|
148 | ||||
|
149 | fm1readmarkers_corpus.zip: | |||
|
150 | python fm1readmarkers_corpus.py $$OUT/fm1readmarkers_fuzzer_seed_corpus.zip | |||
|
151 | ||||
120 | clean: |
|
152 | clean: | |
121 | $(RM) *.o *_fuzzer \ |
|
153 | $(RM) *.o *_fuzzer \ | |
122 | bdiff \ |
|
154 | bdiff \ | |
123 | mpatch \ |
|
155 | mpatch \ | |
124 | xdiff |
|
156 | xdiff | |
125 |
|
157 | |||
126 | oss-fuzz: bdiff_fuzzer mpatch_fuzzer mpatch_corpus.zip xdiff_fuzzer manifest_fuzzer manifest_corpus.zip |
|
158 | oss-fuzz: bdiff_fuzzer mpatch_fuzzer mpatch_corpus.zip xdiff_fuzzer manifest_fuzzer manifest_corpus.zip revlog_fuzzer revlog_corpus.zip dirstate_fuzzer dirstate_corpus.zip fm1readmarkers_fuzzer fm1readmarkers_corpus.zip | |
127 |
|
159 | |||
128 | .PHONY: all clean oss-fuzz |
|
160 | .PHONY: all clean oss-fuzz |
@@ -3,43 +3,17 b'' | |||||
3 | #include <stdlib.h> |
|
3 | #include <stdlib.h> | |
4 | #include <unistd.h> |
|
4 | #include <unistd.h> | |
5 |
|
5 | |||
|
6 | #include "pyutil.h" | |||
|
7 | ||||
6 | #include <string> |
|
8 | #include <string> | |
7 |
|
9 | |||
8 | extern "C" { |
|
10 | extern "C" { | |
9 |
|
11 | |||
10 | /* TODO: use Python 3 for this fuzzing? */ |
|
|||
11 | PyMODINIT_FUNC initparsers(void); |
|
|||
12 |
|
||||
13 | static char cpypath[8192] = "\0"; |
|
|||
14 |
|
||||
15 | static PyCodeObject *code; |
|
12 | static PyCodeObject *code; | |
16 | static PyObject *mainmod; |
|
|||
17 | static PyObject *globals; |
|
|||
18 |
|
13 | |||
19 | extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv) |
|
14 | extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv) | |
20 | { |
|
15 | { | |
21 | const std::string subdir = "/sanpy/lib/python2.7"; |
|
16 | contrib::initpy(*argv[0]); | |
22 | /* HACK ALERT: we need a full Python installation built without |
|
|||
23 | pymalloc and with ASAN, so we dump one in |
|
|||
24 | $OUT/sanpy/lib/python2.7. This helps us wire that up. */ |
|
|||
25 | std::string selfpath(*argv[0]); |
|
|||
26 | std::string pypath; |
|
|||
27 | auto pos = selfpath.rfind("/"); |
|
|||
28 | if (pos == std::string::npos) { |
|
|||
29 | char wd[8192]; |
|
|||
30 | getcwd(wd, 8192); |
|
|||
31 | pypath = std::string(wd) + subdir; |
|
|||
32 | } else { |
|
|||
33 | pypath = selfpath.substr(0, pos) + subdir; |
|
|||
34 | } |
|
|||
35 | strncpy(cpypath, pypath.c_str(), pypath.size()); |
|
|||
36 | setenv("PYTHONPATH", cpypath, 1); |
|
|||
37 | setenv("PYTHONNOUSERSITE", "1", 1); |
|
|||
38 | /* prevent Python from looking up users in the fuzz environment */ |
|
|||
39 | setenv("PYTHONUSERBASE", cpypath, 1); |
|
|||
40 | Py_SetPythonHome(cpypath); |
|
|||
41 | Py_InitializeEx(0); |
|
|||
42 | initparsers(); |
|
|||
43 | code = (PyCodeObject *)Py_CompileString(R"py( |
|
17 | code = (PyCodeObject *)Py_CompileString(R"py( | |
44 | from parsers import lazymanifest |
|
18 | from parsers import lazymanifest | |
45 | try: |
|
19 | try: | |
@@ -60,8 +34,6 b' except Exception as e:' | |||||
60 | # print e |
|
34 | # print e | |
61 | )py", |
|
35 | )py", | |
62 | "fuzzer", Py_file_input); |
|
36 | "fuzzer", Py_file_input); | |
63 | mainmod = PyImport_AddModule("__main__"); |
|
|||
64 | globals = PyModule_GetDict(mainmod); |
|
|||
65 | return 0; |
|
37 | return 0; | |
66 | } |
|
38 | } | |
67 |
|
39 | |||
@@ -71,7 +43,7 b' int LLVMFuzzerTestOneInput(const uint8_t' | |||||
71 | PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size); |
|
43 | PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size); | |
72 | PyObject *locals = PyDict_New(); |
|
44 | PyObject *locals = PyDict_New(); | |
73 | PyDict_SetItemString(locals, "mdata", mtext); |
|
45 | PyDict_SetItemString(locals, "mdata", mtext); | |
74 | PyObject *res = PyEval_EvalCode(code, globals, locals); |
|
46 | PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals); | |
75 | if (!res) { |
|
47 | if (!res) { | |
76 | PyErr_Print(); |
|
48 | PyErr_Print(); | |
77 | } |
|
49 | } |
@@ -22,6 +22,11 b' int hunk_consumer(long a1, long a2, long' | |||||
22 |
|
22 | |||
23 | int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) |
|
23 | int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) | |
24 | { |
|
24 | { | |
|
25 | // Don't allow fuzzer inputs larger than 100k, since we'll just bog | |||
|
26 | // down and not accomplish much. | |||
|
27 | if (Size > 100000) { | |||
|
28 | return 0; | |||
|
29 | } | |||
25 | auto maybe_inputs = SplitInputs(Data, Size); |
|
30 | auto maybe_inputs = SplitInputs(Data, Size); | |
26 | if (!maybe_inputs) { |
|
31 | if (!maybe_inputs) { | |
27 | return 0; |
|
32 | return 0; |
@@ -27,12 +27,18 b' else:' | |||||
27 | stringio = cStringIO.StringIO |
|
27 | stringio = cStringIO.StringIO | |
28 | bprint = print |
|
28 | bprint = print | |
29 |
|
29 | |||
30 | def connectpipe(path=None): |
|
30 | def connectpipe(path=None, extraargs=()): | |
31 | cmdline = [b'hg', b'serve', b'--cmdserver', b'pipe'] |
|
31 | cmdline = [b'hg', b'serve', b'--cmdserver', b'pipe'] | |
32 | if path: |
|
32 | if path: | |
33 | cmdline += [b'-R', path] |
|
33 | cmdline += [b'-R', path] | |
|
34 | cmdline.extend(extraargs) | |||
34 |
|
35 | |||
35 | server = subprocess.Popen(cmdline, stdin=subprocess.PIPE, |
|
36 | def tonative(cmdline): | |
|
37 | if os.name != r'nt': | |||
|
38 | return cmdline | |||
|
39 | return [arg.decode("utf-8") for arg in cmdline] | |||
|
40 | ||||
|
41 | server = subprocess.Popen(tonative(cmdline), stdin=subprocess.PIPE, | |||
36 | stdout=subprocess.PIPE) |
|
42 | stdout=subprocess.PIPE) | |
37 |
|
43 | |||
38 | return server |
|
44 | return server | |
@@ -114,6 +120,8 b' def runcommand(server, args, output=stdo' | |||||
114 | writeblock(server, input.read(data)) |
|
120 | writeblock(server, input.read(data)) | |
115 | elif ch == b'L': |
|
121 | elif ch == b'L': | |
116 | writeblock(server, input.readline(data)) |
|
122 | writeblock(server, input.readline(data)) | |
|
123 | elif ch == b'm': | |||
|
124 | bprint(b"message: %r" % data) | |||
117 | elif ch == b'r': |
|
125 | elif ch == b'r': | |
118 | ret, = struct.unpack('>i', data) |
|
126 | ret, = struct.unpack('>i', data) | |
119 | if ret != 0: |
|
127 | if ret != 0: | |
@@ -132,3 +140,8 b' def check(func, connect=connectpipe):' | |||||
132 | finally: |
|
140 | finally: | |
133 | server.stdin.close() |
|
141 | server.stdin.close() | |
134 | server.wait() |
|
142 | server.wait() | |
|
143 | ||||
|
144 | def checkwith(connect=connectpipe, **kwargs): | |||
|
145 | def wrap(func): | |||
|
146 | return check(func, lambda: connect(**kwargs)) | |||
|
147 | return wrap |
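A hedged usage sketch for the new decorator (the test body and extra arguments are illustrative only; readchannel/runcommand are the module's existing helpers, assumed available here):

    @checkwith(extraargs=[b'--config', b'ui.verbose=True'])
    def verboseserver(server):
        readchannel(server)
        runcommand(server, [b'id'])

The point of the wrapper is that a test needing a specially configured command server no longer has to spell out its own connect lambda: checkwith binds the connect arguments once and reuses check().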
@@ -40,8 +40,6 b' allowsymbolimports = (' | |||||
40 | # third-party imports should be directly imported |
|
40 | # third-party imports should be directly imported | |
41 | 'mercurial.thirdparty', |
|
41 | 'mercurial.thirdparty', | |
42 | 'mercurial.thirdparty.attr', |
|
42 | 'mercurial.thirdparty.attr', | |
43 | 'mercurial.thirdparty.cbor', |
|
|||
44 | 'mercurial.thirdparty.cbor.cbor2', |
|
|||
45 | 'mercurial.thirdparty.zope', |
|
43 | 'mercurial.thirdparty.zope', | |
46 | 'mercurial.thirdparty.zope.interface', |
|
44 | 'mercurial.thirdparty.zope.interface', | |
47 | ) |
|
45 | ) | |
@@ -260,10 +258,12 b' def list_stdlib_modules():' | |||||
260 | break |
|
258 | break | |
261 | else: |
|
259 | else: | |
262 | stdlib_prefixes.add(dirname) |
|
260 | stdlib_prefixes.add(dirname) | |
|
261 | sourceroot = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) | |||
263 | for libpath in sys.path: |
|
262 | for libpath in sys.path: | |
264 | # We want to walk everything in sys.path that starts with |
|
263 | # We want to walk everything in sys.path that starts with something in | |
265 | # something in stdlib_prefixes. |
|
264 | # stdlib_prefixes, but not directories from the hg sources. | |
266 | if not any(libpath.startswith(p) for p in stdlib_prefixes): |
|
265 | if (os.path.abspath(libpath).startswith(sourceroot) | |
|
266 | or not any(libpath.startswith(p) for p in stdlib_prefixes)): | |||
267 | continue |
|
267 | continue | |
268 | for top, dirs, files in os.walk(libpath): |
|
268 | for top, dirs, files in os.walk(libpath): | |
269 | for i, d in reversed(list(enumerate(dirs))): |
|
269 | for i, d in reversed(list(enumerate(dirs))): | |
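A standalone sketch of the added guard (what gets printed depends on the environment; sourceroot is computed the same way as above when this is saved as a script inside the source tree):

    import os
    import sys

    sourceroot = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    stdlib_prefixes = {sys.prefix, sys.exec_prefix}
    for libpath in sys.path:
        # skip sys.path entries inside the checked source tree, so modules
        # shipped with hg are not mistaken for standard library modules
        if (os.path.abspath(libpath).startswith(sourceroot)
            or not any(libpath.startswith(p) for p in stdlib_prefixes)):
            continue
        print('would walk', libpath)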
@@ -674,6 +674,8 b' def embedded(f, modname, src):' | |||||
674 | # "starts" is "line number" (1-origin), but embedded() is |
|
674 | # "starts" is "line number" (1-origin), but embedded() is | |
675 | # expected to return "line offset" (0-origin). Therefore, this |
|
675 | # expected to return "line offset" (0-origin). Therefore, this | |
676 | # yields "starts - 1". |
|
676 | # yields "starts - 1". | |
|
677 | if not isinstance(modname, str): | |||
|
678 | modname = modname.decode('utf8') | |||
677 | yield code, "%s[%d]" % (modname, starts), name, starts - 1 |
|
679 | yield code, "%s[%d]" % (modname, starts), name, starts - 1 | |
678 |
|
680 | |||
679 | def sources(f, modname): |
|
681 | def sources(f, modname): | |
@@ -694,7 +696,7 b' def sources(f, modname):' | |||||
694 | if py or f.endswith('.t'): |
|
696 | if py or f.endswith('.t'): | |
695 | with open(f, 'rb') as src: |
|
697 | with open(f, 'rb') as src: | |
696 | for script, modname, t, line in embedded(f, modname, src): |
|
698 | for script, modname, t, line in embedded(f, modname, src): | |
697 | yield script, modname, t, line |
|
699 | yield script, modname.encode('utf8'), t, line | |
698 |
|
700 | |||
699 | def main(argv): |
|
701 | def main(argv): | |
700 | if len(argv) < 2 or (argv[1] == '-' and len(argv) > 2): |
|
702 | if len(argv) < 2 or (argv[1] == '-' and len(argv) > 2): |
@@ -14,7 +14,8 b' UBUNTU_CODENAMES := \\' | |||||
14 | FEDORA_RELEASES := \ |
|
14 | FEDORA_RELEASES := \ | |
15 | 20 \ |
|
15 | 20 \ | |
16 | 21 \ |
|
16 | 21 \ | |
17 | 28 |
|
17 | 28 \ | |
|
18 | 29 | |||
18 |
|
19 | |||
19 | CENTOS_RELEASES := \ |
|
20 | CENTOS_RELEASES := \ | |
20 | 5 \ |
|
21 | 5 \ |
@@ -1,7 +1,7 b'' | |||||
1 | FROM centos:centos5 |
|
1 | FROM centos:centos5 | |
2 |
|
2 | |||
3 | RUN groupadd -g 1000 build && \ |
|
3 | RUN groupadd -g %GID% build && \ |
4 | useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build |
|
4 | useradd -u %UID% -g %GID% -s /bin/bash -d /build -m build |
5 |
|
5 | |||
6 | RUN \ |
|
6 | RUN \ | |
7 | sed -i 's/^mirrorlist/#mirrorlist/' /etc/yum.repos.d/*.repo && \ |
|
7 | sed -i 's/^mirrorlist/#mirrorlist/' /etc/yum.repos.d/*.repo && \ |
@@ -1,7 +1,7 b'' | |||||
1 | FROM centos:centos6 |
|
1 | FROM centos:centos6 | |
2 |
|
2 | |||
3 | RUN groupadd -g 1000 build && \ |
|
3 | RUN groupadd -g %GID% build && \ |
4 | useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build |
|
4 | useradd -u %UID% -g %GID% -s /bin/bash -d /build -m build |
5 |
|
5 | |||
6 | RUN yum install -y \ |
|
6 | RUN yum install -y \ | |
7 | gcc \ |
|
7 | gcc \ |
@@ -1,7 +1,7 b'' | |||||
1 | FROM centos:centos7 |
|
1 | FROM centos:centos7 | |
2 |
|
2 | |||
3 | RUN groupadd -g 1000 build && \ |
|
3 | RUN groupadd -g %GID% build && \ |
4 | useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build |
|
4 | useradd -u %UID% -g %GID% -s /bin/bash -d /build -m build |
5 |
|
5 | |||
6 | RUN yum install -y \ |
|
6 | RUN yum install -y \ | |
7 | gcc \ |
|
7 | gcc \ |
@@ -1,4 +1,4 b'' | |||||
1 | FROM fedora:28 |
|
1 | FROM fedora:29 |
2 |
|
2 | |||
3 | RUN groupadd -g 1000 build && \ |
|
3 | RUN groupadd -g 1000 build && \ | |
4 | useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build |
|
4 | useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build |
@@ -10,7 +10,15 b' DOCKER=$($BUILDDIR/hg-docker docker-path' | |||||
10 |
|
10 | |||
11 | CONTAINER=hg-docker-$PLATFORM |
|
11 | CONTAINER=hg-docker-$PLATFORM | |
12 |
|
12 | |||
13 | $BUILDDIR/hg-docker build $BUILDDIR/docker/$PLATFORM $CONTAINER |
|
13 | if [[ -z "${HG_DOCKER_OWN_USER}" ]]; then | |
|
14 | DOCKERUID=1000 | |||
|
15 | DOCKERGID=1000 | |||
|
16 | else | |||
|
17 | DOCKERUID=$(id -u) | |||
|
18 | DOCKERGID=$(id -g) | |||
|
19 | fi | |||
|
20 | ||||
|
21 | $BUILDDIR/hg-docker build --build-arg UID=$DOCKERUID --build-arg GID=$DOCKERGID $BUILDDIR/docker/$PLATFORM $CONTAINER | |||
14 |
|
22 | |||
15 | RPMBUILDDIR=$ROOTDIR/packages/$PLATFORM |
|
23 | RPMBUILDDIR=$ROOTDIR/packages/$PLATFORM | |
16 | $ROOTDIR/contrib/packaging/buildrpm --rpmbuilddir $RPMBUILDDIR --prepare $* |
|
24 | $ROOTDIR/contrib/packaging/buildrpm --rpmbuilddir $RPMBUILDDIR --prepare $* |
@@ -47,7 +47,7 b' def get_dockerfile(path: pathlib.Path, a' | |||||
47 | df = fh.read() |
|
47 | df = fh.read() | |
48 |
|
48 | |||
49 | for k, v in args: |
|
49 | for k, v in args: | |
50 | df = df.replace(b'%%%s%%' % k, v) |
|
50 | df = df.replace(bytes('%%%s%%' % k.decode(), 'utf-8'), v) | |
51 |
|
51 | |||
52 | return df |
|
52 | return df | |
53 |
|
53 | |||
@@ -72,7 +72,12 b' def build_docker_image(dockerfile: pathl' | |||||
72 | ] |
|
72 | ] | |
73 |
|
73 | |||
74 | print('executing: %r' % args) |
|
74 | print('executing: %r' % args) | |
75 | subprocess.run(args, input=dockerfile, check=True) |
|
75 | p = subprocess.Popen(args, stdin=subprocess.PIPE) | |
|
76 | p.communicate(input=dockerfile) | |||
|
77 | if p.returncode: | |||
|
78 | raise subprocess.CalledProcessError( | |||
|
79 | p.returncode, 'failed to build docker image: %s %s' \ | |||
|
80 | % (p.stdout, p.stderr)) | |||
76 |
|
81 | |||
77 | def command_build(args): |
|
82 | def command_build(args): | |
78 | build_args = [] |
|
83 | build_args = [] |
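A standalone illustration of the substitution get_dockerfile() now performs (the Dockerfile fragment and values are examples):

    df = b'RUN groupadd -g %GID% build && useradd -u %UID% -g %GID% build'
    args = [(b'UID', b'1000'), (b'GID', b'1000')]
    for k, v in args:
        # '%%%s%%' % 'UID' -> '%UID%'; the key is decoded and the pattern
        # re-encoded because %-formatting of bytes is not available on all
        # supported Python versions
        df = df.replace(bytes('%%%s%%' % k.decode(), 'utf-8'), v)
    print(df)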
@@ -24,8 +24,10 b' import functools' | |||||
24 | import gc |
|
24 | import gc | |
25 | import os |
|
25 | import os | |
26 | import random |
|
26 | import random | |
|
27 | import shutil | |||
27 | import struct |
|
28 | import struct | |
28 | import sys |
|
29 | import sys | |
|
30 | import tempfile | |||
29 | import threading |
|
31 | import threading | |
30 | import time |
|
32 | import time | |
31 | from mercurial import ( |
|
33 | from mercurial import ( | |
@@ -35,6 +37,7 b' from mercurial import (' | |||||
35 | copies, |
|
37 | copies, | |
36 | error, |
|
38 | error, | |
37 | extensions, |
|
39 | extensions, | |
|
40 | hg, | |||
38 | mdiff, |
|
41 | mdiff, | |
39 | merge, |
|
42 | merge, | |
40 | revlog, |
|
43 | revlog, | |
@@ -65,6 +68,11 b' try:' | |||||
65 | from mercurial import scmutil # since 1.9 (or 8b252e826c68) |
|
68 | from mercurial import scmutil # since 1.9 (or 8b252e826c68) | |
66 | except ImportError: |
|
69 | except ImportError: | |
67 | pass |
|
70 | pass | |
|
71 | try: | |||
|
72 | from mercurial import setdiscovery # since 1.9 (or cb98fed52495) | |||
|
73 | except ImportError: | |||
|
74 | pass | |||
|
75 | ||||
68 |
|
76 | |||
69 | def identity(a): |
|
77 | def identity(a): | |
70 | return a |
|
78 | return a | |
@@ -273,7 +281,9 b' def gettimer(ui, opts=None):' | |||||
273 | displayall = ui.configbool(b"perf", b"all-timing", False) |
|
281 | displayall = ui.configbool(b"perf", b"all-timing", False) | |
274 | return functools.partial(_timer, fm, displayall=displayall), fm |
|
282 | return functools.partial(_timer, fm, displayall=displayall), fm | |
275 |
|
283 | |||
276 | def stub_timer(fm, func, title=None): |
|
284 | def stub_timer(fm, func, setup=None, title=None): | |
|
285 | if setup is not None: | |||
|
286 | setup() | |||
277 | func() |
|
287 | func() | |
278 |
|
288 | |||
279 | @contextlib.contextmanager |
|
289 | @contextlib.contextmanager | |
@@ -287,12 +297,14 b' def timeone():' | |||||
287 | a, b = ostart, ostop |
|
297 | a, b = ostart, ostop | |
288 | r.append((cstop - cstart, b[0] - a[0], b[1]-a[1])) |
|
298 | r.append((cstop - cstart, b[0] - a[0], b[1]-a[1])) | |
289 |
|
299 | |||
290 | def _timer(fm, func, title=None, displayall=False): |
|
300 | def _timer(fm, func, setup=None, title=None, displayall=False): | |
291 | gc.collect() |
|
301 | gc.collect() | |
292 | results = [] |
|
302 | results = [] | |
293 | begin = util.timer() |
|
303 | begin = util.timer() | |
294 | count = 0 |
|
304 | count = 0 | |
295 | while True: |
|
305 | while True: | |
|
306 | if setup is not None: | |||
|
307 | setup() | |||
296 | with timeone() as item: |
|
308 | with timeone() as item: | |
297 | r = func() |
|
309 | r = func() | |
298 | count += 1 |
|
310 | count += 1 | |
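The new setup hook runs before every timed call, keeping per-run preparation (typically cache clearing) out of the measured interval. A rough standalone equivalent of the pattern:

    import time

    def bench(func, setup=None, runs=5):
        results = []
        for _ in range(runs):
            if setup is not None:
                setup()  # untimed: e.g. drop caches so each run is cold
            begin = time.perf_counter()
            func()       # only this part contributes to the timing
            results.append(time.perf_counter() - begin)
        return min(results)

    print(bench(lambda: sum(range(10000)), setup=lambda: None))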
@@ -453,11 +465,19 b' def repocleartagscachefunc(repo):' | |||||
453 |
|
465 | |||
454 | # utilities to clear cache |
|
466 | # utilities to clear cache | |
455 |
|
467 | |||
456 | def clearfilecache(repo, attrname): |
|
468 | def clearfilecache(obj, attrname): |
457 | unfi = repo.unfiltered() |
|
469 | unfiltered = getattr(obj, 'unfiltered', None) | |
458 | if attrname in vars(unfi): |
|
470 | if unfiltered is not None: | |
459 | delattr(unfi, attrname) |
|
471 | obj = obj.unfiltered() | |
460 | unfi._filecache.pop(attrname, None) |
|
472 | if attrname in vars(obj): | |
|
473 | delattr(obj, attrname) | |||
|
474 | obj._filecache.pop(attrname, None) | |||
|
475 | ||||
|
476 | def clearchangelog(repo): | |||
|
477 | if repo is not repo.unfiltered(): | |||
|
478 | object.__setattr__(repo, r'_clcachekey', None) | |||
|
479 | object.__setattr__(repo, r'_clcache', None) | |||
|
480 | clearfilecache(repo.unfiltered(), 'changelog') | |||
461 |
|
481 | |||
462 | # perf commands |
|
482 | # perf commands | |
463 |
|
483 | |||
@@ -524,23 +544,23 b' def perfheads(ui, repo, **opts):' | |||||
524 | timer(d) |
|
544 | timer(d) | |
525 | fm.end() |
|
545 | fm.end() | |
526 |
|
546 | |||
527 | @command(b'perftags', formatteropts) |
|
547 | @command(b'perftags', formatteropts+ |
|
548 | [ | |||
|
549 | (b'', b'clear-revlogs', False, b'refresh changelog and manifest'), | |||
|
550 | ]) | |||
528 | def perftags(ui, repo, **opts): |
|
551 | def perftags(ui, repo, **opts): | |
529 | import mercurial.changelog |
|
|||
530 | import mercurial.manifest |
|
|||
531 |
|
||||
532 | opts = _byteskwargs(opts) |
|
552 | opts = _byteskwargs(opts) | |
533 | timer, fm = gettimer(ui, opts) |
|
553 | timer, fm = gettimer(ui, opts) | |
534 | svfs = getsvfs(repo) |
|
|||
535 | repocleartagscache = repocleartagscachefunc(repo) |
|
554 | repocleartagscache = repocleartagscachefunc(repo) | |
|
555 | clearrevlogs = opts[b'clear_revlogs'] | |||
|
556 | def s(): | |||
|
557 | if clearrevlogs: | |||
|
558 | clearchangelog(repo) | |||
|
559 | clearfilecache(repo.unfiltered(), 'manifest') | |||
|
560 | repocleartagscache() | |||
536 | def t(): |
|
561 | def t(): | |
537 | repo.changelog = mercurial.changelog.changelog(svfs) |
|
|||
538 | rootmanifest = mercurial.manifest.manifestrevlog(svfs) |
|
|||
539 | repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo, |
|
|||
540 | rootmanifest) |
|
|||
541 | repocleartagscache() |
|
|||
542 | return len(repo.tags()) |
|
562 | return len(repo.tags()) | |
543 | timer(t) |
|
563 | timer(t, setup=s) | |
544 | fm.end() |
|
564 | fm.end() | |
545 |
|
565 | |||
546 | @command(b'perfancestors', formatteropts) |
|
566 | @command(b'perfancestors', formatteropts) | |
@@ -567,15 +587,38 b' def perfancestorset(ui, repo, revset, **' | |||||
567 | timer(d) |
|
587 | timer(d) | |
568 | fm.end() |
|
588 | fm.end() | |
569 |
|
589 | |||
570 | @command(b'perfbookmarks', formatteropts) |
|
590 | @command(b'perfdiscovery', formatteropts, b'PATH') |
|
591 | def perfdiscovery(ui, repo, path, **opts): | |||
|
592 | """benchmark discovery between local repo and the peer at given path | |||
|
593 | """ | |||
|
594 | repos = [repo, None] | |||
|
595 | timer, fm = gettimer(ui, opts) | |||
|
596 | path = ui.expandpath(path) | |||
|
597 | ||||
|
598 | def s(): | |||
|
599 | repos[1] = hg.peer(ui, opts, path) | |||
|
600 | def d(): | |||
|
601 | setdiscovery.findcommonheads(ui, *repos) | |||
|
602 | timer(d, setup=s) | |||
|
603 | fm.end() | |||
|
604 | ||||
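A hedged sketch of what the command exercises, outside the timing harness (the peer path is illustrative, and the exact return shape of findcommonheads may differ between Mercurial versions):

    from mercurial import hg, setdiscovery
    from mercurial import ui as uimod

    myui = uimod.ui.load()
    repo = hg.repository(myui, b'.')
    other = hg.peer(myui, {}, b'../some-related-repo')
    # discovery computes which heads the two repositories have in common
    res = setdiscovery.findcommonheads(myui, repo, other)
    print(res)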
|
605 | @command(b'perfbookmarks', formatteropts + | |||
|
606 | [ | |||
|
607 | (b'', b'clear-revlogs', False, b'refresh changelog and manifest'), | |||
|
608 | ]) | |||
571 | def perfbookmarks(ui, repo, **opts): |
|
609 | def perfbookmarks(ui, repo, **opts): | |
572 | """benchmark parsing bookmarks from disk to memory""" |
|
610 | """benchmark parsing bookmarks from disk to memory""" | |
573 | opts = _byteskwargs(opts) |
|
611 | opts = _byteskwargs(opts) | |
574 | timer, fm = gettimer(ui, opts) |
|
612 | timer, fm = gettimer(ui, opts) | |
575 | def d(): |
|
613 | ||
|
614 | clearrevlogs = opts[b'clear_revlogs'] | |||
|
615 | def s(): | |||
|
616 | if clearrevlogs: | |||
|
617 | clearchangelog(repo) | |||
576 | clearfilecache(repo, b'_bookmarks') |
|
618 | clearfilecache(repo, b'_bookmarks') | |
|
619 | def d(): | |||
577 | repo._bookmarks |
|
620 | repo._bookmarks | |
578 | timer(d) |
|
621 | timer(d, setup=s) | |
579 | fm.end() |
|
622 | fm.end() | |
580 |
|
623 | |||
581 | @command(b'perfbundleread', formatteropts, b'BUNDLE') |
|
624 | @command(b'perfbundleread', formatteropts, b'BUNDLE') | |
@@ -697,9 +740,9 b' def perfbundleread(ui, repo, bundlepath,' | |||||
697 | fm.end() |
|
740 | fm.end() | |
698 |
|
741 | |||
699 | @command(b'perfchangegroupchangelog', formatteropts + |
|
742 | @command(b'perfchangegroupchangelog', formatteropts + | |
700 | [(b'', b'version', b'02', b'changegroup version'), |
|
743 | [(b'', b'cgversion', b'02', b'changegroup version'), | |
701 | (b'r', b'rev', b'', b'revisions to add to changegroup')]) |
|
744 | (b'r', b'rev', b'', b'revisions to add to changegroup')]) | |
702 | def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts): |
|
745 | def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts): | |
703 | """Benchmark producing a changelog group for a changegroup. |
|
746 | """Benchmark producing a changelog group for a changegroup. | |
704 |
|
747 | |||
705 | This measures the time spent processing the changelog during a |
|
748 | This measures the time spent processing the changelog during a | |
@@ -712,7 +755,7 b' def perfchangegroupchangelog(ui, repo, v' | |||||
712 | opts = _byteskwargs(opts) |
|
755 | opts = _byteskwargs(opts) | |
713 | cl = repo.changelog |
|
756 | cl = repo.changelog | |
714 | nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')] |
|
757 | nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')] | |
715 | bundler = changegroup.getbundler(version, repo) |
|
758 | bundler = changegroup.getbundler(cgversion, repo) | |
716 |
|
759 | |||
717 | def d(): |
|
760 | def d(): | |
718 | state, chunks = bundler._generatechangelog(cl, nodes) |
|
761 | state, chunks = bundler._generatechangelog(cl, nodes) | |
@@ -819,6 +862,7 b' def perfmergecalculate(ui, repo, rev, **' | |||||
819 |
|
862 | |||
820 | @command(b'perfpathcopies', [], b"REV REV") |
|
863 | @command(b'perfpathcopies', [], b"REV REV") | |
821 | def perfpathcopies(ui, repo, rev1, rev2, **opts): |
|
864 | def perfpathcopies(ui, repo, rev1, rev2, **opts): | |
|
865 | """benchmark the copy tracing logic""" | |||
822 | opts = _byteskwargs(opts) |
|
866 | opts = _byteskwargs(opts) | |
823 | timer, fm = gettimer(ui, opts) |
|
867 | timer, fm = gettimer(ui, opts) | |
824 | ctx1 = scmutil.revsingle(repo, rev1, rev1) |
|
868 | ctx1 = scmutil.revsingle(repo, rev1, rev1) | |
@@ -952,18 +996,48 b' def perfchangeset(ui, repo, rev, **opts)' | |||||
952 | timer(d) |
|
996 | timer(d) | |
953 | fm.end() |
|
997 | fm.end() | |
954 |
|
998 | |||
955 | @command(b'perfindex', formatteropts) |
|
999 | @command(b'perfignore', formatteropts) |
|
1000 | def perfignore(ui, repo, **opts): | |||
|
1001 | """benchmark operation related to computing ignore""" | |||
|
1002 | opts = _byteskwargs(opts) | |||
|
1003 | timer, fm = gettimer(ui, opts) | |||
|
1004 | dirstate = repo.dirstate | |||
|
1005 | ||||
|
1006 | def setupone(): | |||
|
1007 | dirstate.invalidate() | |||
|
1008 | clearfilecache(dirstate, b'_ignore') | |||
|
1009 | ||||
|
1010 | def runone(): | |||
|
1011 | dirstate._ignore | |||
|
1012 | ||||
|
1013 | timer(runone, setup=setupone, title=b"load") | |||
|
1014 | fm.end() | |||
|
1015 | ||||
|
1016 | @command(b'perfindex', [ | |||
|
1017 | (b'', b'rev', b'', b'revision to be looked up (default tip)'), | |||
|
1018 | ] + formatteropts) | |||
956 | def perfindex(ui, repo, **opts): |
|
1019 | def perfindex(ui, repo, **opts): | |
957 | import mercurial.revlog |
|
1020 | import mercurial.revlog | |
958 | opts = _byteskwargs(opts) |
|
1021 | opts = _byteskwargs(opts) | |
959 | timer, fm = gettimer(ui, opts) |
|
1022 | timer, fm = gettimer(ui, opts) | |
960 | mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg |
|
1023 | mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg | |
961 | n = repo[b"tip"].node() |
|
1024 | if opts[b'rev'] is None: | |
962 | svfs = getsvfs(repo) |
|
1025 | n = repo[b"tip"].node() | |
|
1026 | else: | |||
|
1027 | rev = scmutil.revsingle(repo, opts[b'rev']) | |||
|
1028 | n = repo[rev].node() | |||
|
1029 | ||||
|
1030 | unfi = repo.unfiltered() | |||
|
1031 | # find the filecache func directly | |||
|
1032 | # This avoid polluting the benchmark with the filecache logic | |||
|
1033 | makecl = unfi.__class__.changelog.func | |||
|
1034 | def setup(): | |||
|
1035 | # probably not necessary, but for good measure | |||
|
1036 | clearchangelog(unfi) | |||
963 | def d(): |
|
1037 | def d(): | |
964 | cl = mercurial.revlog.revlog(svfs, b"00changelog.i") |
|
1038 | cl = makecl(unfi) | |
965 | cl.rev(n) |
|
1039 | cl.rev(n) | |
966 | timer(d) |
|
1040 | timer(d, setup=setup) | |
967 | fm.end() |
|
1041 | fm.end() | |
968 |
|
1042 | |||
969 | @command(b'perfstartup', formatteropts) |
|
1043 | @command(b'perfstartup', formatteropts) | |
@@ -1144,6 +1218,82 b' def perftemplating(ui, repo, testedtempl' | |||||
1144 | timer(format) |
|
1218 | timer(format) | |
1145 | fm.end() |
|
1219 | fm.end() | |
1146 |
|
1220 | |||
|
1221 | @command(b'perfhelper-pathcopies', formatteropts + | |||
|
1222 | [ | |||
|
1223 | (b'r', b'revs', [], b'restrict search to these revisions'), | |||
|
1224 | (b'', b'timing', False, b'provides extra data (costly)'), | |||
|
1225 | ]) | |||
|
1226 | def perfhelperpathcopies(ui, repo, revs=[], **opts): | |||
|
1227 | """find statistic about potential parameters for the `perftracecopies` | |||
|
1228 | ||||
|
1229 | This command finds source-destination pairs relevant for copytracing testing. | |||
|
1230 | It reports values for some of the parameters that impact copy tracing time. | |||
|
1231 | ||||
|
1232 | If `--timing` is set, rename detection is run and the associated timing | |||
|
1233 | will be reported. The extra details come at the cost of a slower command | |||
|
1234 | execution. | |||
|
1235 | ||||
|
1236 | Since the rename detection is only run once, other factors might easily | |||
|
1237 | affect the precision of the timing. However it should give a good | |||
|
1238 | approximation of which revision pairs are very costly. | |||
|
1239 | """ | |||
|
1240 | opts = _byteskwargs(opts) | |||
|
1241 | fm = ui.formatter(b'perf', opts) | |||
|
1242 | dotiming = opts[b'timing'] | |||
|
1243 | ||||
|
1244 | if dotiming: | |||
|
1245 | header = '%12s %12s %12s %12s %12s %12s\n' | |||
|
1246 | output = ("%(source)12s %(destination)12s " | |||
|
1247 | "%(nbrevs)12d %(nbmissingfiles)12d " | |||
|
1248 | "%(nbrenamedfiles)12d %(time)18.5f\n") | |||
|
1249 | header_names = ("source", "destination", "nb-revs", "nb-files", | |||
|
1250 | "nb-renames", "time") | |||
|
1251 | fm.plain(header % header_names) | |||
|
1252 | else: | |||
|
1253 | header = '%12s %12s %12s %12s\n' | |||
|
1254 | output = ("%(source)12s %(destination)12s " | |||
|
1255 | "%(nbrevs)12d %(nbmissingfiles)12d\n") | |||
|
1256 | fm.plain(header % ("source", "destination", "nb-revs", "nb-files")) | |||
|
1257 | ||||
|
1258 | if not revs: | |||
|
1259 | revs = ['all()'] | |||
|
1260 | revs = scmutil.revrange(repo, revs) | |||
|
1261 | ||||
|
1262 | roi = repo.revs('merge() and %ld', revs) | |||
|
1263 | for r in roi: | |||
|
1264 | ctx = repo[r] | |||
|
1265 | p1 = ctx.p1().rev() | |||
|
1266 | p2 = ctx.p2().rev() | |||
|
1267 | bases = repo.changelog._commonancestorsheads(p1, p2) | |||
|
1268 | for p in (p1, p2): | |||
|
1269 | for b in bases: | |||
|
1270 | base = repo[b] | |||
|
1271 | parent = repo[p] | |||
|
1272 | missing = copies._computeforwardmissing(base, parent) | |||
|
1273 | if not missing: | |||
|
1274 | continue | |||
|
1275 | data = { | |||
|
1276 | b'source': base.hex(), | |||
|
1277 | b'destination': parent.hex(), | |||
|
1278 | b'nbrevs': len(repo.revs('%d::%d', b, p)), | |||
|
1279 | b'nbmissingfiles': len(missing), | |||
|
1280 | } | |||
|
1281 | if dotiming: | |||
|
1282 | begin = util.timer() | |||
|
1283 | renames = copies.pathcopies(base, parent) | |||
|
1284 | end = util.timer() | |||
|
1285 | # not very stable timing since we did only one run | |||
|
1286 | data['time'] = end - begin | |||
|
1287 | data['nbrenamedfiles'] = len(renames) | |||
|
1288 | fm.startitem() | |||
|
1289 | fm.data(**data) | |||
|
1290 | out = data.copy() | |||
|
1291 | out['source'] = fm.hexfunc(base.node()) | |||
|
1292 | out['destination'] = fm.hexfunc(parent.node()) | |||
|
1293 | fm.plain(output % out) | |||
|
1294 | ||||
|
1295 | fm.end() | |||
|
1296 | ||||
1147 | @command(b'perfcca', formatteropts) |
|
1297 | @command(b'perfcca', formatteropts) | |
1148 | def perfcca(ui, repo, **opts): |
|
1298 | def perfcca(ui, repo, **opts): | |
1149 | opts = _byteskwargs(opts) |
|
1299 | opts = _byteskwargs(opts) | |
@@ -1402,7 +1552,7 b' def perfdiffwd(ui, repo, **opts):' | |||||
1402 | ui.popbuffer() |
|
1552 | ui.popbuffer() | |
1403 | diffopt = diffopt.encode('ascii') |
|
1553 | diffopt = diffopt.encode('ascii') | |
1404 | title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none') |
|
1554 | title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none') | |
1405 | timer(d, title) |
|
1555 | timer(d, title=title) | |
1406 | fm.end() |
|
1556 | fm.end() | |
1407 |
|
1557 | |||
1408 | @command(b'perfrevlogindex', revlogopts + formatteropts, |
|
1558 | @command(b'perfrevlogindex', revlogopts + formatteropts, | |
@@ -1553,7 +1703,7 b' def perfrevlogrevisions(ui, repo, file_=' | |||||
1553 | dist = opts[b'dist'] |
|
1703 | dist = opts[b'dist'] | |
1554 |
|
1704 | |||
1555 | if reverse: |
|
1705 | if reverse: | |
1556 | beginrev, endrev = endrev, beginrev |
|
1706 | beginrev, endrev = endrev - 1, beginrev - 1 | |
1557 | dist = -1 * dist |
|
1707 | dist = -1 * dist | |
1558 |
|
1708 | |||
1559 | for x in _xrange(beginrev, endrev, dist): |
|
1709 | for x in _xrange(beginrev, endrev, dist): | |
@@ -1565,6 +1715,241 b' def perfrevlogrevisions(ui, repo, file_=' | |||||
1565 | timer(d) |
|
1715 | timer(d) | |
1566 | fm.end() |
|
1716 | fm.end() | |
1567 |
|
1717 | |||
|
1718 | @command(b'perfrevlogwrite', revlogopts + formatteropts + | |||
|
1719 | [(b's', b'startrev', 1000, b'revision to start writing at'), | |||
|
1720 | (b'', b'stoprev', -1, b'last revision to write'), | |||
|
1721 | (b'', b'count', 3, b'number of passes to perform'), | |||
|
1722 | (b'', b'details', False, b'print timing for every revisions tested'), | |||
|
1723 | (b'', b'source', b'full', b'the kind of data fed into the revlog'), | |||
|
1724 | (b'', b'lazydeltabase', True, b'try the provided delta first'), | |||
|
1725 | (b'', b'clear-caches', True, b'clear revlog cache between calls'), | |||
|
1726 | ], | |||
|
1727 | b'-c|-m|FILE') | |||
|
1728 | def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts): | |||
|
1729 | """Benchmark writing a series of revisions to a revlog. | |||
|
1730 | ||||
|
1731 | Possible source values are: | |||
|
1732 | * `full`: add from a full text (default). | |||
|
1733 | * `parent-1`: add from a delta to the first parent | |||
|
1734 | * `parent-2`: add from a delta to the second parent if it exists | |||
|
1735 | (use a delta from the first parent otherwise) | |||
|
1736 | * `parent-smallest`: add from the smallest delta (either p1 or p2) | |||
|
1737 | * `storage`: add from the existing precomputed deltas | |||
|
1738 | """ | |||
|
1739 | opts = _byteskwargs(opts) | |||
|
1740 | ||||
|
1741 | rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts) | |||
|
1742 | rllen = getlen(ui)(rl) | |||
|
1743 | if startrev < 0: | |||
|
1744 | startrev = rllen + startrev | |||
|
1745 | if stoprev < 0: | |||
|
1746 | stoprev = rllen + stoprev | |||
|
1747 | ||||
|
1748 | lazydeltabase = opts['lazydeltabase'] | |||
|
1749 | source = opts['source'] | |||
|
1750 | clearcaches = opts['clear_caches'] | |||
|
1751 | validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest', | |||
|
1752 | b'storage') | |||
|
1753 | if source not in validsource: | |||
|
1754 | raise error.Abort('invalid source type: %s' % source) | |||
|
1755 | ||||
|
1756 | ### actually gather results | |||
|
1757 | count = opts['count'] | |||
|
1758 | if count <= 0: | |||
|
1759 | raise error.Abort('invalid run count: %d' % count) | |||
|
1760 | allresults = [] | |||
|
1761 | for c in range(count): | |||
|
1762 | timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1, | |||
|
1763 | lazydeltabase=lazydeltabase, | |||
|
1764 | clearcaches=clearcaches) | |||
|
1765 | allresults.append(timing) | |||
|
1766 | ||||
|
1767 | ### consolidate the results in a single list | |||
|
1768 | results = [] | |||
|
1769 | for idx, (rev, t) in enumerate(allresults[0]): | |||
|
1770 | ts = [t] | |||
|
1771 | for other in allresults[1:]: | |||
|
1772 | orev, ot = other[idx] | |||
|
1773 | assert orev == rev | |||
|
1774 | ts.append(ot) | |||
|
1775 | results.append((rev, ts)) | |||
|
1776 | resultcount = len(results) | |||
|
1777 | ||||
|
1778 | ### Compute and display relevant statistics | |||
|
1779 | ||||
|
1780 | # get a formatter | |||
|
1781 | fm = ui.formatter(b'perf', opts) | |||
|
1782 | displayall = ui.configbool(b"perf", b"all-timing", False) | |||
|
1783 | ||||
|
1784 | # print individual details if requested | |||
|
1785 | if opts['details']: | |||
|
1786 | for idx, item in enumerate(results, 1): | |||
|
1787 | rev, data = item | |||
|
1788 | title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev) | |||
|
1789 | formatone(fm, data, title=title, displayall=displayall) | |||
|
1790 | ||||
|
1791 | # sorts results by median time | |||
|
1792 | results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2]) | |||
|
1793 | # list of (name, index) to display | |||
|
1794 | relevants = [ | |||
|
1795 | ("min", 0), | |||
|
1796 | ("10%", resultcount * 10 // 100), | |||
|
1797 | ("25%", resultcount * 25 // 100), | |||
|
1798 | ("50%", resultcount * 70 // 100), | |||
|
1799 | ("75%", resultcount * 75 // 100), | |||
|
1800 | ("90%", resultcount * 90 // 100), | |||
|
1801 | ("95%", resultcount * 95 // 100), | |||
|
1802 | ("99%", resultcount * 99 // 100), | |||
|
1803 | ("99.9%", resultcount * 999 // 1000), | |||
|
1804 | ("99.99%", resultcount * 9999 // 10000), | |||
|
1805 | ("99.999%", resultcount * 99999 // 100000), | |||
|
1806 | ("max", -1), | |||
|
1807 | ] | |||
|
1808 | if not ui.quiet: | |||
|
1809 | for name, idx in relevants: | |||
|
1810 | data = results[idx] | |||
|
1811 | title = '%s of %d, rev %d' % (name, resultcount, data[0]) | |||
|
1812 | formatone(fm, data[1], title=title, displayall=displayall) | |||
|
1813 | ||||
|
1814 | # XXX summing that many float will not be very precise, we ignore this fact | |||
|
1815 | # for now | |||
|
1816 | totaltime = [] | |||
|
1817 | for item in allresults: | |||
|
1818 | totaltime.append((sum(x[1][0] for x in item), | |||
|
1819 | sum(x[1][1] for x in item), | |||
|
1820 | sum(x[1][2] for x in item),) | |||
|
1821 | ) | |||
|
1822 | formatone(fm, totaltime, title="total time (%d revs)" % resultcount, | |||
|
1823 | displayall=displayall) | |||
|
1824 | fm.end() | |||
|
1825 | ||||
|
1826 | class _faketr(object): | |||
|
1827 | def add(s, x, y, z=None): | |||
|
1828 | return None | |||
|
1829 | ||||
|
1830 | def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None, | |||
|
1831 | lazydeltabase=True, clearcaches=True): | |||
|
1832 | timings = [] | |||
|
1833 | tr = _faketr() | |||
|
1834 | with _temprevlog(ui, orig, startrev) as dest: | |||
|
1835 | dest._lazydeltabase = lazydeltabase | |||
|
1836 | revs = list(orig.revs(startrev, stoprev)) | |||
|
1837 | total = len(revs) | |||
|
1838 | topic = 'adding' | |||
|
1839 | if runidx is not None: | |||
|
1840 | topic += ' (run #%d)' % runidx | |||
|
1841 | # Support both old and new progress API | |||
|
1842 | if util.safehasattr(ui, 'makeprogress'): | |||
|
1843 | progress = ui.makeprogress(topic, unit='revs', total=total) | |||
|
1844 | def updateprogress(pos): | |||
|
1845 | progress.update(pos) | |||
|
1846 | def completeprogress(): | |||
|
1847 | progress.complete() | |||
|
1848 | else: | |||
|
1849 | def updateprogress(pos): | |||
|
1850 | ui.progress(topic, pos, unit='revs', total=total) | |||
|
1851 | def completeprogress(): | |||
|
1852 | ui.progress(topic, None, unit='revs', total=total) | |||
|
1853 | ||||
|
1854 | for idx, rev in enumerate(revs): | |||
|
1855 | updateprogress(idx) | |||
|
1856 | addargs, addkwargs = _getrevisionseed(orig, rev, tr, source) | |||
|
1857 | if clearcaches: | |||
|
1858 | dest.index.clearcaches() | |||
|
1859 | dest.clearcaches() | |||
|
1860 | with timeone() as r: | |||
|
1861 | dest.addrawrevision(*addargs, **addkwargs) | |||
|
1862 | timings.append((rev, r[0])) | |||
|
1863 | updateprogress(total) | |||
|
1864 | completeprogress() | |||
|
1865 | return timings | |||
|
1866 | ||||
|
1867 | def _getrevisionseed(orig, rev, tr, source): | |||
|
1868 | from mercurial.node import nullid | |||
|
1869 | ||||
|
1870 | linkrev = orig.linkrev(rev) | |||
|
1871 | node = orig.node(rev) | |||
|
1872 | p1, p2 = orig.parents(node) | |||
|
1873 | flags = orig.flags(rev) | |||
|
1874 | cachedelta = None | |||
|
1875 | text = None | |||
|
1876 | ||||
|
1877 | if source == b'full': | |||
|
1878 | text = orig.revision(rev) | |||
|
1879 | elif source == b'parent-1': | |||
|
1880 | baserev = orig.rev(p1) | |||
|
1881 | cachedelta = (baserev, orig.revdiff(p1, rev)) | |||
|
1882 | elif source == b'parent-2': | |||
|
1883 | parent = p2 | |||
|
1884 | if p2 == nullid: | |||
|
1885 | parent = p1 | |||
|
1886 | baserev = orig.rev(parent) | |||
|
1887 | cachedelta = (baserev, orig.revdiff(parent, rev)) | |||
|
1888 | elif source == b'parent-smallest': | |||
|
1889 | p1diff = orig.revdiff(p1, rev) | |||
|
1890 | parent = p1 | |||
|
1891 | diff = p1diff | |||
|
1892 | if p2 != nullid: | |||
|
1893 | p2diff = orig.revdiff(p2, rev) | |||
|
1894 | if len(p1diff) > len(p2diff): | |||
|
1895 | parent = p2 | |||
|
1896 | diff = p2diff | |||
|
1897 | baserev = orig.rev(parent) | |||
|
1898 | cachedelta = (baserev, diff) | |||
|
1899 | elif source == b'storage': | |||
|
1900 | baserev = orig.deltaparent(rev) | |||
|
1901 | cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev)) | |||
|
1902 | ||||
|
1903 | return ((text, tr, linkrev, p1, p2), | |||
|
1904 | {'node': node, 'flags': flags, 'cachedelta': cachedelta}) | |||
|
1905 | ||||
|
1906 | @contextlib.contextmanager | |||
|
1907 | def _temprevlog(ui, orig, truncaterev): | |||
|
1908 | from mercurial import vfs as vfsmod | |||
|
1909 | ||||
|
1910 | if orig._inline: | |||
|
1911 | raise error.Abort('not supporting inline revlog (yet)') | |||
|
1912 | ||||
|
1913 | origindexpath = orig.opener.join(orig.indexfile) | |||
|
1914 | origdatapath = orig.opener.join(orig.datafile) | |||
|
1915 | indexname = 'revlog.i' | |||
|
1916 | dataname = 'revlog.d' | |||
|
1917 | ||||
|
1918 | tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-') | |||
|
1919 | try: | |||
|
1920 | # copy the data file in a temporary directory | |||
|
1921 | ui.debug('copying data in %s\n' % tmpdir) | |||
|
1922 | destindexpath = os.path.join(tmpdir, 'revlog.i') | |||
|
1923 | destdatapath = os.path.join(tmpdir, 'revlog.d') | |||
|
1924 | shutil.copyfile(origindexpath, destindexpath) | |||
|
1925 | shutil.copyfile(origdatapath, destdatapath) | |||
|
1926 | ||||
|
1927 | # remove the data we want to add again | |||
|
1928 | ui.debug('truncating data to be rewritten\n') | |||
|
1929 | with open(destindexpath, 'ab') as index: | |||
|
1930 | index.seek(0) | |||
|
1931 | index.truncate(truncaterev * orig._io.size) | |||
|
1932 | with open(destdatapath, 'ab') as data: | |||
|
1933 | data.seek(0) | |||
|
1934 | data.truncate(orig.start(truncaterev)) | |||
|
1935 | ||||
|
1936 | # instantiate a new revlog from the temporary copy | |||
|
1937 | ui.debug('truncating adding to be rewritten\n') | |||
|
1938 | vfs = vfsmod.vfs(tmpdir) | |||
|
1939 | vfs.options = getattr(orig.opener, 'options', None) | |||
|
1940 | ||||
|
1941 | dest = revlog.revlog(vfs, | |||
|
1942 | indexfile=indexname, | |||
|
1943 | datafile=dataname) | |||
|
1944 | if dest._inline: | |||
|
1945 | raise error.Abort('not supporting inline revlog (yet)') | |||
|
1946 | # make sure internals are initialized | |||
|
1947 | dest.revision(len(dest) - 1) | |||
|
1948 | yield dest | |||
|
1949 | del dest, vfs | |||
|
1950 | finally: | |||
|
1951 | shutil.rmtree(tmpdir, True) | |||
|
1952 | ||||
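The percentile table above indexes into results sorted by median time; the selection logic in isolation (with the 50% entry computed, as intended, from 50 // 100 of the count):

    def percentiles(timings):
        data = sorted(timings)
        n = len(data)
        points = [("min", 0), ("50%", n * 50 // 100),
                  ("90%", n * 90 // 100), ("max", -1)]
        return [(name, data[idx]) for name, idx in points]

    print(percentiles([5.0, 1.0, 4.0, 2.0, 3.0]))
    # [('min', 1.0), ('50%', 3.0), ('90%', 5.0), ('max', 5.0)]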
1568 | @command(b'perfrevlogchunks', revlogopts + formatteropts + |
|
1953 | @command(b'perfrevlogchunks', revlogopts + formatteropts + | |
1569 | [(b'e', b'engines', b'', b'compression engines to use'), |
|
1954 | [(b'e', b'engines', b'', b'compression engines to use'), | |
1570 | (b's', b'startrev', 0, b'revision to start at')], |
|
1955 | (b's', b'startrev', 0, b'revision to start at')], | |
@@ -1692,10 +2077,11 b' def perfrevlogrevision(ui, repo, file_, ' | |||||
1692 | Obtaining a revlog revision consists of roughly the following steps: |
|
2077 | Obtaining a revlog revision consists of roughly the following steps: | |
1693 |
|
2078 | |||
1694 | 1. Compute the delta chain |
|
2079 | 1. Compute the delta chain | |
1695 | 2. Obtain the raw chunks for that delta chain |
|
2080 | 2. Slice the delta chain if applicable | |
1696 | 3. Decompress each raw chunk |
|
2081 | 3. Obtain the raw chunks for that delta chain | |
1697 | 4. Apply binary patches to obtain fulltext |
|
2082 | 4. Decompress each raw chunk | |
1698 | 5. Verify hash of fulltext |
|
2083 | 5. Apply binary patches to obtain fulltext | |
|
2084 | 6. Verify hash of fulltext | |||
1699 |
|
2085 | |||
1700 | This command measures the time spent in each of these phases. |
|
2086 | This command measures the time spent in each of these phases. | |
1701 | """ |
|
2087 | """ | |
@@ -1723,17 +2109,18 b' def perfrevlogrevision(ui, repo, file_, ' | |||||
1723 | inline = r._inline |
|
2109 | inline = r._inline | |
1724 | iosize = r._io.size |
|
2110 | iosize = r._io.size | |
1725 | buffer = util.buffer |
|
2111 | buffer = util.buffer | |
1726 | offset = start(chain[0]) |
|
|||
1727 |
|
2112 | |||
1728 | chunks = [] |
|
2113 | chunks = [] | |
1729 | ladd = chunks.append |
|
2114 | ladd = chunks.append | |
1730 |
|
2115 | for idx, item in enumerate(chain): | ||
1731 | for rev in chain: |
|
2116 | offset = start(item[0]) | |
1732 | chunkstart = start(rev) |
|
2117 | bits = data[idx] | |
1733 | if inline: |
|
2118 | for rev in item: |
1734 | chunkstart += (rev + 1) * iosize |
|
2119 | chunkstart = start(rev) |
1735 | chunklength = length(rev) |
|
2120 | if inline: | |
1736 | ladd(buffer(data, chunkstart - offset, chunklength)) |
|
2121 | chunkstart += (rev + 1) * iosize | |
|
2122 | chunklength = length(rev) | |||
|
2123 | ladd(buffer(bits, chunkstart - offset, chunklength)) | |||
1737 |
|
2124 | |||
1738 | return chunks |
|
2125 | return chunks | |
1739 |
|
2126 | |||
@@ -1745,7 +2132,12 b' def perfrevlogrevision(ui, repo, file_, ' | |||||
1745 | def doread(chain): |
|
2132 | def doread(chain): | |
1746 | if not cache: |
|
2133 | if not cache: | |
1747 | r.clearcaches() |
|
2134 | r.clearcaches() | |
1748 | segmentforrevs(chain[0], chain[-1]) |
|
2135 | for item in slicedchain: | |
|
2136 | segmentforrevs(item[0], item[-1]) | |||
|
2137 | ||||
|
2138 | def doslice(r, chain, size): | |||
|
2139 | for s in slicechunk(r, chain, targetsize=size): | |||
|
2140 | pass | |||
1749 |
|
2141 | |||
1750 | def dorawchunks(data, chain): |
|
2142 | def dorawchunks(data, chain): | |
1751 | if not cache: |
|
2143 | if not cache: | |
@@ -1772,9 +2164,19 b' def perfrevlogrevision(ui, repo, file_, ' | |||||
1772 | r.clearcaches() |
|
2164 | r.clearcaches() | |
1773 | r.revision(node) |
|
2165 | r.revision(node) | |
1774 |
|
2166 | |||
|
2167 | try: | |||
|
2168 | from mercurial.revlogutils.deltas import slicechunk | |||
|
2169 | except ImportError: | |||
|
2170 | slicechunk = getattr(revlog, '_slicechunk', None) | |||
|
2171 | ||||
|
2172 | size = r.length(rev) | |||
1775 | chain = r._deltachain(rev)[0] |
|
2173 | chain = r._deltachain(rev)[0] | |
1776 | data = segmentforrevs(chain[0], chain[-1])[1] |
|
2174 | if not getattr(r, '_withsparseread', False): | |
1777 | rawchunks = getrawchunks(data, chain) |
|
2175 | slicedchain = (chain,) | |
|
2176 | else: | |||
|
2177 | slicedchain = tuple(slicechunk(r, chain, targetsize=size)) | |||
|
2178 | data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain] | |||
|
2179 | rawchunks = getrawchunks(data, slicedchain) | |||
1778 | bins = r._chunks(chain) |
|
2180 | bins = r._chunks(chain) | |
1779 | text = bytes(bins[0]) |
|
2181 | text = bytes(bins[0]) | |
1780 | bins = bins[1:] |
|
2182 | bins = bins[1:] | |
@@ -1784,16 +2186,23 b' def perfrevlogrevision(ui, repo, file_, ' | |||||
1784 | (lambda: dorevision(), b'full'), |
|
2186 | (lambda: dorevision(), b'full'), | |
1785 | (lambda: dodeltachain(rev), b'deltachain'), |
|
2187 | (lambda: dodeltachain(rev), b'deltachain'), | |
1786 | (lambda: doread(chain), b'read'), |
|
2188 | (lambda: doread(chain), b'read'), | |
1787 | (lambda: dorawchunks(data, chain), b'rawchunks'), |
|
2189 | ] | |
|
2190 | ||||
|
2191 | if getattr(r, '_withsparseread', False): | |||
|
2192 | slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain') | |||
|
2193 | benches.append(slicing) | |||
|
2194 | ||||
|
2195 | benches.extend([ | |||
|
2196 | (lambda: dorawchunks(data, slicedchain), b'rawchunks'), | |||
1788 | (lambda: dodecompress(rawchunks), b'decompress'), |
|
2197 | (lambda: dodecompress(rawchunks), b'decompress'), | |
1789 | (lambda: dopatch(text, bins), b'patch'), |
|
2198 | (lambda: dopatch(text, bins), b'patch'), | |
1790 | (lambda: dohash(text), b'hash'), |
|
2199 | (lambda: dohash(text), b'hash'), | |
1791 | ] |
|
2200 | ]) | |
1792 |
|
2201 | |||
|
2202 | timer, fm = gettimer(ui, opts) | |||
1793 | for fn, title in benches: |
|
2203 | for fn, title in benches: | |
1794 | timer, fm = gettimer(ui, opts) |
|
|||
1795 | timer(fn, title=title) |
|
2204 | timer(fn, title=title) | |
1796 | fm.end() |
|
2205 | fm.end() | |
1797 |
|
2206 | |||
1798 | @command(b'perfrevset', |
|
2207 | @command(b'perfrevset', | |
1799 | [(b'C', b'clear', False, b'clear volatile cache between each call.'), |
|
2208 | [(b'C', b'clear', False, b'clear volatile cache between each call.'), | |
@@ -1929,13 +2338,120 b' def perfbranchmap(ui, repo, *filternames' | |||||
1929 | branchcachewrite.restore() |
|
2338 | branchcachewrite.restore() | |
1930 | fm.end() |
|
2339 | fm.end() | |
1931 |
|
2340 | |||
|
2341 | @command(b'perfbranchmapupdate', [ | |||
|
2342 | (b'', b'base', [], b'subset of revision to start from'), | |||
|
2343 | (b'', b'target', [], b'subset of revision to end with'), | |||
|
2344 | (b'', b'clear-caches', False, b'clear cache between each run') | |||
|
2345 | ] + formatteropts) | |||
|
2346 | def perfbranchmapupdate(ui, repo, base=(), target=(), **opts): | |||
|
2347 | """benchmark branchmap update from for <base> revs to <target> revs | |||
|
2348 | ||||
|
2349 | If `--clear-caches` is passed, the following items will be reset before | |||
|
2350 | each update: | |||
|
2351 | * the changelog instance and associated indexes | |||
|
2352 | * the rev-branch-cache instance | |||
|
2353 | ||||
|
2354 | Examples: | |||
|
2355 | ||||
|
2356 | # update for the last revision | |||
|
2357 | $ hg perfbranchmapupdate --base 'not tip' --target 'tip' | |||
|
2358 | ||||
|
2359 | # update for a change coming with a new branch | |||
|
2360 | $ hg perfbranchmapupdate --base 'stable' --target 'default' | |||
|
2361 | """ | |||
|
2362 | from mercurial import branchmap | |||
|
2363 | from mercurial import repoview | |||
|
2364 | opts = _byteskwargs(opts) | |||
|
2365 | timer, fm = gettimer(ui, opts) | |||
|
2366 | clearcaches = opts[b'clear_caches'] | |||
|
2367 | unfi = repo.unfiltered() | |||
|
2368 | x = [None] # used to pass data between closure | |||
|
2369 | ||||
|
2370 | # we use a `list` here to avoid possible side effect from smartset | |||
|
2371 | baserevs = list(scmutil.revrange(repo, base)) | |||
|
2372 | targetrevs = list(scmutil.revrange(repo, target)) | |||
|
2373 | if not baserevs: | |||
|
2374 | raise error.Abort(b'no revisions selected for --base') | |||
|
2375 | if not targetrevs: | |||
|
2376 | raise error.Abort(b'no revisions selected for --target') | |||
|
2377 | ||||
|
2378 | # make sure the target branchmap also contains the one in the base | |||
|
2379 | targetrevs = list(set(baserevs) | set(targetrevs)) | |||
|
2380 | targetrevs.sort() | |||
|
2381 | ||||
|
2382 | cl = repo.changelog | |||
|
2383 | allbaserevs = list(cl.ancestors(baserevs, inclusive=True)) | |||
|
2384 | allbaserevs.sort() | |||
|
2385 | alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True)) | |||
|
2386 | ||||
|
2387 | newrevs = list(alltargetrevs.difference(allbaserevs)) | |||
|
2388 | newrevs.sort() | |||
|
2389 | ||||
|
2390 | allrevs = frozenset(unfi.changelog.revs()) | |||
|
2391 | basefilterrevs = frozenset(allrevs.difference(allbaserevs)) | |||
|
2392 | targetfilterrevs = frozenset(allrevs.difference(alltargetrevs)) | |||
|
2393 | ||||
|
2394 | def basefilter(repo, visibilityexceptions=None): | |||
|
2395 | return basefilterrevs | |||
|
2396 | ||||
|
2397 | def targetfilter(repo, visibilityexceptions=None): | |||
|
2398 | return targetfilterrevs | |||
|
2399 | ||||
|
2400 | msg = b'benchmark of branchmap with %d revisions with %d new ones\n' | |||
|
2401 | ui.status(msg % (len(allbaserevs), len(newrevs))) | |||
|
2402 | if targetfilterrevs: | |||
|
2403 | msg = b'(%d revisions still filtered)\n' | |||
|
2404 | ui.status(msg % len(targetfilterrevs)) | |||
|
2405 | ||||
|
2406 | try: | |||
|
2407 | repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter | |||
|
2408 | repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter | |||
|
2409 | ||||
|
2410 | baserepo = repo.filtered(b'__perf_branchmap_update_base') | |||
|
2411 | targetrepo = repo.filtered(b'__perf_branchmap_update_target') | |||
|
2412 | ||||
|
2413 | # try to find an existing branchmap to reuse | |||
|
2414 | subsettable = getbranchmapsubsettable() | |||
|
2415 | candidatefilter = subsettable.get(None) | |||
|
2416 | while candidatefilter is not None: | |||
|
2417 | candidatebm = repo.filtered(candidatefilter).branchmap() | |||
|
2418 | if candidatebm.validfor(baserepo): | |||
|
2419 | filtered = repoview.filterrevs(repo, candidatefilter) | |||
|
2420 | missing = [r for r in allbaserevs if r in filtered] | |||
|
2421 | base = candidatebm.copy() | |||
|
2422 | base.update(baserepo, missing) | |||
|
2423 | break | |||
|
2424 | candidatefilter = subsettable.get(candidatefilter) | |||
|
2425 | else: | |||
|
2426 | # no suitable subset was found | |||
|
2427 | base = branchmap.branchcache() | |||
|
2428 | base.update(baserepo, allbaserevs) | |||
|
2429 | ||||
|
2430 | def setup(): | |||
|
2431 | x[0] = base.copy() | |||
|
2432 | if clearcaches: | |||
|
2433 | unfi._revbranchcache = None | |||
|
2434 | clearchangelog(repo) | |||
|
2435 | ||||
|
2436 | def bench(): | |||
|
2437 | x[0].update(targetrepo, newrevs) | |||
|
2438 | ||||
|
2439 | timer(bench, setup=setup) | |||
|
2440 | fm.end() | |||
|
2441 | finally: | |||
|
2442 | repoview.filtertable.pop(b'__perf_branchmap_update_base', None) | |||
|
2443 | repoview.filtertable.pop(b'__perf_branchmap_update_target', None) | |||
|
2444 | ||||
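
The try/finally above isolates a reusable trick: a throwaway repoview filter is registered under a reserved name so that repo.filtered() can expose an arbitrary revision subset, and the registration is always undone. A minimal sketch of the pattern, assuming only the repoview.filtertable mapping used above (the context-manager wrapper and its name are illustrative, not part of the patch):

    import contextlib
    from mercurial import repoview

    @contextlib.contextmanager
    def temporaryfilter(repo, name, hiddenrevs):
        # repoview filter functions return the set of revisions to *hide*
        frozen = frozenset(hiddenrevs)
        repoview.filtertable[name] = (
            lambda repo, visibilityexceptions=None: frozen)
        try:
            yield repo.filtered(name)
        finally:
            # unregister unconditionally, mirroring the finally: block above
            repoview.filtertable.pop(name, None)
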
1932 | @command(b'perfbranchmapload', [ |
|
2445 | @command(b'perfbranchmapload', [ | |
1933 | (b'f', b'filter', b'', b'Specify repoview filter'), |
|
2446 | (b'f', b'filter', b'', b'Specify repoview filter'), | |
1934 | (b'', b'list', False, b'List branchmap filter caches'), |
|
2447 | (b'', b'list', False, b'List branchmap filter caches'), |
|
2448 | (b'', b'clear-revlogs', False, b'refresh changelog and manifest'), | |||
|
2449 | ||||
1935 | ] + formatteropts) |
|
2450 | ] + formatteropts) | |
1936 | def perfbranchmapread(ui, repo, filter=b'', list=False, **opts): |
|
2451 | def perfbranchmapload(ui, repo, filter=b'', list=False, **opts): | |
1937 | """benchmark reading the branchmap""" |
|
2452 | """benchmark reading the branchmap""" | |
1938 | opts = _byteskwargs(opts) |
|
2453 | opts = _byteskwargs(opts) | |
|
2454 | clearrevlogs = opts[b'clear_revlogs'] | |||
1939 |
|
2455 | |||
1940 | if list: |
|
2456 | if list: | |
1941 | for name, kind, st in repo.cachevfs.readdir(stat=True): |
|
2457 | for name, kind, st in repo.cachevfs.readdir(stat=True): | |
@@ -1944,16 +2460,31 b' def perfbranchmapread(ui, repo, filter=b' | |||||
1944 | ui.status(b'%s - %s\n' |
|
2460 | ui.status(b'%s - %s\n' | |
1945 | % (filtername, util.bytecount(st.st_size))) |
|
2461 | % (filtername, util.bytecount(st.st_size))) | |
1946 | return |
|
2462 | return | |
1947 | if filter: |
|
2463 | if not filter: | |
|
2464 | filter = None | |||
|
2465 | subsettable = getbranchmapsubsettable() | |||
|
2466 | if filter is None: | |||
|
2467 | repo = repo.unfiltered() | |||
|
2468 | else: | |||
1948 | repo = repoview.repoview(repo, filter) |
|
2469 | repo = repoview.repoview(repo, filter) | |
1949 | else: |
|
2470 | ||
1950 | repo = repo.unfiltered() |
|
2471 | repo.branchmap() # make sure we have a relevant, up to date branchmap | |
|
2472 | ||||
|
2473 | currentfilter = filter | |||
1951 | # try once without timer, the filter may not be cached |
|
2474 | # try once without timer, the filter may not be cached | |
1952 | if branchmap.read(repo) is None: |
|
2475 | while branchmap.read(repo) is None: | |
1953 | raise error.Abort(b'No brachmap cached for %s repo' |
|
2476 | currentfilter = subsettable.get(currentfilter) | |
1954 | % (filter or b'unfiltered')) |
|
2477 | if currentfilter is None: | |
|
2478 | raise error.Abort(b'No branchmap cached for %s repo' | |||
|
2479 | % (filter or b'unfiltered')) | |||
|
2480 | repo = repo.filtered(currentfilter) | |||
1955 | timer, fm = gettimer(ui, opts) |
|
2481 | timer, fm = gettimer(ui, opts) | |
1956 | timer(lambda: branchmap.read(repo) and None) |
|
2482 | def setup(): | |
|
2483 | if clearrevlogs: | |||
|
2484 | clearchangelog(repo) | |||
|
2485 | def bench(): | |||
|
2486 | branchmap.read(repo) | |||
|
2487 | timer(bench, setup=setup) | |||
1957 | fm.end() |
|
2488 | fm.end() | |
1958 |
|
2489 | |||
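
Both perfbranchmapupdate and perfbranchmapload walk getbranchmapsubsettable() the same way: start from the unfiltered key (None) and follow the chain of progressively smaller subsets until a cached branchmap that can serve as a starting point turns up. A sketch of that lookup, where the isusable predicate is a stand-in for whichever validity check the caller applies (validfor() above, or a successful branchmap.read()):

    def findnearestbranchmap(repo, subsettable, isusable):
        # subsettable chains filter names from None (unfiltered) downward
        filtername = subsettable.get(None)
        while filtername is not None:
            candidate = repo.filtered(filtername).branchmap()
            if isusable(candidate):
                return filtername, candidate
            filtername = subsettable.get(filtername)
        return None, None  # nothing cached; caller rebuilds from scratch
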
1959 | @command(b'perfloadmarkers') |
|
2490 | @command(b'perfloadmarkers') | |
@@ -2124,3 +2655,21 b' def uisetup(ui):' | |||||
2124 | hint=b"use 3.5 or later") |
|
2655 | hint=b"use 3.5 or later") | |
2125 | return orig(repo, cmd, file_, opts) |
|
2656 | return orig(repo, cmd, file_, opts) | |
2126 | extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog) |
|
2657 | extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog) | |
|
2658 | ||||
|
2659 | @command(b'perfprogress', formatteropts + [ | |||
|
2660 | (b'', b'topic', b'topic', b'topic for progress messages'), | |||
|
2661 | (b'c', b'total', 1000000, b'total value we are progressing to'), | |||
|
2662 | ], norepo=True) | |||
|
2663 | def perfprogress(ui, topic=None, total=None, **opts): | |||
|
2664 | """printing of progress bars""" | |||
|
2665 | opts = _byteskwargs(opts) | |||
|
2666 | ||||
|
2667 | timer, fm = gettimer(ui, opts) | |||
|
2668 | ||||
|
2669 | def doprogress(): | |||
|
2670 | with ui.makeprogress(topic, total=total) as progress: | |||
|
2671 | for i in pycompat.xrange(total): | |||
|
2672 | progress.increment() | |||
|
2673 | ||||
|
2674 | timer(doprogress) | |||
|
2675 | fm.end() |
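
perfprogress exercises exactly the progress API that several hunks in this series migrate to: ui.makeprogress() replaces bare ui.progress(topic, pos, total=...) calls and takes care of clearing the bar itself. A sketch of the two usual styles, using only the methods visible in this patch (increment, update, complete):

    def process(ui, items):
        # context-manager style: completion is implicit on exit
        with ui.makeprogress(b'processing', total=len(items)) as progress:
            for item in items:
                progress.increment()

    def process_explicit(ui, items):
        # explicit style, as in debugbuildannotatecache further down
        progress = ui.makeprogress(b'processing', total=len(items))
        for i, item in enumerate(items):
            progress.update(i)
        progress.complete()
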
@@ -1,4 +1,5 b'' | |||||
1 | test-abort-checkin.t |
|
1 | test-abort-checkin.t | |
|
2 | test-absorb-edit-lines.t | |||
2 | test-absorb-filefixupstate.py |
|
3 | test-absorb-filefixupstate.py | |
3 | test-absorb-phase.t |
|
4 | test-absorb-phase.t | |
4 | test-absorb-rename.t |
|
5 | test-absorb-rename.t | |
@@ -30,6 +31,7 b' test-bisect.t' | |||||
30 | test-bisect2.t |
|
31 | test-bisect2.t | |
31 | test-bisect3.t |
|
32 | test-bisect3.t | |
32 | test-blackbox.t |
|
33 | test-blackbox.t | |
|
34 | test-bookflow.t | |||
33 | test-bookmarks-current.t |
|
35 | test-bookmarks-current.t | |
34 | test-bookmarks-merge.t |
|
36 | test-bookmarks-merge.t | |
35 | test-bookmarks-pushpull.t |
|
37 | test-bookmarks-pushpull.t | |
@@ -62,6 +64,7 b' test-check-commit.t' | |||||
62 | test-check-config.py |
|
64 | test-check-config.py | |
63 | test-check-config.t |
|
65 | test-check-config.t | |
64 | test-check-execute.t |
|
66 | test-check-execute.t | |
|
67 | test-check-help.t | |||
65 | test-check-interfaces.py |
|
68 | test-check-interfaces.py | |
66 | test-check-module-imports.t |
|
69 | test-check-module-imports.t | |
67 | test-check-py3-compat.t |
|
70 | test-check-py3-compat.t | |
@@ -116,6 +119,7 b' test-convert-tagsbranch-topology.t' | |||||
116 | test-copy-move-merge.t |
|
119 | test-copy-move-merge.t | |
117 | test-copy.t |
|
120 | test-copy.t | |
118 | test-copytrace-heuristics.t |
|
121 | test-copytrace-heuristics.t | |
|
122 | test-custom-filters.t | |||
119 | test-debugbuilddag.t |
|
123 | test-debugbuilddag.t | |
120 | test-debugbundle.t |
|
124 | test-debugbundle.t | |
121 | test-debugcommands.t |
|
125 | test-debugcommands.t | |
@@ -193,9 +197,18 b' test-execute-bit.t' | |||||
193 | test-export.t |
|
197 | test-export.t | |
194 | test-extdata.t |
|
198 | test-extdata.t | |
195 | test-extdiff.t |
|
199 | test-extdiff.t | |
|
200 | test-extension-timing.t | |||
196 | test-extensions-afterloaded.t |
|
201 | test-extensions-afterloaded.t | |
197 | test-extensions-wrapfunction.py |
|
202 | test-extensions-wrapfunction.py | |
198 | test-extra-filelog-entry.t |
|
203 | test-extra-filelog-entry.t | |
|
204 | test-fastannotate-corrupt.t | |||
|
205 | test-fastannotate-diffopts.t | |||
|
206 | test-fastannotate-hg.t | |||
|
207 | test-fastannotate-perfhack.t | |||
|
208 | test-fastannotate-protocol.t | |||
|
209 | test-fastannotate-renames.t | |||
|
210 | test-fastannotate-revmap.py | |||
|
211 | test-fastannotate.t | |||
199 | test-fetch.t |
|
212 | test-fetch.t | |
200 | test-filebranch.t |
|
213 | test-filebranch.t | |
201 | test-filecache.py |
|
214 | test-filecache.py | |
@@ -206,6 +219,19 b' test-fix-topology.t' | |||||
206 | test-fix.t |
|
219 | test-fix.t | |
207 | test-flags.t |
|
220 | test-flags.t | |
208 | test-fncache.t |
|
221 | test-fncache.t | |
|
222 | test-gendoc-da.t | |||
|
223 | test-gendoc-de.t | |||
|
224 | test-gendoc-el.t | |||
|
225 | test-gendoc-fr.t | |||
|
226 | test-gendoc-it.t | |||
|
227 | test-gendoc-ja.t | |||
|
228 | test-gendoc-pt_BR.t | |||
|
229 | test-gendoc-ro.t | |||
|
230 | test-gendoc-ru.t | |||
|
231 | test-gendoc-sv.t | |||
|
232 | test-gendoc-zh_CN.t | |||
|
233 | test-gendoc-zh_TW.t | |||
|
234 | test-gendoc.t | |||
209 | test-generaldelta.t |
|
235 | test-generaldelta.t | |
210 | test-getbundle.t |
|
236 | test-getbundle.t | |
211 | test-git-export.t |
|
237 | test-git-export.t | |
@@ -217,6 +243,7 b' test-gpg.t' | |||||
217 | test-graft.t |
|
243 | test-graft.t | |
218 | test-grep.t |
|
244 | test-grep.t | |
219 | test-hardlinks.t |
|
245 | test-hardlinks.t | |
|
246 | test-help-hide.t | |||
220 | test-help.t |
|
247 | test-help.t | |
221 | test-hg-parseurl.py |
|
248 | test-hg-parseurl.py | |
222 | test-hghave.t |
|
249 | test-hghave.t | |
@@ -261,6 +288,7 b' test-i18n.t' | |||||
261 | test-identify.t |
|
288 | test-identify.t | |
262 | test-impexp-branch.t |
|
289 | test-impexp-branch.t | |
263 | test-import-bypass.t |
|
290 | test-import-bypass.t | |
|
291 | test-import-context.t | |||
264 | test-import-eol.t |
|
292 | test-import-eol.t | |
265 | test-import-merge.t |
|
293 | test-import-merge.t | |
266 | test-import-unknown.t |
|
294 | test-import-unknown.t | |
@@ -301,16 +329,22 b' test-largefiles-cache.t' | |||||
301 | test-largefiles-misc.t |
|
329 | test-largefiles-misc.t | |
302 | test-largefiles-small-disk.t |
|
330 | test-largefiles-small-disk.t | |
303 | test-largefiles-update.t |
|
331 | test-largefiles-update.t | |
|
332 | test-largefiles-wireproto.t | |||
304 | test-largefiles.t |
|
333 | test-largefiles.t | |
|
334 | test-lfconvert.t | |||
|
335 | test-lfs-bundle.t | |||
305 | test-lfs-largefiles.t |
|
336 | test-lfs-largefiles.t | |
306 | test-lfs-pointer.py |
|
337 | test-lfs-pointer.py | |
|
338 | test-lfs.t | |||
307 | test-linelog.py |
|
339 | test-linelog.py | |
308 | test-linerange.py |
|
340 | test-linerange.py | |
309 | test-locate.t |
|
341 | test-locate.t | |
310 | test-lock-badness.t |
|
342 | test-lock-badness.t | |
|
343 | test-log-exthook.t | |||
311 | test-log-linerange.t |
|
344 | test-log-linerange.t | |
312 | test-log.t |
|
345 | test-log.t | |
313 | test-logexchange.t |
|
346 | test-logexchange.t | |
|
347 | test-logtoprocess.t | |||
314 | test-lrucachedict.py |
|
348 | test-lrucachedict.py | |
315 | test-mactext.t |
|
349 | test-mactext.t | |
316 | test-mailmap.t |
|
350 | test-mailmap.t | |
@@ -394,6 +428,8 b' test-narrow-pull.t' | |||||
394 | test-narrow-rebase.t |
|
428 | test-narrow-rebase.t | |
395 | test-narrow-shallow-merges.t |
|
429 | test-narrow-shallow-merges.t | |
396 | test-narrow-shallow.t |
|
430 | test-narrow-shallow.t | |
|
431 | test-narrow-share.t | |||
|
432 | test-narrow-sparse.t | |||
397 | test-narrow-strip.t |
|
433 | test-narrow-strip.t | |
398 | test-narrow-trackedcmd.t |
|
434 | test-narrow-trackedcmd.t | |
399 | test-narrow-update.t |
|
435 | test-narrow-update.t | |
@@ -474,6 +510,7 b' test-push-checkheads-unpushed-D5.t' | |||||
474 | test-push-checkheads-unpushed-D6.t |
|
510 | test-push-checkheads-unpushed-D6.t | |
475 | test-push-checkheads-unpushed-D7.t |
|
511 | test-push-checkheads-unpushed-D7.t | |
476 | test-push-http.t |
|
512 | test-push-http.t | |
|
513 | test-push-race.t | |||
477 | test-push-warn.t |
|
514 | test-push-warn.t | |
478 | test-push.t |
|
515 | test-push.t | |
479 | test-pushvars.t |
|
516 | test-pushvars.t | |
@@ -512,6 +549,28 b' test-releasenotes-formatting.t' | |||||
512 | test-releasenotes-merging.t |
|
549 | test-releasenotes-merging.t | |
513 | test-releasenotes-parsing.t |
|
550 | test-releasenotes-parsing.t | |
514 | test-relink.t |
|
551 | test-relink.t | |
|
552 | test-remotefilelog-bad-configs.t | |||
|
553 | test-remotefilelog-bgprefetch.t | |||
|
554 | test-remotefilelog-blame.t | |||
|
555 | test-remotefilelog-bundle2.t | |||
|
556 | test-remotefilelog-bundles.t | |||
|
557 | test-remotefilelog-cacheprocess.t | |||
|
558 | test-remotefilelog-clone-tree.t | |||
|
559 | test-remotefilelog-clone.t | |||
|
560 | test-remotefilelog-gcrepack.t | |||
|
561 | test-remotefilelog-http.t | |||
|
562 | test-remotefilelog-keepset.t | |||
|
563 | test-remotefilelog-local.t | |||
|
564 | test-remotefilelog-log.t | |||
|
565 | test-remotefilelog-partial-shallow.t | |||
|
566 | test-remotefilelog-permissions.t | |||
|
567 | test-remotefilelog-permisssions.t | |||
|
568 | test-remotefilelog-prefetch.t | |||
|
569 | test-remotefilelog-pull-noshallow.t | |||
|
570 | test-remotefilelog-share.t | |||
|
571 | test-remotefilelog-sparse.t | |||
|
572 | test-remotefilelog-tags.t | |||
|
573 | test-remotefilelog-wireproto.t | |||
515 | test-remove.t |
|
574 | test-remove.t | |
516 | test-removeemptydirs.t |
|
575 | test-removeemptydirs.t | |
517 | test-rename-after-merge.t |
|
576 | test-rename-after-merge.t | |
@@ -541,11 +600,13 b' test-revset-outgoing.t' | |||||
541 | test-rollback.t |
|
600 | test-rollback.t | |
542 | test-run-tests.py |
|
601 | test-run-tests.py | |
543 | test-run-tests.t |
|
602 | test-run-tests.t | |
|
603 | test-rust-ancestor.py | |||
544 | test-schemes.t |
|
604 | test-schemes.t | |
545 | test-serve.t |
|
605 | test-serve.t | |
546 | test-setdiscovery.t |
|
606 | test-setdiscovery.t | |
547 | test-share.t |
|
607 | test-share.t | |
548 | test-shelve.t |
|
608 | test-shelve.t | |
|
609 | test-shelve2.t | |||
549 | test-show-stack.t |
|
610 | test-show-stack.t | |
550 | test-show-work.t |
|
611 | test-show-work.t | |
551 | test-show.t |
|
612 | test-show.t |
@@ -56,9 +56,11 b' def hg(cmd, repo=None):' | |||||
56 | def perf(revset, target=None, contexts=False): |
|
56 | def perf(revset, target=None, contexts=False): | |
57 | """run benchmark for this very revset""" |
|
57 | """run benchmark for this very revset""" | |
58 | try: |
|
58 | try: | |
59 | args = ['perfrevset', revset] |
|
59 | args = ['perfrevset'] | |
60 | if contexts: |
|
60 | if contexts: | |
61 | args.append('--contexts') |
|
61 | args.append('--contexts') | |
|
62 | args.append('--') | |||
|
63 | args.append(revset) | |||
62 | output = hg(args, repo=target) |
|
64 | output = hg(args, repo=target) | |
63 | return parseoutput(output) |
|
65 | return parseoutput(output) | |
64 | except subprocess.CalledProcessError as exc: |
|
66 | except subprocess.CalledProcessError as exc: |
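
The two appended lines fix argument parsing rather than add a feature: a revset such as '-10:' begins with a dash, so without a '--' terminator hg would try to read it as an option. The resulting invocation, with a hypothetical revset for illustration:

    args = ['perfrevset', '--contexts', '--', '-10:']
    # equivalent shell command:
    #   hg perfrevset --contexts -- '-10:'
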
@@ -47,6 +47,7 b'' | |||||
47 | <File Id="internals.censor.txt" Name="censor.txt" /> |
|
47 | <File Id="internals.censor.txt" Name="censor.txt" /> | |
48 | <File Id="internals.changegroups.txt" Name="changegroups.txt" /> |
|
48 | <File Id="internals.changegroups.txt" Name="changegroups.txt" /> | |
49 | <File Id="internals.config.txt" Name="config.txt" /> |
|
49 | <File Id="internals.config.txt" Name="config.txt" /> | |
|
50 | <File Id="internals.extensions.txt" Name="extensions.txt" /> | |||
50 | <File Id="internals.linelog.txt" Name="linelog.txt" /> |
|
51 | <File Id="internals.linelog.txt" Name="linelog.txt" /> | |
51 | <File Id="internals.requirements.txt" Name="requirements.txt" /> |
|
52 | <File Id="internals.requirements.txt" Name="requirements.txt" /> | |
52 | <File Id="internals.revlogs.txt" Name="revlogs.txt" /> |
|
53 | <File Id="internals.revlogs.txt" Name="revlogs.txt" /> |
@@ -9,18 +9,28 b'' | |||||
9 |
|
9 | |||
10 | from __future__ import absolute_import, print_function |
|
10 | from __future__ import absolute_import, print_function | |
11 |
|
11 | |||
|
12 | import os | |||
12 | import re |
|
13 | import re | |
13 | import sys |
|
14 | import sys | |
14 |
|
15 | |||
15 | leadingline = re.compile(r'(^\s*)(\S.*)$') |
|
16 | try: | |
|
17 | import msvcrt | |||
|
18 | msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) | |||
|
19 | msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY) | |||
|
20 | except ImportError: | |||
|
21 | pass | |||
|
22 | ||||
|
23 | stdout = getattr(sys.stdout, 'buffer', sys.stdout) | |||
|
24 | ||||
|
25 | leadingline = re.compile(br'(^\s*)(\S.*)$') | |||
16 |
|
26 | |||
17 | checks = [ |
|
27 | checks = [ | |
18 | (r""":hg:`[^`]*'[^`]*`""", |
|
28 | (br""":hg:`[^`]*'[^`]*`""", | |
19 | """warning: please avoid nesting ' in :hg:`...`"""), |
|
29 | b"""warning: please avoid nesting ' in :hg:`...`"""), | |
20 | (r'\w:hg:`', |
|
30 | (br'\w:hg:`', | |
21 | 'warning: please have a space before :hg:'), |
|
31 | b'warning: please have a space before :hg:'), | |
22 | (r"""(?:[^a-z][^'.])hg ([^,;"`]*'(?!hg)){2}""", |
|
32 | (br"""(?:[^a-z][^'.])hg ([^,;"`]*'(?!hg)){2}""", | |
23 | '''warning: please use " instead of ' for hg ... "..."'''), |
|
33 | b'''warning: please use " instead of ' for hg ... "..."'''), | |
24 | ] |
|
34 | ] | |
25 |
|
35 | |||
26 | def check(line): |
|
36 | def check(line): | |
@@ -29,25 +39,25 b' def check(line):' | |||||
29 | if re.search(match, line): |
|
39 | if re.search(match, line): | |
30 | messages.append(msg) |
|
40 | messages.append(msg) | |
31 | if messages: |
|
41 | if messages: | |
32 | print(line) |
|
42 | stdout.write(b'%s\n' % line) | |
33 | for msg in messages: |
|
43 | for msg in messages: | |
34 | print(msg) |
|
44 | stdout.write(b'%s\n' % msg) | |
35 |
|
45 | |||
36 | def work(file): |
|
46 | def work(file): | |
37 | (llead, lline) = ('', '') |
|
47 | (llead, lline) = (b'', b'') | |
38 |
|
48 | |||
39 | for line in file: |
|
49 | for line in file: | |
40 | # this section unwraps lines |
|
50 | # this section unwraps lines | |
41 | match = leadingline.match(line) |
|
51 | match = leadingline.match(line) | |
42 | if not match: |
|
52 | if not match: | |
43 | check(lline) |
|
53 | check(lline) | |
44 | (llead, lline) = ('', '') |
|
54 | (llead, lline) = (b'', b'') | |
45 | continue |
|
55 | continue | |
46 |
|
56 | |||
47 | lead, line = match.group(1), match.group(2) |
|
57 | lead, line = match.group(1), match.group(2) | |
48 | if (lead == llead): |
|
58 | if (lead == llead): | |
49 | if (lline != ''): |
|
59 | if (lline != b''): | |
50 | lline += ' ' + line |
|
60 | lline += b' ' + line | |
51 | else: |
|
61 | else: | |
52 | lline = line |
|
62 | lline = line | |
53 | else: |
|
63 | else: | |
@@ -58,9 +68,9 b' def work(file):' | |||||
58 | def main(): |
|
68 | def main(): | |
59 | for f in sys.argv[1:]: |
|
69 | for f in sys.argv[1:]: | |
60 | try: |
|
70 | try: | |
61 | with open(f) as file: |
|
71 | with open(f, 'rb') as file: | |
62 | work(file) |
|
72 | work(file) | |
63 | except BaseException as e: |
|
73 | except BaseException as e: | |
64 | print("failed to process %s: %s" % (f, e)) |
|
74 | sys.stdout.write(r"failed to process %s: %s\n" % (f, e)) | |
65 |
|
75 | |||
66 | main() |
|
76 | main() |
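
The preamble added to docchecker (and to gendoc below) is the usual byte-clean output recipe: on Windows, switch the standard streams to O_BINARY so b'\n' is not rewritten to b'\r\n', and on Python 3 write through sys.stdout.buffer because the text layer rejects bytes. Reduced to its essentials:

    import os
    import sys

    try:
        import msvcrt  # Windows only; raises ImportError elsewhere
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    except ImportError:
        pass

    stdout = getattr(sys.stdout, 'buffer', sys.stdout)  # py3 vs py2
    stdout.write(b'bytes all the way down\n')
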
@@ -10,11 +10,18 b' import os' | |||||
10 | import sys |
|
10 | import sys | |
11 | import textwrap |
|
11 | import textwrap | |
12 |
|
12 | |||
|
13 | try: | |||
|
14 | import msvcrt | |||
|
15 | msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) | |||
|
16 | msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY) | |||
|
17 | except ImportError: | |||
|
18 | pass | |||
|
19 | ||||
13 | # This script is executed during installs and may not have C extensions |
|
20 | # This script is executed during installs and may not have C extensions | |
14 | # available. Relax C module requirements. |
|
21 | # available. Relax C module requirements. | |
15 | os.environ['HGMODULEPOLICY'] = 'allow' |
|
22 | os.environ[r'HGMODULEPOLICY'] = r'allow' | |
16 | # import from the live mercurial repo |
|
23 | # import from the live mercurial repo | |
17 | sys.path.insert(0, "..") |
|
24 | sys.path.insert(0, r"..") | |
18 | from mercurial import demandimport; demandimport.enable() |
|
25 | from mercurial import demandimport; demandimport.enable() | |
19 | # Load util so that the locale path is set by i18n.setdatapath() before |
|
26 | # Load util so that the locale path is set by i18n.setdatapath() before | |
20 | # calling _(). |
|
27 | # calling _(). | |
@@ -22,9 +29,11 b' from mercurial import util' | |||||
22 | util.datapath |
|
29 | util.datapath | |
23 | from mercurial import ( |
|
30 | from mercurial import ( | |
24 | commands, |
|
31 | commands, | |
|
32 | encoding, | |||
25 | extensions, |
|
33 | extensions, | |
26 | help, |
|
34 | help, | |
27 | minirst, |
|
35 | minirst, | |
|
36 | pycompat, | |||
28 | ui as uimod, |
|
37 | ui as uimod, | |
29 | ) |
|
38 | ) | |
30 | from mercurial.i18n import ( |
|
39 | from mercurial.i18n import ( | |
@@ -39,19 +48,19 b' loaddoc = help.loaddoc' | |||||
39 |
|
48 | |||
40 | def get_desc(docstr): |
|
49 | def get_desc(docstr): | |
41 | if not docstr: |
|
50 | if not docstr: | |
42 | return "", "" |
|
51 | return b"", b"" | |
43 | # sanitize |
|
52 | # sanitize | |
44 | docstr = docstr.strip("\n") |
|
53 | docstr = docstr.strip(b"\n") | |
45 | docstr = docstr.rstrip() |
|
54 | docstr = docstr.rstrip() | |
46 | shortdesc = docstr.splitlines()[0].strip() |
|
55 | shortdesc = docstr.splitlines()[0].strip() | |
47 |
|
56 | |||
48 | i = docstr.find("\n") |
|
57 | i = docstr.find(b"\n") | |
49 | if i != -1: |
|
58 | if i != -1: | |
50 | desc = docstr[i + 2:] |
|
59 | desc = docstr[i + 2:] | |
51 | else: |
|
60 | else: | |
52 | desc = shortdesc |
|
61 | desc = shortdesc | |
53 |
|
62 | |||
54 | desc = textwrap.dedent(desc) |
|
63 | desc = textwrap.dedent(desc.decode('latin1')).encode('latin1') | |
55 |
|
64 | |||
56 | return (shortdesc, desc) |
|
65 | return (shortdesc, desc) | |
57 |
|
66 | |||
@@ -61,91 +70,93 b' def get_opts(opts):' | |||||
61 | shortopt, longopt, default, desc, optlabel = opt |
|
70 | shortopt, longopt, default, desc, optlabel = opt | |
62 | else: |
|
71 | else: | |
63 | shortopt, longopt, default, desc = opt |
|
72 | shortopt, longopt, default, desc = opt | |
64 | optlabel = _("VALUE") |
|
73 | optlabel = _(b"VALUE") | |
65 | allopts = [] |
|
74 | allopts = [] | |
66 | if shortopt: |
|
75 | if shortopt: | |
67 | allopts.append("-%s" % shortopt) |
|
76 | allopts.append(b"-%s" % shortopt) | |
68 | if longopt: |
|
77 | if longopt: | |
69 | allopts.append("--%s" % longopt) |
|
78 | allopts.append(b"--%s" % longopt) | |
70 | if isinstance(default, list): |
|
79 | if isinstance(default, list): | |
71 | allopts[-1] += " <%s[+]>" % optlabel |
|
80 | allopts[-1] += b" <%s[+]>" % optlabel | |
72 | elif (default is not None) and not isinstance(default, bool): |
|
81 | elif (default is not None) and not isinstance(default, bool): | |
73 | allopts[-1] += " <%s>" % optlabel |
|
82 | allopts[-1] += b" <%s>" % optlabel | |
74 | if '\n' in desc: |
|
83 | if b'\n' in desc: | |
75 | # only remove line breaks and indentation |
|
84 | # only remove line breaks and indentation | |
76 | desc = ' '.join(l.lstrip() for l in desc.split('\n')) |
|
85 | desc = b' '.join(l.lstrip() for l in desc.split(b'\n')) | |
77 | desc += default and _(" (default: %s)") % default or "" |
|
86 | desc += default and _(b" (default: %s)") % bytes(default) or b"" | |
78 | yield (", ".join(allopts), desc) |
|
87 | yield (b", ".join(allopts), desc) | |
79 |
|
88 | |||
80 | def get_cmd(cmd, cmdtable): |
|
89 | def get_cmd(cmd, cmdtable): | |
81 | d = {} |
|
90 | d = {} | |
82 | attr = cmdtable[cmd] |
|
91 | attr = cmdtable[cmd] | |
83 | cmds = cmd.lstrip("^").split("|") |
|
92 | cmds = cmd.lstrip(b"^").split(b"|") | |
84 |
|
93 | |||
85 | d['cmd'] = cmds[0] |
|
94 | d[b'cmd'] = cmds[0] | |
86 | d['aliases'] = cmd.split("|")[1:] |
|
95 | d[b'aliases'] = cmd.split(b"|")[1:] | |
87 | d['desc'] = get_desc(gettext(attr[0].__doc__)) |
|
96 | d[b'desc'] = get_desc(gettext(pycompat.getdoc(attr[0]))) | |
88 | d['opts'] = list(get_opts(attr[1])) |
|
97 | d[b'opts'] = list(get_opts(attr[1])) | |
89 |
|
98 | |||
90 | s = 'hg ' + cmds[0] |
|
99 | s = b'hg ' + cmds[0] | |
91 | if len(attr) > 2: |
|
100 | if len(attr) > 2: | |
92 | if not attr[2].startswith('hg'): |
|
101 | if not attr[2].startswith(b'hg'): | |
93 | s += ' ' + attr[2] |
|
102 | s += b' ' + attr[2] | |
94 | else: |
|
103 | else: | |
95 | s = attr[2] |
|
104 | s = attr[2] | |
96 | d['synopsis'] = s.strip() |
|
105 | d[b'synopsis'] = s.strip() | |
97 |
|
106 | |||
98 | return d |
|
107 | return d | |
99 |
|
108 | |||
100 | def showdoc(ui): |
|
109 | def showdoc(ui): | |
101 | # print options |
|
110 | # print options | |
102 | ui.write(minirst.section(_("Options"))) |
|
111 | ui.write(minirst.section(_(b"Options"))) | |
103 | multioccur = False |
|
112 | multioccur = False | |
104 | for optstr, desc in get_opts(globalopts): |
|
113 | for optstr, desc in get_opts(globalopts): | |
105 | ui.write("%s\n %s\n\n" % (optstr, desc)) |
|
114 | ui.write(b"%s\n %s\n\n" % (optstr, desc)) | |
106 | if optstr.endswith("[+]>"): |
|
115 | if optstr.endswith(b"[+]>"): | |
107 | multioccur = True |
|
116 | multioccur = True | |
108 | if multioccur: |
|
117 | if multioccur: | |
109 | ui.write(_("\n[+] marked option can be specified multiple times\n")) |
|
118 | ui.write(_(b"\n[+] marked option can be specified multiple times\n")) | |
110 | ui.write("\n") |
|
119 | ui.write(b"\n") | |
111 |
|
120 | |||
112 | # print cmds |
|
121 | # print cmds | |
113 | ui.write(minirst.section(_("Commands"))) |
|
122 | ui.write(minirst.section(_(b"Commands"))) | |
114 | commandprinter(ui, table, minirst.subsection) |
|
123 | commandprinter(ui, table, minirst.subsection) | |
115 |
|
124 | |||
116 | # print help topics |
|
125 | # print help topics | |
117 | # The config help topic is included in the hgrc.5 man page. |
|
126 | # The config help topic is included in the hgrc.5 man page. | |
118 | helpprinter(ui, helptable, minirst.section, exclude=['config']) |
|
127 | helpprinter(ui, helptable, minirst.section, exclude=[b'config']) | |
119 |
|
128 | |||
120 | ui.write(minirst.section(_("Extensions"))) |
|
129 | ui.write(minirst.section(_(b"Extensions"))) | |
121 | ui.write(_("This section contains help for extensions that are " |
|
130 | ui.write(_(b"This section contains help for extensions that are " | |
122 | "distributed together with Mercurial. Help for other " |
|
131 | b"distributed together with Mercurial. Help for other " | |
123 | "extensions is available in the help system.")) |
|
132 | b"extensions is available in the help system.")) | |
124 | ui.write(("\n\n" |
|
133 | ui.write((b"\n\n" | |
125 | ".. contents::\n" |
|
134 | b".. contents::\n" | |
126 | " :class: htmlonly\n" |
|
135 | b" :class: htmlonly\n" | |
127 | " :local:\n" |
|
136 | b" :local:\n" | |
128 | " :depth: 1\n\n")) |
|
137 | b" :depth: 1\n\n")) | |
129 |
|
138 | |||
130 | for extensionname in sorted(allextensionnames()): |
|
139 | for extensionname in sorted(allextensionnames()): | |
131 | mod = extensions.load(ui, extensionname, None) |
|
140 | mod = extensions.load(ui, extensionname, None) | |
132 | ui.write(minirst.subsection(extensionname)) |
|
141 | ui.write(minirst.subsection(extensionname)) | |
133 | ui.write("%s\n\n" % gettext(mod.__doc__)) |
|
142 | ui.write(b"%s\n\n" % gettext(pycompat.getdoc(mod))) | |
134 | cmdtable = getattr(mod, 'cmdtable', None) |
|
143 | cmdtable = getattr(mod, 'cmdtable', None) | |
135 | if cmdtable: |
|
144 | if cmdtable: | |
136 | ui.write(minirst.subsubsection(_('Commands'))) |
|
145 | ui.write(minirst.subsubsection(_(b'Commands'))) | |
137 | commandprinter(ui, cmdtable, minirst.subsubsubsection) |
|
146 | commandprinter(ui, cmdtable, minirst.subsubsubsection) | |
138 |
|
147 | |||
139 | def showtopic(ui, topic): |
|
148 | def showtopic(ui, topic): | |
140 | extrahelptable = [ |
|
149 | extrahelptable = [ | |
141 | (["common"], '', loaddoc('common'), help.TOPIC_CATEGORY_MISC), |
|
150 | ([b"common"], b'', loaddoc(b'common'), help.TOPIC_CATEGORY_MISC), | |
142 | (["hg.1"], '', loaddoc('hg.1'), help.TOPIC_CATEGORY_CONFIG), |
|
151 | ([b"hg.1"], b'', loaddoc(b'hg.1'), help.TOPIC_CATEGORY_CONFIG), | |
143 | (["hg-ssh.8"], '', loaddoc('hg-ssh.8'), help.TOPIC_CATEGORY_CONFIG), |
|
152 | ([b"hg-ssh.8"], b'', loaddoc(b'hg-ssh.8'), help.TOPIC_CATEGORY_CONFIG), | |
144 | (["hgignore.5"], '', loaddoc('hgignore.5'), help.TOPIC_CATEGORY_CONFIG), |
|
153 | ([b"hgignore.5"], b'', loaddoc(b'hgignore.5'), | |
145 | (["hgrc.5"], '', loaddoc('hgrc.5'), help.TOPIC_CATEGORY_CONFIG), |
|
|||
146 | (["hgignore.5.gendoc"], '', loaddoc('hgignore'), |
|
|||
147 | help.TOPIC_CATEGORY_CONFIG), |
|
154 | help.TOPIC_CATEGORY_CONFIG), | |
148 | (["hgrc.5.gendoc"], '', loaddoc('config'), help.TOPIC_CATEGORY_CONFIG), |
|
155 | ([b"hgrc.5"], b'', loaddoc(b'hgrc.5'), help.TOPIC_CATEGORY_CONFIG), | |
|
156 | ([b"hgignore.5.gendoc"], b'', loaddoc(b'hgignore'), | |||
|
157 | help.TOPIC_CATEGORY_CONFIG), | |||
|
158 | ([b"hgrc.5.gendoc"], b'', loaddoc(b'config'), | |||
|
159 | help.TOPIC_CATEGORY_CONFIG), | |||
149 | ] |
|
160 | ] | |
150 | helpprinter(ui, helptable + extrahelptable, None, include=[topic]) |
|
161 | helpprinter(ui, helptable + extrahelptable, None, include=[topic]) | |
151 |
|
162 | |||
@@ -157,74 +168,73 b' def helpprinter(ui, helptable, sectionfu' | |||||
157 | if include and names[0] not in include: |
|
168 | if include and names[0] not in include: | |
158 | continue |
|
169 | continue | |
159 | for name in names: |
|
170 | for name in names: | |
160 | ui.write(".. _%s:\n" % name) |
|
171 | ui.write(b".. _%s:\n" % name) | |
161 | ui.write("\n") |
|
172 | ui.write(b"\n") | |
162 | if sectionfunc: |
|
173 | if sectionfunc: | |
163 | ui.write(sectionfunc(sec)) |
|
174 | ui.write(sectionfunc(sec)) | |
164 | if callable(doc): |
|
175 | if callable(doc): | |
165 | doc = doc(ui) |
|
176 | doc = doc(ui) | |
166 | ui.write(doc) |
|
177 | ui.write(doc) | |
167 | ui.write("\n") |
|
178 | ui.write(b"\n") | |
168 |
|
179 | |||
169 | def commandprinter(ui, cmdtable, sectionfunc): |
|
180 | def commandprinter(ui, cmdtable, sectionfunc): | |
170 | h = {} |
|
181 | h = {} | |
171 | for c, attr in cmdtable.items(): |
|
182 | for c, attr in cmdtable.items(): | |
172 | f = c.split("|")[0] |
|
183 | f = c.split(b"|")[0] | |
173 | f = f.lstrip("^") |
|
184 | f = f.lstrip(b"^") | |
174 | h[f] = c |
|
185 | h[f] = c | |
175 | cmds = h.keys() |
|
186 | cmds = h.keys() | |
176 | cmds.sort() |
|
|||
177 |
|
187 | |||
178 | for f in cmds: |
|
188 | for f in sorted(cmds): | |
179 | if f.startswith("debug"): |
|
189 | if f.startswith(b"debug"): | |
180 | continue |
|
190 | continue | |
181 | d = get_cmd(h[f], cmdtable) |
|
191 | d = get_cmd(h[f], cmdtable) | |
182 | ui.write(sectionfunc(d['cmd'])) |
|
192 | ui.write(sectionfunc(d[b'cmd'])) | |
183 | # short description |
|
193 | # short description | |
184 | ui.write(d['desc'][0]) |
|
194 | ui.write(d[b'desc'][0]) | |
185 | # synopsis |
|
195 | # synopsis | |
186 | ui.write("::\n\n") |
|
196 | ui.write(b"::\n\n") | |
187 | synopsislines = d['synopsis'].splitlines() |
|
197 | synopsislines = d[b'synopsis'].splitlines() | |
188 | for line in synopsislines: |
|
198 | for line in synopsislines: | |
189 | # some commands (such as rebase) have a multi-line |
|
199 | # some commands (such as rebase) have a multi-line | |
190 | # synopsis |
|
200 | # synopsis | |
191 | ui.write(" %s\n" % line) |
|
201 | ui.write(b" %s\n" % line) | |
192 | ui.write('\n') |
|
202 | ui.write(b'\n') | |
193 | # description |
|
203 | # description | |
194 | ui.write("%s\n\n" % d['desc'][1]) |
|
204 | ui.write(b"%s\n\n" % d[b'desc'][1]) | |
195 | # options |
|
205 | # options | |
196 | opt_output = list(d['opts']) |
|
206 | opt_output = list(d[b'opts']) | |
197 | if opt_output: |
|
207 | if opt_output: | |
198 | opts_len = max([len(line[0]) for line in opt_output]) |
|
208 | opts_len = max([len(line[0]) for line in opt_output]) | |
199 | ui.write(_("Options:\n\n")) |
|
209 | ui.write(_(b"Options:\n\n")) | |
200 | multioccur = False |
|
210 | multioccur = False | |
201 | for optstr, desc in opt_output: |
|
211 | for optstr, desc in opt_output: | |
202 | if desc: |
|
212 | if desc: | |
203 | s = "%-*s %s" % (opts_len, optstr, desc) |
|
213 | s = b"%-*s %s" % (opts_len, optstr, desc) | |
204 | else: |
|
214 | else: | |
205 | s = optstr |
|
215 | s = optstr | |
206 | ui.write("%s\n" % s) |
|
216 | ui.write(b"%s\n" % s) | |
207 | if optstr.endswith("[+]>"): |
|
217 | if optstr.endswith(b"[+]>"): | |
208 | multioccur = True |
|
218 | multioccur = True | |
209 | if multioccur: |
|
219 | if multioccur: | |
210 | ui.write(_("\n[+] marked option can be specified" |
|
220 | ui.write(_(b"\n[+] marked option can be specified" | |
211 | " multiple times\n")) |
|
221 | b" multiple times\n")) | |
212 | ui.write("\n") |
|
222 | ui.write(b"\n") | |
213 | # aliases |
|
223 | # aliases | |
214 | if d['aliases']: |
|
224 | if d[b'aliases']: | |
215 | ui.write(_(" aliases: %s\n\n") % " ".join(d['aliases'])) |
|
225 | ui.write(_(b" aliases: %s\n\n") % b" ".join(d[b'aliases'])) | |
216 |
|
226 | |||
217 |
|
227 | |||
218 | def allextensionnames(): |
|
228 | def allextensionnames(): | |
219 |
return extensions.enabled().keys() |
|
229 | return set(extensions.enabled().keys()) | set(extensions.disabled().keys()) | |
220 |
|
230 | |||
221 | if __name__ == "__main__": |
|
231 | if __name__ == "__main__": | |
222 | doc = 'hg.1.gendoc' |
|
232 | doc = b'hg.1.gendoc' | |
223 | if len(sys.argv) > 1: |
|
233 | if len(sys.argv) > 1: | |
224 | doc = sys.argv[1] |
|
234 | doc = encoding.strtolocal(sys.argv[1]) | |
225 |
|
235 | |||
226 | ui = uimod.ui.load() |
|
236 | ui = uimod.ui.load() | |
227 | if doc == 'hg.1.gendoc': |
|
237 | if doc == b'hg.1.gendoc': | |
228 | showdoc(ui) |
|
238 | showdoc(ui) | |
229 | else: |
|
239 | else: | |
230 | showtopic(ui, sys.argv[1]) |
|
240 | showtopic(ui, encoding.strtolocal(sys.argv[1])) |
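
One conversion in this file deserves a note: textwrap.dedent() only accepts str on Python 3, while gendoc keeps help text as bytes. latin1 maps every byte 0-255 to the code point of the same value, so decoding, dedenting, and re-encoding is lossless for arbitrary input. A self-contained check of that reasoning:

    import textwrap

    def dedent_bytes(data):
        # latin1 is byte-transparent: every byte round-trips unchanged
        return textwrap.dedent(data.decode('latin1')).encode('latin1')

    assert dedent_bytes(b'  a\n  b\n') == b'a\nb\n'
    assert dedent_bytes(b'  caf\xe9\n') == b'caf\xe9\n'  # non-ASCII byte survives
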
@@ -489,7 +489,8 b' class filefixupstate(object):' | |||||
489 | if l[colonpos - 1:colonpos + 2] != ' : ': |
|
489 | if l[colonpos - 1:colonpos + 2] != ' : ': | |
490 | raise error.Abort(_('malformed line: %s') % l) |
|
490 | raise error.Abort(_('malformed line: %s') % l) | |
491 | linecontent = l[colonpos + 2:] |
|
491 | linecontent = l[colonpos + 2:] | |
492 | for i, ch in enumerate(l[leftpadpos:colonpos - 1]): |
|
492 | for i, ch in enumerate( | |
|
493 | pycompat.bytestr(l[leftpadpos:colonpos - 1])): | |||
493 | if ch == 'y': |
|
494 | if ch == 'y': | |
494 | contents[visiblefctxs[i][0]] += linecontent |
|
495 | contents[visiblefctxs[i][0]] += linecontent | |
495 | # chunkstats is hard to calculate if anything changes, therefore |
|
496 | # chunkstats is hard to calculate if anything changes, therefore | |
@@ -971,9 +972,10 b' def absorb(ui, repo, stack=None, targetc' | |||||
971 | label='absorb.description') |
|
972 | label='absorb.description') | |
972 | fm.end() |
|
973 | fm.end() | |
973 | if not opts.get('dry_run'): |
|
974 | if not opts.get('dry_run'): | |
974 | if not opts.get('apply_changes'): |
|
975 | if (not opts.get('apply_changes') and | |
975 | if ui.promptchoice("apply changes (yn)? $$ &Yes $$ &No", default=1): |
|
976 | state.ctxaffected and | |
976 | raise error.Abort(_('absorb cancelled\n')) |
|
977 | ui.promptchoice("apply changes (yn)? $$ &Yes $$ &No", default=1)): | |
|
978 | raise error.Abort(_('absorb cancelled\n')) | |||
977 |
|
979 | |||
978 | state.apply() |
|
980 | state.apply() | |
979 | if state.commit(): |
|
981 | if state.commit(): |
@@ -36,6 +36,8 b' command = registrar.command(cmdtable)' | |||||
36 | ('e', 'edit', None, _('invoke editor on commit messages')), |
|
36 | ('e', 'edit', None, _('invoke editor on commit messages')), | |
37 | ('i', 'interactive', None, _('use interactive mode')), |
|
37 | ('i', 'interactive', None, _('use interactive mode')), | |
38 | ('n', 'note', '', _('store a note on the amend')), |
|
38 | ('n', 'note', '', _('store a note on the amend')), | |
|
39 | ('D', 'currentdate', None, | |||
|
40 | _('record the current date as commit date')), | |||
39 | ] + cmdutil.walkopts + cmdutil.commitopts + cmdutil.commitopts2, |
|
41 | ] + cmdutil.walkopts + cmdutil.commitopts + cmdutil.commitopts2, | |
40 | _('[OPTION]... [FILE]...'), |
|
42 | _('[OPTION]... [FILE]...'), | |
41 | helpcategory=command.CATEGORY_COMMITTING, |
|
43 | helpcategory=command.CATEGORY_COMMITTING, |
@@ -31,8 +31,6 b" testedwith = 'ships-with-hg-core'" | |||||
31 | def prettyedge(before, edge, after): |
|
31 | def prettyedge(before, edge, after): | |
32 | if edge == '~': |
|
32 | if edge == '~': | |
33 | return '\xE2\x95\xA7' # U+2567 ╧ |
|
33 | return '\xE2\x95\xA7' # U+2567 ╧ | |
34 | if edge == 'X': |
|
|||
35 | return '\xE2\x95\xB3' # U+2573 ╳ |
|
|||
36 | if edge == '/': |
|
34 | if edge == '/': | |
37 | return '\xE2\x95\xB1' # U+2571 ╱ |
|
35 | return '\xE2\x95\xB1' # U+2571 ╱ | |
38 | if edge == '-': |
|
36 | if edge == '-': |
@@ -33,11 +33,15 b' Examples::' | |||||
33 | # rotate up to N log files when the current one gets too big |
|
33 | # rotate up to N log files when the current one gets too big | |
34 | maxfiles = 3 |
|
34 | maxfiles = 3 | |
35 |
|
35 | |||
|
36 | [blackbox] | |||
|
37 | # Include nanoseconds in log entries with %f (see Python function | |||
|
38 | # datetime.datetime.strftime) | |||
|
39 | date-format = '%Y-%m-%d @ %H:%M:%S.%f' | |||
|
40 | ||||
36 | """ |
|
41 | """ | |
37 |
|
42 | |||
38 | from __future__ import absolute_import |
|
43 | from __future__ import absolute_import | |
39 |
|
44 | |||
40 | import errno |
|
|||
41 | import re |
|
45 | import re | |
42 |
|
46 | |||
43 | from mercurial.i18n import _ |
|
47 | from mercurial.i18n import _ | |
@@ -45,10 +49,8 b' from mercurial.node import hex' | |||||
45 |
|
49 | |||
46 | from mercurial import ( |
|
50 | from mercurial import ( | |
47 | encoding, |
|
51 | encoding, | |
48 | pycompat, |
|
52 | loggingutil, | |
49 | registrar, |
|
53 | registrar, | |
50 | ui as uimod, |
|
|||
51 | util, |
|
|||
52 | ) |
|
54 | ) | |
53 | from mercurial.utils import ( |
|
55 | from mercurial.utils import ( | |
54 | dateutil, |
|
56 | dateutil, | |
@@ -82,131 +84,69 b" configitem('blackbox', 'maxfiles'," | |||||
82 | configitem('blackbox', 'track', |
|
84 | configitem('blackbox', 'track', | |
83 | default=lambda: ['*'], |
|
85 | default=lambda: ['*'], | |
84 | ) |
|
86 | ) | |
|
87 | configitem('blackbox', 'date-format', | |||
|
88 | default='%Y/%m/%d %H:%M:%S', | |||
|
89 | ) | |||
85 |
|
90 | |||
86 | lastui = None |
|
91 | _lastlogger = loggingutil.proxylogger() | |
87 |
|
92 | |||
88 | def _openlogfile(ui, vfs): |
|
93 | class blackboxlogger(object): | |
89 | def rotate(oldpath, newpath): |
|
94 | def __init__(self, ui, repo): | |
90 | try: |
|
95 | self._repo = repo | |
91 | vfs.unlink(newpath) |
|
96 | self._trackedevents = set(ui.configlist('blackbox', 'track')) | |
92 | except OSError as err: |
|
97 | self._maxfiles = ui.configint('blackbox', 'maxfiles') | |
93 | if err.errno != errno.ENOENT: |
|
98 | self._maxsize = ui.configbytes('blackbox', 'maxsize') | |
94 | ui.debug("warning: cannot remove '%s': %s\n" % |
|
99 | self._inlog = False | |
95 | (newpath, err.strerror)) |
|
|||
96 | try: |
|
|||
97 | if newpath: |
|
|||
98 | vfs.rename(oldpath, newpath) |
|
|||
99 | except OSError as err: |
|
|||
100 | if err.errno != errno.ENOENT: |
|
|||
101 | ui.debug("warning: cannot rename '%s' to '%s': %s\n" % |
|
|||
102 | (newpath, oldpath, err.strerror)) |
|
|||
103 |
|
100 | |||
104 | maxsize = ui.configbytes('blackbox', 'maxsize') |
|
101 | def tracked(self, event): | |
105 | name = 'blackbox.log' |
|
102 | return b'*' in self._trackedevents or event in self._trackedevents | |
106 | if maxsize > 0: |
|
103 | ||
|
104 | def log(self, ui, event, msg, opts): | |||
|
105 | # self._log() -> ctx.dirty() may create new subrepo instance, which | |||
|
106 | # ui is derived from baseui. So the recursion guard in ui.log() | |||
|
107 | # doesn't work as it's local to the ui instance. | |||
|
108 | if self._inlog: | |||
|
109 | return | |||
|
110 | self._inlog = True | |||
107 | try: |
|
111 | try: | |
108 | st = vfs.stat(name) |
|
112 | self._log(ui, event, msg, opts) | |
109 | except OSError: |
|
113 | finally: | |
110 | pass |
|
114 | self._inlog = False | |
111 | else: |
|
|||
112 | if st.st_size >= maxsize: |
|
|||
113 | path = vfs.join(name) |
|
|||
114 | maxfiles = ui.configint('blackbox', 'maxfiles') |
|
|||
115 | for i in pycompat.xrange(maxfiles - 1, 1, -1): |
|
|||
116 | rotate(oldpath='%s.%d' % (path, i - 1), |
|
|||
117 | newpath='%s.%d' % (path, i)) |
|
|||
118 | rotate(oldpath=path, |
|
|||
119 | newpath=maxfiles > 0 and path + '.1') |
|
|||
120 | return vfs(name, 'a') |
|
|||
121 |
|
||||
122 | def wrapui(ui): |
|
|||
123 | class blackboxui(ui.__class__): |
|
|||
124 | @property |
|
|||
125 | def _bbvfs(self): |
|
|||
126 | vfs = None |
|
|||
127 | repo = getattr(self, '_bbrepo', None) |
|
|||
128 | if repo: |
|
|||
129 | vfs = repo.vfs |
|
|||
130 | if not vfs.isdir('.'): |
|
|||
131 | vfs = None |
|
|||
132 | return vfs |
|
|||
133 |
|
||||
134 | @util.propertycache |
|
|||
135 | def track(self): |
|
|||
136 | return self.configlist('blackbox', 'track') |
|
|||
137 |
|
||||
138 | def debug(self, *msg, **opts): |
|
|||
139 | super(blackboxui, self).debug(*msg, **opts) |
|
|||
140 | if self.debugflag: |
|
|||
141 | self.log('debug', '%s', ''.join(msg)) |
|
|||
142 |
|
||||
143 | def log(self, event, *msg, **opts): |
|
|||
144 | global lastui |
|
|||
145 | super(blackboxui, self).log(event, *msg, **opts) |
|
|||
146 |
|
115 | |||
147 | if not '*' in self.track and not event in self.track: |
|
116 | def _log(self, ui, event, msg, opts): | |
148 | return |
|
117 | default = ui.configdate('devel', 'default-date') | |
149 |
|
118 | date = dateutil.datestr(default, ui.config('blackbox', 'date-format')) | ||
150 | if self._bbvfs: |
|
119 | user = procutil.getuser() | |
151 | ui = self |
|
120 | pid = '%d' % procutil.getpid() | |
152 | else: |
|
121 | rev = '(unknown)' | |
153 | # certain ui instances exist outside the context of |
|
122 | changed = '' | |
154 | # a repo, so just default to the last blackbox that |
|
123 | ctx = self._repo[None] | |
155 | # was seen. |
|
124 | parents = ctx.parents() | |
156 | ui = lastui |
|
125 | rev = ('+'.join([hex(p.node()) for p in parents])) | |
157 |
|
126 | if (ui.configbool('blackbox', 'dirty') and | ||
158 | if not ui: |
|
127 | ctx.dirty(missing=True, merge=False, branch=False)): | |
159 | return |
|
128 | changed = '+' | |
160 | vfs = ui._bbvfs |
|
129 | if ui.configbool('blackbox', 'logsource'): | |
161 | if not vfs: |
|
130 | src = ' [%s]' % event | |
162 | return |
|
131 | else: | |
|
132 | src = '' | |||
|
133 | try: | |||
|
134 | fmt = '%s %s @%s%s (%s)%s> %s' | |||
|
135 | args = (date, user, rev, changed, pid, src, msg) | |||
|
136 | with loggingutil.openlogfile( | |||
|
137 | ui, self._repo.vfs, name='blackbox.log', | |||
|
138 | maxfiles=self._maxfiles, maxsize=self._maxsize) as fp: | |||
|
139 | fp.write(fmt % args) | |||
|
140 | except (IOError, OSError) as err: | |||
|
141 | # deactivate this to avoid failed logging again | |||
|
142 | self._trackedevents.clear() | |||
|
143 | ui.debug('warning: cannot write to blackbox.log: %s\n' % | |||
|
144 | encoding.strtolocal(err.strerror)) | |||
|
145 | return | |||
|
146 | _lastlogger.logger = self | |||
163 |
|
147 | |||
164 | repo = getattr(ui, '_bbrepo', None) |
|
148 | def uipopulate(ui): | |
165 | if not lastui or repo: |
|
149 | ui.setlogger(b'blackbox', _lastlogger) | |
166 | lastui = ui |
|
|||
167 | if getattr(ui, '_bbinlog', False): |
|
|||
168 | # recursion and failure guard |
|
|||
169 | return |
|
|||
170 | ui._bbinlog = True |
|
|||
171 | default = self.configdate('devel', 'default-date') |
|
|||
172 | date = dateutil.datestr(default, '%Y/%m/%d %H:%M:%S') |
|
|||
173 | user = procutil.getuser() |
|
|||
174 | pid = '%d' % procutil.getpid() |
|
|||
175 | formattedmsg = msg[0] % msg[1:] |
|
|||
176 | rev = '(unknown)' |
|
|||
177 | changed = '' |
|
|||
178 | if repo: |
|
|||
179 | ctx = repo[None] |
|
|||
180 | parents = ctx.parents() |
|
|||
181 | rev = ('+'.join([hex(p.node()) for p in parents])) |
|
|||
182 | if (ui.configbool('blackbox', 'dirty') and |
|
|||
183 | ctx.dirty(missing=True, merge=False, branch=False)): |
|
|||
184 | changed = '+' |
|
|||
185 | if ui.configbool('blackbox', 'logsource'): |
|
|||
186 | src = ' [%s]' % event |
|
|||
187 | else: |
|
|||
188 | src = '' |
|
|||
189 | try: |
|
|||
190 | fmt = '%s %s @%s%s (%s)%s> %s' |
|
|||
191 | args = (date, user, rev, changed, pid, src, formattedmsg) |
|
|||
192 | with _openlogfile(ui, vfs) as fp: |
|
|||
193 | fp.write(fmt % args) |
|
|||
194 | except (IOError, OSError) as err: |
|
|||
195 | self.debug('warning: cannot write to blackbox.log: %s\n' % |
|
|||
196 | encoding.strtolocal(err.strerror)) |
|
|||
197 | # do not restore _bbinlog intentionally to avoid failed |
|
|||
198 | # logging again |
|
|||
199 | else: |
|
|||
200 | ui._bbinlog = False |
|
|||
201 |
|
||||
202 | def setrepo(self, repo): |
|
|||
203 | self._bbrepo = repo |
|
|||
204 |
|
||||
205 | ui.__class__ = blackboxui |
|
|||
206 | uimod.ui = blackboxui |
|
|||
207 |
|
||||
208 | def uisetup(ui): |
|
|||
209 | wrapui(ui) |
|
|||
210 |
|
150 | |||
211 | def reposetup(ui, repo): |
|
151 | def reposetup(ui, repo): | |
212 | # During 'hg pull' a httppeer repo is created to represent the remote repo. |
|
152 | # During 'hg pull' a httppeer repo is created to represent the remote repo. | |
@@ -215,14 +155,15 b' def reposetup(ui, repo):' | |||||
215 | if not repo.local(): |
|
155 | if not repo.local(): | |
216 | return |
|
156 | return | |
217 |
|
157 | |||
218 | if util.safehasattr(ui, 'setrepo'): |
|
158 | # Since blackbox.log is stored in the repo directory, the logger should be | |
219 | ui.setrepo(repo) |
|
159 | # instantiated per repository. | |
|
160 | logger = blackboxlogger(ui, repo) | |||
|
161 | ui.setlogger(b'blackbox', logger) | |||
220 |
|
162 | |||
221 | # Set lastui even if ui.log is not called. This gives blackbox a |
|
163 | # Set _lastlogger even if ui.log is not called. This gives blackbox a | |
222 | # fallback place to log |
|
164 | # fallback place to log | |
223 | global lastui |
|
165 | if _lastlogger.logger is None: | |
224 | if lastui is None: |
|
166 | _lastlogger.logger = logger | |
225 | lastui = ui |
|
|||
226 |
|
167 | |||
227 | repo._wlockfreeprefix.add('blackbox.log') |
|
168 | repo._wlockfreeprefix.add('blackbox.log') | |
228 |
|
169 |
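
The refactor replaces subclass surgery on ui with the small protocol ui.setlogger() expects: an object providing tracked(event) and log(ui, event, msg, opts), the two methods blackboxlogger defines above. A toy logger showing the shape of that contract (the class and registration name are made up for illustration):

    class stderrlogger(object):
        def tracked(self, event):
            return True  # claim interest in every event

        def log(self, ui, event, msg, opts):
            ui.warn(b'[%s] %s' % (event, msg))

    # registered the same way reposetup() wires up blackboxlogger:
    #   ui.setlogger(b'stderr-demo', stderrlogger())
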
@@ -270,6 +270,9 b' class filemap_source(common.converter_so' | |||||
270 | self.children[p] = self.children.get(p, 0) + 1 |
|
270 | self.children[p] = self.children.get(p, 0) + 1 | |
271 | return c |
|
271 | return c | |
272 |
|
272 | |||
|
273 | def numcommits(self): | |||
|
274 | return self.base.numcommits() | |||
|
275 | ||||
273 | def _cachedcommit(self, rev): |
|
276 | def _cachedcommit(self, rev): | |
274 | if rev in self.commits: |
|
277 | if rev in self.commits: | |
275 | return self.commits[rev] |
|
278 | return self.commits[rev] | |
@@ -302,7 +305,18 b' class filemap_source(common.converter_so' | |||||
302 | for f in files: |
|
305 | for f in files: | |
303 | if self.filemapper(f): |
|
306 | if self.filemapper(f): | |
304 | return True |
|
307 | return True | |
305 | return False |
|
308 | ||
|
309 | # The include directive is documented to include nothing else (though | |||
|
310 | # valid branch closes are included). | |||
|
311 | if self.filemapper.include: | |||
|
312 | return False | |||
|
313 | ||||
|
314 | # Allow empty commits in the source revision through. The getchanges() | |||
|
315 | # method doesn't even bother calling this if it determines that the | |||
|
316 | # close marker is significant (i.e. all of the branch ancestors weren't | |||
|
317 | # eliminated). Therefore if there *is* a close marker, getchanges() | |||
|
318 | # doesn't consider it significant, and this revision should be dropped. | |||
|
319 | return not files and 'close' not in self.commits[rev].extra | |||
306 |
|
320 | |||
307 | def mark_not_wanted(self, rev, p): |
|
321 | def mark_not_wanted(self, rev, p): | |
308 | # Mark rev as not interesting and update data structures. |
|
322 | # Mark rev as not interesting and update data structures. |
@@ -597,6 +597,9 b' class mercurial_source(common.converter_' | |||||
597 | saverev=self.saverev, |
|
597 | saverev=self.saverev, | |
598 | phase=ctx.phase()) |
|
598 | phase=ctx.phase()) | |
599 |
|
599 | |||
|
600 | def numcommits(self): | |||
|
601 | return len(self.repo) | |||
|
602 | ||||
600 | def gettags(self): |
|
603 | def gettags(self): | |
601 | # This will get written to .hgtags, filter non global tags out. |
|
604 | # This will get written to .hgtags, filter non global tags out. | |
602 | tags = [t for t in self.repo.tagslist() |
|
605 | tags = [t for t in self.repo.tagslist() |
@@ -139,7 +139,7 b' def snapshot(ui, repo, files, node, tmpr' | |||||
139 | repo.ui.setconfig("ui", "archivemeta", False) |
|
139 | repo.ui.setconfig("ui", "archivemeta", False) | |
140 |
|
140 | |||
141 | archival.archive(repo, base, node, 'files', |
|
141 | archival.archive(repo, base, node, 'files', | |
142 | matchfn=scmutil.matchfiles(repo, files), |
|
142 | match=scmutil.matchfiles(repo, files), | |
143 | subrepos=listsubrepos) |
|
143 | subrepos=listsubrepos) | |
144 |
|
144 | |||
145 | for fn in sorted(files): |
|
145 | for fn in sorted(files): | |
@@ -152,6 +152,29 b' def snapshot(ui, repo, files, node, tmpr' | |||||
152 | fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest))) |
|
152 | fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest))) | |
153 | return dirname, fnsandstat |
|
153 | return dirname, fnsandstat | |
154 |
|
154 | |||
|
155 | def formatcmdline(cmdline, repo_root, do3way, | |||
|
156 | parent1, plabel1, parent2, plabel2, child, clabel): | |||
|
157 | # Function to quote file/dir names in the argument string. | |||
|
158 | # When not operating in 3-way mode, an empty string is | |||
|
159 | # returned for parent2 | |||
|
160 | replace = {'parent': parent1, 'parent1': parent1, 'parent2': parent2, | |||
|
161 | 'plabel1': plabel1, 'plabel2': plabel2, | |||
|
162 | 'child': child, 'clabel': clabel, | |||
|
163 | 'root': repo_root} | |||
|
164 | def quote(match): | |||
|
165 | pre = match.group(2) | |||
|
166 | key = match.group(3) | |||
|
167 | if not do3way and key == 'parent2': | |||
|
168 | return pre | |||
|
169 | return pre + procutil.shellquote(replace[key]) | |||
|
170 | ||||
|
171 | # Match parent2 first, so 'parent1?' will match both parent1 and parent | |||
|
172 | regex = (br'''(['"]?)([^\s'"$]*)''' | |||
|
173 | br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1') | |||
|
174 | if not do3way and not re.search(regex, cmdline): | |||
|
175 | cmdline += ' $parent1 $child' | |||
|
176 | return re.sub(regex, quote, cmdline) | |||
|
177 | ||||
155 | def dodiff(ui, repo, cmdline, pats, opts): |
|
178 | def dodiff(ui, repo, cmdline, pats, opts): | |
156 | '''Do the actual diff: |
|
179 | '''Do the actual diff: | |
157 |
|
180 | |||
@@ -281,28 +304,14 b' def dodiff(ui, repo, cmdline, pats, opts' | |||||
281 | label1b = None |
|
304 | label1b = None | |
282 | fnsandstat = [] |
|
305 | fnsandstat = [] | |
283 |
|
306 | |||
284 | # Function to quote file/dir names in the argument string. |
|
307 | # Run the external tool on the 2 temp directories or the patches | |
285 | # When not operating in 3-way mode, an empty string is |
|
308 | cmdline = formatcmdline( | |
286 | # returned for parent2 |
|
309 | cmdline, repo.root, do3way=do3way, | |
287 | replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b, |
|
310 | parent1=dir1a, plabel1=label1a, | |
288 | 'plabel1': label1a, 'plabel2': label1b, |
|
289 | 'clabel': label2, 'child': dir2, |
|
312 | child=dir2, clabel=label2) | |
290 | 'root': repo.root} |
|
313 | ui.debug('running %r in %s\n' % (pycompat.bytestr(cmdline), | |
291 | def quote(match): |
|
314 | tmproot)) | |
292 | pre = match.group(2) |
|
|||
293 | key = match.group(3) |
|
|||
294 | if not do3way and key == 'parent2': |
|
|||
295 | return pre |
|
|||
296 | return pre + procutil.shellquote(replace[key]) |
|
|||
297 |
|
||||
298 | # Match parent2 first, so 'parent1?' will match both parent1 and parent |
|
|||
299 | regex = (br'''(['"]?)([^\s'"$]*)''' |
|
|||
300 | br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1') |
|
|||
301 | if not do3way and not re.search(regex, cmdline): |
|
|||
302 | cmdline += ' $parent1 $child' |
|
|||
303 | cmdline = re.sub(regex, quote, cmdline) |
|
|||
304 |
|
||||
305 | ui.debug('running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot)) |
|
|||
306 | ui.system(cmdline, cwd=tmproot, blockedtag='extdiff') |
|
315 | ui.system(cmdline, cwd=tmproot, blockedtag='extdiff') | |
307 |
|
316 | |||
308 | for copy_fn, working_fn, st in fnsandstat: |
|
317 | for copy_fn, working_fn, st in fnsandstat: | |
@@ -383,8 +392,9 b' class savedcmd(object):' | |||||
383 |
|
392 | |||
384 | def __init__(self, path, cmdline): |
|
393 | def __init__(self, path, cmdline): | |
385 | # We can't pass non-ASCII through docstrings (and path is |
|
394 | # We can't pass non-ASCII through docstrings (and path is | |
386 | # in an unknown encoding anyway) |
|
395 | # in an unknown encoding anyway), but avoid double separators on | |
387 | docpath = stringutil.escapestr(path) |
|
396 | # Windows | |
|
397 | docpath = stringutil.escapestr(path).replace(b'\\\\', b'\\') | |||
388 | self.__doc__ %= {r'path': pycompat.sysstr(stringutil.uirepr(docpath))} |
|
398 | self.__doc__ %= {r'path': pycompat.sysstr(stringutil.uirepr(docpath))} | |
389 | self._cmdline = cmdline |
|
399 | self._cmdline = cmdline | |
390 |
|
400 |
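
Extracting formatcmdline() from dodiff() makes the placeholder substitution testable in isolation: each $key is shell-quoted, $parent2 is dropped when do3way is false, and ' $parent1 $child' is appended if the template names no placeholder at all. A usage sketch with made-up paths (exact quoting depends on the platform's shellquote):

    cmdline = formatcmdline(
        b'kdiff3 $parent1 $child --L1 $plabel1 --L2 $clabel',
        repo_root=b'/repo', do3way=False,
        parent1=b'/tmp/left', plabel1=b'rev1',
        parent2=b'', plabel2=b'',
        child=b'/tmp/right', clabel=b'rev2')
    # POSIX result: kdiff3 '/tmp/left' '/tmp/right' --L1 'rev1' --L2 'rev2'
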
@@ -261,8 +261,9 b' def debugbuildannotatecache(ui, repo, *p' | |||||
261 | repo.prefetchfastannotate(paths) |
|
261 | repo.prefetchfastannotate(paths) | |
262 | else: |
|
262 | else: | |
263 | # server, or full repo |
|
263 | # server, or full repo | |
|
264 | progress = ui.makeprogress(_('building'), total=len(paths)) | |||
264 | for i, path in enumerate(paths): |
|
265 | for i, path in enumerate(paths): | |
265 | ui.progress(_('building'), i, total=len(paths)) |
|
266 | progress.update(i) | |
266 | with facontext.annotatecontext(repo, path) as actx: |
|
267 | with facontext.annotatecontext(repo, path) as actx: | |
267 | try: |
|
268 | try: | |
268 | if actx.isuptodate(rev): |
|
269 | if actx.isuptodate(rev): | |
@@ -281,5 +282,4 b' def debugbuildannotatecache(ui, repo, *p' | |||||
281 | # cache for other files. |
|
282 | # cache for other files. | |
282 | ui.warn(_('fastannotate: %s: failed to ' |
|
283 | ui.warn(_('fastannotate: %s: failed to ' | |
283 | 'build cache: %r\n') % (path, ex)) |
|
284 | 'build cache: %r\n') % (path, ex)) | |
284 | # clear the progress bar |
|
285 | progress.complete() | |
285 | ui.write() |
|
@@ -138,7 +138,7 b' def hashdiffopts(diffopts):' | |||||
138 | (k, getattr(diffopts, k)) |
|
138 | (k, getattr(diffopts, k)) | |
139 | for k in mdiff.diffopts.defaults |
|
139 | for k in mdiff.diffopts.defaults | |
140 | )) |
|
140 | )) | |
141 | return hashlib.sha1(diffoptstr).hexdigest()[:6] |
|
141 | return node.hex(hashlib.sha1(diffoptstr).digest())[:6] | |
142 |
|
142 | |||
143 | _defaultdiffopthash = hashdiffopts(mdiff.defaultopts) |
|
143 | _defaultdiffopthash = hashdiffopts(mdiff.defaultopts) | |
144 |
|
144 | |||
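
hashdiffopts() is a compact fingerprinting idiom: serialize the effective options, hash them, and keep a short hex prefix so caches produced under different diff settings cannot collide; node.hex(...digest()) is equivalent to hexdigest(). The same idea stripped of Mercurial types:

    import hashlib

    def fingerprint(payload, length=6):
        # stable across processes, unlike the built-in hash(); a short
        # prefix is enough for cache partitioning, not for security
        return hashlib.sha1(payload).hexdigest()[:length]

    print(fingerprint(b'ignorews=False,ignoreblanklines=False'))
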
@@ -156,6 +156,7 b' class annotateopts(object):' | |||||
156 | } |
|
156 | } | |
157 |
|
157 | |||
158 | def __init__(self, **opts): |
|
158 | def __init__(self, **opts): | |
|
159 | opts = pycompat.byteskwargs(opts) | |||
159 | for k, v in self.defaults.iteritems(): |
|
160 | for k, v in self.defaults.iteritems(): | |
160 | setattr(self, k, opts.get(k, v)) |
|
161 | setattr(self, k, opts.get(k, v)) | |
161 |
|
162 | |||
@@ -397,7 +398,8 b' class _annotatecontext(object):' | |||||
397 |
|
398 | |||
398 | # 3rd DFS does the actual annotate |
|
399 | # 3rd DFS does the actual annotate | |
399 | visit = initvisit[:] |
|
400 | visit = initvisit[:] | |
400 | progress = 0 |
|
401 | progress = self.ui.makeprogress(_('building cache'), | |
|
402 | total=len(newmainbranch)) | |||
401 | while visit: |
|
403 | while visit: | |
402 | f = visit[-1] |
|
404 | f = visit[-1] | |
403 | if f in hist: |
|
405 | if f in hist: | |
@@ -436,10 +438,7 b' class _annotatecontext(object):' | |||||
436 | del pcache[f] |
|
438 | del pcache[f] | |
437 |
|
439 | |||
438 | if ismainbranch: # need to write to linelog |
|
440 | if ismainbranch: # need to write to linelog | |
439 | if not self.ui.quiet: |
|
441 | progress.increment() | |
440 | progress += 1 |
|
|||
441 | self.ui.progress(_('building cache'), progress, |
|
|||
442 | total=len(newmainbranch)) |
|
|||
443 | bannotated = None |
|
442 | bannotated = None | |
444 | if len(pl) == 2 and self.opts.followmerge: # merge |
|
443 | if len(pl) == 2 and self.opts.followmerge: # merge | |
445 | bannotated = curr[0] |
|
444 | bannotated = curr[0] | |
@@ -449,8 +448,7 b' class _annotatecontext(object):' | |||||
449 | elif showpath: # not append linelog, but we need to record path |
|
448 | elif showpath: # not append linelog, but we need to record path | |
450 | self._node2path[f.node()] = f.path() |
|
449 | self._node2path[f.node()] = f.path() | |
451 |
|
450 | |||
452 | if progress: # clean progress bar |
|
451 | progress.complete() | |
453 | self.ui.write() |
|
|||
454 |
|
452 | |||
455 | result = [ |
|
453 | result = [ | |
456 | ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l) |
|
454 | ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l) | |
@@ -604,7 +602,7 b' class _annotatecontext(object):' | |||||
604 | the best case, the user provides a node and we don't need to read the |
|
602 | the best case, the user provides a node and we don't need to read the | |
605 | filelog or construct any filecontext. |
|
603 | filelog or construct any filecontext. | |
606 | """ |
|
604 | """ | |
607 | if isinstance(f, str): |
|
605 | if isinstance(f, bytes): | |
608 | hsh = f |
|
606 | hsh = f | |
609 | else: |
|
607 | else: | |
610 | hsh = f.node() |
|
608 | hsh = f.node() | |
@@ -627,7 +625,7 b' class _annotatecontext(object):' | |||||
627 | if showpath: |
|
625 | if showpath: | |
628 | result = self._addpathtoresult(result) |
|
626 | result = self._addpathtoresult(result) | |
629 | if showlines: |
|
627 | if showlines: | |
630 | if isinstance(f, str): # f: node or fctx |
|
628 | if isinstance(f, bytes): # f: node or fctx | |
631 | llrev = self.revmap.hsh2rev(f) |
|
629 | llrev = self.revmap.hsh2rev(f) | |
632 | fctx = self._resolvefctx(f, self.revmap.rev2path(llrev)) |
|
630 | fctx = self._resolvefctx(f, self.revmap.rev2path(llrev)) | |
633 | else: |
|
631 | else: |
@@ -39,23 +39,26 b' class defaultformatter(object):' | |||||
39 | orig = hexfunc |
|
39 | orig = hexfunc | |
40 | hexfunc = lambda x: None if x is None else orig(x) |
|
40 | hexfunc = lambda x: None if x is None else orig(x) | |
41 | wnode = hexfunc(repo[None].p1().node()) + '+' |
|
41 | wnode = hexfunc(repo[None].p1().node()) + '+' | |
42 | wrev = str(repo[None].p1().rev()) |
|
42 | wrev = '%d' % repo[None].p1().rev() | |
43 | wrevpad = '' |
|
43 | wrevpad = '' | |
44 | if not opts.get('changeset'): # only show + if changeset is hidden |
|
44 | if not opts.get('changeset'): # only show + if changeset is hidden | |
45 | wrev += '+' |
|
45 | wrev += '+' | |
46 | wrevpad = ' ' |
|
46 | wrevpad = ' ' | |
47 | revenc = lambda x: wrev if x is None else str(x) + wrevpad |
|
47 | revenc = lambda x: wrev if x is None else ('%d' % x) + wrevpad | |
48 | csetenc = lambda x: wnode if x is None else str(x) + ' ' |
|
48 | def csetenc(x): | |
|
49 | if x is None: | |||
|
50 | return wnode | |||
|
51 | return pycompat.bytestr(x) + ' ' | |||
49 | else: |
|
52 | else: | |
50 | revenc = csetenc = str |
|
53 | revenc = csetenc = pycompat.bytestr | |
51 |
|
54 | |||
52 | # opt name, separator, raw value (for json/plain), encoder (for plain) |
|
55 | # opt name, separator, raw value (for json/plain), encoder (for plain) | |
53 | opmap = [('user', ' ', lambda x: getctx(x).user(), ui.shortuser), |
|
56 | opmap = [('user', ' ', lambda x: getctx(x).user(), ui.shortuser), | |
54 | ('number', ' ', lambda x: getctx(x).rev(), revenc), |
|
57 | ('number', ' ', lambda x: getctx(x).rev(), revenc), | |
55 | ('changeset', ' ', lambda x: hexfunc(x[0]), csetenc), |
|
58 | ('changeset', ' ', lambda x: hexfunc(x[0]), csetenc), | |
56 | ('date', ' ', lambda x: getctx(x).date(), datefunc), |
|
59 | ('date', ' ', lambda x: getctx(x).date(), datefunc), | |
57 | ('file', ' ', lambda x: x[2], str), |
|
60 | ('file', ' ', lambda x: x[2], pycompat.bytestr), | |
58 | ('line_number', ':', lambda x: x[1] + 1, str)] |
|
61 | ('line_number', ':', lambda x: x[1] + 1, pycompat.bytestr)] | |
59 | fieldnamemap = {'number': 'rev', 'changeset': 'node'} |
|
62 | fieldnamemap = {'number': 'rev', 'changeset': 'node'} | |
60 | funcmap = [(get, sep, fieldnamemap.get(op, op), enc) |
|
63 | funcmap = [(get, sep, fieldnamemap.get(op, op), enc) | |
61 | for op, sep, get, enc in opmap |
|
64 | for op, sep, get, enc in opmap | |
@@ -100,7 +103,7 b' class defaultformatter(object):' | |||||
100 | result += ': ' + self.ui.label('-' + lines[i], |
|
103 | result += ': ' + self.ui.label('-' + lines[i], | |
101 | 'diff.deleted') |
|
104 | 'diff.deleted') | |
102 |
|
105 | |||
103 | if result[-1] != '\n': |
|
106 | if result[-1:] != '\n': | |
104 | result += '\n' |
|
107 | result += '\n' | |
105 |
|
108 | |||
106 | self.ui.write(result) |
|
109 | self.ui.write(result) | |
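The result[-1:] change above sidesteps a classic Python 3 pitfall: indexing a bytes object yields an int, so result[-1] != '\n' would always be true and the formatter would append stray newlines. Slicing returns a one-byte bytes object on both versions::

  buf = b'line\n'
  buf[-1]    # Python 3: 10 (an int); Python 2: '\n'
  buf[-1:]   # b'\n' on both, so the != b'\n' comparison works as intended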
@@ -125,7 +128,7 b' class jsonformatter(defaultformatter):' | |||||
125 | if annotatedresult: |
|
128 | if annotatedresult: | |
126 | self._writecomma() |
|
129 | self._writecomma() | |
127 |
|
130 | |||
128 | pieces = [(name, map(f, annotatedresult)) |
|
131 | pieces = [(name, pycompat.maplist(f, annotatedresult)) | |
129 | for f, sep, name, enc in self.funcmap] |
|
132 | for f, sep, name, enc in self.funcmap] | |
130 | if lines is not None: |
|
133 | if lines is not None: | |
131 | pieces.append(('line', lines)) |
|
134 | pieces.append(('line', lines)) |
@@ -98,10 +98,10 b' def _parseresponse(payload):' | |||||
98 | state = 0 # 0: vfspath, 1: size |
|
98 | state = 0 # 0: vfspath, 1: size | |
99 | vfspath = size = '' |
|
99 | vfspath = size = '' | |
100 | while i < l: |
|
100 | while i < l: | |
101 | ch = payload[i] |
|
101 | ch = payload[i:i + 1] | |
102 | if ch == '\0': |
|
102 | if ch == '\0': | |
103 | if state == 1: |
|
103 | if state == 1: | |
104 | result[vfspath] = buffer(payload, i + 1, int(size)) |
|
104 | result[vfspath] = payload[i + 1:i + 1 + int(size)] | |
105 | i += int(size) |
|
105 | i += int(size) | |
106 | state = 0 |
|
106 | state = 0 | |
107 | vfspath = size = '' |
|
107 | vfspath = size = '' |
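For context, _parseresponse() walks a flat payload in which every entry is a NUL-terminated vfs path, a NUL-terminated decimal length, and then exactly that many raw bytes. A hedged sketch of a matching encoder (not part of this patch; the framing is inferred from the parser above)::

  def buildresponse(files):
      # files: dict mapping vfspath (bytes) -> file content (bytes)
      chunks = []
      for vfspath, data in sorted(files.items()):
          chunks.append(vfspath + b'\0' + b'%d' % len(data) + b'\0' + data)
      return b''.join(chunks)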
@@ -207,7 +207,7 b' class revmap(object):' | |||||
207 | path = self.rev2path(rev) |
|
207 | path = self.rev2path(rev) | |
208 | if path is None: |
|
208 | if path is None: | |
209 | raise error.CorruptedFileError('cannot find path for %s' % rev) |
|
209 | raise error.CorruptedFileError('cannot find path for %s' % rev) | |
210 | f.write(path + '\0') |
|
210 | f.write(path + b'\0') | |
211 | f.write(hsh) |
|
211 | f.write(hsh) | |
212 |
|
212 | |||
213 | @staticmethod |
|
213 | @staticmethod |
@@ -15,13 +15,15 b' formatting fixes to modified lines in C+' | |||||
15 | [fix] |
|
15 | [fix] | |
16 | clang-format:command=clang-format --assume-filename={rootpath} |
|
16 | clang-format:command=clang-format --assume-filename={rootpath} | |
17 | clang-format:linerange=--lines={first}:{last} |
|
17 | clang-format:linerange=--lines={first}:{last} | |
18 |
clang-format: |
|
18 | clang-format:pattern=set:**.cpp or **.hpp | |
19 |
|
19 | |||
20 | The :command suboption forms the first part of the shell command that will be |
|
20 | The :command suboption forms the first part of the shell command that will be | |
21 | used to fix a file. The content of the file is passed on standard input, and the |
|
21 | used to fix a file. The content of the file is passed on standard input, and the | |
22 |
fixed file content is expected on standard output. |
|
22 | fixed file content is expected on standard output. Any output on standard error | |
23 | standard error, the file will not be affected. Some values may be substituted |
|
23 | will be displayed as a warning. If the exit status is not zero, the file will | |
24 | into the command:: |
|
24 | not be affected. A placeholder warning is displayed if there is a non-zero exit | |
|
25 | status but no standard error output. Some values may be substituted into the | |||
|
26 | command:: | |||
25 |
|
27 | |||
26 | {rootpath} The path of the file being fixed, relative to the repo root |
|
28 | {rootpath} The path of the file being fixed, relative to the repo root | |
27 | {basename} The name of the file being fixed, without the directory path |
|
29 | {basename} The name of the file being fixed, without the directory path | |
@@ -34,16 +36,42 b' substituted into the command::' | |||||
34 | {first} The 1-based line number of the first line in the modified range |
|
36 | {first} The 1-based line number of the first line in the modified range | |
35 | {last} The 1-based line number of the last line in the modified range |
|
37 | {last} The 1-based line number of the last line in the modified range | |
36 |
|
38 | |||
37 | The :fileset suboption determines which files will be passed through each |
|
39 | The :pattern suboption determines which files will be passed through each | |
38 | configured tool. See :hg:`help fileset` for possible values. If there are file |
|
40 | configured tool. See :hg:`help patterns` for possible values. If there are file | |
39 | arguments to :hg:`fix`, the intersection of these filesets is used. |
|
41 | arguments to :hg:`fix`, the intersection of these patterns is used. | |
40 |
|
42 | |||
41 | There is also a configurable limit for the maximum size of file that will be |
|
43 | There is also a configurable limit for the maximum size of file that will be | |
42 | processed by :hg:`fix`:: |
|
44 | processed by :hg:`fix`:: | |
43 |
|
45 | |||
44 | [fix] |
|
46 | [fix] | |
45 | maxfilesize=2MB |
|
47 | maxfilesize = 2MB | |
|
48 | ||||
|
49 | Normally, execution of configured tools will continue after a failure (indicated | |||
|
50 | by a non-zero exit status). It can also be configured to abort after the first | |||
|
51 | such failure, so that no files will be affected if any tool fails. This abort | |||
|
52 | will also cause :hg:`fix` to exit with a non-zero status:: | |||
|
53 | ||||
|
54 | [fix] | |||
|
55 | failure = abort | |||
46 |
|
56 | |||
|
57 | When multiple tools are configured to affect a file, they execute in an order | |||
|
58 | defined by the :priority suboption. The priority suboption has a default value | |||
|
59 | of zero for each tool. Tools are executed in order of descending priority. The | |||
|
60 | execution order of tools with equal priority is unspecified. For example, you | |||
|
61 | could use the 'sort' and 'head' utilities to keep only the 10 smallest numbers | |||
|
62 | in a text file by ensuring that 'sort' runs before 'head':: | |||
|
63 | ||||
|
64 | [fix] | |||
|
65 | sort:command = sort -n | |||
|
66 | head:command = head -n 10 | |||
|
67 | sort:pattern = numbers.txt | |||
|
68 | head:pattern = numbers.txt | |||
|
69 | sort:priority = 2 | |||
|
70 | head:priority = 1 | |||
|
71 | ||||
|
72 | To account for changes made by each tool, the line numbers used for incremental | |||
|
73 | formatting are recomputed before executing the next tool. So, each tool may see | |||
|
74 | different values for the arguments added by the :linerange suboption. | |||
47 | """ |
|
75 | """ | |
48 |
|
76 | |||
49 | from __future__ import absolute_import |
|
77 | from __future__ import absolute_import | |
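The :priority ordering described above amounts to a small driver loop: sort the configured tools by descending priority, pipe the file content through each command, and keep a tool's output only when it exits cleanly. A simplified sketch of that flow, not the extension's actual code (names are illustrative)::

  import subprocess

  def runfixers(tools, data):
      # tools: iterable of (name, command, priority); data: file content bytes
      for name, command, priority in sorted(tools, key=lambda t: t[2],
                                            reverse=True):
          proc = subprocess.Popen(command, shell=True,
                                  stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
          newdata, stderr = proc.communicate(data)
          if proc.returncode == 0:
              data = newdata    # accept this tool's output
          # otherwise keep the previous content (the fix.failure=continue case)
      return data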
@@ -90,16 +118,36 b' configtable = {}' | |||||
90 | configitem = registrar.configitem(configtable) |
|
118 | configitem = registrar.configitem(configtable) | |
91 |
|
119 | |||
92 | # Register the suboptions allowed for each configured fixer. |
|
120 | # Register the suboptions allowed for each configured fixer. | |
93 | FIXER_ATTRS = ('command', 'linerange', 'fileset') |
|
121 | FIXER_ATTRS = { | |
|
122 | 'command': None, | |||
|
123 | 'linerange': None, | |||
|
124 | 'fileset': None, | |||
|
125 | 'pattern': None, | |||
|
126 | 'priority': 0, | |||
|
127 | } | |||
94 |
|
128 | |||
95 | for key in FIXER_ATTRS: |
|
129 | for key, default in FIXER_ATTRS.items(): | |
96 | configitem('fix', '.*(:%s)?' % key, default=None, generic=True) |
|
130 | configitem('fix', '.*(:%s)?' % key, default=default, generic=True) | |
97 |
|
131 | |||
98 | # A good default size allows most source code files to be fixed, but avoids |
|
132 | # A good default size allows most source code files to be fixed, but avoids | |
99 | # letting fixer tools choke on huge inputs, which could be surprising to the |
|
133 | # letting fixer tools choke on huge inputs, which could be surprising to the | |
100 | # user. |
|
134 | # user. | |
101 | configitem('fix', 'maxfilesize', default='2MB') |
|
135 | configitem('fix', 'maxfilesize', default='2MB') | |
102 |
|
136 | |||
|
137 | # Allow fix commands to exit non-zero if an executed fixer tool exits non-zero. | |||
|
138 | # This helps users do shell scripts that stop when a fixer tool signals a | |||
|
139 | # problem. | |||
|
140 | configitem('fix', 'failure', default='continue') | |||
|
141 | ||||
|
142 | def checktoolfailureaction(ui, message, hint=None): | |||
|
143 | """Abort with 'message' if fix.failure=abort""" | |||
|
144 | action = ui.config('fix', 'failure') | |||
|
145 | if action not in ('continue', 'abort'): | |||
|
146 | raise error.Abort(_('unknown fix.failure action: %s') % (action,), | |||
|
147 | hint=_('use "continue" or "abort"')) | |||
|
148 | if action == 'abort': | |||
|
149 | raise error.Abort(message, hint=hint) | |||
|
150 | ||||
103 | allopt = ('', 'all', False, _('fix all non-public non-obsolete revisions')) |
|
151 | allopt = ('', 'all', False, _('fix all non-public non-obsolete revisions')) | |
104 | baseopt = ('', 'base', [], _('revisions to diff against (overrides automatic ' |
|
152 | baseopt = ('', 'base', [], _('revisions to diff against (overrides automatic ' | |
105 | 'selection, and applies to every revision being ' |
|
153 | 'selection, and applies to every revision being ' | |
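With the abort action configured, one failing tool makes the whole run all-or-nothing; a hypothetical session, with output reconstructed from the messages added above::

  $ hg fix --working-dir --config fix.failure=abort
  abort: no fixes will be applied
  (use --config fix.failure=continue to apply any successful fixes anyway)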
@@ -465,9 +513,14 b' def fixfile(ui, opts, fixers, fixctx, pa' | |||||
465 | showstderr(ui, fixctx.rev(), fixername, stderr) |
|
513 | showstderr(ui, fixctx.rev(), fixername, stderr) | |
466 | if proc.returncode == 0: |
|
514 | if proc.returncode == 0: | |
467 | newdata = newerdata |
|
515 | newdata = newerdata | |
468 | elif not stderr: |
|
516 | else: | |
469 | showstderr(ui, fixctx.rev(), fixername, |
|
517 | if not stderr: | |
470 | _('exited with status %d\n') % (proc.returncode,)) |
|
518 | message = _('exited with status %d\n') % (proc.returncode,) | |
|
519 | showstderr(ui, fixctx.rev(), fixername, message) | |||
|
520 | checktoolfailureaction( | |||
|
521 | ui, _('no fixes will be applied'), | |||
|
522 | hint=_('use --config fix.failure=continue to apply any ' | |||
|
523 | 'successful fixes anyway')) | |||
471 | return newdata |
|
524 | return newdata | |
472 |
|
525 | |||
473 | def showstderr(ui, rev, fixername, stderr): |
|
526 | def showstderr(ui, rev, fixername, stderr): | |
@@ -533,6 +586,17 b' def replacerev(ui, repo, ctx, filedata, ' | |||||
533 | newp1node = replacements.get(p1ctx.node(), p1ctx.node()) |
|
586 | newp1node = replacements.get(p1ctx.node(), p1ctx.node()) | |
534 | newp2node = replacements.get(p2ctx.node(), p2ctx.node()) |
|
587 | newp2node = replacements.get(p2ctx.node(), p2ctx.node()) | |
535 |
|
588 | |||
|
589 | # We don't want to create a revision that has no changes from the original, | |||
|
590 | # but we should if the original revision's parent has been replaced. | |||
|
591 | # Otherwise, we would produce an orphan that needs no actual human | |||
|
592 | # intervention to evolve. We can't rely on commit() to avoid creating the | |||
|
593 | # un-needed revision because the extra field added below produces a new hash | |||
|
594 | # regardless of file content changes. | |||
|
595 | if (not filedata and | |||
|
596 | p1ctx.node() not in replacements and | |||
|
597 | p2ctx.node() not in replacements): | |||
|
598 | return | |||
|
599 | ||||
536 | def filectxfn(repo, memctx, path): |
|
600 | def filectxfn(repo, memctx, path): | |
537 | if path not in ctx: |
|
601 | if path not in ctx: | |
538 | return None |
|
602 | return None | |
@@ -549,6 +613,9 b' def replacerev(ui, repo, ctx, filedata, ' | |||||
549 | isexec=fctx.isexec(), |
|
613 | isexec=fctx.isexec(), | |
550 | copied=copied) |
|
614 | copied=copied) | |
551 |
|
615 | |||
|
616 | extra = ctx.extra().copy() | |||
|
617 | extra['fix_source'] = ctx.hex() | |||
|
618 | ||||
552 | memctx = context.memctx( |
|
619 | memctx = context.memctx( | |
553 | repo, |
|
620 | repo, | |
554 | parents=(newp1node, newp2node), |
|
621 | parents=(newp1node, newp2node), | |
@@ -557,7 +624,7 b' def replacerev(ui, repo, ctx, filedata, ' | |||||
557 | filectxfn=filectxfn, |
|
624 | filectxfn=filectxfn, | |
558 | user=ctx.user(), |
|
625 | user=ctx.user(), | |
559 | date=ctx.date(), |
|
626 | date=ctx.date(), | |
560 | extra=ctx.extra(), |
|
627 | extra=extra, | |
561 | branch=ctx.branch(), |
|
628 | branch=ctx.branch(), | |
562 | editor=None) |
|
629 | editor=None) | |
563 | sucnode = memctx.commit() |
|
630 | sucnode = memctx.commit() | |
@@ -573,14 +640,21 b' def getfixers(ui):' | |||||
573 | Each value is a Fixer object with methods that implement the behavior of the |
|
640 | Each value is a Fixer object with methods that implement the behavior of the | |
574 | fixer's config suboptions. Does not validate the config values. |
|
641 | fixer's config suboptions. Does not validate the config values. | |
575 | """ |
|
642 | """ | |
576 | result = {} |
|
643 | fixers = {} | |
577 | for name in fixernames(ui): |
|
644 | for name in fixernames(ui): | |
578 | result[name] = Fixer() |
|
645 | fixers[name] = Fixer() | |
579 | attrs = ui.configsuboptions('fix', name)[1] |
|
646 | attrs = ui.configsuboptions('fix', name)[1] | |
580 | for key in FIXER_ATTRS: |
|
647 | if 'fileset' in attrs and 'pattern' not in attrs: | |
581 | setattr(result[name], pycompat.sysstr('_' + key), |
|
648 | ui.warn(_('the fix.tool:fileset config name is deprecated; ' | |
582 | attrs.get(key, '')) |
|
649 | 'please rename it to fix.tool:pattern\n')) | |
583 | return result |
|
650 | attrs['pattern'] = attrs['fileset'] | |
|
651 | for key, default in FIXER_ATTRS.items(): | |||
|
652 | setattr(fixers[name], pycompat.sysstr('_' + key), | |||
|
653 | attrs.get(key, default)) | |||
|
654 | fixers[name]._priority = int(fixers[name]._priority) | |||
|
655 | return collections.OrderedDict( | |||
|
656 | sorted(fixers.items(), key=lambda item: item[1]._priority, | |||
|
657 | reverse=True)) | |||
584 |
|
658 | |||
585 | def fixernames(ui): |
|
659 | def fixernames(ui): | |
586 | """Returns the names of [fix] config options that have suboptions""" |
|
660 | """Returns the names of [fix] config options that have suboptions""" | |
@@ -595,7 +669,7 b' class Fixer(object):' | |||||
595 |
|
669 | |||
596 | def affects(self, opts, fixctx, path): |
|
670 | def affects(self, opts, fixctx, path): | |
597 | """Should this fixer run on the file at the given path and context?""" |
|
671 | """Should this fixer run on the file at the given path and context?""" | |
598 | return scmutil.match(fixctx, [self._fileset], opts)(path) |
|
672 | return scmutil.match(fixctx, [self._pattern], opts)(path) | |
599 |
|
673 | |||
600 | def command(self, ui, path, rangesfn): |
|
674 | def command(self, ui, path, rangesfn): | |
601 | """A shell command to use to invoke this fixer on the given file/lines |
|
675 | """A shell command to use to invoke this fixer on the given file/lines |
@@ -87,7 +87,7 b' def generate_css(web):' | |||||
87 | ])) |
|
87 | ])) | |
88 | return web.res.sendresponse() |
|
88 | return web.res.sendresponse() | |
89 |
|
89 | |||
90 | def extsetup(): |
|
90 | def extsetup(ui): | |
91 | # monkeypatch in the new version |
|
91 | # monkeypatch in the new version | |
92 | extensions.wrapfunction(webcommands, '_filerevision', |
|
92 | extensions.wrapfunction(webcommands, '_filerevision', | |
93 | filerevision_highlight) |
|
93 | filerevision_highlight) |
@@ -183,7 +183,17 b' unexpectedly::' | |||||
183 |
|
183 | |||
184 | from __future__ import absolute_import |
|
184 | from __future__ import absolute_import | |
185 |
|
185 | |||
|
186 | # chistedit dependencies that are not available everywhere | |||
|
187 | try: | |||
|
188 | import fcntl | |||
|
189 | import termios | |||
|
190 | except ImportError: | |||
|
191 | fcntl = None | |||
|
192 | termios = None | |||
|
193 | ||||
|
194 | import functools | |||
186 | import os |
|
195 | import os | |
|
196 | import struct | |||
187 |
|
197 | |||
188 | from mercurial.i18n import _ |
|
198 | from mercurial.i18n import _ | |
189 | from mercurial import ( |
|
199 | from mercurial import ( | |
@@ -197,7 +207,7 b' from mercurial import (' | |||||
197 | exchange, |
|
207 | exchange, | |
198 | extensions, |
|
208 | extensions, | |
199 | hg, |
|
209 | hg, | |
200 | lock, |
|
210 | logcmdutil, | |
201 | merge as mergemod, |
|
211 | merge as mergemod, | |
202 | mergeutil, |
|
212 | mergeutil, | |
203 | node, |
|
213 | node, | |
@@ -210,11 +220,11 b' from mercurial import (' | |||||
210 | util, |
|
220 | util, | |
211 | ) |
|
221 | ) | |
212 | from mercurial.utils import ( |
|
222 | from mercurial.utils import ( | |
|
223 | dateutil, | |||
213 | stringutil, |
|
224 | stringutil, | |
214 | ) |
|
225 | ) | |
215 |
|
226 | |||
216 | pickle = util.pickle |
|
227 | pickle = util.pickle | |
217 | release = lock.release |
|
|||
218 | cmdtable = {} |
|
228 | cmdtable = {} | |
219 | command = registrar.command(cmdtable) |
|
229 | command = registrar.command(cmdtable) | |
220 |
|
230 | |||
@@ -235,6 +245,9 b" configitem('histedit', 'linelen'," | |||||
235 | configitem('histedit', 'singletransaction', |
|
245 | configitem('histedit', 'singletransaction', | |
236 | default=False, |
|
246 | default=False, | |
237 | ) |
|
247 | ) | |
|
248 | configitem('ui', 'interface.histedit', | |||
|
249 | default=None, | |||
|
250 | ) | |||
238 |
|
251 | |||
239 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
252 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |
240 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
253 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | |
@@ -294,21 +307,17 b' Commands:' | |||||
294 | return ''.join(['# %s\n' % l if l else '#\n' for l in lines]) |
|
307 | return ''.join(['# %s\n' % l if l else '#\n' for l in lines]) | |
295 |
|
308 | |||
296 | class histeditstate(object): |
|
309 | class histeditstate(object): | |
297 | def __init__(self, repo, parentctxnode=None, actions=None, keep=None, |
|
310 | def __init__(self, repo): | |
298 | topmost=None, replacements=None, lock=None, wlock=None): |
|
|||
299 | self.repo = repo |
|
311 | self.repo = repo | |
300 | self.actions = actions |
|
312 | self.actions = None | |
301 | self.keep = keep |
|
313 | self.keep = None | |
302 | self.topmost = topmost |
|
314 | self.topmost = None | |
303 | self.parentctxnode = parentctxnode |
|
315 | self.parentctxnode = None | |
304 | self.lock = lock |
|
316 | self.lock = None | |
305 | self.wlock = wlock |
|
317 | self.wlock = None | |
306 | self.backupfile = None |
|
318 | self.backupfile = None | |
307 | self.stateobj = statemod.cmdstate(repo, 'histedit-state') |
|
319 | self.stateobj = statemod.cmdstate(repo, 'histedit-state') | |
308 | if replacements is None: |
|
320 | self.replacements = [] | |
309 | self.replacements = [] |
|
|||
310 | else: |
|
|||
311 | self.replacements = replacements |
|
|||
312 |
|
321 | |||
313 | def read(self): |
|
322 | def read(self): | |
314 | """Load histedit state from disk and set fields appropriately.""" |
|
323 | """Load histedit state from disk and set fields appropriately.""" | |
@@ -519,9 +528,12 b' class histeditaction(object):' | |||||
519 |
|
528 | |||
520 | editor = self.commiteditor() |
|
529 | editor = self.commiteditor() | |
521 | commit = commitfuncfor(repo, rulectx) |
|
530 | commit = commitfuncfor(repo, rulectx) | |
522 |
|
531 | if repo.ui.configbool('rewrite', 'update-timestamp'): | ||
|
532 | date = dateutil.makedate() | |||
|
533 | else: | |||
|
534 | date = rulectx.date() | |||
523 | commit(text=rulectx.description(), user=rulectx.user(), |
|
535 | commit(text=rulectx.description(), user=rulectx.user(), | |
524 | date=rulectx.date(), extra=rulectx.extra(), editor=editor) |
|
536 | date=date, extra=rulectx.extra(), editor=editor) | |
525 |
|
537 | |||
526 | def commiteditor(self): |
|
538 | def commiteditor(self): | |
527 | """The editor to be used to edit the commit message.""" |
|
539 | """The editor to be used to edit the commit message.""" | |
@@ -802,6 +814,10 b' class fold(histeditaction):' | |||||
802 | commitopts['date'] = ctx.date() |
|
814 | commitopts['date'] = ctx.date() | |
803 | else: |
|
815 | else: | |
804 | commitopts['date'] = max(ctx.date(), oldctx.date()) |
|
816 | commitopts['date'] = max(ctx.date(), oldctx.date()) | |
|
817 | # if date is to be updated to current | |||
|
818 | if ui.configbool('rewrite', 'update-timestamp'): | |||
|
819 | commitopts['date'] = dateutil.makedate() | |||
|
820 | ||||
805 | extra = ctx.extra().copy() |
|
821 | extra = ctx.extra().copy() | |
806 | # histedit_source |
|
822 | # histedit_source | |
807 | # note: ctx is likely a temporary commit but that the best we can do |
|
823 | # note: ctx is likely a temporary commit but that the best we can do | |
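Both histedit hunks above consult the same knob: when rewrite.update-timestamp is set, rewritten commits get the current date from dateutil.makedate() instead of preserving the original one. Enabling it in hgrc::

  [rewrite]
  update-timestamp = true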
@@ -915,6 +931,562 b' def findoutgoing(ui, repo, remote=None, ' | |||||
915 | raise error.Abort(msg, hint=hint) |
|
931 | raise error.Abort(msg, hint=hint) | |
916 | return repo[roots[0]].node() |
|
932 | return repo[roots[0]].node() | |
917 |
|
933 | |||
|
934 | # Curses Support | |||
|
935 | try: | |||
|
936 | import curses | |||
|
937 | except ImportError: | |||
|
938 | curses = None | |||
|
939 | ||||
|
940 | KEY_LIST = ['pick', 'edit', 'fold', 'drop', 'mess', 'roll'] | |||
|
941 | ACTION_LABELS = { | |||
|
942 | 'fold': '^fold', | |||
|
943 | 'roll': '^roll', | |||
|
944 | } | |||
|
945 | ||||
|
946 | COLOR_HELP, COLOR_SELECTED, COLOR_OK, COLOR_WARN = 1, 2, 3, 4 | |||
|
947 | ||||
|
948 | E_QUIT, E_HISTEDIT = 1, 2 | |||
|
949 | E_PAGEDOWN, E_PAGEUP, E_LINEUP, E_LINEDOWN, E_RESIZE = 3, 4, 5, 6, 7 | |||
|
950 | MODE_INIT, MODE_PATCH, MODE_RULES, MODE_HELP = 0, 1, 2, 3 | |||
|
951 | ||||
|
952 | KEYTABLE = { | |||
|
953 | 'global': { | |||
|
954 | 'h': 'next-action', | |||
|
955 | 'KEY_RIGHT': 'next-action', | |||
|
956 | 'l': 'prev-action', | |||
|
957 | 'KEY_LEFT': 'prev-action', | |||
|
958 | 'q': 'quit', | |||
|
959 | 'c': 'histedit', | |||
|
960 | 'C': 'histedit', | |||
|
961 | 'v': 'showpatch', | |||
|
962 | '?': 'help', | |||
|
963 | }, | |||
|
964 | MODE_RULES: { | |||
|
965 | 'd': 'action-drop', | |||
|
966 | 'e': 'action-edit', | |||
|
967 | 'f': 'action-fold', | |||
|
968 | 'm': 'action-mess', | |||
|
969 | 'p': 'action-pick', | |||
|
970 | 'r': 'action-roll', | |||
|
971 | ' ': 'select', | |||
|
972 | 'j': 'down', | |||
|
973 | 'k': 'up', | |||
|
974 | 'KEY_DOWN': 'down', | |||
|
975 | 'KEY_UP': 'up', | |||
|
976 | 'J': 'move-down', | |||
|
977 | 'K': 'move-up', | |||
|
978 | 'KEY_NPAGE': 'move-down', | |||
|
979 | 'KEY_PPAGE': 'move-up', | |||
|
980 | '0': 'goto', # Used for 0..9 | |||
|
981 | }, | |||
|
982 | MODE_PATCH: { | |||
|
983 | ' ': 'page-down', | |||
|
984 | 'KEY_NPAGE': 'page-down', | |||
|
985 | 'KEY_PPAGE': 'page-up', | |||
|
986 | 'j': 'line-down', | |||
|
987 | 'k': 'line-up', | |||
|
988 | 'KEY_DOWN': 'line-down', | |||
|
989 | 'KEY_UP': 'line-up', | |||
|
990 | 'J': 'down', | |||
|
991 | 'K': 'up', | |||
|
992 | }, | |||
|
993 | MODE_HELP: { | |||
|
994 | }, | |||
|
995 | } | |||
|
996 | ||||
|
997 | def screen_size(): | |||
|
998 | return struct.unpack('hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '    ')) | |||
|
999 | ||||
|
1000 | class histeditrule(object): | |||
|
1001 | def __init__(self, ctx, pos, action='pick'): | |||
|
1002 | self.ctx = ctx | |||
|
1003 | self.action = action | |||
|
1004 | self.origpos = pos | |||
|
1005 | self.pos = pos | |||
|
1006 | self.conflicts = [] | |||
|
1007 | ||||
|
1008 | def __str__(self): | |||
|
1009 | # Some actions ('fold' and 'roll') combine a patch with a previous one. | |||
|
1010 | # Add a marker showing which patch they apply to, and also omit the | |||
|
1011 | # description for 'roll' (since it will get discarded). Example display: | |||
|
1012 | # | |||
|
1013 | # #10 pick 316392:06a16c25c053 add option to skip tests | |||
|
1014 | # #11 ^roll 316393:71313c964cc5 | |||
|
1015 | # #12 pick 316394:ab31f3973b0d include mfbt for mozilla-config.h | |||
|
1016 | # #13 ^fold 316395:14ce5803f4c3 fix warnings | |||
|
1017 | # | |||
|
1018 | # The carets point to the changeset being folded into ("roll this | |||
|
1019 | # changeset into the changeset above"). | |||
|
1020 | action = ACTION_LABELS.get(self.action, self.action) | |||
|
1021 | h = self.ctx.hex()[0:12] | |||
|
1022 | r = self.ctx.rev() | |||
|
1023 | desc = self.ctx.description().splitlines()[0].strip() | |||
|
1024 | if self.action == 'roll': | |||
|
1025 | desc = '' | |||
|
1026 | return "#{0:<2} {1:<6} {2}:{3} {4}".format( | |||
|
1027 | self.origpos, action, r, h, desc) | |||
|
1028 | ||||
|
1029 | def checkconflicts(self, other): | |||
|
1030 | if other.pos > self.pos and other.origpos <= self.origpos: | |||
|
1031 | if set(other.ctx.files()) & set(self.ctx.files()) != set(): | |||
|
1032 | self.conflicts.append(other) | |||
|
1033 | return self.conflicts | |||
|
1034 | ||||
|
1035 | if other in self.conflicts: | |||
|
1036 | self.conflicts.remove(other) | |||
|
1037 | return self.conflicts | |||
|
1038 | ||||
|
1039 | # ============ EVENTS =============== | |||
|
1040 | def movecursor(state, oldpos, newpos): | |||
|
1041 | '''Change the rule/changeset that the cursor is pointing to, regardless of | |||
|
1042 | current mode (you can switch between patches from the view patch window).''' | |||
|
1043 | state['pos'] = newpos | |||
|
1044 | ||||
|
1045 | mode, _ = state['mode'] | |||
|
1046 | if mode == MODE_RULES: | |||
|
1047 | # Scroll through the list by updating the view for MODE_RULES, so that | |||
|
1048 | # even if we are not currently viewing the rules, switching back will | |||
|
1049 | # result in the cursor's rule being visible. | |||
|
1050 | modestate = state['modes'][MODE_RULES] | |||
|
1051 | if newpos < modestate['line_offset']: | |||
|
1052 | modestate['line_offset'] = newpos | |||
|
1053 | elif newpos > modestate['line_offset'] + state['page_height'] - 1: | |||
|
1054 | modestate['line_offset'] = newpos - state['page_height'] + 1 | |||
|
1055 | ||||
|
1056 | # Reset the patch view region to the top of the new patch. | |||
|
1057 | state['modes'][MODE_PATCH]['line_offset'] = 0 | |||
|
1058 | ||||
|
1059 | def changemode(state, mode): | |||
|
1060 | curmode, _ = state['mode'] | |||
|
1061 | state['mode'] = (mode, curmode) | |||
|
1062 | ||||
|
1063 | def makeselection(state, pos): | |||
|
1064 | state['selected'] = pos | |||
|
1065 | ||||
|
1066 | def swap(state, oldpos, newpos): | |||
|
1067 | """Swap two positions and calculate necessary conflicts in | |||
|
1068 | O(|newpos-oldpos|) time""" | |||
|
1069 | ||||
|
1070 | rules = state['rules'] | |||
|
1071 | assert 0 <= oldpos < len(rules) and 0 <= newpos < len(rules) | |||
|
1072 | ||||
|
1073 | rules[oldpos], rules[newpos] = rules[newpos], rules[oldpos] | |||
|
1074 | ||||
|
1075 | # TODO: swap should not know about histeditrule's internals | |||
|
1076 | rules[newpos].pos = newpos | |||
|
1077 | rules[oldpos].pos = oldpos | |||
|
1078 | ||||
|
1079 | start = min(oldpos, newpos) | |||
|
1080 | end = max(oldpos, newpos) | |||
|
1081 | for r in pycompat.xrange(start, end + 1): | |||
|
1082 | rules[newpos].checkconflicts(rules[r]) | |||
|
1083 | rules[oldpos].checkconflicts(rules[r]) | |||
|
1084 | ||||
|
1085 | if state['selected']: | |||
|
1086 | makeselection(state, newpos) | |||
|
1087 | ||||
|
1088 | def changeaction(state, pos, action): | |||
|
1089 | """Change the action state on the given position to the new action""" | |||
|
1090 | rules = state['rules'] | |||
|
1091 | assert 0 <= pos < len(rules) | |||
|
1092 | rules[pos].action = action | |||
|
1093 | ||||
|
1094 | def cycleaction(state, pos, next=False): | |||
|
1095 | """Changes the action state the next or the previous action from | |||
|
1096 | the action list""" | |||
|
1097 | rules = state['rules'] | |||
|
1098 | assert 0 <= pos < len(rules) | |||
|
1099 | current = rules[pos].action | |||
|
1100 | ||||
|
1101 | assert current in KEY_LIST | |||
|
1102 | ||||
|
1103 | index = KEY_LIST.index(current) | |||
|
1104 | if next: | |||
|
1105 | index += 1 | |||
|
1106 | else: | |||
|
1107 | index -= 1 | |||
|
1108 | changeaction(state, pos, KEY_LIST[index % len(KEY_LIST)]) | |||
|
1109 | ||||
|
1110 | def changeview(state, delta, unit): | |||
|
1111 | '''Change the region of whatever is being viewed (a patch or the list of | |||
|
1112 | changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'.''' | |||
|
1113 | mode, _ = state['mode'] | |||
|
1114 | if mode != MODE_PATCH: | |||
|
1115 | return | |||
|
1116 | mode_state = state['modes'][mode] | |||
|
1117 | num_lines = len(patchcontents(state)) | |||
|
1118 | page_height = state['page_height'] | |||
|
1119 | unit = page_height if unit == 'page' else 1 | |||
|
1120 | num_pages = 1 + (num_lines - 1) / page_height | |||
|
1121 | max_offset = (num_pages - 1) * page_height | |||
|
1122 | newline = mode_state['line_offset'] + delta * unit | |||
|
1123 | mode_state['line_offset'] = max(0, min(max_offset, newline)) | |||
|
1124 | ||||
|
1125 | def event(state, ch): | |||
|
1126 | """Change state based on the current character input | |||
|
1127 | ||||
|
1128 | This takes the current state and based on the current character input from | |||
|
1129 | the user we change the state. | |||
|
1130 | """ | |||
|
1131 | selected = state['selected'] | |||
|
1132 | oldpos = state['pos'] | |||
|
1133 | rules = state['rules'] | |||
|
1134 | ||||
|
1135 | if ch in (curses.KEY_RESIZE, "KEY_RESIZE"): | |||
|
1136 | return E_RESIZE | |||
|
1137 | ||||
|
1138 | lookup_ch = ch | |||
|
1139 | if '0' <= ch <= '9': | |||
|
1140 | lookup_ch = '0' | |||
|
1141 | ||||
|
1142 | curmode, prevmode = state['mode'] | |||
|
1143 | action = KEYTABLE[curmode].get(lookup_ch, KEYTABLE['global'].get(lookup_ch)) | |||
|
1144 | if action is None: | |||
|
1145 | return | |||
|
1146 | if action in ('down', 'move-down'): | |||
|
1147 | newpos = min(oldpos + 1, len(rules) - 1) | |||
|
1148 | movecursor(state, oldpos, newpos) | |||
|
1149 | if selected is not None or action == 'move-down': | |||
|
1150 | swap(state, oldpos, newpos) | |||
|
1151 | elif action in ('up', 'move-up'): | |||
|
1152 | newpos = max(0, oldpos - 1) | |||
|
1153 | movecursor(state, oldpos, newpos) | |||
|
1154 | if selected is not None or action == 'move-up': | |||
|
1155 | swap(state, oldpos, newpos) | |||
|
1156 | elif action == 'next-action': | |||
|
1157 | cycleaction(state, oldpos, next=True) | |||
|
1158 | elif action == 'prev-action': | |||
|
1159 | cycleaction(state, oldpos, next=False) | |||
|
1160 | elif action == 'select': | |||
|
1161 | selected = oldpos if selected is None else None | |||
|
1162 | makeselection(state, selected) | |||
|
1163 | elif action == 'goto' and int(ch) < len(rules) and len(rules) <= 10: | |||
|
1164 | newrule = next((r for r in rules if r.origpos == int(ch))) | |||
|
1165 | movecursor(state, oldpos, newrule.pos) | |||
|
1166 | if selected is not None: | |||
|
1167 | swap(state, oldpos, newrule.pos) | |||
|
1168 | elif action.startswith('action-'): | |||
|
1169 | changeaction(state, oldpos, action[7:]) | |||
|
1170 | elif action == 'showpatch': | |||
|
1171 | changemode(state, MODE_PATCH if curmode != MODE_PATCH else prevmode) | |||
|
1172 | elif action == 'help': | |||
|
1173 | changemode(state, MODE_HELP if curmode != MODE_HELP else prevmode) | |||
|
1174 | elif action == 'quit': | |||
|
1175 | return E_QUIT | |||
|
1176 | elif action == 'histedit': | |||
|
1177 | return E_HISTEDIT | |||
|
1178 | elif action == 'page-down': | |||
|
1179 | return E_PAGEDOWN | |||
|
1180 | elif action == 'page-up': | |||
|
1181 | return E_PAGEUP | |||
|
1182 | elif action == 'line-down': | |||
|
1183 | return E_LINEDOWN | |||
|
1184 | elif action == 'line-up': | |||
|
1185 | return E_LINEUP | |||
|
1186 | ||||
|
1187 | def makecommands(rules): | |||
|
1188 | """Returns a list of commands consumable by histedit --commands based on | |||
|
1189 | our list of rules""" | |||
|
1190 | commands = [] | |||
|
1191 | for rule in rules: | |||
|
1192 | commands.append("{0} {1}\n".format(rule.action, rule.ctx)) | |||
|
1193 | return commands | |||
|
1194 | ||||
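makecommands() serializes the curses rule list into the same plain-text plan that histedit --commands consumes, one "action changeset" pair per line. A hypothetical plan (the hashes are illustrative)::

  pick c561b4e977df
  pick 7c2fd3b9020c
  roll 08d98a8350f3
  drop 40db8afa467b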
|
1195 | def addln(win, y, x, line, color=None): | |||
|
1196 | """Add a line to the given window left padding but 100% filled with | |||
|
1197 | whitespace characters, so that the color appears on the whole line""" | |||
|
1198 | maxy, maxx = win.getmaxyx() | |||
|
1199 | length = maxx - 1 - x | |||
|
1200 | line = ("{0:<%d}" % length).format(str(line).strip())[:length] | |||
|
1201 | if y < 0: | |||
|
1202 | y = maxy + y | |||
|
1203 | if x < 0: | |||
|
1204 | x = maxx + x | |||
|
1205 | if color: | |||
|
1206 | win.addstr(y, x, line, color) | |||
|
1207 | else: | |||
|
1208 | win.addstr(y, x, line) | |||
|
1209 | ||||
|
1210 | def patchcontents(state): | |||
|
1211 | repo = state['repo'] | |||
|
1212 | rule = state['rules'][state['pos']] | |||
|
1213 | displayer = logcmdutil.changesetdisplayer(repo.ui, repo, { | |||
|
1214 | 'patch': True, 'verbose': True | |||
|
1215 | }, buffered=True) | |||
|
1216 | displayer.show(rule.ctx) | |||
|
1217 | displayer.close() | |||
|
1218 | return displayer.hunk[rule.ctx.rev()].splitlines() | |||
|
1219 | ||||
|
1220 | def _chisteditmain(repo, rules, stdscr): | |||
|
1221 | # initialize color pattern | |||
|
1222 | curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE) | |||
|
1223 | curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE) | |||
|
1224 | curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW) | |||
|
1225 | curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN) | |||
|
1226 | ||||
|
1227 | # don't display the cursor | |||
|
1228 | try: | |||
|
1229 | curses.curs_set(0) | |||
|
1230 | except curses.error: | |||
|
1231 | pass | |||
|
1232 | ||||
|
1233 | def rendercommit(win, state): | |||
|
1234 | """Renders the commit window that shows the log of the current selected | |||
|
1235 | commit""" | |||
|
1236 | pos = state['pos'] | |||
|
1237 | rules = state['rules'] | |||
|
1238 | rule = rules[pos] | |||
|
1239 | ||||
|
1240 | ctx = rule.ctx | |||
|
1241 | win.box() | |||
|
1242 | ||||
|
1243 | maxy, maxx = win.getmaxyx() | |||
|
1244 | length = maxx - 3 | |||
|
1245 | ||||
|
1246 | line = "changeset: {0}:{1:<12}".format(ctx.rev(), ctx) | |||
|
1247 | win.addstr(1, 1, line[:length]) | |||
|
1248 | ||||
|
1249 | line = "user: {0}".format(stringutil.shortuser(ctx.user())) | |||
|
1250 | win.addstr(2, 1, line[:length]) | |||
|
1251 | ||||
|
1252 | bms = repo.nodebookmarks(ctx.node()) | |||
|
1253 | line = "bookmark: {0}".format(' '.join(bms)) | |||
|
1254 | win.addstr(3, 1, line[:length]) | |||
|
1255 | ||||
|
1256 | line = "files: {0}".format(','.join(ctx.files())) | |||
|
1257 | win.addstr(4, 1, line[:length]) | |||
|
1258 | ||||
|
1259 | line = "summary: {0}".format(ctx.description().splitlines()[0]) | |||
|
1260 | win.addstr(5, 1, line[:length]) | |||
|
1261 | ||||
|
1262 | conflicts = rule.conflicts | |||
|
1263 | if len(conflicts) > 0: | |||
|
1264 | conflictstr = ','.join(map(lambda r: str(r.ctx), conflicts)) | |||
|
1265 | conflictstr = "changed files overlap with {0}".format(conflictstr) | |||
|
1266 | else: | |||
|
1267 | conflictstr = 'no overlap' | |||
|
1268 | ||||
|
1269 | win.addstr(6, 1, conflictstr[:length]) | |||
|
1270 | win.noutrefresh() | |||
|
1271 | ||||
|
1272 | def helplines(mode): | |||
|
1273 | if mode == MODE_PATCH: | |||
|
1274 | help = """\ | |||
|
1275 | ?: help, k/up: line up, j/down: line down, v: stop viewing patch | |||
|
1276 | pgup: prev page, space/pgdn: next page, c: commit, q: abort | |||
|
1277 | """ | |||
|
1278 | else: | |||
|
1279 | help = """\ | |||
|
1280 | ?: help, k/up: move up, j/down: move down, space: select, v: view patch | |||
|
1281 | d: drop, e: edit, f: fold, m: mess, p: pick, r: roll | |||
|
1282 | pgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort | |||
|
1283 | """ | |||
|
1284 | return help.splitlines() | |||
|
1285 | ||||
|
1286 | def renderhelp(win, state): | |||
|
1287 | maxy, maxx = win.getmaxyx() | |||
|
1288 | mode, _ = state['mode'] | |||
|
1289 | for y, line in enumerate(helplines(mode)): | |||
|
1290 | if y >= maxy: | |||
|
1291 | break | |||
|
1292 | addln(win, y, 0, line, curses.color_pair(COLOR_HELP)) | |||
|
1293 | win.noutrefresh() | |||
|
1294 | ||||
|
1295 | def renderrules(rulesscr, state): | |||
|
1296 | rules = state['rules'] | |||
|
1297 | pos = state['pos'] | |||
|
1298 | selected = state['selected'] | |||
|
1299 | start = state['modes'][MODE_RULES]['line_offset'] | |||
|
1300 | ||||
|
1301 | conflicts = [r.ctx for r in rules if r.conflicts] | |||
|
1302 | if len(conflicts) > 0: | |||
|
1303 | line = "potential conflict in %s" % ','.join(map(str, conflicts)) | |||
|
1304 | addln(rulesscr, -1, 0, line, curses.color_pair(COLOR_WARN)) | |||
|
1305 | ||||
|
1306 | for y, rule in enumerate(rules[start:]): | |||
|
1307 | if y >= state['page_height']: | |||
|
1308 | break | |||
|
1309 | if len(rule.conflicts) > 0: | |||
|
1310 | rulesscr.addstr(y, 0, " ", curses.color_pair(COLOR_WARN)) | |||
|
1311 | else: | |||
|
1312 | rulesscr.addstr(y, 0, " ", curses.COLOR_BLACK) | |||
|
1313 | if y + start == selected: | |||
|
1314 | addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED)) | |||
|
1315 | elif y + start == pos: | |||
|
1316 | addln(rulesscr, y, 2, rule, curses.A_BOLD) | |||
|
1317 | else: | |||
|
1318 | addln(rulesscr, y, 2, rule) | |||
|
1319 | rulesscr.noutrefresh() | |||
|
1320 | ||||
|
1321 | def renderstring(win, state, output): | |||
|
1322 | maxy, maxx = win.getmaxyx() | |||
|
1323 | length = min(maxy - 1, len(output)) | |||
|
1324 | for y in range(0, length): | |||
|
1325 | win.addstr(y, 0, output[y]) | |||
|
1326 | win.noutrefresh() | |||
|
1327 | ||||
|
1328 | def renderpatch(win, state): | |||
|
1329 | start = state['modes'][MODE_PATCH]['line_offset'] | |||
|
1330 | renderstring(win, state, patchcontents(state)[start:]) | |||
|
1331 | ||||
|
1332 | def layout(mode): | |||
|
1333 | maxy, maxx = stdscr.getmaxyx() | |||
|
1334 | helplen = len(helplines(mode)) | |||
|
1335 | return { | |||
|
1336 | 'commit': (8, maxx), | |||
|
1337 | 'help': (helplen, maxx), | |||
|
1338 | 'main': (maxy - helplen - 8, maxx), | |||
|
1339 | } | |||
|
1340 | ||||
|
1341 | def drawvertwin(size, y, x): | |||
|
1342 | win = curses.newwin(size[0], size[1], y, x) | |||
|
1343 | y += size[0] | |||
|
1344 | return win, y, x | |||
|
1345 | ||||
|
1346 | state = { | |||
|
1347 | 'pos': 0, | |||
|
1348 | 'rules': rules, | |||
|
1349 | 'selected': None, | |||
|
1350 | 'mode': (MODE_INIT, MODE_INIT), | |||
|
1351 | 'page_height': None, | |||
|
1352 | 'modes': { | |||
|
1353 | MODE_RULES: { | |||
|
1354 | 'line_offset': 0, | |||
|
1355 | }, | |||
|
1356 | MODE_PATCH: { | |||
|
1357 | 'line_offset': 0, | |||
|
1358 | } | |||
|
1359 | }, | |||
|
1360 | 'repo': repo, | |||
|
1361 | } | |||
|
1362 | ||||
|
1363 | # eventloop | |||
|
1364 | ch = None | |||
|
1365 | stdscr.clear() | |||
|
1366 | stdscr.refresh() | |||
|
1367 | while True: | |||
|
1368 | try: | |||
|
1369 | oldmode, _ = state['mode'] | |||
|
1370 | if oldmode == MODE_INIT: | |||
|
1371 | changemode(state, MODE_RULES) | |||
|
1372 | e = event(state, ch) | |||
|
1373 | ||||
|
1374 | if e == E_QUIT: | |||
|
1375 | return False | |||
|
1376 | if e == E_HISTEDIT: | |||
|
1377 | return state['rules'] | |||
|
1378 | else: | |||
|
1379 | if e == E_RESIZE: | |||
|
1380 | size = screen_size() | |||
|
1381 | if size != stdscr.getmaxyx(): | |||
|
1382 | curses.resizeterm(*size) | |||
|
1383 | ||||
|
1384 | curmode, _ = state['mode'] | |||
|
1385 | sizes = layout(curmode) | |||
|
1386 | if curmode != oldmode: | |||
|
1387 | state['page_height'] = sizes['main'][0] | |||
|
1388 | # Adjust the view to fit the current screen size. | |||
|
1389 | movecursor(state, state['pos'], state['pos']) | |||
|
1390 | ||||
|
1391 | # Pack the windows against the top, each pane spread across the | |||
|
1392 | # full width of the screen. | |||
|
1393 | y, x = (0, 0) | |||
|
1394 | helpwin, y, x = drawvertwin(sizes['help'], y, x) | |||
|
1395 | mainwin, y, x = drawvertwin(sizes['main'], y, x) | |||
|
1396 | commitwin, y, x = drawvertwin(sizes['commit'], y, x) | |||
|
1397 | ||||
|
1398 | if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP): | |||
|
1399 | if e == E_PAGEDOWN: | |||
|
1400 | changeview(state, +1, 'page') | |||
|
1401 | elif e == E_PAGEUP: | |||
|
1402 | changeview(state, -1, 'page') | |||
|
1403 | elif e == E_LINEDOWN: | |||
|
1404 | changeview(state, +1, 'line') | |||
|
1405 | elif e == E_LINEUP: | |||
|
1406 | changeview(state, -1, 'line') | |||
|
1407 | ||||
|
1408 | # start rendering | |||
|
1409 | commitwin.erase() | |||
|
1410 | helpwin.erase() | |||
|
1411 | mainwin.erase() | |||
|
1412 | if curmode == MODE_PATCH: | |||
|
1413 | renderpatch(mainwin, state) | |||
|
1414 | elif curmode == MODE_HELP: | |||
|
1415 | renderstring(mainwin, state, __doc__.strip().splitlines()) | |||
|
1416 | else: | |||
|
1417 | renderrules(mainwin, state) | |||
|
1418 | rendercommit(commitwin, state) | |||
|
1419 | renderhelp(helpwin, state) | |||
|
1420 | curses.doupdate() | |||
|
1421 | # done rendering | |||
|
1422 | ch = stdscr.getkey() | |||
|
1423 | except curses.error: | |||
|
1424 | pass | |||
|
1425 | ||||
|
1426 | def _chistedit(ui, repo, *freeargs, **opts): | |||
|
1427 | """interactively edit changeset history via a curses interface | |||
|
1428 | ||||
|
1429 | Provides a ncurses interface to histedit. Press ? in chistedit mode | |||
|
1430 | to see an extensive help. Requires python-curses to be installed.""" | |||
|
1431 | ||||
|
1432 | if curses is None: | |||
|
1433 | raise error.Abort(_("Python curses library required")) | |||
|
1434 | ||||
|
1435 | # disable color | |||
|
1436 | ui._colormode = None | |||
|
1437 | ||||
|
1438 | try: | |||
|
1439 | keep = opts.get('keep') | |||
|
1440 | revs = opts.get('rev', [])[:] | |||
|
1441 | cmdutil.checkunfinished(repo) | |||
|
1442 | cmdutil.bailifchanged(repo) | |||
|
1443 | ||||
|
1444 | if os.path.exists(os.path.join(repo.path, 'histedit-state')): | |||
|
1445 | raise error.Abort(_('history edit already in progress, try ' | |||
|
1446 | '--continue or --abort')) | |||
|
1447 | revs.extend(freeargs) | |||
|
1448 | if not revs: | |||
|
1449 | defaultrev = destutil.desthistedit(ui, repo) | |||
|
1450 | if defaultrev is not None: | |||
|
1451 | revs.append(defaultrev) | |||
|
1452 | if len(revs) != 1: | |||
|
1453 | raise error.Abort( | |||
|
1454 | _('histedit requires exactly one ancestor revision')) | |||
|
1455 | ||||
|
1456 | rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs))) | |||
|
1457 | if len(rr) != 1: | |||
|
1458 | raise error.Abort(_('The specified revisions must have ' | |||
|
1459 | 'exactly one common root')) | |||
|
1460 | root = rr[0].node() | |||
|
1461 | ||||
|
1462 | topmost, empty = repo.dirstate.parents() | |||
|
1463 | revs = between(repo, root, topmost, keep) | |||
|
1464 | if not revs: | |||
|
1465 | raise error.Abort(_('%s is not an ancestor of working directory') % | |||
|
1466 | node.short(root)) | |||
|
1467 | ||||
|
1468 | ctxs = [] | |||
|
1469 | for i, r in enumerate(revs): | |||
|
1470 | ctxs.append(histeditrule(repo[r], i)) | |||
|
1471 | rc = curses.wrapper(functools.partial(_chisteditmain, repo, ctxs)) | |||
|
1472 | curses.echo() | |||
|
1473 | curses.endwin() | |||
|
1474 | if rc is False: | |||
|
1475 | ui.write(_("chistedit aborted\n")) | |||
|
1476 | return 0 | |||
|
1477 | if type(rc) is list: | |||
|
1478 | ui.status(_("running histedit\n")) | |||
|
1479 | rules = makecommands(rc) | |||
|
1480 | filename = repo.vfs.join('chistedit') | |||
|
1481 | with open(filename, 'w+') as fp: | |||
|
1482 | for r in rules: | |||
|
1483 | fp.write(r) | |||
|
1484 | opts['commands'] = filename | |||
|
1485 | return _texthistedit(ui, repo, *freeargs, **opts) | |||
|
1486 | except KeyboardInterrupt: | |||
|
1487 | pass | |||
|
1488 | return -1 | |||
|
1489 | ||||
918 | @command('histedit', |
|
1490 | @command('histedit', | |
919 | [('', 'commands', '', |
|
1491 | [('', 'commands', '', | |
920 | _('read history edits from the specified file'), _('FILE')), |
|
1492 | _('read history edits from the specified file'), _('FILE')), | |
@@ -1029,13 +1601,20 b' def histedit(ui, repo, *freeargs, **opts' | |||||
1029 | for intentional "edit" command, but also for resolving unexpected |
|
1601 | for intentional "edit" command, but also for resolving unexpected | |
1030 | conflicts). |
|
1602 | conflicts). | |
1031 | """ |
|
1603 | """ | |
|
1604 | # kludge: _chistedit only works for starting an edit, not aborting | |||
|
1605 | # or continuing, so fall back to regular _texthistedit for those | |||
|
1606 | # operations. | |||
|
1607 | if ui.interface('histedit') == 'curses' and _getgoal( | |||
|
1608 | pycompat.byteskwargs(opts)) == goalnew: | |||
|
1609 | return _chistedit(ui, repo, *freeargs, **opts) | |||
|
1610 | return _texthistedit(ui, repo, *freeargs, **opts) | |||
|
1611 | ||||
|
1612 | def _texthistedit(ui, repo, *freeargs, **opts): | |||
1032 | state = histeditstate(repo) |
|
1613 | state = histeditstate(repo) | |
1033 | try: |
|
1614 | with repo.wlock() as wlock, repo.lock() as lock: | |
1034 | state.wlock = repo.wlock() |
|
1615 | state.wlock = wlock | |
1035 | state.lock = repo.lock() |
|
1616 | state.lock = lock | |
1036 | _histedit(ui, repo, state, *freeargs, **opts) |
|
1617 | _histedit(ui, repo, state, *freeargs, **opts) | |
1037 | finally: |
|
|||
1038 | release(state.lock, state.wlock) |
|
|||
1039 |
|
1618 | |||
1040 | goalcontinue = 'continue' |
|
1619 | goalcontinue = 'continue' | |
1041 | goalabort = 'abort' |
|
1620 | goalabort = 'abort' | |
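Note that the curses path is only taken for a fresh edit (goalnew); --continue, --abort and --edit-plan always fall through to _texthistedit. Opting in uses the knob registered earlier in this series::

  [ui]
  interface.histedit = curses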
@@ -1043,11 +1622,11 b" goaleditplan = 'edit-plan'" | |||||
1043 | goalnew = 'new' |
|
1622 | goalnew = 'new' | |
1044 |
|
1623 | |||
1045 | def _getgoal(opts): |
|
1624 | def _getgoal(opts): | |
1046 | if opts.get('continue'): |
|
1625 | if opts.get(b'continue'): | |
1047 | return goalcontinue |
|
1626 | return goalcontinue | |
1048 | if opts.get('abort'): |
|
1627 | if opts.get(b'abort'): | |
1049 | return goalabort |
|
1628 | return goalabort | |
1050 | if opts.get('edit_plan'): |
|
1629 | if opts.get(b'edit_plan'): | |
1051 | return goaleditplan |
|
1630 | return goaleditplan | |
1052 | return goalnew |
|
1631 | return goalnew | |
1053 |
|
1632 | |||
@@ -1110,13 +1689,26 b' def _histedit(ui, repo, state, *freeargs' | |||||
1110 | fm.startitem() |
|
1689 | fm.startitem() | |
1111 | goal = _getgoal(opts) |
|
1690 | goal = _getgoal(opts) | |
1112 | revs = opts.get('rev', []) |
|
1691 | revs = opts.get('rev', []) | |
1113 | # experimental config: ui.history-editing-backup |
|
1692 | nobackup = not ui.configbool('rewrite', 'backup-bundle') | |
1114 | nobackup = not ui.configbool('ui', 'history-editing-backup') |
|
|||
1115 | rules = opts.get('commands', '') |
|
1693 | rules = opts.get('commands', '') | |
1116 | state.keep = opts.get('keep', False) |
|
1694 | state.keep = opts.get('keep', False) | |
1117 |
|
1695 | |||
1118 | _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs) |
|
1696 | _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs) | |
1119 |
|
1697 | |||
|
1698 | hastags = False | |||
|
1699 | if revs: | |||
|
1700 | revs = scmutil.revrange(repo, revs) | |||
|
1701 | ctxs = [repo[rev] for rev in revs] | |||
|
1702 | for ctx in ctxs: | |||
|
1703 | tags = [tag for tag in ctx.tags() if tag != 'tip'] | |||
|
1704 | if not hastags: | |||
|
1705 | hastags = len(tags) | |||
|
1706 | if hastags: | |||
|
1707 | if ui.promptchoice(_('warning: tags associated with the given' | |||
|
1708 | ' changeset will be lost after histedit.\n' | |||
|
1709 | 'do you want to continue (yN)? $$ &Yes $$ &No'), | |||
|
1710 | default=1): | |||
|
1711 | raise error.Abort(_('histedit cancelled\n')) | |||
1120 | # rebuild state |
|
1712 | # rebuild state | |
1121 | if goal == goalcontinue: |
|
1713 | if goal == goalcontinue: | |
1122 | state.read() |
|
1714 | state.read() | |
@@ -1317,7 +1909,7 b' def _newhistedit(ui, repo, state, revs, ' | |||||
1317 | state.topmost = topmost |
|
1909 | state.topmost = topmost | |
1318 | state.replacements = [] |
|
1910 | state.replacements = [] | |
1319 |
|
1911 | |||
1320 | ui.log("histedit", "%d actions to histedit", len(actions), |
|
1912 | ui.log("histedit", "%d actions to histedit\n", len(actions), | |
1321 | histedit_num_actions=len(actions)) |
|
1913 | histedit_num_actions=len(actions)) | |
1322 |
|
1914 | |||
1323 | # Create a backup so we can always abort completely. |
|
1915 | # Create a backup so we can always abort completely. |
@@ -107,9 +107,14 b' command.' | |||||
107 | from __future__ import absolute_import |
|
107 | from __future__ import absolute_import | |
108 |
|
108 | |||
109 | from mercurial import ( |
|
109 | from mercurial import ( | |
|
110 | cmdutil, | |||
|
111 | extensions, | |||
|
112 | exthelper, | |||
110 | hg, |
|
113 | hg, | |
|
114 | httppeer, | |||
111 | localrepo, |
|
115 | localrepo, | |
112 | registrar, |
|
116 | sshpeer, | |
|
117 | wireprotov1server, | |||
113 | ) |
|
118 | ) | |
114 |
|
119 | |||
115 | from . import ( |
|
120 | from . import ( | |
@@ -117,7 +122,6 b' from . import (' | |||||
117 | overrides, |
|
122 | overrides, | |
118 | proto, |
|
123 | proto, | |
119 | reposetup, |
|
124 | reposetup, | |
120 | uisetup as uisetupmod, |
|
|||
121 | ) |
|
125 | ) | |
122 |
|
126 | |||
123 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
127 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |
@@ -126,29 +130,65 b' from . import (' | |||||
126 | # leave the attribute unspecified. |
|
130 | # leave the attribute unspecified. | |
127 | testedwith = 'ships-with-hg-core' |
|
131 | testedwith = 'ships-with-hg-core' | |
128 |
|
132 | |||
129 | configtable = {} |
|
133 | eh = exthelper.exthelper() | |
130 | configitem = registrar.configitem(configtable) |
|
134 | eh.merge(lfcommands.eh) | |
|
135 | eh.merge(overrides.eh) | |||
|
136 | eh.merge(proto.eh) | |||
131 |
|
137 | |||
132 | configitem('largefiles', 'minsize', |
|
138 | eh.configitem('largefiles', 'minsize', | |
133 | default=configitem.dynamicdefault, |
|
139 | default=eh.configitem.dynamicdefault, | |
134 | ) |
|
140 | ) | |
135 | configitem('largefiles', 'patterns', |
|
141 | eh.configitem('largefiles', 'patterns', | |
136 | default=list, |
|
142 | default=list, | |
137 | ) |
|
143 | ) | |
138 | configitem('largefiles', 'usercache', |
|
144 | eh.configitem('largefiles', 'usercache', | |
139 | default=None, |
|
145 | default=None, | |
140 | ) |
|
146 | ) | |
141 |
|
147 | |||
|
148 | cmdtable = eh.cmdtable | |||
|
149 | configtable = eh.configtable | |||
|
150 | extsetup = eh.finalextsetup | |||
142 | reposetup = reposetup.reposetup |
|
151 | reposetup = reposetup.reposetup | |
|
152 | uisetup = eh.finaluisetup | |||
143 |
|
153 | |||
144 | def featuresetup(ui, supported): |
|
154 | def featuresetup(ui, supported): | |
145 | # don't die on seeing a repo with the largefiles requirement |
|
155 | # don't die on seeing a repo with the largefiles requirement | |
146 | supported |= {'largefiles'} |
|
156 | supported |= {'largefiles'} | |
147 |
|
157 | |||
148 | def uisetup(ui): |
|
158 | @eh.uisetup | |
|
159 | def _uisetup(ui): | |||
149 | localrepo.featuresetupfuncs.add(featuresetup) |
|
160 | localrepo.featuresetupfuncs.add(featuresetup) | |
150 | hg.wirepeersetupfuncs.append(proto.wirereposetup) |
|
161 | hg.wirepeersetupfuncs.append(proto.wirereposetup) | |
151 | uisetupmod.uisetup(ui) |
|
162 | ||
|
163 | cmdutil.outgoinghooks.add('largefiles', overrides.outgoinghook) | |||
|
164 | cmdutil.summaryremotehooks.add('largefiles', overrides.summaryremotehook) | |||
|
165 | ||||
|
166 | # create the new wireproto commands ... | |||
|
167 | wireprotov1server.wireprotocommand('putlfile', 'sha', permission='push')( | |||
|
168 | proto.putlfile) | |||
|
169 | wireprotov1server.wireprotocommand('getlfile', 'sha', permission='pull')( | |||
|
170 | proto.getlfile) | |||
|
171 | wireprotov1server.wireprotocommand('statlfile', 'sha', permission='pull')( | |||
|
172 | proto.statlfile) | |||
|
173 | wireprotov1server.wireprotocommand('lheads', '', permission='pull')( | |||
|
174 | wireprotov1server.heads) | |||
152 |
|
175 | |||
153 | cmdtable = lfcommands.cmdtable |
|
176 | extensions.wrapfunction(wireprotov1server.commands['heads'], 'func', | |
154 | revsetpredicate = overrides.revsetpredicate |
|
177 | proto.heads) | |
|
178 | # TODO also wrap wireproto.commandsv2 once heads is implemented there. | |||
|
179 | ||||
|
180 | # can't do this in reposetup because it needs to have happened before | |||
|
181 | # wirerepo.__init__ is called | |||
|
182 | proto.ssholdcallstream = sshpeer.sshv1peer._callstream | |||
|
183 | proto.httpoldcallstream = httppeer.httppeer._callstream | |||
|
184 | sshpeer.sshv1peer._callstream = proto.sshrepocallstream | |||
|
185 | httppeer.httppeer._callstream = proto.httprepocallstream | |||
|
186 | ||||
|
187 | # override some extensions' stuff as well | |||
|
188 | for name, module in extensions.extensions(): | |||
|
189 | if name == 'rebase': | |||
|
190 | # TODO: teach exthelper to handle this | |||
|
191 | extensions.wrapfunction(module, 'rebase', | |||
|
192 | overrides.overriderebase) | |||
|
193 | ||||
|
194 | revsetpredicate = eh.revsetpredicate |
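
The hunks above convert largefiles/__init__.py from module-level registrar tables to a single exthelper aggregator that submodules merge into. A minimal sketch of the pattern, using only the exthelper API exercised in this diff; the extension name 'myext' and its option are hypothetical:

    from mercurial import exthelper

    eh = exthelper.exthelper()
    # each submodule defines its own 'eh'; the package folds them in with
    # eh.merge(submodule.eh) before exporting the aggregated tables

    eh.configitem('myext', 'minsize',
        default=None,
    )

    @eh.uisetup
    def _uisetup(ui):
        # one-time UI setup; replaces a hand-written uisetup(ui) function
        pass

    # Mercurial still discovers extensions through these module-level names,
    # so the aggregated hooks and tables are re-exported at the end:
    cmdtable = eh.cmdtable
    configtable = eh.configtable
    uisetup = eh.finaluisetup
    extsetup = eh.finalextsetup
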
@@ -20,12 +20,12 b' from mercurial import (' | |||||
20 | cmdutil, |
|
20 | cmdutil, | |
21 | context, |
|
21 | context, | |
22 | error, |
|
22 | error, | |
|
23 | exthelper, | |||
23 | hg, |
|
24 | hg, | |
24 | lock, |
|
25 | lock, | |
25 | match as matchmod, |
|
26 | match as matchmod, | |
26 | node, |
|
27 | node, | |
27 | pycompat, |
|
28 | pycompat, | |
28 | registrar, |
|
|||
29 | scmutil, |
|
29 | scmutil, | |
30 | util, |
|
30 | util, | |
31 | ) |
|
31 | ) | |
@@ -44,10 +44,9 b' release = lock.release' | |||||
44 |
|
44 | |||
45 | # -- Commands ---------------------------------------------------------- |
|
45 | # -- Commands ---------------------------------------------------------- | |
46 |
|
46 | |||
47 | cmdtable = {} |
|
47 | eh = exthelper.exthelper() | |
48 | command = registrar.command(cmdtable) |
|
|||
49 |
|
48 | |||
50 | @command('lfconvert', |
|
49 | @eh.command('lfconvert', | |
51 | [('s', 'size', '', |
|
50 | [('s', 'size', '', | |
52 | _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'), |
|
51 | _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'), | |
53 | ('', 'to-normal', False, |
|
52 | ('', 'to-normal', False, | |
@@ -240,7 +239,7 b' def _lfconvert_addchangeset(rsrc, rdst, ' | |||||
240 | # largefile was modified, update standins |
|
239 | # largefile was modified, update standins | |
241 | m = hashlib.sha1('') |
|
240 | m = hashlib.sha1('') | |
242 | m.update(ctx[f].data()) |
|
241 | m.update(ctx[f].data()) | |
243 | hash = m.hexdigest() |
|
242 | hash = node.hex(m.digest()) | |
244 | if f not in lfiletohash or lfiletohash[f] != hash: |
|
243 | if f not in lfiletohash or lfiletohash[f] != hash: | |
245 | rdst.wwrite(f, ctx[f].data(), ctx[f].flags()) |
|
244 | rdst.wwrite(f, ctx[f].data(), ctx[f].flags()) | |
246 | executable = 'x' in ctx[f].flags() |
|
245 | executable = 'x' in ctx[f].flags() | |
@@ -560,7 +559,7 b' def updatelfiles(ui, repo, filelist=None' | |||||
560 | statuswriter(_('%d largefiles updated, %d removed\n') % (updated, |
|
559 | statuswriter(_('%d largefiles updated, %d removed\n') % (updated, | |
561 | removed)) |
|
560 | removed)) | |
562 |
|
561 | |||
563 | @command('lfpull', |
|
562 | @eh.command('lfpull', | |
564 | [('r', 'rev', [], _('pull largefiles for these revisions')) |
|
563 | [('r', 'rev', [], _('pull largefiles for these revisions')) | |
565 | ] + cmdutil.remoteopts, |
|
564 | ] + cmdutil.remoteopts, | |
566 | _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]')) |
|
565 | _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]')) | |
@@ -599,7 +598,7 b' def lfpull(ui, repo, source="default", *' | |||||
599 | numcached += len(cached) |
|
598 | numcached += len(cached) | |
600 | ui.status(_("%d largefiles cached\n") % numcached) |
|
599 | ui.status(_("%d largefiles cached\n") % numcached) | |
601 |
|
600 | |||
602 | @command('debuglfput', |
|
601 | @eh.command('debuglfput', | |
603 | [] + cmdutil.remoteopts, |
|
602 | [] + cmdutil.remoteopts, | |
604 | _('FILE')) |
|
603 | _('FILE')) | |
605 | def debuglfput(ui, repo, filepath, **kwargs): |
|
604 | def debuglfput(ui, repo, filepath, **kwargs): |
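
One change above is easy to miss: `hash = m.hexdigest()` becomes `hash = node.hex(m.digest())`. On Python 3, hexdigest() returns a native (unicode) str, while Mercurial's node.hex, a thin wrapper over binascii.hexlify as far as I can tell, yields bytes on both major versions, keeping the hash comparable with bytes-based dirstate data. A self-contained illustration using only the standard library:

    import binascii
    import hashlib

    m = hashlib.sha1(b'')
    m.update(b'example largefile contents')

    text_hash = m.hexdigest()                  # str on Python 3
    bytes_hash = binascii.hexlify(m.digest())  # bytes on Python 2 and 3

    # identical hex characters, different types on Python 3
    assert bytes_hash.decode('ascii') == text_hash
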
@@ -14,18 +14,29 b' import os' | |||||
14 |
|
14 | |||
15 | from mercurial.i18n import _ |
|
15 | from mercurial.i18n import _ | |
16 |
|
16 | |||
|
17 | from mercurial.hgweb import ( | |||
|
18 | webcommands, | |||
|
19 | ) | |||
|
20 | ||||
17 | from mercurial import ( |
|
21 | from mercurial import ( | |
18 | archival, |
|
22 | archival, | |
19 | cmdutil, |
|
23 | cmdutil, | |
|
24 | copies as copiesmod, | |||
20 | error, |
|
25 | error, | |
|
26 | exchange, | |||
|
27 | exthelper, | |||
|
28 | filemerge, | |||
21 | hg, |
|
29 | hg, | |
22 | logcmdutil, |
|
30 | logcmdutil, | |
23 | match as matchmod, |
|
31 | match as matchmod, | |
|
32 | merge, | |||
24 | pathutil, |
|
33 | pathutil, | |
25 | pycompat, |
|
34 | pycompat, | |
26 | registrar, |
|
|||
27 | scmutil, |
|
35 | scmutil, | |
28 | smartset, |
|
36 | smartset, | |
|
37 | subrepo, | |||
|
38 | upgrade, | |||
|
39 | url as urlmod, | |||
29 | util, |
|
40 | util, | |
30 | ) |
|
41 | ) | |
31 |
|
42 | |||
@@ -35,6 +46,8 b' from . import (' | |||||
35 | storefactory, |
|
46 | storefactory, | |
36 | ) |
|
47 | ) | |
37 |
|
48 | |||
|
49 | eh = exthelper.exthelper() | |||
|
50 | ||||
38 | # -- Utility functions: commonly/repeatedly needed functionality --------------- |
|
51 | # -- Utility functions: commonly/repeatedly needed functionality --------------- | |
39 |
|
52 | |||
40 | def composelargefilematcher(match, manifest): |
|
53 | def composelargefilematcher(match, manifest): | |
@@ -248,16 +261,23 b' def removelargefiles(ui, repo, isaddremo' | |||||
248 |
|
261 | |||
249 | # For overriding mercurial.hgweb.webcommands so that largefiles will |
|
262 | # For overriding mercurial.hgweb.webcommands so that largefiles will | |
250 | # appear at their right place in the manifests. |
|
263 | # appear at their right place in the manifests. | |
|
264 | @eh.wrapfunction(webcommands, 'decodepath') | |||
251 | def decodepath(orig, path): |
|
265 | def decodepath(orig, path): | |
252 | return lfutil.splitstandin(path) or path |
|
266 | return lfutil.splitstandin(path) or path | |
253 |
|
267 | |||
254 | # -- Wrappers: modify existing commands -------------------------------- |
|
268 | # -- Wrappers: modify existing commands -------------------------------- | |
255 |
|
269 | |||
|
270 | @eh.wrapcommand('add', | |||
|
271 | opts=[('', 'large', None, _('add as largefile')), | |||
|
272 | ('', 'normal', None, _('add as normal file')), | |||
|
273 | ('', 'lfsize', '', _('add all files above this size (in megabytes) ' | |||
|
274 | 'as largefiles (default: 10)'))]) | |||
256 | def overrideadd(orig, ui, repo, *pats, **opts): |
|
275 | def overrideadd(orig, ui, repo, *pats, **opts): | |
257 | if opts.get(r'normal') and opts.get(r'large'): |
|
276 | if opts.get(r'normal') and opts.get(r'large'): | |
258 | raise error.Abort(_('--normal cannot be used with --large')) |
|
277 | raise error.Abort(_('--normal cannot be used with --large')) | |
259 | return orig(ui, repo, *pats, **opts) |
|
278 | return orig(ui, repo, *pats, **opts) | |
260 |
|
279 | |||
|
280 | @eh.wrapfunction(cmdutil, 'add') | |||
261 | def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts): |
|
281 | def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts): | |
262 | # The --normal flag short circuits this override |
|
282 | # The --normal flag short circuits this override | |
263 | if opts.get(r'normal'): |
|
283 | if opts.get(r'normal'): | |
@@ -271,6 +291,7 b' def cmdutiladd(orig, ui, repo, matcher, ' | |||||
271 | bad.extend(f for f in lbad) |
|
291 | bad.extend(f for f in lbad) | |
272 | return bad |
|
292 | return bad | |
273 |
|
293 | |||
|
294 | @eh.wrapfunction(cmdutil, 'remove') | |||
274 | def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos, |
|
295 | def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos, | |
275 | dryrun): |
|
296 | dryrun): | |
276 | normalmatcher = composenormalfilematcher(matcher, repo[None].manifest()) |
|
297 | normalmatcher = composenormalfilematcher(matcher, repo[None].manifest()) | |
@@ -279,6 +300,7 b' def cmdutilremove(orig, ui, repo, matche' | |||||
279 | return removelargefiles(ui, repo, False, matcher, dryrun, after=after, |
|
300 | return removelargefiles(ui, repo, False, matcher, dryrun, after=after, | |
280 | force=force) or result |
|
301 | force=force) or result | |
281 |
|
302 | |||
|
303 | @eh.wrapfunction(subrepo.hgsubrepo, 'status') | |||
282 | def overridestatusfn(orig, repo, rev2, **opts): |
|
304 | def overridestatusfn(orig, repo, rev2, **opts): | |
283 | try: |
|
305 | try: | |
284 | repo._repo.lfstatus = True |
|
306 | repo._repo.lfstatus = True | |
@@ -286,6 +308,7 b' def overridestatusfn(orig, repo, rev2, *' | |||||
286 | finally: |
|
308 | finally: | |
287 | repo._repo.lfstatus = False |
|
309 | repo._repo.lfstatus = False | |
288 |
|
310 | |||
|
311 | @eh.wrapcommand('status') | |||
289 | def overridestatus(orig, ui, repo, *pats, **opts): |
|
312 | def overridestatus(orig, ui, repo, *pats, **opts): | |
290 | try: |
|
313 | try: | |
291 | repo.lfstatus = True |
|
314 | repo.lfstatus = True | |
@@ -293,6 +316,7 b' def overridestatus(orig, ui, repo, *pats' | |||||
293 | finally: |
|
316 | finally: | |
294 | repo.lfstatus = False |
|
317 | repo.lfstatus = False | |
295 |
|
318 | |||
|
319 | @eh.wrapfunction(subrepo.hgsubrepo, 'dirty') | |||
296 | def overridedirty(orig, repo, ignoreupdate=False, missing=False): |
|
320 | def overridedirty(orig, repo, ignoreupdate=False, missing=False): | |
297 | try: |
|
321 | try: | |
298 | repo._repo.lfstatus = True |
|
322 | repo._repo.lfstatus = True | |
@@ -300,6 +324,7 b' def overridedirty(orig, repo, ignoreupda' | |||||
300 | finally: |
|
324 | finally: | |
301 | repo._repo.lfstatus = False |
|
325 | repo._repo.lfstatus = False | |
302 |
|
326 | |||
|
327 | @eh.wrapcommand('log') | |||
303 | def overridelog(orig, ui, repo, *pats, **opts): |
|
328 | def overridelog(orig, ui, repo, *pats, **opts): | |
304 | def overridematchandpats(ctx, pats=(), opts=None, globbed=False, |
|
329 | def overridematchandpats(ctx, pats=(), opts=None, globbed=False, | |
305 | default='relpath', badfn=None): |
|
330 | default='relpath', badfn=None): | |
@@ -406,6 +431,13 b' def overridelog(orig, ui, repo, *pats, *' | |||||
406 | restorematchandpatsfn() |
|
431 | restorematchandpatsfn() | |
407 | setattr(logcmdutil, '_makenofollowfilematcher', oldmakefilematcher) |
|
432 | setattr(logcmdutil, '_makenofollowfilematcher', oldmakefilematcher) | |
408 |
|
433 | |||
|
434 | @eh.wrapcommand('verify', | |||
|
435 | opts=[('', 'large', None, | |||
|
436 | _('verify that all largefiles in current revision exists')), | |||
|
437 | ('', 'lfa', None, | |||
|
438 | _('verify largefiles in all revisions, not just current')), | |||
|
439 | ('', 'lfc', None, | |||
|
440 | _('verify local largefile contents, not just existence'))]) | |||
409 | def overrideverify(orig, ui, repo, *pats, **opts): |
|
441 | def overrideverify(orig, ui, repo, *pats, **opts): | |
410 | large = opts.pop(r'large', False) |
|
442 | large = opts.pop(r'large', False) | |
411 | all = opts.pop(r'lfa', False) |
|
443 | all = opts.pop(r'lfa', False) | |
@@ -416,6 +448,8 b' def overrideverify(orig, ui, repo, *pats' | |||||
416 | result = result or lfcommands.verifylfiles(ui, repo, all, contents) |
|
448 | result = result or lfcommands.verifylfiles(ui, repo, all, contents) | |
417 | return result |
|
449 | return result | |
418 |
|
450 | |||
|
451 | @eh.wrapcommand('debugstate', | |||
|
452 | opts=[('', 'large', None, _('display largefiles dirstate'))]) | |||
419 | def overridedebugstate(orig, ui, repo, *pats, **opts): |
|
453 | def overridedebugstate(orig, ui, repo, *pats, **opts): | |
420 | large = opts.pop(r'large', False) |
|
454 | large = opts.pop(r'large', False) | |
421 | if large: |
|
455 | if large: | |
@@ -435,6 +469,7 b' def overridedebugstate(orig, ui, repo, *' | |||||
435 | # The overridden function filters the unknown files by removing any |
|
469 | # The overridden function filters the unknown files by removing any | |
436 | # largefiles. This makes the merge proceed and we can then handle this |
|
470 | # largefiles. This makes the merge proceed and we can then handle this | |
437 | # case further in the overridden calculateupdates function below. |
|
471 | # case further in the overridden calculateupdates function below. | |
|
472 | @eh.wrapfunction(merge, '_checkunknownfile') | |||
438 | def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None): |
|
473 | def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None): | |
439 | if lfutil.standin(repo.dirstate.normalize(f)) in wctx: |
|
474 | if lfutil.standin(repo.dirstate.normalize(f)) in wctx: | |
440 | return False |
|
475 | return False | |
@@ -466,6 +501,7 b' def overridecheckunknownfile(origfn, rep' | |||||
466 | # Finally, the merge.applyupdates function will then take care of |
|
501 | # Finally, the merge.applyupdates function will then take care of | |
467 | # writing the files into the working copy and lfcommands.updatelfiles |
|
502 | # writing the files into the working copy and lfcommands.updatelfiles | |
468 | # will update the largefiles. |
|
503 | # will update the largefiles. | |
|
504 | @eh.wrapfunction(merge, 'calculateupdates') | |||
469 | def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force, |
|
505 | def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force, | |
470 | acceptremote, *args, **kwargs): |
|
506 | acceptremote, *args, **kwargs): | |
471 | overwrite = force and not branchmerge |
|
507 | overwrite = force and not branchmerge | |
@@ -534,6 +570,7 b' def overridecalculateupdates(origfn, rep' | |||||
534 |
|
570 | |||
535 | return actions, diverge, renamedelete |
|
571 | return actions, diverge, renamedelete | |
536 |
|
572 | |||
|
573 | @eh.wrapfunction(merge, 'recordupdates') | |||
537 | def mergerecordupdates(orig, repo, actions, branchmerge): |
|
574 | def mergerecordupdates(orig, repo, actions, branchmerge): | |
538 | if 'lfmr' in actions: |
|
575 | if 'lfmr' in actions: | |
539 | lfdirstate = lfutil.openlfdirstate(repo.ui, repo) |
|
576 | lfdirstate = lfutil.openlfdirstate(repo.ui, repo) | |
@@ -549,6 +586,7 b' def mergerecordupdates(orig, repo, actio' | |||||
549 |
|
586 | |||
550 | # Override filemerge to prompt the user about how they wish to merge |
|
587 | # Override filemerge to prompt the user about how they wish to merge | |
551 | # largefiles. This will handle identical edits without prompting the user. |
|
588 | # largefiles. This will handle identical edits without prompting the user. | |
|
589 | @eh.wrapfunction(filemerge, '_filemerge') | |||
552 | def overridefilemerge(origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, |
|
590 | def overridefilemerge(origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, | |
553 | labels=None): |
|
591 | labels=None): | |
554 | if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent(): |
|
592 | if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent(): | |
@@ -570,6 +608,7 b' def overridefilemerge(origfn, premerge, ' | |||||
570 | repo.wwrite(fcd.path(), fco.data(), fco.flags()) |
|
608 | repo.wwrite(fcd.path(), fco.data(), fco.flags()) | |
571 | return True, 0, False |
|
609 | return True, 0, False | |
572 |
|
610 | |||
|
611 | @eh.wrapfunction(copiesmod, 'pathcopies') | |||
573 | def copiespathcopies(orig, ctx1, ctx2, match=None): |
|
612 | def copiespathcopies(orig, ctx1, ctx2, match=None): | |
574 | copies = orig(ctx1, ctx2, match=match) |
|
613 | copies = orig(ctx1, ctx2, match=match) | |
575 | updated = {} |
|
614 | updated = {} | |
@@ -584,6 +623,7 b' def copiespathcopies(orig, ctx1, ctx2, m' | |||||
584 | # checks if the destination largefile already exists. It also keeps a |
|
623 | # checks if the destination largefile already exists. It also keeps a | |
585 | # list of copied files so that the largefiles can be copied and the |
|
624 | # list of copied files so that the largefiles can be copied and the | |
586 | # dirstate updated. |
|
625 | # dirstate updated. | |
|
626 | @eh.wrapfunction(cmdutil, 'copy') | |||
587 | def overridecopy(orig, ui, repo, pats, opts, rename=False): |
|
627 | def overridecopy(orig, ui, repo, pats, opts, rename=False): | |
588 | # doesn't remove largefile on rename |
|
628 | # doesn't remove largefile on rename | |
589 | if len(pats) < 2: |
|
629 | if len(pats) < 2: | |
@@ -729,6 +769,7 b' def overridecopy(orig, ui, repo, pats, o' | |||||
729 | # commits. Update the standins then run the original revert, changing |
|
769 | # commits. Update the standins then run the original revert, changing | |
730 | # the matcher to hit standins instead of largefiles. Based on the |
|
770 | # the matcher to hit standins instead of largefiles. Based on the | |
731 | # resulting standins update the largefiles. |
|
771 | # resulting standins update the largefiles. | |
|
772 | @eh.wrapfunction(cmdutil, 'revert') | |||
732 | def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts): |
|
773 | def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts): | |
733 | # Because we put the standins in a bad state (by updating them) |
|
774 | # Because we put the standins in a bad state (by updating them) | |
734 | # and then return them to a correct state we need to lock to |
|
775 | # and then return them to a correct state we need to lock to | |
@@ -799,6 +840,11 b' def overriderevert(orig, ui, repo, ctx, ' | |||||
799 |
|
840 | |||
800 | # after pulling changesets, we need to take some extra care to get |
|
841 | # after pulling changesets, we need to take some extra care to get | |
801 | # largefiles updated remotely |
|
842 | # largefiles updated remotely | |
|
843 | @eh.wrapcommand('pull', | |||
|
844 | opts=[('', 'all-largefiles', None, | |||
|
845 | _('download all pulled versions of largefiles (DEPRECATED)')), | |||
|
846 | ('', 'lfrev', [], | |||
|
847 | _('download largefiles for these revisions'), _('REV'))]) | |||
802 | def overridepull(orig, ui, repo, source=None, **opts): |
|
848 | def overridepull(orig, ui, repo, source=None, **opts): | |
803 | revsprepull = len(repo) |
|
849 | revsprepull = len(repo) | |
804 | if not source: |
|
850 | if not source: | |
@@ -822,6 +868,9 b' def overridepull(orig, ui, repo, source=' | |||||
822 | ui.status(_("%d largefiles cached\n") % numcached) |
|
868 | ui.status(_("%d largefiles cached\n") % numcached) | |
823 | return result |
|
869 | return result | |
824 |
|
870 | |||
|
871 | @eh.wrapcommand('push', | |||
|
872 | opts=[('', 'lfrev', [], | |||
|
873 | _('upload largefiles for these revisions'), _('REV'))]) | |||
825 | def overridepush(orig, ui, repo, *args, **kwargs): |
|
874 | def overridepush(orig, ui, repo, *args, **kwargs): | |
826 | """Override push command and store --lfrev parameters in opargs""" |
|
875 | """Override push command and store --lfrev parameters in opargs""" | |
827 | lfrevs = kwargs.pop(r'lfrev', None) |
|
876 | lfrevs = kwargs.pop(r'lfrev', None) | |
@@ -830,6 +879,7 b' def overridepush(orig, ui, repo, *args, ' | |||||
830 | opargs['lfrevs'] = scmutil.revrange(repo, lfrevs) |
|
879 | opargs['lfrevs'] = scmutil.revrange(repo, lfrevs) | |
831 | return orig(ui, repo, *args, **kwargs) |
|
880 | return orig(ui, repo, *args, **kwargs) | |
832 |
|
881 | |||
|
882 | @eh.wrapfunction(exchange, 'pushoperation') | |||
833 | def exchangepushoperation(orig, *args, **kwargs): |
|
883 | def exchangepushoperation(orig, *args, **kwargs): | |
834 | """Override pushoperation constructor and store lfrevs parameter""" |
|
884 | """Override pushoperation constructor and store lfrevs parameter""" | |
835 | lfrevs = kwargs.pop(r'lfrevs', None) |
|
885 | lfrevs = kwargs.pop(r'lfrevs', None) | |
@@ -837,9 +887,7 b' def exchangepushoperation(orig, *args, *' | |||||
837 | pushop.lfrevs = lfrevs |
|
887 | pushop.lfrevs = lfrevs | |
838 | return pushop |
|
888 | return pushop | |
839 |
|
889 | |||
840 | revsetpredicate = registrar.revsetpredicate() |
|
890 | @eh.revsetpredicate('pulled()') | |
841 |
|
||||
842 | @revsetpredicate('pulled()') |
|
|||
843 | def pulledrevsetsymbol(repo, subset, x): |
|
891 | def pulledrevsetsymbol(repo, subset, x): | |
844 | """Changesets that just has been pulled. |
|
892 | """Changesets that just has been pulled. | |
845 |
|
893 | |||
@@ -865,6 +913,9 b' def pulledrevsetsymbol(repo, subset, x):' | |||||
865 | raise error.Abort(_("pulled() only available in --lfrev")) |
|
913 | raise error.Abort(_("pulled() only available in --lfrev")) | |
866 | return smartset.baseset([r for r in subset if r >= firstpulled]) |
|
914 | return smartset.baseset([r for r in subset if r >= firstpulled]) | |
867 |
|
915 | |||
|
916 | @eh.wrapcommand('clone', | |||
|
917 | opts=[('', 'all-largefiles', None, | |||
|
918 | _('download all versions of all largefiles'))]) | |||
868 | def overrideclone(orig, ui, source, dest=None, **opts): |
|
919 | def overrideclone(orig, ui, source, dest=None, **opts): | |
869 | d = dest |
|
920 | d = dest | |
870 | if d is None: |
|
921 | if d is None: | |
@@ -876,6 +927,7 b' def overrideclone(orig, ui, source, dest' | |||||
876 |
|
927 | |||
877 | return orig(ui, source, dest, **opts) |
|
928 | return orig(ui, source, dest, **opts) | |
878 |
|
929 | |||
|
930 | @eh.wrapfunction(hg, 'clone') | |||
879 | def hgclone(orig, ui, opts, *args, **kwargs): |
|
931 | def hgclone(orig, ui, opts, *args, **kwargs): | |
880 | result = orig(ui, opts, *args, **kwargs) |
|
932 | result = orig(ui, opts, *args, **kwargs) | |
881 |
|
933 | |||
@@ -900,6 +952,7 b' def hgclone(orig, ui, opts, *args, **kwa' | |||||
900 |
|
952 | |||
901 | return result |
|
953 | return result | |
902 |
|
954 | |||
|
955 | @eh.wrapcommand('rebase', extension='rebase') | |||
903 | def overriderebase(orig, ui, repo, **opts): |
|
956 | def overriderebase(orig, ui, repo, **opts): | |
904 | if not util.safehasattr(repo, '_largefilesenabled'): |
|
957 | if not util.safehasattr(repo, '_largefilesenabled'): | |
905 | return orig(ui, repo, **opts) |
|
958 | return orig(ui, repo, **opts) | |
@@ -913,6 +966,7 b' def overriderebase(orig, ui, repo, **opt' | |||||
913 | repo._lfstatuswriters.pop() |
|
966 | repo._lfstatuswriters.pop() | |
914 | repo._lfcommithooks.pop() |
|
967 | repo._lfcommithooks.pop() | |
915 |
|
968 | |||
|
969 | @eh.wrapcommand('archive') | |||
916 | def overridearchivecmd(orig, ui, repo, dest, **opts): |
|
970 | def overridearchivecmd(orig, ui, repo, dest, **opts): | |
917 | repo.unfiltered().lfstatus = True |
|
971 | repo.unfiltered().lfstatus = True | |
918 |
|
972 | |||
@@ -921,6 +975,7 b' def overridearchivecmd(orig, ui, repo, d' | |||||
921 | finally: |
|
975 | finally: | |
922 | repo.unfiltered().lfstatus = False |
|
976 | repo.unfiltered().lfstatus = False | |
923 |
|
977 | |||
|
978 | @eh.wrapfunction(webcommands, 'archive') | |||
924 | def hgwebarchive(orig, web): |
|
979 | def hgwebarchive(orig, web): | |
925 | web.repo.lfstatus = True |
|
980 | web.repo.lfstatus = True | |
926 |
|
981 | |||
@@ -929,12 +984,13 b' def hgwebarchive(orig, web):' | |||||
929 | finally: |
|
984 | finally: | |
930 | web.repo.lfstatus = False |
|
985 | web.repo.lfstatus = False | |
931 |
|
986 | |||
932 | def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None, |
|
987 | @eh.wrapfunction(archival, 'archive') | |
|
988 | def overridearchive(orig, repo, dest, node, kind, decode=True, match=None, | |||
933 | prefix='', mtime=None, subrepos=None): |
|
989 | prefix='', mtime=None, subrepos=None): | |
934 | # For some reason setting repo.lfstatus in hgwebarchive only changes the |
|
990 | # For some reason setting repo.lfstatus in hgwebarchive only changes the | |
935 | # unfiltered repo's attr, so check that as well. |
|
991 | # unfiltered repo's attr, so check that as well. | |
936 | if not repo.lfstatus and not repo.unfiltered().lfstatus: |
|
992 | if not repo.lfstatus and not repo.unfiltered().lfstatus: | |
937 | return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime, |
|
993 | return orig(repo, dest, node, kind, decode, match, prefix, mtime, | |
938 | subrepos) |
|
994 | subrepos) | |
939 |
|
995 | |||
940 | # No need to lock because we are only reading history and |
|
996 | # No need to lock because we are only reading history and | |
@@ -955,7 +1011,7 b' def overridearchive(orig, repo, dest, no' | |||||
955 | prefix = archival.tidyprefix(dest, kind, prefix) |
|
1011 | prefix = archival.tidyprefix(dest, kind, prefix) | |
956 |
|
1012 | |||
957 | def write(name, mode, islink, getdata): |
|
1013 | def write(name, mode, islink, getdata): | |
958 | if matchfn and not matchfn(name): |
|
1014 | if match and not match(name): | |
959 | return |
|
1015 | return | |
960 | data = getdata() |
|
1016 | data = getdata() | |
961 | if decode: |
|
1017 | if decode: | |
@@ -991,12 +1047,13 b' def overridearchive(orig, repo, dest, no' | |||||
991 | if subrepos: |
|
1047 | if subrepos: | |
992 | for subpath in sorted(ctx.substate): |
|
1048 | for subpath in sorted(ctx.substate): | |
993 | sub = ctx.workingsub(subpath) |
|
1049 | sub = ctx.workingsub(subpath) | |
994 | submatch = matchmod.subdirmatcher(subpath, matchfn) |
|
1050 | submatch = matchmod.subdirmatcher(subpath, match) | |
995 | sub._repo.lfstatus = True |
|
1051 | sub._repo.lfstatus = True | |
996 | sub.archive(archiver, prefix, submatch) |
|
1052 | sub.archive(archiver, prefix, submatch) | |
997 |
|
1053 | |||
998 | archiver.done() |
|
1054 | archiver.done() | |
999 |
|
1055 | |||
|
1056 | @eh.wrapfunction(subrepo.hgsubrepo, 'archive') | |||
1000 | def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True): |
|
1057 | def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True): | |
1001 | lfenabled = util.safehasattr(repo._repo, '_largefilesenabled') |
|
1058 | lfenabled = util.safehasattr(repo._repo, '_largefilesenabled') | |
1002 | if not lfenabled or not repo._repo.lfstatus: |
|
1059 | if not lfenabled or not repo._repo.lfstatus: | |
@@ -1051,6 +1108,7 b' def hgsubrepoarchive(orig, repo, archive' | |||||
1051 | # standin until a commit. cmdutil.bailifchanged() raises an exception |
|
1108 | # standin until a commit. cmdutil.bailifchanged() raises an exception | |
1052 | # if the repo has uncommitted changes. Wrap it to also check if |
|
1109 | # if the repo has uncommitted changes. Wrap it to also check if | |
1053 | # largefiles were changed. This is used by bisect, backout and fetch. |
|
1110 | # largefiles were changed. This is used by bisect, backout and fetch. | |
|
1111 | @eh.wrapfunction(cmdutil, 'bailifchanged') | |||
1054 | def overridebailifchanged(orig, repo, *args, **kwargs): |
|
1112 | def overridebailifchanged(orig, repo, *args, **kwargs): | |
1055 | orig(repo, *args, **kwargs) |
|
1113 | orig(repo, *args, **kwargs) | |
1056 | repo.lfstatus = True |
|
1114 | repo.lfstatus = True | |
@@ -1059,6 +1117,7 b' def overridebailifchanged(orig, repo, *a' | |||||
1059 | if s.modified or s.added or s.removed or s.deleted: |
|
1117 | if s.modified or s.added or s.removed or s.deleted: | |
1060 | raise error.Abort(_('uncommitted changes')) |
|
1118 | raise error.Abort(_('uncommitted changes')) | |
1061 |
|
1119 | |||
|
1120 | @eh.wrapfunction(cmdutil, 'postcommitstatus') | |||
1062 | def postcommitstatus(orig, repo, *args, **kwargs): |
|
1121 | def postcommitstatus(orig, repo, *args, **kwargs): | |
1063 | repo.lfstatus = True |
|
1122 | repo.lfstatus = True | |
1064 | try: |
|
1123 | try: | |
@@ -1066,6 +1125,7 b' def postcommitstatus(orig, repo, *args, ' | |||||
1066 | finally: |
|
1125 | finally: | |
1067 | repo.lfstatus = False |
|
1126 | repo.lfstatus = False | |
1068 |
|
1127 | |||
|
1128 | @eh.wrapfunction(cmdutil, 'forget') | |||
1069 | def cmdutilforget(orig, ui, repo, match, prefix, explicitonly, dryrun, |
|
1129 | def cmdutilforget(orig, ui, repo, match, prefix, explicitonly, dryrun, | |
1070 | interactive): |
|
1130 | interactive): | |
1071 | normalmatcher = composenormalfilematcher(match, repo[None].manifest()) |
|
1131 | normalmatcher = composenormalfilematcher(match, repo[None].manifest()) | |
@@ -1167,6 +1227,13 b' def outgoinghook(ui, repo, other, opts, ' | |||||
1167 | showhashes(file) |
|
1227 | showhashes(file) | |
1168 | ui.status('\n') |
|
1228 | ui.status('\n') | |
1169 |
|
1229 | |||
|
1230 | @eh.wrapcommand('outgoing', | |||
|
1231 | opts=[('', 'large', None, _('display outgoing largefiles'))]) | |||
|
1232 | def _outgoingcmd(orig, *args, **kwargs): | |||
|
1233 | # Nothing to do here other than add the extra help option- the hook above | |||
|
1234 | # processes it. | |||
|
1235 | return orig(*args, **kwargs) | |||
|
1236 | ||||
1170 | def summaryremotehook(ui, repo, opts, changes): |
|
1237 | def summaryremotehook(ui, repo, opts, changes): | |
1171 | largeopt = opts.get('large', False) |
|
1238 | largeopt = opts.get('large', False) | |
1172 | if changes is None: |
|
1239 | if changes is None: | |
@@ -1196,6 +1263,8 b' def summaryremotehook(ui, repo, opts, ch' | |||||
1196 | ui.status(_('largefiles: %d entities for %d files to upload\n') |
|
1263 | ui.status(_('largefiles: %d entities for %d files to upload\n') | |
1197 | % (len(lfhashes), len(toupload))) |
|
1264 | % (len(lfhashes), len(toupload))) | |
1198 |
|
1265 | |||
|
1266 | @eh.wrapcommand('summary', | |||
|
1267 | opts=[('', 'large', None, _('display outgoing largefiles'))]) | |||
1199 | def overridesummary(orig, ui, repo, *pats, **opts): |
|
1268 | def overridesummary(orig, ui, repo, *pats, **opts): | |
1200 | try: |
|
1269 | try: | |
1201 | repo.lfstatus = True |
|
1270 | repo.lfstatus = True | |
@@ -1203,6 +1272,7 b' def overridesummary(orig, ui, repo, *pat' | |||||
1203 | finally: |
|
1272 | finally: | |
1204 | repo.lfstatus = False |
|
1273 | repo.lfstatus = False | |
1205 |
|
1274 | |||
|
1275 | @eh.wrapfunction(scmutil, 'addremove') | |||
1206 | def scmutiladdremove(orig, repo, matcher, prefix, opts=None): |
|
1276 | def scmutiladdremove(orig, repo, matcher, prefix, opts=None): | |
1207 | if opts is None: |
|
1277 | if opts is None: | |
1208 | opts = {} |
|
1278 | opts = {} | |
@@ -1242,6 +1312,7 b' def scmutiladdremove(orig, repo, matcher' | |||||
1242 |
|
1312 | |||
1243 | # Calling purge with --all will cause the largefiles to be deleted. |
|
1313 | # Calling purge with --all will cause the largefiles to be deleted. | |
1244 | # Override repo.status to prevent this from happening. |
|
1314 | # Override repo.status to prevent this from happening. | |
|
1315 | @eh.wrapcommand('purge', extension='purge') | |||
1245 | def overridepurge(orig, ui, repo, *dirs, **opts): |
|
1316 | def overridepurge(orig, ui, repo, *dirs, **opts): | |
1246 | # XXX Monkey patching a repoview will not work. The assigned attribute will |
|
1317 | # XXX Monkey patching a repoview will not work. The assigned attribute will | |
1247 | # be set on the unfiltered repo, but we will only lookup attributes in the |
|
1318 | # be set on the unfiltered repo, but we will only lookup attributes in the | |
@@ -1267,6 +1338,7 b' def overridepurge(orig, ui, repo, *dirs,' | |||||
1267 | orig(ui, repo, *dirs, **opts) |
|
1338 | orig(ui, repo, *dirs, **opts) | |
1268 | repo.status = oldstatus |
|
1339 | repo.status = oldstatus | |
1269 |
|
1340 | |||
|
1341 | @eh.wrapcommand('rollback') | |||
1270 | def overriderollback(orig, ui, repo, **opts): |
|
1342 | def overriderollback(orig, ui, repo, **opts): | |
1271 | with repo.wlock(): |
|
1343 | with repo.wlock(): | |
1272 | before = repo.dirstate.parents() |
|
1344 | before = repo.dirstate.parents() | |
@@ -1304,6 +1376,7 b' def overriderollback(orig, ui, repo, **o' | |||||
1304 | lfdirstate.write() |
|
1376 | lfdirstate.write() | |
1305 | return result |
|
1377 | return result | |
1306 |
|
1378 | |||
|
1379 | @eh.wrapcommand('transplant', extension='transplant') | |||
1307 | def overridetransplant(orig, ui, repo, *revs, **opts): |
|
1380 | def overridetransplant(orig, ui, repo, *revs, **opts): | |
1308 | resuming = opts.get(r'continue') |
|
1381 | resuming = opts.get(r'continue') | |
1309 | repo._lfcommithooks.append(lfutil.automatedcommithook(resuming)) |
|
1382 | repo._lfcommithooks.append(lfutil.automatedcommithook(resuming)) | |
@@ -1315,6 +1388,7 b' def overridetransplant(orig, ui, repo, *' | |||||
1315 | repo._lfcommithooks.pop() |
|
1388 | repo._lfcommithooks.pop() | |
1316 | return result |
|
1389 | return result | |
1317 |
|
1390 | |||
|
1391 | @eh.wrapcommand('cat') | |||
1318 | def overridecat(orig, ui, repo, file1, *pats, **opts): |
|
1392 | def overridecat(orig, ui, repo, file1, *pats, **opts): | |
1319 | opts = pycompat.byteskwargs(opts) |
|
1393 | opts = pycompat.byteskwargs(opts) | |
1320 | ctx = scmutil.revsingle(repo, opts.get('rev')) |
|
1394 | ctx = scmutil.revsingle(repo, opts.get('rev')) | |
@@ -1375,6 +1449,7 b' def overridecat(orig, ui, repo, file1, *' | |||||
1375 | err = 0 |
|
1449 | err = 0 | |
1376 | return err |
|
1450 | return err | |
1377 |
|
1451 | |||
|
1452 | @eh.wrapfunction(merge, 'update') | |||
1378 | def mergeupdate(orig, repo, node, branchmerge, force, |
|
1453 | def mergeupdate(orig, repo, node, branchmerge, force, | |
1379 | *args, **kwargs): |
|
1454 | *args, **kwargs): | |
1380 | matcher = kwargs.get(r'matcher', None) |
|
1455 | matcher = kwargs.get(r'matcher', None) | |
@@ -1452,6 +1527,7 b' def mergeupdate(orig, repo, node, branch' | |||||
1452 |
|
1527 | |||
1453 | return result |
|
1528 | return result | |
1454 |
|
1529 | |||
|
1530 | @eh.wrapfunction(scmutil, 'marktouched') | |||
1455 | def scmutilmarktouched(orig, repo, files, *args, **kwargs): |
|
1531 | def scmutilmarktouched(orig, repo, files, *args, **kwargs): | |
1456 | result = orig(repo, files, *args, **kwargs) |
|
1532 | result = orig(repo, files, *args, **kwargs) | |
1457 |
|
1533 | |||
@@ -1466,6 +1542,8 b' def scmutilmarktouched(orig, repo, files' | |||||
1466 |
|
1542 | |||
1467 | return result |
|
1543 | return result | |
1468 |
|
1544 | |||
|
1545 | @eh.wrapfunction(upgrade, 'preservedrequirements') | |||
|
1546 | @eh.wrapfunction(upgrade, 'supporteddestrequirements') | |||
1469 | def upgraderequirements(orig, repo): |
|
1547 | def upgraderequirements(orig, repo): | |
1470 | reqs = orig(repo) |
|
1548 | reqs = orig(repo) | |
1471 | if 'largefiles' in repo.requirements: |
|
1549 | if 'largefiles' in repo.requirements: | |
@@ -1473,6 +1551,8 b' def upgraderequirements(orig, repo):' | |||||
1473 | return reqs |
|
1551 | return reqs | |
1474 |
|
1552 | |||
1475 | _lfscheme = 'largefile://' |
|
1553 | _lfscheme = 'largefile://' | |
|
1554 | ||||
|
1555 | @eh.wrapfunction(urlmod, 'open') | |||
1476 | def openlargefile(orig, ui, url_, data=None): |
|
1556 | def openlargefile(orig, ui, url_, data=None): | |
1477 | if url_.startswith(_lfscheme): |
|
1557 | if url_.startswith(_lfscheme): | |
1478 | if data: |
|
1558 | if data: |
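
overrides.py above moves every extensions.wrapfunction()/wrapcommand() call out of setup code and next to the wrapper it installs, expressed as decorators. A short sketch of the two decorator forms, assuming the exthelper behavior shown in these hunks (the wrapped targets are real Mercurial APIs, but the bodies are placeholders):

    from mercurial import cmdutil, exthelper
    from mercurial.i18n import _

    eh = exthelper.exthelper()

    # replaces an extensions.wrapfunction(cmdutil, 'copy', ...) call in uisetup
    @eh.wrapfunction(cmdutil, 'copy')
    def overridecopy(orig, ui, repo, pats, opts, rename=False):
        # 'orig' is the unwrapped cmdutil.copy; delegate after preprocessing
        return orig(ui, repo, pats, opts, rename=rename)

    # wrapcommand can also graft extra flags onto an existing command's table
    @eh.wrapcommand('pull',
        opts=[('', 'lfrev', [],
               _('download largefiles for these revisions'), _('REV'))])
    def overridepull(orig, ui, repo, source=None, **opts):
        return orig(ui, repo, source, **opts)
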
@@ -11,10 +11,12 b' from mercurial.i18n import _' | |||||
11 |
|
11 | |||
12 | from mercurial import ( |
|
12 | from mercurial import ( | |
13 | error, |
|
13 | error, | |
|
14 | exthelper, | |||
14 | httppeer, |
|
15 | httppeer, | |
15 | util, |
|
16 | util, | |
16 | wireprototypes, |
|
17 | wireprototypes, | |
17 | wireprotov1peer, |
|
18 | wireprotov1peer, | |
|
19 | wireprotov1server, | |||
18 | ) |
|
20 | ) | |
19 |
|
21 | |||
20 | from . import ( |
|
22 | from . import ( | |
@@ -28,6 +30,8 b" LARGEFILES_REQUIRED_MSG = ('\\nThis repos" | |||||
28 | '\n\nPlease enable it in your Mercurial config ' |
|
30 | '\n\nPlease enable it in your Mercurial config ' | |
29 | 'file.\n') |
|
31 | 'file.\n') | |
30 |
|
32 | |||
|
33 | eh = exthelper.exthelper() | |||
|
34 | ||||
31 | # these will all be replaced by largefiles.uisetup |
|
35 | # these will all be replaced by largefiles.uisetup | |
32 | ssholdcallstream = None |
|
36 | ssholdcallstream = None | |
33 | httpoldcallstream = None |
|
37 | httpoldcallstream = None | |
@@ -162,6 +166,7 b' def wirereposetup(ui, repo):' | |||||
162 | repo.__class__ = lfileswirerepository |
|
166 | repo.__class__ = lfileswirerepository | |
163 |
|
167 | |||
164 | # advertise the largefiles=serve capability |
|
168 | # advertise the largefiles=serve capability | |
|
169 | @eh.wrapfunction(wireprotov1server, '_capabilities') | |||
165 | def _capabilities(orig, repo, proto): |
|
170 | def _capabilities(orig, repo, proto): | |
166 | '''announce largefile server capability''' |
|
171 | '''announce largefile server capability''' | |
167 | caps = orig(repo, proto) |
|
172 | caps = orig(repo, proto) |
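
The proto.py hunk registers the largefiles=serve capability through a wrapped `_capabilities` rather than relying on extension setup code to patch it in. The wrapper shape, sketched from the hunk above (the appended capability string follows the comment in the diff; treat the body as illustrative):

    from mercurial import exthelper, wireprotov1server

    eh = exthelper.exthelper()

    @eh.wrapfunction(wireprotov1server, '_capabilities')
    def _capabilities(orig, repo, proto):
        '''announce largefile server capability'''
        caps = orig(repo, proto)  # extend, never replace, the computed list
        caps.append('largefiles=serve')
        return caps
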
@@ -129,30 +129,23 b' import sys' | |||||
129 | from mercurial.i18n import _ |
|
129 | from mercurial.i18n import _ | |
130 |
|
130 | |||
131 | from mercurial import ( |
|
131 | from mercurial import ( | |
132 | bundle2, |
|
|||
133 | changegroup, |
|
|||
134 | cmdutil, |
|
|||
135 | config, |
|
132 | config, | |
136 | context, |
|
133 | context, | |
137 | error, |
|
134 | error, | |
138 | exchange, |
|
135 | exchange, | |
139 | extensions, |
|
136 | extensions, | |
|
137 | exthelper, | |||
140 | filelog, |
|
138 | filelog, | |
141 | filesetlang, |
|
139 | filesetlang, | |
142 | localrepo, |
|
140 | localrepo, | |
143 | minifileset, |
|
141 | minifileset, | |
144 | node, |
|
142 | node, | |
145 | pycompat, |
|
143 | pycompat, | |
146 | registrar, |
|
|||
147 | repository, |
|
144 | repository, | |
148 | revlog, |
|
145 | revlog, | |
149 | scmutil, |
|
146 | scmutil, | |
150 | templateutil, |
|
147 | templateutil, | |
151 | upgrade, |
|
|||
152 | util, |
|
148 | util, | |
153 | vfs as vfsmod, |
|
|||
154 | wireprotoserver, |
|
|||
155 | wireprotov1server, |
|
|||
156 | ) |
|
149 | ) | |
157 |
|
150 | |||
158 | from . import ( |
|
151 | from . import ( | |
@@ -167,45 +160,48 b' from . import (' | |||||
167 | # leave the attribute unspecified. |
|
160 | # leave the attribute unspecified. | |
168 | testedwith = 'ships-with-hg-core' |
|
161 | testedwith = 'ships-with-hg-core' | |
169 |
|
162 | |||
170 | configtable = {} |
|
163 | eh = exthelper.exthelper() | |
171 | configitem = registrar.configitem(configtable) |
|
164 | eh.merge(wrapper.eh) | |
|
165 | eh.merge(wireprotolfsserver.eh) | |||
172 |
|
166 | |||
173 | configitem('experimental', 'lfs.serve', |
|
167 | cmdtable = eh.cmdtable | |
|
168 | configtable = eh.configtable | |||
|
169 | extsetup = eh.finalextsetup | |||
|
170 | uisetup = eh.finaluisetup | |||
|
171 | filesetpredicate = eh.filesetpredicate | |||
|
172 | reposetup = eh.finalreposetup | |||
|
173 | templatekeyword = eh.templatekeyword | |||
|
174 | ||||
|
175 | eh.configitem('experimental', 'lfs.serve', | |||
174 | default=True, |
|
176 | default=True, | |
175 | ) |
|
177 | ) | |
176 | configitem('experimental', 'lfs.user-agent', |
|
178 | eh.configitem('experimental', 'lfs.user-agent', | |
177 | default=None, |
|
179 | default=None, | |
178 | ) |
|
180 | ) | |
179 | configitem('experimental', 'lfs.disableusercache', |
|
181 | eh.configitem('experimental', 'lfs.disableusercache', | |
180 | default=False, |
|
182 | default=False, | |
181 | ) |
|
183 | ) | |
182 | configitem('experimental', 'lfs.worker-enable', |
|
184 | eh.configitem('experimental', 'lfs.worker-enable', | |
183 | default=False, |
|
185 | default=False, | |
184 | ) |
|
186 | ) | |
185 |
|
187 | |||
186 | configitem('lfs', 'url', |
|
188 | eh.configitem('lfs', 'url', | |
187 | default=None, |
|
189 | default=None, | |
188 | ) |
|
190 | ) | |
189 | configitem('lfs', 'usercache', |
|
191 | eh.configitem('lfs', 'usercache', | |
190 | default=None, |
|
192 | default=None, | |
191 | ) |
|
193 | ) | |
192 | # Deprecated |
|
194 | # Deprecated | |
193 | configitem('lfs', 'threshold', |
|
195 | eh.configitem('lfs', 'threshold', | |
194 | default=None, |
|
196 | default=None, | |
195 | ) |
|
197 | ) | |
196 | configitem('lfs', 'track', |
|
198 | eh.configitem('lfs', 'track', | |
197 | default='none()', |
|
199 | default='none()', | |
198 | ) |
|
200 | ) | |
199 | configitem('lfs', 'retry', |
|
201 | eh.configitem('lfs', 'retry', | |
200 | default=5, |
|
202 | default=5, | |
201 | ) |
|
203 | ) | |
202 |
|
204 | |||
203 | cmdtable = {} |
|
|||
204 | command = registrar.command(cmdtable) |
|
|||
205 |
|
||||
206 | templatekeyword = registrar.templatekeyword() |
|
|||
207 | filesetpredicate = registrar.filesetpredicate() |
|
|||
208 |
|
||||
209 | lfsprocessor = ( |
|
205 | lfsprocessor = ( | |
210 | wrapper.readfromstore, |
|
206 | wrapper.readfromstore, | |
211 | wrapper.writetostore, |
|
207 | wrapper.writetostore, | |
@@ -216,10 +212,12 b' def featuresetup(ui, supported):' | |||||
216 | # don't die on seeing a repo with the lfs requirement |
|
212 | # don't die on seeing a repo with the lfs requirement | |
217 | supported |= {'lfs'} |
|
213 | supported |= {'lfs'} | |
218 |
|
214 | |||
219 | def uisetup(ui): |
|
215 | @eh.uisetup | |
|
216 | def _uisetup(ui): | |||
220 | localrepo.featuresetupfuncs.add(featuresetup) |
|
217 | localrepo.featuresetupfuncs.add(featuresetup) | |
221 |
|
218 | |||
222 | def reposetup(ui, repo): |
|
219 | @eh.reposetup | |
|
220 | def _reposetup(ui, repo): | |||
223 | # Nothing to do with a remote repo |
|
221 | # Nothing to do with a remote repo | |
224 | if not repo.local(): |
|
222 | if not repo.local(): | |
225 | return |
|
223 | return | |
@@ -246,7 +244,7 b' def reposetup(ui, repo):' | |||||
246 | s = repo.set('%n:%n', _bin(kwargs[r'node']), _bin(last)) |
|
244 | s = repo.set('%n:%n', _bin(kwargs[r'node']), _bin(last)) | |
247 | else: |
|
245 | else: | |
248 | s = repo.set('%n', _bin(kwargs[r'node'])) |
|
246 | s = repo.set('%n', _bin(kwargs[r'node'])) | |
249 | match = repo.narrowmatch() |
|
247 | match = repo._storenarrowmatch | |
250 | for ctx in s: |
|
248 | for ctx in s: | |
251 | # TODO: is there a way to just walk the files in the commit? |
|
249 | # TODO: is there a way to just walk the files in the commit? | |
252 | if any(ctx[f].islfs() for f in ctx.files() |
|
250 | if any(ctx[f].islfs() for f in ctx.files() | |
@@ -305,6 +303,7 b' def _trackedmatcher(repo):' | |||||
305 |
|
303 | |||
306 | return _match |
|
304 | return _match | |
307 |
|
305 | |||
|
306 | # Called by remotefilelog | |||
308 | def wrapfilelog(filelog): |
|
307 | def wrapfilelog(filelog): | |
309 | wrapfunction = extensions.wrapfunction |
|
308 | wrapfunction = extensions.wrapfunction | |
310 |
|
309 | |||
@@ -312,6 +311,7 b' def wrapfilelog(filelog):' | |||||
312 | wrapfunction(filelog, 'renamed', wrapper.filelogrenamed) |
|
311 | wrapfunction(filelog, 'renamed', wrapper.filelogrenamed) | |
313 | wrapfunction(filelog, 'size', wrapper.filelogsize) |
|
312 | wrapfunction(filelog, 'size', wrapper.filelogsize) | |
314 |
|
313 | |||
|
314 | @eh.wrapfunction(localrepo, 'resolverevlogstorevfsoptions') | |||
315 | def _resolverevlogstorevfsoptions(orig, ui, requirements, features): |
|
315 | def _resolverevlogstorevfsoptions(orig, ui, requirements, features): | |
316 | opts = orig(ui, requirements, features) |
|
316 | opts = orig(ui, requirements, features) | |
317 | for name, module in extensions.extensions(ui): |
|
317 | for name, module in extensions.extensions(ui): | |
@@ -326,38 +326,10 b' def _resolverevlogstorevfsoptions(orig, ' | |||||
326 |
|
326 | |||
327 | return opts |
|
327 | return opts | |
328 |
|
328 | |||
329 | def extsetup(ui): |
|
|
329 | @eh.extsetup | |
|
330 | def _extsetup(ui): | |||
330 | wrapfilelog(filelog.filelog) |
|
331 | wrapfilelog(filelog.filelog) | |
331 |
|
332 | |||
332 | wrapfunction = extensions.wrapfunction |
|
|||
333 |
|
||||
334 | wrapfunction(localrepo, 'makefilestorage', wrapper.localrepomakefilestorage) |
|
|||
335 | wrapfunction(localrepo, 'resolverevlogstorevfsoptions', |
|
|||
336 | _resolverevlogstorevfsoptions) |
|
|||
337 |
|
||||
338 | wrapfunction(cmdutil, '_updatecatformatter', wrapper._updatecatformatter) |
|
|||
339 | wrapfunction(scmutil, 'wrapconvertsink', wrapper.convertsink) |
|
|||
340 |
|
||||
341 | wrapfunction(upgrade, '_finishdatamigration', |
|
|||
342 | wrapper.upgradefinishdatamigration) |
|
|||
343 |
|
||||
344 | wrapfunction(upgrade, 'preservedrequirements', |
|
|||
345 | wrapper.upgraderequirements) |
|
|||
346 |
|
||||
347 | wrapfunction(upgrade, 'supporteddestrequirements', |
|
|||
348 | wrapper.upgraderequirements) |
|
|||
349 |
|
||||
350 | wrapfunction(changegroup, |
|
|||
351 | 'allsupportedversions', |
|
|||
352 | wrapper.allsupportedversions) |
|
|||
353 |
|
||||
354 | wrapfunction(exchange, 'push', wrapper.push) |
|
|||
355 | wrapfunction(wireprotov1server, '_capabilities', wrapper._capabilities) |
|
|||
356 | wrapfunction(wireprotoserver, 'handlewsgirequest', |
|
|||
357 | wireprotolfsserver.handlewsgirequest) |
|
|||
358 |
|
||||
359 | wrapfunction(context.basefilectx, 'cmp', wrapper.filectxcmp) |
|
|||
360 | wrapfunction(context.basefilectx, 'isbinary', wrapper.filectxisbinary) |
|
|||
361 | context.basefilectx.islfs = wrapper.filectxislfs |
|
333 | context.basefilectx.islfs = wrapper.filectxislfs | |
362 |
|
334 | |||
363 | scmutil.fileprefetchhooks.add('lfs', wrapper._prefetchfiles) |
|
335 | scmutil.fileprefetchhooks.add('lfs', wrapper._prefetchfiles) | |
@@ -367,14 +339,7 b' def extsetup(ui):' | |||||
367 | # "packed1". Using "packed1" with lfs will likely cause trouble. |
|
339 | # "packed1". Using "packed1" with lfs will likely cause trouble. | |
368 | exchange._bundlespeccontentopts["v2"]["cg.version"] = "03" |
|
340 | exchange._bundlespeccontentopts["v2"]["cg.version"] = "03" | |
369 |
|
341 | |||
370 | # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs |
|
342 | @eh.filesetpredicate('lfs()') | |
371 | # options and blob stores are passed from othervfs to the new readonlyvfs. |
|
|||
372 | wrapfunction(vfsmod.readonlyvfs, '__init__', wrapper.vfsinit) |
|
|||
373 |
|
||||
374 | # when writing a bundle via "hg bundle" command, upload related LFS blobs |
|
|||
375 | wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle) |
|
|||
376 |
|
||||
377 | @filesetpredicate('lfs()') |
|
|||
378 | def lfsfileset(mctx, x): |
|
343 | def lfsfileset(mctx, x): | |
379 | """File that uses LFS storage.""" |
|
344 | """File that uses LFS storage.""" | |
380 | # i18n: "lfs" is a keyword |
|
345 | # i18n: "lfs" is a keyword | |
@@ -384,7 +349,7 b' def lfsfileset(mctx, x):' | |||||
384 | return wrapper.pointerfromctx(ctx, f, removed=True) is not None |
|
349 | return wrapper.pointerfromctx(ctx, f, removed=True) is not None | |
385 | return mctx.predicate(lfsfilep, predrepr='<lfs>') |
|
350 | return mctx.predicate(lfsfilep, predrepr='<lfs>') | |
386 |
|
351 | |||
387 | @templatekeyword('lfs_files', requires={'ctx'}) |
|
352 | @eh.templatekeyword('lfs_files', requires={'ctx'}) | |
388 | def lfsfiles(context, mapping): |
|
353 | def lfsfiles(context, mapping): | |
389 | """List of strings. All files modified, added, or removed by this |
|
354 | """List of strings. All files modified, added, or removed by this | |
390 | changeset.""" |
|
355 | changeset.""" | |
@@ -409,8 +374,8 b' def lfsfiles(context, mapping):' | |||||
409 | f = templateutil._showcompatlist(context, mapping, 'lfs_file', files) |
|
374 | f = templateutil._showcompatlist(context, mapping, 'lfs_file', files) | |
410 | return templateutil.hybrid(f, files, makemap, pycompat.identity) |
|
375 | return templateutil.hybrid(f, files, makemap, pycompat.identity) | |
411 |
|
376 | |||
412 | @command('debuglfsupload', |
|
377 | @eh.command('debuglfsupload', | |
413 | [('r', 'rev', [], _('upload large files introduced by REV'))]) |
|
378 | [('r', 'rev', [], _('upload large files introduced by REV'))]) | |
414 | def debuglfsupload(ui, repo, **opts): |
|
379 | def debuglfsupload(ui, repo, **opts): | |
415 | """upload lfs blobs added by the working copy parent or given revisions""" |
|
380 | """upload lfs blobs added by the working copy parent or given revisions""" | |
416 | revs = opts.get(r'rev', []) |
|
381 | revs = opts.get(r'rev', []) |
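
lfs/__init__.py above shrinks to thin @eh.uisetup/@eh.reposetup/@eh.extsetup callbacks plus re-exports, with the bulk of its old extsetup() wrapfunction boilerplate relocated beside the wrappers in wrapper.py and wireprotolfsserver.py. A sketch of the resulting split, under the same exthelper assumptions as the earlier example:

    from mercurial import exthelper

    eh = exthelper.exthelper()
    # eh.merge(wrapper.eh)          # submodule registrations fold in here

    @eh.reposetup
    def _reposetup(ui, repo):
        if not repo.local():
            return                  # nothing to do for a remote peer

    @eh.extsetup
    def _extsetup(ui):
        pass                        # runs once after all extensions load

    reposetup = eh.finalreposetup   # names Mercurial actually invokes
    extsetup = eh.finalextsetup
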
@@ -7,6 +7,7 b'' | |||||
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
|
10 | import contextlib | |||
10 | import errno |
|
11 | import errno | |
11 | import hashlib |
|
12 | import hashlib | |
12 | import json |
|
13 | import json | |
@@ -17,7 +18,9 b' import socket' | |||||
17 | from mercurial.i18n import _ |
|
18 | from mercurial.i18n import _ | |
18 |
|
19 | |||
19 | from mercurial import ( |
|
20 | from mercurial import ( | |
|
21 | encoding, | |||
20 | error, |
|
22 | error, | |
|
23 | node, | |||
21 | pathutil, |
|
24 | pathutil, | |
22 | pycompat, |
|
25 | pycompat, | |
23 | url as urlmod, |
|
26 | url as urlmod, | |
@@ -26,6 +29,10 b' from mercurial import (' | |||||
26 | worker, |
|
29 | worker, | |
27 | ) |
|
30 | ) | |
28 |
|
31 | |||
|
32 | from mercurial.utils import ( | |||
|
33 | stringutil, | |||
|
34 | ) | |||
|
35 | ||||
29 | from ..largefiles import lfutil |
|
36 | from ..largefiles import lfutil | |
30 |
|
37 | |||
31 | # 64 bytes for SHA256 |
|
38 | # 64 bytes for SHA256 | |
@@ -150,7 +157,7 b' class local(object):' | |||||
150 | fp.write(chunk) |
|
157 | fp.write(chunk) | |
151 | sha256.update(chunk) |
|
158 | sha256.update(chunk) | |
152 |
|
159 | |||
153 | realoid = sha256.hexdigest() |
|
160 | realoid = node.hex(sha256.digest()) | |
154 | if realoid != oid: |
|
161 | if realoid != oid: | |
155 | raise LfsCorruptionError(_('corrupt remote lfs object: %s') |
|
162 | raise LfsCorruptionError(_('corrupt remote lfs object: %s') | |
156 | % oid) |
|
163 | % oid) | |
@@ -200,7 +207,7 b' class local(object):' | |||||
200 | # Don't abort if corruption is detected, because `hg verify` will |
|
207 | # Don't abort if corruption is detected, because `hg verify` will | |
201 | # give more useful info about the corruption- simply don't add the |
|
208 | # give more useful info about the corruption- simply don't add the | |
202 | # hardlink. |
|
209 | # hardlink. | |
203 | if verify or hashlib.sha256(blob).hexdigest() == oid: |
|
210 | if verify or node.hex(hashlib.sha256(blob).digest()) == oid: | |
204 | self.ui.note(_('lfs: found %s in the usercache\n') % oid) |
|
211 | self.ui.note(_('lfs: found %s in the usercache\n') % oid) | |
205 | lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid)) |
|
212 | lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid)) | |
206 | else: |
|
213 | else: | |
@@ -224,13 +231,37 b' class local(object):' | |||||
224 | for chunk in util.filechunkiter(fp, size=1048576): |
|
231 | for chunk in util.filechunkiter(fp, size=1048576): | |
225 | sha256.update(chunk) |
|
232 | sha256.update(chunk) | |
226 |
|
233 | |||
227 | return oid == sha256.hexdigest() |
|
234 | return oid == node.hex(sha256.digest()) | |
228 |
|
235 | |||
229 | def has(self, oid): |
|
236 | def has(self, oid): | |
230 | """Returns True if the local blobstore contains the requested blob, |
|
237 | """Returns True if the local blobstore contains the requested blob, | |
231 | False otherwise.""" |
|
238 | False otherwise.""" | |
232 | return self.cachevfs.exists(oid) or self.vfs.exists(oid) |
|
239 | return self.cachevfs.exists(oid) or self.vfs.exists(oid) | |
233 |
|
240 | |||
|
241 | def _urlerrorreason(urlerror): | |||
|
242 | '''Create a friendly message for the given URLError to be used in an | |||
|
243 | LfsRemoteError message. | |||
|
244 | ''' | |||
|
245 | inst = urlerror | |||
|
246 | ||||
|
247 | if isinstance(urlerror.reason, Exception): | |||
|
248 | inst = urlerror.reason | |||
|
249 | ||||
|
250 | if util.safehasattr(inst, 'reason'): | |||
|
251 | try: # usually it is in the form (errno, strerror) | |||
|
252 | reason = inst.reason.args[1] | |||
|
253 | except (AttributeError, IndexError): | |||
|
254 | # it might be anything, for example a string | |||
|
255 | reason = inst.reason | |||
|
256 | if isinstance(reason, pycompat.unicode): | |||
|
257 | # SSLError of Python 2.7.9 contains a unicode | |||
|
258 | reason = encoding.unitolocal(reason) | |||
|
259 | return reason | |||
|
260 | elif getattr(inst, "strerror", None): | |||
|
261 | return encoding.strtolocal(inst.strerror) | |||
|
262 | else: | |||
|
263 | return stringutil.forcebytestr(urlerror) | |||
|
264 | ||||
234 | class _gitlfsremote(object): |
|
265 | class _gitlfsremote(object): | |
235 |
|
266 | |||
236 | def __init__(self, repo, url): |
|
267 | def __init__(self, repo, url): | |
@@ -263,16 +294,27 b' class _gitlfsremote(object):' | |||||
263 | 'objects': objects, |
|
294 | 'objects': objects, | |
264 | 'operation': action, |
|
295 | 'operation': action, | |
265 | }) |
|
296 | }) | |
266 | batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl, |
|
|
297 | url = '%s/objects/batch' % self.baseurl | |
267 | data=requestdata) |
|
298 | batchreq = util.urlreq.request(url, data=requestdata) | |
268 | batchreq.add_header('Accept', 'application/vnd.git-lfs+json') |
|
299 | batchreq.add_header('Accept', 'application/vnd.git-lfs+json') | |
269 | batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json') |
|
300 | batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json') | |
270 | try: |
|
301 | try: | |
271 | rsp = self.urlopener.open(batchreq) |
|
|
302 | with contextlib.closing(self.urlopener.open(batchreq)) as rsp: | |
272 | rawjson = rsp.read() |
|
303 | rawjson = rsp.read() | |
273 | except util.urlerr.httperror as ex: |
|
304 | except util.urlerr.httperror as ex: | |
274 | raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)') |
|
305 | hints = { | |
275 | % (ex, action)) |
|
306 | 400: _('check that lfs serving is enabled on %s and "%s" is ' | |
|
307 | 'supported') % (self.baseurl, action), | |||
|
308 | 404: _('the "lfs.url" config may be used to override %s') | |||
|
309 | % self.baseurl, | |||
|
310 | } | |||
|
311 | hint = hints.get(ex.code, _('api=%s, action=%s') % (url, action)) | |||
|
312 | raise LfsRemoteError(_('LFS HTTP error: %s') % ex, hint=hint) | |||
|
313 | except util.urlerr.urlerror as ex: | |||
|
314 | hint = (_('the "lfs.url" config may be used to override %s') | |||
|
315 | % self.baseurl) | |||
|
316 | raise LfsRemoteError(_('LFS error: %s') % _urlerrorreason(ex), | |||
|
317 | hint=hint) | |||
276 | try: |
|
318 | try: | |
277 | response = json.loads(rawjson) |
|
319 | response = json.loads(rawjson) | |
278 | except ValueError: |
|
320 | except ValueError: | |
@@ -379,30 +421,37 b' class _gitlfsremote(object):' | |||||
379 |
|
421 | |||
380 | response = b'' |
|
422 | response = b'' | |
381 | try: |
|
423 | try: | |
382 | req = self.urlopener.open(request) |
|
|
424 | with contextlib.closing(self.urlopener.open(request)) as req: | |
383 |
|
425 | ui = self.ui # Shorten debug lines | ||
384 | if self.ui.debugflag: |
|
426 | if self.ui.debugflag: | |
385 | self.ui.debug('Status: %d\n' % req.status) |
|
|
427 | ui.debug('Status: %d\n' % req.status) | |
386 | # lfs-test-server and hg serve return headers in different order |
# lfs-test-server and hg serve return headers in different |
|
428 | # lfs-test-server and hg serve return headers in different | |
387 | self.ui.debug('%s\n' |
|
429 | # order | |
388 | % '\n'.join(sorted(str(req.info()).splitlines()))) |
|
430 | ui.debug('%s\n' | |
|
431 | % '\n'.join(sorted(str(req.info()).splitlines()))) | |||
389 |
|
432 | |||
390 | if action == 'download': |
|
433 | if action == 'download': | |
391 |
# If downloading blobs, store downloaded data to local |
|
434 | # If downloading blobs, store downloaded data to local | |
392 |
|
|
435 | # blobstore | |
393 | else: |
|
436 | localstore.download(oid, req) | |
394 |
|
|
437 | else: | |
395 |
|
|
438 | while True: | |
396 |
|
|
439 | data = req.read(1048576) | |
397 |
|
|
440 | if not data: | |
398 |
|
|
441 | break | |
399 |
|
|
442 | response += data | |
400 | self.ui.debug('lfs %s response: %s' % (action, response)) |
|
443 | if response: | |
|
444 | ui.debug('lfs %s response: %s' % (action, response)) | |||
401 | except util.urlerr.httperror as ex: |
|
445 | except util.urlerr.httperror as ex: | |
402 | if self.ui.debugflag: |
|
446 | if self.ui.debugflag: | |
403 | self.ui.debug('%s: %s\n' % (oid, ex.read())) |
|
447 | self.ui.debug('%s: %s\n' % (oid, ex.read())) | |
404 | raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)') |
|
448 | raise LfsRemoteError(_('LFS HTTP error: %s (oid=%s, action=%s)') | |
405 | % (ex, oid, action)) |
|
449 | % (ex, oid, action)) | |
|
450 | except util.urlerr.urlerror as ex: | |||
|
451 | hint = (_('attempted connection to %s') | |||
|
452 | % util.urllibcompat.getfullurl(request)) | |||
|
453 | raise LfsRemoteError(_('LFS error: %s') % _urlerrorreason(ex), | |||
|
454 | hint=hint) | |||
406 |
|
455 | |||
407 | def _batch(self, pointers, localstore, action): |
|
456 | def _batch(self, pointers, localstore, action): | |
408 | if action not in ['upload', 'download']: |
|
457 | if action not in ['upload', 'download']: | |
@@ -539,7 +588,7 b' def _deduplicate(pointers):' | |||||
539 | return reduced.values() |
|
588 | return reduced.values() | |
540 |
|
589 | |||
541 | def _verify(oid, content): |
|
590 | def _verify(oid, content): | |
542 | realoid = hashlib.sha256(content).hexdigest() |
|
591 | realoid = node.hex(hashlib.sha256(content).digest()) | |
543 | if realoid != oid: |
|
592 | if realoid != oid: | |
544 | raise LfsCorruptionError(_('detected corrupt lfs object: %s') % oid, |
|
593 | raise LfsCorruptionError(_('detected corrupt lfs object: %s') % oid, | |
545 | hint=_('run hg verify')) |
|
594 | hint=_('run hg verify')) |
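_verify recomputes the SHA-256 of the blob and compares it to the oid; the change swaps hexdigest() for node.hex(...digest()) so the result stays bytes on Python 3. An equivalent standalone check, with binascii.hexlify standing in for mercurial's node.hex:

    import binascii
    import hashlib

    def verifyoid(oid, content):
        # An LFS oid is the lowercase hex SHA-256 of the blob's bytes;
        # hexlify (like node.hex) returns bytes, so the comparison is
        # bytes-vs-bytes on Python 3.
        realoid = binascii.hexlify(hashlib.sha256(content).digest())
        if realoid != oid:
            raise ValueError('detected corrupt lfs object: %s' % oid)

    blob = b'hello'
    verifyoid(binascii.hexlify(hashlib.sha256(blob).digest()), blob)  # ok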
@@ -17,8 +17,10 b' from mercurial.hgweb import (' | |||||
17 | ) |
|
17 | ) | |
18 |
|
18 | |||
19 | from mercurial import ( |
|
19 | from mercurial import ( | |
|
20 | exthelper, | |||
20 | pycompat, |
|
21 | pycompat, | |
21 | util, |
|
22 | util, | |
|
23 | wireprotoserver, | |||
22 | ) |
|
24 | ) | |
23 |
|
25 | |||
24 | from . import blobstore |
|
26 | from . import blobstore | |
@@ -31,6 +33,9 b' HTTP_METHOD_NOT_ALLOWED = hgwebcommon.HT' | |||||
31 | HTTP_NOT_ACCEPTABLE = hgwebcommon.HTTP_NOT_ACCEPTABLE |
|
33 | HTTP_NOT_ACCEPTABLE = hgwebcommon.HTTP_NOT_ACCEPTABLE | |
32 | HTTP_UNSUPPORTED_MEDIA_TYPE = hgwebcommon.HTTP_UNSUPPORTED_MEDIA_TYPE |
|
34 | HTTP_UNSUPPORTED_MEDIA_TYPE = hgwebcommon.HTTP_UNSUPPORTED_MEDIA_TYPE | |
33 |
|
35 | |||
|
36 | eh = exthelper.exthelper() | |||
|
37 | ||||
|
38 | @eh.wrapfunction(wireprotoserver, 'handlewsgirequest') | |||
34 | def handlewsgirequest(orig, rctx, req, res, checkperm): |
|
39 | def handlewsgirequest(orig, rctx, req, res, checkperm): | |
35 | """Wrap wireprotoserver.handlewsgirequest() to possibly process an LFS |
|
40 | """Wrap wireprotoserver.handlewsgirequest() to possibly process an LFS | |
36 | request if it is left unprocessed by the wrapped method. |
|
41 | request if it is left unprocessed by the wrapped method. |
@@ -13,10 +13,21 b' from mercurial.i18n import _' | |||||
13 | from mercurial.node import bin, hex, nullid, short |
|
13 | from mercurial.node import bin, hex, nullid, short | |
14 |
|
14 | |||
15 | from mercurial import ( |
|
15 | from mercurial import ( | |
|
16 | bundle2, | |||
|
17 | changegroup, | |||
|
18 | cmdutil, | |||
|
19 | context, | |||
16 | error, |
|
20 | error, | |
|
21 | exchange, | |||
|
22 | exthelper, | |||
|
23 | localrepo, | |||
17 | repository, |
|
24 | repository, | |
18 | revlog, |
|
25 | revlog, | |
|
26 | scmutil, | |||
|
27 | upgrade, | |||
19 | util, |
|
28 | util, | |
|
29 | vfs as vfsmod, | |||
|
30 | wireprotov1server, | |||
20 | ) |
|
31 | ) | |
21 |
|
32 | |||
22 | from mercurial.utils import ( |
|
33 | from mercurial.utils import ( | |
@@ -31,17 +42,22 b' from . import (' | |||||
31 | pointer, |
|
42 | pointer, | |
32 | ) |
|
43 | ) | |
33 |
|
44 | |||
|
45 | eh = exthelper.exthelper() | |||
|
46 | ||||
|
47 | @eh.wrapfunction(localrepo, 'makefilestorage') | |||
34 | def localrepomakefilestorage(orig, requirements, features, **kwargs): |
|
48 | def localrepomakefilestorage(orig, requirements, features, **kwargs): | |
35 | if b'lfs' in requirements: |
|
49 | if b'lfs' in requirements: | |
36 | features.add(repository.REPO_FEATURE_LFS) |
|
50 | features.add(repository.REPO_FEATURE_LFS) | |
37 |
|
51 | |||
38 | return orig(requirements=requirements, features=features, **kwargs) |
|
52 | return orig(requirements=requirements, features=features, **kwargs) | |
39 |
|
53 | |||
|
54 | @eh.wrapfunction(changegroup, 'allsupportedversions') | |||
40 | def allsupportedversions(orig, ui): |
|
55 | def allsupportedversions(orig, ui): | |
41 | versions = orig(ui) |
|
56 | versions = orig(ui) | |
42 | versions.add('03') |
|
57 | versions.add('03') | |
43 | return versions |
|
58 | return versions | |
44 |
|
59 | |||
|
60 | @eh.wrapfunction(wireprotov1server, '_capabilities') | |||
45 | def _capabilities(orig, repo, proto): |
|
61 | def _capabilities(orig, repo, proto): | |
46 | '''Wrap server command to announce lfs server capability''' |
|
62 | '''Wrap server command to announce lfs server capability''' | |
47 | caps = orig(repo, proto) |
|
63 | caps = orig(repo, proto) | |
@@ -130,6 +146,7 b' def _islfs(rlog, node=None, rev=None):' | |||||
130 | flags = rlog._revlog.flags(rev) |
|
146 | flags = rlog._revlog.flags(rev) | |
131 | return bool(flags & revlog.REVIDX_EXTSTORED) |
|
147 | return bool(flags & revlog.REVIDX_EXTSTORED) | |
132 |
|
148 | |||
|
149 | # Wrapping may also be applied by remotefilelog | |||
133 | def filelogaddrevision(orig, self, text, transaction, link, p1, p2, |
|
150 | def filelogaddrevision(orig, self, text, transaction, link, p1, p2, | |
134 | cachedelta=None, node=None, |
|
151 | cachedelta=None, node=None, | |
135 | flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds): |
|
152 | flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds): | |
@@ -149,6 +166,7 b' def filelogaddrevision(orig, self, text,' | |||||
149 | return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta, |
|
166 | return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta, | |
150 | node=node, flags=flags, **kwds) |
|
167 | node=node, flags=flags, **kwds) | |
151 |
|
168 | |||
|
169 | # Wrapping may also be applied by remotefilelog | |||
152 | def filelogrenamed(orig, self, node): |
|
170 | def filelogrenamed(orig, self, node): | |
153 | if _islfs(self, node): |
|
171 | if _islfs(self, node): | |
154 | rawtext = self._revlog.revision(node, raw=True) |
|
172 | rawtext = self._revlog.revision(node, raw=True) | |
@@ -161,6 +179,7 b' def filelogrenamed(orig, self, node):' | |||||
161 | return False |
|
179 | return False | |
162 | return orig(self, node) |
|
180 | return orig(self, node) | |
163 |
|
181 | |||
|
182 | # Wrapping may also be applied by remotefilelog | |||
164 | def filelogsize(orig, self, rev): |
|
183 | def filelogsize(orig, self, rev): | |
165 | if _islfs(self, rev=rev): |
|
184 | if _islfs(self, rev=rev): | |
166 | # fast path: use lfs metadata to answer size |
|
185 | # fast path: use lfs metadata to answer size | |
@@ -169,6 +188,7 b' def filelogsize(orig, self, rev):' | |||||
169 | return int(metadata['size']) |
|
188 | return int(metadata['size']) | |
170 | return orig(self, rev) |
|
189 | return orig(self, rev) | |
171 |
|
190 | |||
|
191 | @eh.wrapfunction(context.basefilectx, 'cmp') | |||
172 | def filectxcmp(orig, self, fctx): |
|
192 | def filectxcmp(orig, self, fctx): | |
173 | """returns True if text is different than fctx""" |
|
193 | """returns True if text is different than fctx""" | |
174 | # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs |
|
194 | # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs | |
@@ -179,6 +199,7 b' def filectxcmp(orig, self, fctx):' | |||||
179 | return p1.oid() != p2.oid() |
|
199 | return p1.oid() != p2.oid() | |
180 | return orig(self, fctx) |
|
200 | return orig(self, fctx) | |
181 |
|
201 | |||
|
202 | @eh.wrapfunction(context.basefilectx, 'isbinary') | |||
182 | def filectxisbinary(orig, self): |
|
203 | def filectxisbinary(orig, self): | |
183 | if self.islfs(): |
|
204 | if self.islfs(): | |
184 | # fast path: use lfs metadata to answer isbinary |
|
205 | # fast path: use lfs metadata to answer isbinary | |
@@ -190,10 +211,12 b' def filectxisbinary(orig, self):' | |||||
190 | def filectxislfs(self): |
|
211 | def filectxislfs(self): | |
191 | return _islfs(self.filelog(), self.filenode()) |
|
212 | return _islfs(self.filelog(), self.filenode()) | |
192 |
|
213 | |||
|
214 | @eh.wrapfunction(cmdutil, '_updatecatformatter') | |||
193 | def _updatecatformatter(orig, fm, ctx, matcher, path, decode): |
|
215 | def _updatecatformatter(orig, fm, ctx, matcher, path, decode): | |
194 | orig(fm, ctx, matcher, path, decode) |
|
216 | orig(fm, ctx, matcher, path, decode) | |
195 | fm.data(rawdata=ctx[path].rawdata()) |
|
217 | fm.data(rawdata=ctx[path].rawdata()) | |
196 |
|
218 | |||
|
219 | @eh.wrapfunction(scmutil, 'wrapconvertsink') | |||
197 | def convertsink(orig, sink): |
|
220 | def convertsink(orig, sink): | |
198 | sink = orig(sink) |
|
221 | sink = orig(sink) | |
199 | if sink.repotype == 'hg': |
|
222 | if sink.repotype == 'hg': | |
@@ -219,6 +242,9 b' def convertsink(orig, sink):' | |||||
219 |
|
242 | |||
220 | return sink |
|
243 | return sink | |
221 |
|
244 | |||
|
245 | # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs | |||
|
246 | # options and blob stores are passed from othervfs to the new readonlyvfs. | |||
|
247 | @eh.wrapfunction(vfsmod.readonlyvfs, '__init__') | |||
222 | def vfsinit(orig, self, othervfs): |
|
248 | def vfsinit(orig, self, othervfs): | |
223 | orig(self, othervfs) |
|
249 | orig(self, othervfs) | |
224 | # copy lfs related options |
|
250 | # copy lfs related options | |
@@ -290,6 +316,7 b' def prepush(pushop):' | |||||
290 | """ |
|
316 | """ | |
291 | return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing) |
|
317 | return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing) | |
292 |
|
318 | |||
|
319 | @eh.wrapfunction(exchange, 'push') | |||
293 | def push(orig, repo, remote, *args, **kwargs): |
|
320 | def push(orig, repo, remote, *args, **kwargs): | |
294 | """bail on push if the extension isn't enabled on remote when needed, and |
|
321 | """bail on push if the extension isn't enabled on remote when needed, and | |
295 | update the remote store based on the destination path.""" |
|
322 | update the remote store based on the destination path.""" | |
@@ -316,6 +343,8 b' def push(orig, repo, remote, *args, **kw' | |||||
316 | else: |
|
343 | else: | |
317 | return orig(repo, remote, *args, **kwargs) |
|
344 | return orig(repo, remote, *args, **kwargs) | |
318 |
|
345 | |||
|
346 | # when writing a bundle via "hg bundle" command, upload related LFS blobs | |||
|
347 | @eh.wrapfunction(bundle2, 'writenewbundle') | |||
319 | def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing, |
|
348 | def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing, | |
320 | *args, **kwargs): |
|
349 | *args, **kwargs): | |
321 | """upload LFS blobs added by outgoing revisions on 'hg bundle'""" |
|
350 | """upload LFS blobs added by outgoing revisions on 'hg bundle'""" | |
@@ -393,6 +422,7 b' def uploadblobs(repo, pointers):' | |||||
393 | remoteblob = repo.svfs.lfsremoteblobstore |
|
422 | remoteblob = repo.svfs.lfsremoteblobstore | |
394 | remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore) |
|
423 | remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore) | |
395 |
|
424 | |||
|
425 | @eh.wrapfunction(upgrade, '_finishdatamigration') | |||
396 | def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements): |
|
426 | def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements): | |
397 | orig(ui, srcrepo, dstrepo, requirements) |
|
427 | orig(ui, srcrepo, dstrepo, requirements) | |
398 |
|
428 | |||
@@ -407,6 +437,8 b' def upgradefinishdatamigration(orig, ui,' | |||||
407 | ui.write(_('copying lfs blob %s\n') % oid) |
|
437 | ui.write(_('copying lfs blob %s\n') % oid) | |
408 | lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid)) |
|
438 | lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid)) | |
409 |
|
439 | |||
|
440 | @eh.wrapfunction(upgrade, 'preservedrequirements') | |||
|
441 | @eh.wrapfunction(upgrade, 'supporteddestrequirements') | |||
410 | def upgraderequirements(orig, repo): |
|
442 | def upgraderequirements(orig, repo): | |
411 | reqs = orig(repo) |
|
443 | reqs = orig(repo) | |
412 | if 'lfs' in repo.requirements: |
|
444 | if 'lfs' in repo.requirements: |
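The @eh.wrapfunction(module, 'name') decorators introduced throughout these hunks register wrappers declaratively so exthelper can install them all in one pass at setup time, instead of each module calling extensions.wrapfunction imperatively. A toy sketch of that decorator-registry idea; this is only the shape of it, not the real exthelper API:

    import functools

    class exthelper(object):
        """Collect (target, name, wrapper) at import time, apply later."""
        def __init__(self):
            self._wrappers = []

        def wrapfunction(self, container, name):
            def dec(wrapper):
                self._wrappers.append((container, name, wrapper))
                return wrapper
            return dec

        def finalize(self):
            # Replace each target with a partial that receives the
            # original function as its first argument.
            for container, name, wrapper in self._wrappers:
                orig = getattr(container, name)
                setattr(container, name, functools.partial(wrapper, orig))

    class mod(object):
        @staticmethod
        def greet(who):
            return 'hello %s' % who

    eh = exthelper()

    @eh.wrapfunction(mod, 'greet')
    def loudgreet(orig, who):
        return orig(who).upper()

    eh.finalize()
    print(mod.greet('lfs'))  # HELLO LFS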
@@ -9,21 +9,21 b'' | |||||
9 | This extension lets you specify a shell command per ui.log() event, |
|
9 | This extension lets you specify a shell command per ui.log() event, | |
10 | sending all remaining arguments as environment variables to that command. |
|
10 | sending all remaining arguments as environment variables to that command. | |
11 |
|
11 | |||
12 | Each positional argument to the method results in a `MSG[N]` key in the |
|
12 | Positional arguments construct a log message, which is passed in the `MSG1` | |
13 | environment, starting at 1 (so `MSG1`, `MSG2`, etc.). Each keyword argument |
|
13 | environment variable. Each keyword argument is set as an `OPT_UPPERCASE_KEY` | |
14 | is set as a `OPT_UPPERCASE_KEY` variable (so the key is uppercased, and |
|
14 | variable (so the key is uppercased, and prefixed with `OPT_`). The original | |
15 | prefixed with `OPT_`). The original event name is passed in the `EVENT` |
|
15 | event name is passed in the `EVENT` environment variable, and the process ID | |
16 |
|
|
16 | of mercurial is given in `HGPID`. | |
17 |
|
17 | |||
18 | So given a call `ui.log('foo', 'bar', 'baz', spam='eggs')`, a script configured |
|
18 | So given a call `ui.log('foo', 'bar %s\n', 'baz', spam='eggs')`, a script | |
19 | for the `foo` event can expect an environment with `MSG1=bar`, `MSG2=baz`, and |
|
19 | configured for the `foo` event can expect an environment with `MSG1=bar baz`, | |
20 | `OPT_SPAM=eggs`. |
|
20 | and `OPT_SPAM=eggs`. | |
21 |
|
21 | |||
22 | Scripts are configured in the `[logtoprocess]` section, each key an event name. |
|
22 | Scripts are configured in the `[logtoprocess]` section, each key an event name. | |
23 | For example:: |
|
23 | For example:: | |
24 |
|
24 | |||
25 | [logtoprocess] |
|
25 | [logtoprocess] | |
26 | commandexception = echo "$MSG2$MSG3" > /var/log/mercurial_exceptions.log |
|
26 | commandexception = echo "$MSG1" > /var/log/mercurial_exceptions.log | |
27 |
|
27 | |||
28 | would log the warning message and traceback of any failed command dispatch. |
|
28 | would log the warning message and traceback of any failed command dispatch. | |
29 |
|
29 | |||
@@ -34,14 +34,7 b' not ensure that they exit cleanly.' | |||||
34 |
|
34 | |||
35 | from __future__ import absolute_import |
|
35 | from __future__ import absolute_import | |
36 |
|
36 | |||
37 | import itertools |
|
|||
38 | import os |
|
37 | import os | |
39 | import subprocess |
|
|||
40 | import sys |
|
|||
41 |
|
||||
42 | from mercurial import ( |
|
|||
43 | pycompat, |
|
|||
44 | ) |
|
|||
45 |
|
38 | |||
46 | from mercurial.utils import ( |
|
39 | from mercurial.utils import ( | |
47 | procutil, |
|
40 | procutil, | |
@@ -53,88 +46,30 b' from mercurial.utils import (' | |||||
53 | # leave the attribute unspecified. |
|
46 | # leave the attribute unspecified. | |
54 | testedwith = 'ships-with-hg-core' |
|
47 | testedwith = 'ships-with-hg-core' | |
55 |
|
48 | |||
56 | def uisetup(ui): |
|
49 | class processlogger(object): | |
57 | if pycompat.iswindows: |
|
50 | """Map log events to external commands | |
58 | # no fork on Windows, but we can create a detached process |
|
51 | ||
59 | # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx |
|
52 | Arguments are passed on as environment variables. | |
60 | # No stdlib constant exists for this value |
|
53 | """ | |
61 | DETACHED_PROCESS = 0x00000008 |
|
54 | ||
62 | _creationflags = DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP |
|
55 | def __init__(self, ui): | |
|
56 | self._scripts = dict(ui.configitems(b'logtoprocess')) | |||
|
57 | ||||
|
58 | def tracked(self, event): | |||
|
59 | return bool(self._scripts.get(event)) | |||
63 |
|
60 | |||
64 | def runshellcommand(script, env): |
|
61 | def log(self, ui, event, msg, opts): | |
65 | # we can't use close_fds *and* redirect stdin. I'm not sure that we |
|
62 | script = self._scripts[event] | |
66 | # need to because the detached process has no console connection. |
|
63 | env = { | |
67 | subprocess.Popen( |
|
64 | b'EVENT': event, | |
68 | procutil.tonativestr(script), |
|
65 | b'HGPID': os.getpid(), | |
69 | shell=True, env=procutil.tonativeenv(env), close_fds=True, |
|
66 | b'MSG1': msg, | |
70 | creationflags=_creationflags) |
|
67 | } | |
71 | else: |
|
68 | # keyword arguments get prefixed with OPT_ and uppercased | |
72 | def runshellcommand(script, env): |
|
69 | env.update((b'OPT_%s' % key.upper(), value) | |
73 | # double-fork to completely detach from the parent process |
|
70 | for key, value in opts.items()) | |
74 | # based on http://code.activestate.com/recipes/278731 |
|
71 | fullenv = procutil.shellenviron(env) | |
75 | pid = os.fork() |
|
72 | procutil.runbgcommand(script, fullenv, shell=True) | |
76 | if pid: |
|
|||
77 | # parent |
|
|||
78 | return |
|
|||
79 | # subprocess.Popen() forks again, all we need to add is |
|
|||
80 | # flag the new process as a new session. |
|
|||
81 | if sys.version_info < (3, 2): |
|
|||
82 | newsession = {'preexec_fn': os.setsid} |
|
|||
83 | else: |
|
|||
84 | newsession = {'start_new_session': True} |
|
|||
85 | try: |
|
|||
86 | # connect std* to devnull to make sure the subprocess can't |
|
|||
87 | # muck up these stream for mercurial. |
|
|||
88 | # Connect all the streams to be more close to Windows behavior |
|
|||
89 | # and pager will wait for scripts to end if we don't do that |
|
|||
90 | nullrfd = open(os.devnull, 'r') |
|
|||
91 | nullwfd = open(os.devnull, 'w') |
|
|||
92 | subprocess.Popen( |
|
|||
93 | procutil.tonativestr(script), |
|
|||
94 | shell=True, stdin=nullrfd, |
|
|||
95 | stdout=nullwfd, stderr=nullwfd, |
|
|||
96 | env=procutil.tonativeenv(env), |
|
|||
97 | close_fds=True, **newsession) |
|
|||
98 | finally: |
|
|||
99 | # mission accomplished, this child needs to exit and not |
|
|||
100 | # continue the hg process here. |
|
|||
101 | os._exit(0) |
|
|||
102 |
|
73 | |||
103 | class logtoprocessui(ui.__class__): |
|
74 | def uipopulate(ui): | |
104 | def log(self, event, *msg, **opts): |
|
75 | ui.setlogger(b'logtoprocess', processlogger(ui)) | |
105 | """Map log events to external commands |
|
|||
106 |
|
||||
107 | Arguments are passed on as environment variables. |
|
|||
108 |
|
||||
109 | """ |
|
|||
110 | script = self.config('logtoprocess', event) |
|
|||
111 | if script: |
|
|||
112 | if msg: |
|
|||
113 | # try to format the log message given the remaining |
|
|||
114 | # arguments |
|
|||
115 | try: |
|
|||
116 | # Format the message as blackbox does |
|
|||
117 | formatted = msg[0] % msg[1:] |
|
|||
118 | except (TypeError, KeyError): |
|
|||
119 | # Failed to apply the arguments, ignore |
|
|||
120 | formatted = msg[0] |
|
|||
121 | messages = (formatted,) + msg[1:] |
|
|||
122 | else: |
|
|||
123 | messages = msg |
|
|||
124 | # positional arguments are listed as MSG[N] keys in the |
|
|||
125 | # environment |
|
|||
126 | msgpairs = ( |
|
|||
127 | ('MSG{0:d}'.format(i), str(m)) |
|
|||
128 | for i, m in enumerate(messages, 1)) |
|
|||
129 | # keyword arguments get prefixed with OPT_ and uppercased |
|
|||
130 | optpairs = ( |
|
|||
131 | ('OPT_{0}'.format(key.upper()), str(value)) |
|
|||
132 | for key, value in opts.iteritems()) |
|
|||
133 | env = dict(itertools.chain(procutil.shellenviron().items(), |
|
|||
134 | msgpairs, optpairs), |
|
|||
135 | EVENT=event, HGPID=str(os.getpid())) |
|
|||
136 | runshellcommand(script, env) |
|
|||
137 | return super(logtoprocessui, self).log(event, *msg, **opts) |
|
|||
138 |
|
||||
139 | # Replace the class for this instance and all clones created from it: |
|
|||
140 | ui.__class__ = logtoprocessui |
|
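The rewrite above trades a ui subclass whose log() did its own dispatch for a small logger object registered via ui.setlogger(); ui then consults tracked() cheaply before formatting and calling log(). A sketch of that observer shape, with a stand-in ui and a logger that prints instead of spawning a shell:

    class fakeui(object):
        """Stand-in for the relevant slice of ui's logger registry."""
        def __init__(self):
            self._loggers = {}

        def setlogger(self, name, logger):
            self._loggers[name] = logger

        def log(self, event, msgfmt, *args, **opts):
            msg = msgfmt % args if args else msgfmt
            for logger in self._loggers.values():
                if logger.tracked(event):  # filter before doing any work
                    logger.log(self, event, msg, opts)

    class printlogger(object):
        def __init__(self, scripts):
            self._scripts = scripts

        def tracked(self, event):
            return event in self._scripts

        def log(self, ui, event, msg, opts):
            print('would run %r with MSG1=%r' % (self._scripts[event], msg))

    ui = fakeui()
    ui.setlogger('logtoprocess', printlogger({'foo': 'echo "$MSG1"'}))
    ui.log('foo', 'bar %s', 'baz')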
@@ -139,6 +139,8 b' except KeyError:' | |||||
139 | class dummyui(object): |
|
139 | class dummyui(object): | |
140 | def debug(self, msg): |
|
140 | def debug(self, msg): | |
141 | pass |
|
141 | pass | |
|
142 | def log(self, event, msgfmt, *msgargs, **opts): | |||
|
143 | pass | |||
142 | stripext = extensions.load(dummyui(), 'strip', '') |
|
144 | stripext = extensions.load(dummyui(), 'strip', '') | |
143 |
|
145 | |||
144 | strip = stripext.strip |
|
146 | strip = stripext.strip |
@@ -1,6 +1,3 b'' | |||||
1 | Integration with the share extension needs improvement. Right now |
|
|||
2 | we've seen some odd bugs. |
|
|||
3 |
|
||||
4 |
|
|
1 | Address commentary in manifest.excludedmanifestrevlog.add - | |
5 | specifically we should improve the collaboration with core so that |
|
2 | specifically we should improve the collaboration with core so that | |
6 | add() never gets called on an excluded directory and we can improve |
|
3 | add() never gets called on an excluded directory and we can improve |
@@ -20,7 +20,7 b' from mercurial import (' | |||||
20 | changegroup, |
|
20 | changegroup, | |
21 | error, |
|
21 | error, | |
22 | exchange, |
|
22 | exchange, | |
23 | extensions, |
|
23 | localrepo, | |
24 | narrowspec, |
|
24 | narrowspec, | |
25 | repair, |
|
25 | repair, | |
26 | repository, |
|
26 | repository, | |
@@ -31,10 +31,9 b' from mercurial.utils import (' | |||||
31 | stringutil, |
|
31 | stringutil, | |
32 | ) |
|
32 | ) | |
33 |
|
33 | |||
34 | NARROWCAP = 'narrow' |
|
|||
35 | _NARROWACL_SECTION = 'narrowhgacl' |
|
34 | _NARROWACL_SECTION = 'narrowhgacl' | |
36 | _CHANGESPECPART = NARROWCAP + ':changespec' |
|
35 | _CHANGESPECPART = 'narrow:changespec' | |
37 | _SPECPART = NARROWCAP + ':spec' |
|
36 | _SPECPART = 'narrow:spec' | |
38 | _SPECPART_INCLUDE = 'include' |
|
37 | _SPECPART_INCLUDE = 'include' | |
39 | _SPECPART_EXCLUDE = 'exclude' |
|
38 | _SPECPART_EXCLUDE = 'exclude' | |
40 | _KILLNODESIGNAL = 'KILL' |
|
39 | _KILLNODESIGNAL = 'KILL' | |
@@ -44,12 +43,6 b" NARROWCAP = 'narrow'" | |||||
44 | _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER) |
|
43 | _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER) | |
45 | _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER) |
|
44 | _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER) | |
46 |
|
45 | |||
47 | # When advertising capabilities, always include narrow clone support. |
|
|||
48 | def getrepocaps_narrow(orig, repo, **kwargs): |
|
|||
49 | caps = orig(repo, **kwargs) |
|
|||
50 | caps[NARROWCAP] = ['v0'] |
|
|||
51 | return caps |
|
|||
52 |
|
||||
53 | # Serve a changegroup for a client with a narrow clone. |
|
46 | # Serve a changegroup for a client with a narrow clone. | |
54 | def getbundlechangegrouppart_narrow(bundler, repo, source, |
|
47 | def getbundlechangegrouppart_narrow(bundler, repo, source, | |
55 | bundlecaps=None, b2caps=None, heads=None, |
|
48 | bundlecaps=None, b2caps=None, heads=None, | |
@@ -158,6 +151,7 b' def _handlechangespec_2(op, inpart):' | |||||
158 | op.repo.requirements.add(repository.NARROW_REQUIREMENT) |
|
151 | op.repo.requirements.add(repository.NARROW_REQUIREMENT) | |
159 | op.repo._writerequirements() |
|
152 | op.repo._writerequirements() | |
160 | op.repo.setnarrowpats(includepats, excludepats) |
|
153 | op.repo.setnarrowpats(includepats, excludepats) | |
|
154 | narrowspec.copytoworkingcopy(op.repo) | |||
161 |
|
155 | |||
162 | @bundle2.parthandler(_CHANGESPECPART) |
|
156 | @bundle2.parthandler(_CHANGESPECPART) | |
163 | def _handlechangespec(op, inpart): |
|
157 | def _handlechangespec(op, inpart): | |
@@ -187,18 +181,15 b' def _handlechangespec(op, inpart):' | |||||
187 |
|
181 | |||
188 | if clkills: |
|
182 | if clkills: | |
189 | # preserve bookmarks that repair.strip() would otherwise strip |
|
183 | # preserve bookmarks that repair.strip() would otherwise strip | |
190 | bmstore = repo._bookmarks |
|
184 | op._bookmarksbackup = repo._bookmarks | |
191 | class dummybmstore(dict): |
|
185 | class dummybmstore(dict): | |
192 | def applychanges(self, repo, tr, changes): |
|
186 | def applychanges(self, repo, tr, changes): | |
193 | pass |
|
187 | pass | |
194 | def recordchange(self, tr): # legacy version |
|
188 | localrepo.localrepository._bookmarks.set(repo, dummybmstore()) | |
195 | pass |
|
|||
196 | repo._bookmarks = dummybmstore() |
|
|||
197 | chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True, |
|
189 | chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True, | |
198 | topic='widen') |
|
190 | topic='widen') | |
199 | repo._bookmarks = bmstore |
|
|||
200 | if chgrpfile: |
|
191 | if chgrpfile: | |
201 | op._widen_uninterr = repo.ui.uninterruptable() |
|
192 | op._widen_uninterr = repo.ui.uninterruptible() | |
202 | op._widen_uninterr.__enter__() |
|
193 | op._widen_uninterr.__enter__() | |
203 | # presence of _widen_bundle attribute activates widen handler later |
|
194 | # presence of _widen_bundle attribute activates widen handler later | |
204 | op._widen_bundle = chgrpfile |
|
195 | op._widen_bundle = chgrpfile | |
@@ -252,16 +243,12 b' def handlechangegroup_widen(op, inpart):' | |||||
252 |
|
243 | |||
253 | def setup(): |
|
244 | def setup(): | |
254 | """Enable narrow repo support in bundle2-related extension points.""" |
|
245 | """Enable narrow repo support in bundle2-related extension points.""" | |
255 | extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow) |
|
|||
256 |
|
||||
257 | getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS |
|
246 | getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS | |
258 |
|
247 | |||
259 | getbundleargs['narrow'] = 'boolean' |
|
248 | getbundleargs['narrow'] = 'boolean' | |
260 | getbundleargs['depth'] = 'plain' |
|
249 | getbundleargs['depth'] = 'plain' | |
261 | getbundleargs['oldincludepats'] = 'csv' |
|
250 | getbundleargs['oldincludepats'] = 'csv' | |
262 | getbundleargs['oldexcludepats'] = 'csv' |
|
251 | getbundleargs['oldexcludepats'] = 'csv' | |
263 | getbundleargs['includepats'] = 'csv' |
|
|||
264 | getbundleargs['excludepats'] = 'csv' |
|
|||
265 | getbundleargs['known'] = 'csv' |
|
252 | getbundleargs['known'] = 'csv' | |
266 |
|
253 | |||
267 | # Extend changegroup serving to handle requests from narrow clients. |
|
254 | # Extend changegroup serving to handle requests from narrow clients. | |
@@ -284,5 +271,10 b' def setup():' | |||||
284 | origcghandler(op, inpart) |
|
271 | origcghandler(op, inpart) | |
285 | if util.safehasattr(op, '_widen_bundle'): |
|
272 | if util.safehasattr(op, '_widen_bundle'): | |
286 | handlechangegroup_widen(op, inpart) |
|
273 | handlechangegroup_widen(op, inpart) | |
|
274 | if util.safehasattr(op, '_bookmarksbackup'): | |||
|
275 | localrepo.localrepository._bookmarks.set(op.repo, | |||
|
276 | op._bookmarksbackup) | |||
|
277 | del op._bookmarksbackup | |||
|
278 | ||||
287 | wrappedcghandler.params = origcghandler.params |
|
279 | wrappedcghandler.params = origcghandler.params | |
288 | bundle2.parthandlermapping['changegroup'] = wrappedcghandler |
|
280 | bundle2.parthandlermapping['changegroup'] = wrappedcghandler |
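The wrapped changegroup handler above copies .params from the original so bundle2 still sees the declared part parameters, and now also restores the backed-up bookmark store after processing. A toy sketch of that wrap-and-copy-attribute move; the registry here is a stand-in for bundle2's:

    parthandlermapping = {}

    def parthandler(name, params=()):
        def dec(fn):
            fn.params = params           # the framework consults this
            parthandlermapping[name] = fn
            return fn
        return dec

    @parthandler('changegroup', params=('version',))
    def handlechangegroup(op, inpart):
        print('core handler for', inpart)

    orig = parthandlermapping['changegroup']

    def wrapped(op, inpart):
        orig(op, inpart)
        print('post-processing (e.g. restoring a bookmarks backup)')

    # Without this line the wrapper would hide the declared parameters.
    wrapped.params = orig.params
    parthandlermapping['changegroup'] = wrapped

    parthandlermapping['changegroup'](None, 'part')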
@@ -20,7 +20,6 b' from mercurial import (' | |||||
20 | exchange, |
|
20 | exchange, | |
21 | extensions, |
|
21 | extensions, | |
22 | hg, |
|
22 | hg, | |
23 | merge, |
|
|||
24 | narrowspec, |
|
23 | narrowspec, | |
25 | node, |
|
24 | node, | |
26 | pycompat, |
|
25 | pycompat, | |
@@ -141,8 +140,10 b' def pullbundle2extraprepare(orig, pullop' | |||||
141 | include, exclude = repo.narrowpats |
|
140 | include, exclude = repo.narrowpats | |
142 | kwargs['oldincludepats'] = include |
|
141 | kwargs['oldincludepats'] = include | |
143 | kwargs['oldexcludepats'] = exclude |
|
142 | kwargs['oldexcludepats'] = exclude | |
144 | kwargs['includepats'] = include |
|
143 | if include: | |
145 |
kwargs[' |
|
144 | kwargs['includepats'] = include | |
|
145 | if exclude: | |||
|
146 | kwargs['excludepats'] = exclude | |||
146 | # calculate known nodes only in ellipses cases because in non-ellipses cases |
|
147 | # calculate known nodes only in ellipses cases because in non-ellipses cases | |
147 | # we have all the nodes |
|
148 | # we have all the nodes | |
148 | if wireprototypes.ELLIPSESCAP in pullop.remote.capabilities(): |
|
149 | if wireprototypes.ELLIPSESCAP in pullop.remote.capabilities(): | |
@@ -158,16 +159,6 b' def pullbundle2extraprepare(orig, pullop' | |||||
158 | extensions.wrapfunction(exchange,'_pullbundle2extraprepare', |
|
159 | extensions.wrapfunction(exchange,'_pullbundle2extraprepare', | |
159 | pullbundle2extraprepare) |
|
160 | pullbundle2extraprepare) | |
160 |
|
161 | |||
161 | # This is an extension point for filesystems that need to do something other |
|
|||
162 | # than just blindly unlink the files. It's not clear what arguments would be |
|
|||
163 | # useful, so we're passing in a fair number of them, some of them redundant. |
|
|||
164 | def _narrowcleanupwdir(repo, oldincludes, oldexcludes, newincludes, newexcludes, |
|
|||
165 | oldmatch, newmatch): |
|
|||
166 | for f in repo.dirstate: |
|
|||
167 | if not newmatch(f): |
|
|||
168 | repo.dirstate.drop(f) |
|
|||
169 | repo.wvfs.unlinkpath(f) |
|
|||
170 |
|
||||
171 | def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes, |
|
162 | def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes, | |
172 | newincludes, newexcludes, force): |
|
163 | newincludes, newexcludes, force): | |
173 | oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes) |
|
164 | oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes) | |
@@ -205,7 +196,7 b' def _narrow(ui, repo, remote, commoninc,' | |||||
205 | hint=_('use --force-delete-local-changes to ' |
|
196 | hint=_('use --force-delete-local-changes to ' | |
206 | 'ignore')) |
|
197 | 'ignore')) | |
207 |
|
198 | |||
208 | with ui.uninterruptable(): |
|
199 | with ui.uninterruptible(): | |
209 | if revstostrip: |
|
200 | if revstostrip: | |
210 | tostrip = [unfi.changelog.node(r) for r in revstostrip] |
|
201 | tostrip = [unfi.changelog.node(r) for r in revstostrip] | |
211 | if repo['.'].node() in tostrip: |
|
202 | if repo['.'].node() in tostrip: | |
@@ -213,7 +204,9 b' def _narrow(ui, repo, remote, commoninc,' | |||||
213 | urev = max(repo.revs('(::%n) - %ln + null', |
|
204 | urev = max(repo.revs('(::%n) - %ln + null', | |
214 | repo['.'].node(), visibletostrip)) |
|
205 | repo['.'].node(), visibletostrip)) | |
215 | hg.clean(repo, urev) |
|
206 | hg.clean(repo, urev) | |
216 | repair.strip(ui, unfi, tostrip, topic='narrow') |
|
207 | overrides = {('devel', 'strip-obsmarkers'): False} | |
|
208 | with ui.configoverride(overrides, 'narrow'): | |||
|
209 | repair.strip(ui, unfi, tostrip, topic='narrow') | |||
217 |
|
210 | |||
218 | todelete = [] |
|
211 | todelete = [] | |
219 | for f, f2, size in repo.store.datafiles(): |
|
212 | for f, f2, size in repo.store.datafiles(): | |
@@ -237,22 +230,23 b' def _narrow(ui, repo, remote, commoninc,' | |||||
237 |
|
230 | |||
238 | repo.destroying() |
|
231 | repo.destroying() | |
239 |
|
232 | |||
240 | with repo.transaction("narrowing"): |
|
233 | with repo.transaction('narrowing'): | |
|
234 | # Update narrowspec before removing revlogs, so repo won't be | |||
|
235 | # corrupt in case of crash | |||
|
236 | repo.setnarrowpats(newincludes, newexcludes) | |||
|
237 | ||||
241 | for f in todelete: |
|
238 | for f in todelete: | |
242 | ui.status(_('deleting %s\n') % f) |
|
239 | ui.status(_('deleting %s\n') % f) | |
243 | util.unlinkpath(repo.svfs.join(f)) |
|
240 | util.unlinkpath(repo.svfs.join(f)) | |
244 | repo.store.markremoved(f) |
|
241 | repo.store.markremoved(f) | |
245 |
|
242 | |||
246 | _narrowcleanupwdir(repo, oldincludes, oldexcludes, newincludes, |
|
243 | narrowspec.updateworkingcopy(repo, assumeclean=True) | |
247 | newexcludes, oldmatch, newmatch) |
|
244 | narrowspec.copytoworkingcopy(repo) | |
248 | repo.setnarrowpats(newincludes, newexcludes) |
|
|||
249 |
|
245 | |||
250 | repo.destroyed() |
|
246 | repo.destroyed() | |
251 |
|
247 | |||
252 | def _widen(ui, repo, remote, commoninc, oldincludes, oldexcludes, |
|
248 | def _widen(ui, repo, remote, commoninc, oldincludes, oldexcludes, | |
253 | newincludes, newexcludes): |
|
249 | newincludes, newexcludes): | |
254 | newmatch = narrowspec.match(repo.root, newincludes, newexcludes) |
|
|||
255 |
|
||||
256 | # for now we assume that if a server has ellipses enabled, we will be |
|
250 | # for now we assume that if a server has ellipses enabled, we will be | |
257 | # exchanging ellipses nodes. In future we should add ellipses as a client |
|
251 | # exchanging ellipses nodes. In future we should add ellipses as a client | |
258 | # side requirement (maybe) to distinguish a client is shallow or not and |
|
252 | # side requirement (maybe) to distinguish a client is shallow or not and | |
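The 'narrowing' transaction in the hunk above deliberately persists the new narrowspec before unlinking revlogs, so a crash midway leaves orphaned store files rather than a spec that still claims them. A sketch of that ordering, with hypothetical callables standing in for the repo pieces:

    import contextlib

    @contextlib.contextmanager
    def transaction(name):
        # Stand-in for repo.transaction(): commit on success, abort on error.
        print('begin', name)
        try:
            yield
            print('commit', name)
        except Exception:
            print('abort', name)
            raise

    def narrow(writespec, todelete, unlink):
        with transaction('narrowing'):
            # 1. Persist the narrowed spec first: a crash after this point
            #    leaves orphaned files, not metadata that requires them.
            writespec()
            # 2. Only then remove the now-excluded files (not transactional).
            for f in todelete:
                unlink(f)

    narrow(lambda: print('write narrowspec'),
           ['data/excluded.i'],
           lambda f: print('unlink', f))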
@@ -277,7 +271,7 b' def _widen(ui, repo, remote, commoninc, ' | |||||
277 | # silence the devel-warning of applying an empty changegroup |
|
271 | # silence the devel-warning of applying an empty changegroup | |
278 | overrides = {('devel', 'all-warnings'): False} |
|
272 | overrides = {('devel', 'all-warnings'): False} | |
279 |
|
273 | |||
280 | with ui.uninterruptable(): |
|
274 | with ui.uninterruptible(): | |
281 | common = commoninc[0] |
|
275 | common = commoninc[0] | |
282 | if ellipsesremote: |
|
276 | if ellipsesremote: | |
283 | ds = repo.dirstate |
|
277 | ds = repo.dirstate | |
@@ -308,19 +302,10 b' def _widen(ui, repo, remote, commoninc, ' | |||||
308 | bundle2.processbundle(repo, bundle, |
|
302 | bundle2.processbundle(repo, bundle, | |
309 | transactiongetter=tgetter) |
|
303 | transactiongetter=tgetter) | |
310 |
|
304 | |||
311 | repo.setnewnarrowpats() |
|
305 | with repo.transaction('widening'): | |
312 | actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()} |
|
306 | repo.setnewnarrowpats() | |
313 | addgaction = actions['g'].append |
|
307 | narrowspec.updateworkingcopy(repo) | |
314 |
|
308 | narrowspec.copytoworkingcopy(repo) | ||
315 | mf = repo['.'].manifest().matches(newmatch) |
|
|||
316 | for f, fn in mf.iteritems(): |
|
|||
317 | if f not in repo.dirstate: |
|
|||
318 | addgaction((f, (mf.flags(f), False), |
|
|||
319 | "add from widened narrow clone")) |
|
|||
320 |
|
||||
321 | merge.applyupdates(repo, actions, wctx=repo[None], |
|
|||
322 | mctx=repo['.'], overwrite=False) |
|
|||
323 | merge.recordupdates(repo, actions, branchmerge=False) |
|
|||
324 |
|
309 | |||
325 | # TODO(rdamazio): Make new matcher format and update description |
|
310 | # TODO(rdamazio): Make new matcher format and update description | |
326 | @command('tracked', |
|
311 | @command('tracked', | |
@@ -332,6 +317,8 b' def _widen(ui, repo, remote, commoninc, ' | |||||
332 | ('', 'clear', False, _('whether to replace the existing narrowspec')), |
|
317 | ('', 'clear', False, _('whether to replace the existing narrowspec')), | |
333 | ('', 'force-delete-local-changes', False, |
|
318 | ('', 'force-delete-local-changes', False, | |
334 | _('forces deletion of local changes when narrowing')), |
|
319 | _('forces deletion of local changes when narrowing')), | |
|
320 | ('', 'update-working-copy', False, | |||
|
321 | _('update working copy when the store has changed')), | |||
335 | ] + commands.remoteopts, |
|
322 | ] + commands.remoteopts, | |
336 | _('[OPTIONS]... [REMOTE]'), |
|
323 | _('[OPTIONS]... [REMOTE]'), | |
337 | inferrepo=True) |
|
324 | inferrepo=True) | |
@@ -361,15 +348,13 b' def trackedcmd(ui, repo, remotepath=None' | |||||
361 | """ |
|
348 | """ | |
362 | opts = pycompat.byteskwargs(opts) |
|
349 | opts = pycompat.byteskwargs(opts) | |
363 | if repository.NARROW_REQUIREMENT not in repo.requirements: |
|
350 | if repository.NARROW_REQUIREMENT not in repo.requirements: | |
364 |
|
|
351 | raise error.Abort(_('the narrow command is only supported on ' | |
365 |
' with --narrow |
|
352 | 'repositories cloned with --narrow')) |
366 | return 1 |
|
|||
367 |
|
353 | |||
368 | # Before supporting, decide whether it "hg tracked --clear" should mean |
|
354 | # Before supporting, decide whether it "hg tracked --clear" should mean | |
369 | # tracking no paths or all paths. |
|
355 | # tracking no paths or all paths. | |
370 | if opts['clear']: |
|
356 | if opts['clear']: | |
371 |
|
|
357 | raise error.Abort(_('the --clear option is not yet supported')) | |
372 | return 1 |
|
|||
373 |
|
358 | |||
374 | # import rules from a file |
|
359 | # import rules from a file | |
375 | newrules = opts.get('import_rules') |
|
360 | newrules = opts.get('import_rules') | |
@@ -392,27 +377,48 b' def trackedcmd(ui, repo, remotepath=None' | |||||
392 | removedincludes = narrowspec.parsepatterns(opts['removeinclude']) |
|
377 | removedincludes = narrowspec.parsepatterns(opts['removeinclude']) | |
393 | addedexcludes = narrowspec.parsepatterns(opts['addexclude']) |
|
378 | addedexcludes = narrowspec.parsepatterns(opts['addexclude']) | |
394 | removedexcludes = narrowspec.parsepatterns(opts['removeexclude']) |
|
379 | removedexcludes = narrowspec.parsepatterns(opts['removeexclude']) | |
|
380 | ||||
|
381 | update_working_copy = opts['update_working_copy'] | |||
|
382 | only_show = not (addedincludes or removedincludes or addedexcludes or | |||
|
383 | removedexcludes or newrules or update_working_copy) | |||
|
384 | ||||
|
385 | oldincludes, oldexcludes = repo.narrowpats | |||
|
386 | ||||
|
387 | # filter the user passed additions and deletions into actual additions and | |||
|
388 | # deletions of excludes and includes | |||
|
389 | addedincludes -= oldincludes | |||
|
390 | removedincludes &= oldincludes | |||
|
391 | addedexcludes -= oldexcludes | |||
|
392 | removedexcludes &= oldexcludes | |||
|
393 | ||||
395 | widening = addedincludes or removedexcludes |
|
394 | widening = addedincludes or removedexcludes | |
396 | narrowing = removedincludes or addedexcludes |
|
395 | narrowing = removedincludes or addedexcludes | |
397 | only_show = not widening and not narrowing |
|
|||
398 |
|
396 | |||
399 | # Only print the current narrowspec. |
|
397 | # Only print the current narrowspec. | |
400 | if only_show: |
|
398 | if only_show: | |
401 | include, exclude = repo.narrowpats |
|
|||
402 |
|
||||
403 | ui.pager('tracked') |
|
399 | ui.pager('tracked') | |
404 | fm = ui.formatter('narrow', opts) |
|
400 | fm = ui.formatter('narrow', opts) | |
405 | for i in sorted(include): |
|
401 | for i in sorted(oldincludes): | |
406 | fm.startitem() |
|
402 | fm.startitem() | |
407 | fm.write('status', '%s ', 'I', label='narrow.included') |
|
403 | fm.write('status', '%s ', 'I', label='narrow.included') | |
408 | fm.write('pat', '%s\n', i, label='narrow.included') |
|
404 | fm.write('pat', '%s\n', i, label='narrow.included') | |
409 | for i in sorted(exclude): |
|
405 | for i in sorted(oldexcludes): | |
410 | fm.startitem() |
|
406 | fm.startitem() | |
411 | fm.write('status', '%s ', 'X', label='narrow.excluded') |
|
407 | fm.write('status', '%s ', 'X', label='narrow.excluded') | |
412 | fm.write('pat', '%s\n', i, label='narrow.excluded') |
|
408 | fm.write('pat', '%s\n', i, label='narrow.excluded') | |
413 | fm.end() |
|
409 | fm.end() | |
414 | return 0 |
|
410 | return 0 | |
415 |
|
411 | |||
|
412 | if update_working_copy: | |||
|
413 | with repo.wlock(), repo.lock(), repo.transaction('narrow-wc'): | |||
|
414 | narrowspec.updateworkingcopy(repo) | |||
|
415 | narrowspec.copytoworkingcopy(repo) | |||
|
416 | return 0 | |||
|
417 | ||||
|
418 | if not widening and not narrowing: | |||
|
419 | ui.status(_("nothing to widen or narrow\n")) | |||
|
420 | return 0 | |||
|
421 | ||||
416 | with repo.wlock(), repo.lock(): |
|
422 | with repo.wlock(), repo.lock(): | |
417 | cmdutil.bailifchanged(repo) |
|
423 | cmdutil.bailifchanged(repo) | |
418 |
|
424 | |||
@@ -432,7 +438,6 b' def trackedcmd(ui, repo, remotepath=None' | |||||
432 |
|
438 | |||
433 | commoninc = discovery.findcommonincoming(repo, remote) |
|
439 | commoninc = discovery.findcommonincoming(repo, remote) | |
434 |
|
440 | |||
435 | oldincludes, oldexcludes = repo.narrowpats |
|
|||
436 | if narrowing: |
|
441 | if narrowing: | |
437 | newincludes = oldincludes - removedincludes |
|
442 | newincludes = oldincludes - removedincludes | |
438 | newexcludes = oldexcludes | addedexcludes |
|
443 | newexcludes = oldexcludes | addedexcludes |
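The filtering added above turns the user's raw add/remove requests into actual changes by set arithmetic against the current spec, so repeating an include that is already tracked counts as neither widening nor narrowing. A standalone sketch of the same arithmetic:

    def classify(oldincludes, oldexcludes, addedincludes, removedincludes,
                 addedexcludes, removedexcludes):
        # Mirror of the filtering above: drop no-op requests so
        # 'widening'/'narrowing' reflect real changes only.
        addedincludes = addedincludes - oldincludes
        removedincludes = removedincludes & oldincludes
        addedexcludes = addedexcludes - oldexcludes
        removedexcludes = removedexcludes & oldexcludes
        widening = bool(addedincludes or removedexcludes)
        narrowing = bool(removedincludes or addedexcludes)
        return widening, narrowing

    # re-adding an already-tracked path is a no-op:
    print(classify({'path:a'}, set(), {'path:a'}, set(), set(), set()))
    # (False, False)
    print(classify({'path:a'}, set(), {'path:b'}, set(), set(), set()))
    # (True, False)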
@@ -41,6 +41,7 b' Config::' | |||||
41 |
|
41 | |||
42 | from __future__ import absolute_import |
|
42 | from __future__ import absolute_import | |
43 |
|
43 | |||
|
44 | import contextlib | |||
44 | import itertools |
|
45 | import itertools | |
45 | import json |
|
46 | import json | |
46 | import operator |
|
47 | import operator | |
@@ -58,6 +59,7 b' from mercurial import (' | |||||
58 | obsutil, |
|
59 | obsutil, | |
59 | parser, |
|
60 | parser, | |
60 | patch, |
|
61 | patch, | |
|
62 | phases, | |||
61 | registrar, |
|
63 | registrar, | |
62 | scmutil, |
|
64 | scmutil, | |
63 | smartset, |
|
65 | smartset, | |
@@ -121,7 +123,7 b' colortable = {' | |||||
121 | )), |
|
123 | )), | |
122 | ] |
|
124 | ] | |
123 |
|
125 | |||
124 | def vcrcommand(name, flags, spec): |
|
126 | def vcrcommand(name, flags, spec, helpcategory=None): | |
125 | fullflags = flags + _VCR_FLAGS |
|
127 | fullflags = flags + _VCR_FLAGS | |
126 | def decorate(fn): |
|
128 | def decorate(fn): | |
127 | def inner(*args, **kwargs): |
|
129 | def inner(*args, **kwargs): | |
@@ -143,7 +145,7 b' def vcrcommand(name, flags, spec):' | |||||
143 | return fn(*args, **kwargs) |
|
145 | return fn(*args, **kwargs) | |
144 | inner.__name__ = fn.__name__ |
|
146 | inner.__name__ = fn.__name__ | |
145 | inner.__doc__ = fn.__doc__ |
|
147 | inner.__doc__ = fn.__doc__ | |
146 | return command(name, fullflags, spec)(inner) |
|
148 | return command(name, fullflags, spec, helpcategory=helpcategory)(inner) | |
147 | return decorate |
|
149 | return decorate | |
148 |
|
150 | |||
149 | def urlencodenested(params): |
|
151 | def urlencodenested(params): | |
@@ -214,7 +216,8 b' def callconduit(repo, name, params):' | |||||
214 | else: |
|
216 | else: | |
215 | urlopener = urlmod.opener(repo.ui, authinfo) |
|
217 | urlopener = urlmod.opener(repo.ui, authinfo) | |
216 | request = util.urlreq.request(url, data=data) |
|
218 | request = util.urlreq.request(url, data=data) | |
217 | body = urlopener.open(request).read() |
|
219 | with contextlib.closing(urlopener.open(request)) as rsp: | |
|
220 | body = rsp.read() | |||
218 | repo.ui.debug(b'Conduit Response: %s\n' % body) |
|
221 | repo.ui.debug(b'Conduit Response: %s\n' % body) | |
219 | parsed = json.loads(body) |
|
222 | parsed = json.loads(body) | |
220 | if parsed.get(r'error_code'): |
|
223 | if parsed.get(r'error_code'): | |
@@ -465,7 +468,8 b' def userphids(repo, names):' | |||||
465 | (b'', b'amend', True, _(b'update commit messages')), |
|
468 | (b'', b'amend', True, _(b'update commit messages')), | |
466 | (b'', b'reviewer', [], _(b'specify reviewers')), |
|
469 | (b'', b'reviewer', [], _(b'specify reviewers')), | |
467 | (b'', b'confirm', None, _(b'ask for confirmation before sending'))], |
|
470 | (b'', b'confirm', None, _(b'ask for confirmation before sending'))], | |
468 | _(b'REV [OPTIONS]')) |
|
471 | _(b'REV [OPTIONS]'), | |
|
472 | helpcategory=command.CATEGORY_IMPORT_EXPORT) | |||
469 | def phabsend(ui, repo, *revs, **opts): |
|
473 | def phabsend(ui, repo, *revs, **opts): | |
470 | """upload changesets to Phabricator |
|
474 | """upload changesets to Phabricator | |
471 |
|
475 | |||
@@ -581,6 +585,10 b' def phabsend(ui, repo, *revs, **opts):' | |||||
581 | newdesc = encoding.unitolocal(newdesc) |
|
585 | newdesc = encoding.unitolocal(newdesc) | |
582 | # Make sure commit message contain "Differential Revision" |
|
586 | # Make sure commit message contain "Differential Revision" | |
583 | if old.description() != newdesc: |
|
587 | if old.description() != newdesc: | |
|
588 | if old.phase() == phases.public: | |||
|
589 | ui.warn(_("warning: not updating public commit %s\n") | |||
|
590 | % scmutil.formatchangeid(old)) | |||
|
591 | continue | |||
584 | parents = [ |
|
592 | parents = [ | |
585 | mapping.get(old.p1().node(), (old.p1(),))[0], |
|
593 | mapping.get(old.p1().node(), (old.p1(),))[0], | |
586 | mapping.get(old.p2().node(), (old.p2(),))[0], |
|
594 | mapping.get(old.p2().node(), (old.p2(),))[0], | |
@@ -919,7 +927,8 b' def readpatch(repo, drevs, write):' | |||||
919 |
|
927 | |||
920 | @vcrcommand(b'phabread', |
|
928 | @vcrcommand(b'phabread', | |
921 | [(b'', b'stack', False, _(b'read dependencies'))], |
|
929 | [(b'', b'stack', False, _(b'read dependencies'))], | |
922 | _(b'DREVSPEC [OPTIONS]')) |
|
930 | _(b'DREVSPEC [OPTIONS]'), | |
|
931 | helpcategory=command.CATEGORY_IMPORT_EXPORT) | |||
923 | def phabread(ui, repo, spec, **opts): |
|
932 | def phabread(ui, repo, spec, **opts): | |
924 | """print patches from Phabricator suitable for importing |
|
933 | """print patches from Phabricator suitable for importing | |
925 |
|
934 | |||
@@ -950,7 +959,8 b' def phabread(ui, repo, spec, **opts):' | |||||
950 | (b'', b'abandon', False, _(b'abandon revisions')), |
|
959 | (b'', b'abandon', False, _(b'abandon revisions')), | |
951 | (b'', b'reclaim', False, _(b'reclaim revisions')), |
|
960 | (b'', b'reclaim', False, _(b'reclaim revisions')), | |
952 | (b'm', b'comment', b'', _(b'comment on the last revision')), |
|
961 | (b'm', b'comment', b'', _(b'comment on the last revision')), | |
953 | ], _(b'DREVSPEC [OPTIONS]')) |
|
962 | ], _(b'DREVSPEC [OPTIONS]'), | |
|
963 | helpcategory=command.CATEGORY_IMPORT_EXPORT) | |||
954 | def phabupdate(ui, repo, spec, **opts): |
|
964 | def phabupdate(ui, repo, spec, **opts): | |
955 | """update Differential Revision in batch |
|
965 | """update Differential Revision in batch | |
956 |
|
966 | |||
@@ -987,3 +997,17 b' def template_review(context, mapping):' | |||||
987 | b'url': m.group(b'url'), |
|
997 | b'url': m.group(b'url'), | |
988 | b'id': b"D{}".format(m.group(b'id')), |
|
998 | b'id': b"D{}".format(m.group(b'id')), | |
989 | }) |
|
999 | }) | |
|
1000 | else: | |||
|
1001 | tags = ctx.repo().nodetags(ctx.node()) | |||
|
1002 | for t in tags: | |||
|
1003 | if _differentialrevisiontagre.match(t): | |||
|
1004 | url = ctx.repo().ui.config(b'phabricator', b'url') | |||
|
1005 | if not url.endswith(b'/'): | |||
|
1006 | url += b'/' | |||
|
1007 | url += t | |||
|
1008 | ||||
|
1009 | return templateutil.hybriddict({ | |||
|
1010 | b'url': url, | |||
|
1011 | b'id': t, | |||
|
1012 | }) | |||
|
1013 | return None |
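vcrcommand above simply threads the new helpcategory keyword through to @command so the phab* commands land in the right help section, copying __name__ and __doc__ onto the wrapper by hand. A minimal sketch of that pass-through; the registrar here is a stand-in, not Mercurial's:

    def command(name, flags, spec, helpcategory=None):
        # Stand-in registrar: records where the command appears in help.
        def dec(fn):
            fn._cmdinfo = (name, spec, helpcategory)
            return fn
        return dec

    def vcrcommand(name, flags, spec, helpcategory=None):
        def decorate(fn):
            def inner(*args, **kwargs):
                # (cassette setup would happen here)
                return fn(*args, **kwargs)
            inner.__name__ = fn.__name__
            inner.__doc__ = fn.__doc__
            # Forward helpcategory unchanged to the real registrar.
            return command(name, flags, spec,
                           helpcategory=helpcategory)(inner)
        return decorate

    @vcrcommand('phabread', [], 'DREVSPEC', helpcategory='import-export')
    def phabread(ui, repo, spec, **opts):
        """print patches from Phabricator"""

    print(phabread._cmdinfo)  # ('phabread', 'DREVSPEC', 'import-export')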
@@ -177,7 +177,7 b' class rebaseruntime(object):' | |||||
177 | if e: |
|
177 | if e: | |
178 | self.extrafns = [e] |
|
178 | self.extrafns = [e] | |
179 |
|
179 | |||
180 | self.backupf = ui.configbool('ui', 'history-editing-backup') |
|
180 | self.backupf = ui.configbool('rewrite', 'backup-bundle') | |
181 | self.keepf = opts.get('keep', False) |
|
181 | self.keepf = opts.get('keep', False) | |
182 | self.keepbranchesf = opts.get('keepbranches', False) |
|
182 | self.keepbranchesf = opts.get('keepbranches', False) | |
183 | self.obsoletenotrebased = {} |
|
183 | self.obsoletenotrebased = {} | |
@@ -347,9 +347,7 b' class rebaseruntime(object):' | |||||
347 |
|
347 | |||
348 | if isabort: |
|
348 | if isabort: | |
349 | backup = backup and self.backupf |
|
349 | backup = backup and self.backupf | |
350 | return abort(self.repo, self.originalwd, self.destmap, self.state, |
|
350 | return self._abort(backup=backup, suppwarns=suppwarns) | |
351 | activebookmark=self.activebookmark, backup=backup, |
|
|||
352 | suppwarns=suppwarns) |
|
|||
353 |
|
351 | |||
354 | def _preparenewrebase(self, destmap): |
|
352 | def _preparenewrebase(self, destmap): | |
355 | if not destmap: |
|
353 | if not destmap: | |
@@ -404,7 +402,9 b' class rebaseruntime(object):' | |||||
404 | else: |
|
402 | else: | |
405 | self.wctx = self.repo[None] |
|
403 | self.wctx = self.repo[None] | |
406 | self.repo.ui.debug("rebasing on disk\n") |
|
404 | self.repo.ui.debug("rebasing on disk\n") | |
407 | self.repo.ui.log("rebase", "", rebase_imm_used=self.inmemory) |
|
405 | self.repo.ui.log("rebase", | |
|
406 | "using in-memory rebase: %r\n", self.inmemory, | |||
|
407 | rebase_imm_used=self.inmemory) | |||
408 |
|
408 | |||
409 | def _performrebase(self, tr): |
|
409 | def _performrebase(self, tr): | |
410 | self._assignworkingcopy() |
|
410 | self._assignworkingcopy() | |
@@ -573,8 +573,8 b' class rebaseruntime(object):' | |||||
573 | ui.debug('rebased as %s\n' % short(newnode)) |
|
573 | ui.debug('rebased as %s\n' % short(newnode)) | |
574 | else: |
|
574 | else: | |
575 | if not self.collapsef: |
|
575 | if not self.collapsef: | |
576 |
ui.warn(_('note: |
|
576 | ui.warn(_('note: not rebasing %s, its destination already ' | |
577 |
' |
|
577 | 'has all its changes\n') % desc) | |
578 | self.skipped.add(rev) |
|
578 | self.skipped.add(rev) | |
579 | self.state[rev] = p1 |
|
579 | self.state[rev] = p1 | |
580 | ui.debug('next revision set to %d\n' % p1) |
|
580 | ui.debug('next revision set to %d\n' % p1) | |
@@ -651,6 +651,63 b' class rebaseruntime(object):' | |||||
651 | repo['.'].node() == repo._bookmarks[self.activebookmark]): |
|
651 | repo['.'].node() == repo._bookmarks[self.activebookmark]): | |
652 | bookmarks.activate(repo, self.activebookmark) |
|
652 | bookmarks.activate(repo, self.activebookmark) | |
653 |
|
653 | |||
|
654 | def _abort(self, backup=True, suppwarns=False): | |||
|
655 | '''Restore the repository to its original state.''' | |||
|
656 | ||||
|
657 | repo = self.repo | |||
|
658 | try: | |||
|
659 | # If the first commits in the rebased set get skipped during the | |||
|
660 | # rebase, their values within the state mapping will be the dest | |||
|
661 | # rev id. The rebased list must not contain the dest rev | |||
|
662 | # (issue4896) | |||
|
663 | rebased = [s for r, s in self.state.items() | |||
|
664 | if s >= 0 and s != r and s != self.destmap[r]] | |||
|
665 | immutable = [d for d in rebased if not repo[d].mutable()] | |||
|
666 | cleanup = True | |||
|
667 | if immutable: | |||
|
668 | repo.ui.warn(_("warning: can't clean up public changesets %s\n") | |||
|
669 | % ', '.join(bytes(repo[r]) for r in immutable), | |||
|
670 | hint=_("see 'hg help phases' for details")) | |||
|
671 | cleanup = False | |||
|
672 | ||||
|
673 | descendants = set() | |||
|
674 | if rebased: | |||
|
675 | descendants = set(repo.changelog.descendants(rebased)) | |||
|
676 | if descendants - set(rebased): | |||
|
677 | repo.ui.warn(_("warning: new changesets detected on " | |||
|
678 | "destination branch, can't strip\n")) | |||
|
679 | cleanup = False | |||
|
680 | ||||
|
681 | if cleanup: | |||
|
682 | shouldupdate = False | |||
|
683 | if rebased: | |||
|
684 | strippoints = [ | |||
|
685 | c.node() for c in repo.set('roots(%ld)', rebased)] | |||
|
686 | ||||
|
687 | updateifonnodes = set(rebased) | |||
|
688 | updateifonnodes.update(self.destmap.values()) | |||
|
689 | updateifonnodes.add(self.originalwd) | |||
|
690 | shouldupdate = repo['.'].rev() in updateifonnodes | |||
|
691 | ||||
|
692 | # Update away from the rebase if necessary | |||
|
693 | if shouldupdate or needupdate(repo, self.state): | |||
|
694 | mergemod.update(repo, self.originalwd, branchmerge=False, | |||
|
695 | force=True) | |||
|
696 | ||||
|
697 | # Strip from the first rebased revision | |||
|
698 | if rebased: | |||
|
699 | repair.strip(repo.ui, repo, strippoints, backup=backup) | |||
|
700 | ||||
|
701 | if self.activebookmark and self.activebookmark in repo._bookmarks: | |||
|
702 | bookmarks.activate(repo, self.activebookmark) | |||
|
703 | ||||
|
704 | finally: | |||
|
705 | clearstatus(repo) | |||
|
706 | clearcollapsemsg(repo) | |||
|
707 | if not suppwarns: | |||
|
708 | repo.ui.warn(_('rebase aborted\n')) | |||
|
709 | return 0 | |||
|
710 | ||||
654 | @command('rebase', |
|
711 | @command('rebase', | |
655 | [('s', 'source', '', |
|
712 | [('s', 'source', '', | |
656 | _('rebase the specified changeset and descendants'), _('REV')), |
|
713 | _('rebase the specified changeset and descendants'), _('REV')), | |
@@ -1080,7 +1137,8 b' def _definedestmap(ui, repo, inmemory, d' | |||||
1080 | return None |
|
1137 | return None | |
1081 |
|
1138 | |||
1082 | rebasingwcp = repo['.'].rev() in rebaseset |
|
1139 | rebasingwcp = repo['.'].rev() in rebaseset | |
1083 | ui.log("rebase", "", rebase_rebasing_wcp=rebasingwcp) |
|
1140 | ui.log("rebase", "rebasing working copy parent: %r\n", rebasingwcp, | |
|
1141 | rebase_rebasing_wcp=rebasingwcp) | |||
1084 | if inmemory and rebasingwcp: |
|
1142 | if inmemory and rebasingwcp: | |
1085 | # Check these since we did not before. |
|
1143 | # Check these since we did not before. | |
1086 | cmdutil.checkunfinished(repo) |
|
1144 | cmdutil.checkunfinished(repo) | |
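The ui.log() calls above now pass a printf-style message plus the same value as structured keyword data, so a logger receives both a human-readable line and a machine-readable field. A tiny recorder matching the log(event, msgfmt, *msgargs, **opts) shape added to dummyui earlier in this series:

    class recordinglogger(object):
        def __init__(self):
            self.entries = []

        def log(self, event, msgfmt, *msgargs, **opts):
            # Keep the formatted message and the structured key/values.
            self.entries.append((event, msgfmt % msgargs, opts))

    logger = recordinglogger()
    logger.log("rebase", "using in-memory rebase: %r\n", True,
               rebase_imm_used=True)
    print(logger.entries)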
@@ -1606,64 +1664,6 b' def needupdate(repo, state):' | |||||
1606 |
|
1664 | |||
1607 | return False |
|
1665 | return False | |
1608 |
|
1666 | |||
1609 | def abort(repo, originalwd, destmap, state, activebookmark=None, backup=True, |
|
|||
1610 | suppwarns=False): |
|
|||
1611 | '''Restore the repository to its original state. Additional args: |
|
|||
1612 |
|
||||
1613 | activebookmark: the name of the bookmark that should be active after the |
|
|||
1614 | restore''' |
|
|||
1615 |
|
||||
1616 | try: |
|
|||
1617 | # If the first commits in the rebased set get skipped during the rebase, |
|
|||
1618 | # their values within the state mapping will be the dest rev id. The |
|
|||
1619 | # rebased list must not contain the dest rev (issue4896) |
|
|||
1620 | rebased = [s for r, s in state.items() |
|
|||
1621 | if s >= 0 and s != r and s != destmap[r]] |
|
|||
1622 | immutable = [d for d in rebased if not repo[d].mutable()] |
|
|||
1623 | cleanup = True |
|
|||
1624 | if immutable: |
|
|||
1625 | repo.ui.warn(_("warning: can't clean up public changesets %s\n") |
|
|||
1626 | % ', '.join(bytes(repo[r]) for r in immutable), |
|
|||
1627 | hint=_("see 'hg help phases' for details")) |
|
|||
1628 | cleanup = False |
|
|||
1629 |
|
||||
1630 | descendants = set() |
|
|||
1631 | if rebased: |
|
|||
1632 | descendants = set(repo.changelog.descendants(rebased)) |
|
|||
1633 | if descendants - set(rebased): |
|
|||
1634 | repo.ui.warn(_("warning: new changesets detected on destination " |
|
|||
1635 | "branch, can't strip\n")) |
|
|||
1636 | cleanup = False |
|
|||
1637 |
|
||||
1638 | if cleanup: |
|
|||
1639 | shouldupdate = False |
|
|||
1640 | if rebased: |
|
|||
1641 | strippoints = [ |
|
|||
1642 | c.node() for c in repo.set('roots(%ld)', rebased)] |
|
|||
1643 |
|
||||
1644 | updateifonnodes = set(rebased) |
|
|||
1645 | updateifonnodes.update(destmap.values()) |
|
|||
1646 | updateifonnodes.add(originalwd) |
|
|||
1647 | shouldupdate = repo['.'].rev() in updateifonnodes |
|
|||
1648 |
|
||||
1649 | # Update away from the rebase if necessary |
|
|||
1650 | if shouldupdate or needupdate(repo, state): |
|
|||
1651 | mergemod.update(repo, originalwd, branchmerge=False, force=True) |
|
|||
1652 |
|
||||
1653 | # Strip from the first rebased revision |
|
|||
1654 | if rebased: |
|
|||
1655 | repair.strip(repo.ui, repo, strippoints, backup=backup) |
|
|||
1656 |
|
||||
1657 | if activebookmark and activebookmark in repo._bookmarks: |
|
|||
1658 | bookmarks.activate(repo, activebookmark) |
|
|||
1659 |
|
||||
1660 | finally: |
|
|||
1661 | clearstatus(repo) |
|
|||
1662 | clearcollapsemsg(repo) |
|
|||
1663 | if not suppwarns: |
|
|||
1664 | repo.ui.warn(_('rebase aborted\n')) |
|
|||
1665 | return 0 |
|
|||
1666 |
|
||||
1667 | def sortsource(destmap): |
|
1667 | def sortsource(destmap): | |
1668 | """yield source revisions in an order that we only rebase things once |
|
1668 | """yield source revisions in an order that we only rebase things once | |
1669 |
|
1669 |
NO CONTENT: file copied from hgext/blackbox.py to mercurial/loggingutil.py
NO CONTENT: file copied from rust/Cargo.lock to rust/hgcli/Cargo.lock
NO CONTENT: file copied from tests/test-shelve.t to tests/test-shelve2.t
NO CONTENT: file was removed