merge with stable
Yuya Nishihara, r41387:0ae3ddb4 (merge default)

# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

from __future__ import absolute_import

import collections
import contextlib
import errno
import os
import struct
import zlib

# import stuff from node for others to import from revlog
from .node import (
    bin,
    hex,
    nullhex,
    nullid,
    nullrev,
    short,
    wdirfilenodeids,
    wdirhex,
    wdirid,
    wdirrev,
)
from .i18n import _
from .revlogutils.constants import (
    FLAG_GENERALDELTA,
    FLAG_INLINE_DATA,
    REVIDX_DEFAULT_FLAGS,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
    REVIDX_FLAGS_ORDER,
    REVIDX_ISCENSORED,
    REVIDX_KNOWN_FLAGS,
    REVIDX_RAWTEXT_CHANGING_FLAGS,
    REVLOGV0,
    REVLOGV1,
    REVLOGV1_FLAGS,
    REVLOGV2,
    REVLOGV2_FLAGS,
    REVLOG_DEFAULT_FLAGS,
    REVLOG_DEFAULT_FORMAT,
    REVLOG_DEFAULT_VERSION,
)
from .thirdparty import (
    attr,
)
from . import (
    ancestor,
    dagop,
    error,
    mdiff,
    policy,
    pycompat,
    repository,
    templatefilters,
    util,
)
from .revlogutils import (
    deltas as deltautil,
)
from .utils import (
    interfaceutil,
    storageutil,
    stringutil,
)

# Dummy usage of all the imported names, to silence pyflakes "unused"
# warnings. We need these names available in the module for extensions.
REVLOGV0
REVLOGV1
REVLOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
REVLOG_DEFAULT_FORMAT
REVLOG_DEFAULT_VERSION
REVLOGV1_FLAGS
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_KNOWN_FLAGS
REVIDX_RAWTEXT_CHANGING_FLAGS

parsers = policy.importmod(r'parsers')
try:
    from . import rustext
    rustext.__name__  # force actual import (see hgdemandimport)
except ImportError:
    rustext = None

# Aliased for performance.
_zlibdecompress = zlib.decompress

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

# Store flag processors (cf. 'addflagprocessor()' to register)
_flagprocessors = {
    REVIDX_ISCENSORED: None,
}

# Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    return text, False

def ellipsiswriteprocessor(rl, text):
    return text, False

def ellipsisrawprocessor(rl, text):
    return False

ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)
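# The tuple above follows the (read, write, raw) ordering that
# addflagprocessor() below requires of flag processors.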

def addflagprocessor(flag, processor):
    """Register a flag processor on a revision data flag.

    Invariant:
    - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
      and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
    - Only one flag processor can be registered on a specific flag.
    - flagprocessors must be 3-tuples of functions (read, write, raw) with the
      following signatures:
          - (read) f(self, rawtext) -> text, bool
          - (write) f(self, text) -> rawtext, bool
          - (raw) f(self, rawtext) -> bool
      "text" is presented to the user. "rawtext" is stored in revlog data, not
      directly visible to the user.
      The boolean returned by these transforms is used to determine whether
      the returned text can be used for hash integrity checking. For example,
      if "write" returns False, then "text" is used to generate the hash. If
      "write" returns True, the "rawtext" returned by "write" should be used
      to generate the hash. Usually, "write" and "read" return different
      booleans, and "raw" returns the same boolean as "write".

    Note: The 'raw' transform is used for changegroup generation and in some
    debug commands. In this case the transform only indicates whether the
    contents can be used for hash integrity checks.
    """
    _insertflagprocessor(flag, processor, _flagprocessors)
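# Illustration only (not from this module): a caller would register a
# (read, write, raw) triple for a flag listed in REVIDX_FLAGS_ORDER, e.g.
#
#     def extsreadproc(self, rawtext):      # hypothetical names
#         return rawtext, False
#     def extswriteproc(self, text):
#         return text, False
#     def extsrawproc(self, rawtext):
#         return False
#     addflagprocessor(REVIDX_EXTSTORED,
#                      (extsreadproc, extswriteproc, extsrawproc))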

def _insertflagprocessor(flag, processor, flagprocessors):
    if not flag & REVIDX_KNOWN_FLAGS:
        msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
        raise error.ProgrammingError(msg)
    if flag not in REVIDX_FLAGS_ORDER:
        msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
        raise error.ProgrammingError(msg)
    if flag in flagprocessors:
        msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
        raise error.Abort(msg)
    flagprocessors[flag] = processor

def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    if (type & ~REVIDX_KNOWN_FLAGS) != 0:
        raise ValueError('unknown revlog index flags')
    return int(int(offset) << 16 | type)
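# Example (illustrative): offset_type(10, 0) == 10 << 16 == 655360, and the
# accessors above invert it: getoffset(655360) == 10, gettype(655360) == 0.
# The low 16 bits carry the flags, the remaining bits the offset.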

@attr.s(slots=True, frozen=True)
class _revisioninfo(object):
    """Information about a revision that allows building its fulltext
    node:       expected hash of the revision
    p1, p2:     parent revs of the revision
    btext:      built text cache consisting of a one-element list
    cachedelta: (baserev, uncompressed_delta) or None
    flags:      flags associated to the revision storage

    One of btext[0] or cachedelta must be set.
    """
    node = attr.ib()
    p1 = attr.ib()
    p2 = attr.ib()
    btext = attr.ib()
    textlen = attr.ib()
    cachedelta = attr.ib()
    flags = attr.ib()

@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)

@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)

# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = struct.Struct(">4l20s20s20s")
indexformatv0_pack = indexformatv0.pack
indexformatv0_unpack = indexformatv0.unpack
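# Each v0 entry is indexformatv0.size == 4 * 4 + 3 * 20 = 76 bytes of
# fixed-width data.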

class revlogoldindex(list):
    def __getitem__(self, i):
        if i == -1:
            return (0, 0, 0, -1, -1, -1, -1, nullid)
        return list.__getitem__(self, i)

class revlogoldio(object):
    def __init__(self):
        self.size = indexformatv0.size

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = indexformatv0_unpack(cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        return revlogoldindex(index), nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise error.RevlogError(_('index entry flags need revlog '
                                      'version 1'))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return indexformatv0_pack(*e2)

# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = struct.Struct(">Qiiiiii20s12x")
indexformatng_pack = indexformatng.pack
versionformat = struct.Struct(">I")
versionformat_pack = versionformat.pack
versionformat_unpack = versionformat.unpack
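# An "ng" entry is indexformatng.size == 8 + 6 * 4 + 20 + 12 = 64 bytes; the
# 20-byte nodeid is followed by 12 pad bytes ("12x"), which is why the layout
# comment above lists 32 bytes for the nodeid field.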

# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7fffffff

class revlogio(object):
    def __init__(self):
        self.size = indexformatng.size

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache

    def packentry(self, entry, node, version, rev):
        p = indexformatng_pack(*entry)
        if rev == 0:
            p = versionformat_pack(version) + p[4:]
        return p
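    # Note that for rev 0 the first 4 bytes of the packed entry (the upper
    # half of the offset field, necessarily 0 for the first revision) are
    # overwritten with the version header, so the header costs no extra space.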

class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.

    If mmaplargeindex is True, and an mmapindexthreshold is set, the
    index will be mmapped rather than read if it is larger than the
    configured threshold.

    If censorable is True, the revlog can have censored revisions.
    """
    def __init__(self, opener, indexfile, datafile=None, checkambig=False,
                 mmaplargeindex=False, censorable=False):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = datafile or (indexfile[:-2] + ".d")
        self.opener = opener
        # When True, indexfile is opened with checkambig=True at writing, to
        # avoid file stat ambiguity.
        self._checkambig = checkambig
        self._mmaplargeindex = mmaplargeindex
        self._censorable = censorable
        # 3-tuple of (node, rev, text) for a raw revision.
        self._revisioncache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, '')
        # How much data to read and cache into the raw revlog data cache.
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._deltabothparents = True
        self.index = []
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        self._nodecache = {nullid: nullrev}
        self._nodepos = None
        self._compengine = 'zlib'
        self._maxdeltachainspan = -1
        self._withsparseread = False
        self._sparserevlog = False
        self._srdensitythreshold = 0.50
        self._srmingapsize = 262144

        # Make copy of flag processors so each revlog instance can support
        # custom flags.
        self._flagprocessors = dict(_flagprocessors)

        # 2-tuple of file handles being used for active writing.
        self._writinghandles = None

        self._loadindex()

    def _loadindex(self):
        mmapindexthreshold = None
        opts = getattr(self.opener, 'options', {}) or {}

        if 'revlogv2' in opts:
            newversionflags = REVLOGV2 | FLAG_INLINE_DATA
        elif 'revlogv1' in opts:
            newversionflags = REVLOGV1 | FLAG_INLINE_DATA
            if 'generaldelta' in opts:
                newversionflags |= FLAG_GENERALDELTA
        elif getattr(self.opener, 'options', None) is not None:
            # If options are provided but no 'revlog*' key is found, the
            # repository would have no 'requires' file in it, which means
            # we have to stick to the old format.
            newversionflags = REVLOGV0
        else:
            newversionflags = REVLOG_DEFAULT_VERSION

        if 'chunkcachesize' in opts:
            self._chunkcachesize = opts['chunkcachesize']
        if 'maxchainlen' in opts:
            self._maxchainlen = opts['maxchainlen']
        if 'deltabothparents' in opts:
            self._deltabothparents = opts['deltabothparents']
        self._lazydeltabase = bool(opts.get('lazydeltabase', False))
        if 'compengine' in opts:
            self._compengine = opts['compengine']
        if 'maxdeltachainspan' in opts:
            self._maxdeltachainspan = opts['maxdeltachainspan']
        if self._mmaplargeindex and 'mmapindexthreshold' in opts:
            mmapindexthreshold = opts['mmapindexthreshold']
        self._sparserevlog = bool(opts.get('sparse-revlog', False))
        withsparseread = bool(opts.get('with-sparse-read', False))
        # sparse-revlog forces sparse-read
        self._withsparseread = self._sparserevlog or withsparseread
        if 'sparse-read-density-threshold' in opts:
            self._srdensitythreshold = opts['sparse-read-density-threshold']
        if 'sparse-read-min-gap-size' in opts:
            self._srmingapsize = opts['sparse-read-min-gap-size']
        if opts.get('enableellipsis'):
            self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor

        # revlog v0 doesn't have flag processors
        for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
            _insertflagprocessor(flag, processor, self._flagprocessors)

        if self._chunkcachesize <= 0:
            raise error.RevlogError(_('revlog chunk cache size %r is not '
                                      'greater than 0') % self._chunkcachesize)
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise error.RevlogError(_('revlog chunk cache size %r is not a '
                                      'power of 2') % self._chunkcachesize)
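        # (x & (x - 1)) == 0 is the usual power-of-two test: e.g. 65536
        # passes, while 65535 would trigger the error above.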

        indexdata = ''
        self._initempty = True
        try:
            with self._indexfp() as f:
                if (mmapindexthreshold is not None and
                    self.opener.fstat(f).st_size >= mmapindexthreshold):
                    # TODO: should .close() to release resources without
                    # relying on Python GC
                    indexdata = util.buffer(util.mmapread(f))
                else:
                    indexdata = f.read()
            if len(indexdata) > 0:
                versionflags = versionformat_unpack(indexdata[:4])[0]
                self._initempty = False
            else:
                versionflags = newversionflags
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

            versionflags = newversionflags

        self.version = versionflags

        flags = versionflags & ~0xFFFF
        fmt = versionflags & 0xFFFF

        if fmt == REVLOGV0:
            if flags:
                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
                                          'revlog %s') %
                                        (flags >> 16, fmt, self.indexfile))

            self._inline = False
            self._generaldelta = False

        elif fmt == REVLOGV1:
            if flags & ~REVLOGV1_FLAGS:
                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
                                          'revlog %s') %
                                        (flags >> 16, fmt, self.indexfile))

            self._inline = versionflags & FLAG_INLINE_DATA
            self._generaldelta = versionflags & FLAG_GENERALDELTA

        elif fmt == REVLOGV2:
            if flags & ~REVLOGV2_FLAGS:
                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
                                          'revlog %s') %
                                        (flags >> 16, fmt, self.indexfile))

            self._inline = versionflags & FLAG_INLINE_DATA
            # generaldelta implied by version 2 revlogs.
            self._generaldelta = True

        else:
            raise error.RevlogError(_('unknown version (%d) in revlog %s') %
                                    (fmt, self.indexfile))

        self._storedeltachains = True

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(indexdata, self._inline)
        except (ValueError, IndexError):
            raise error.RevlogError(_("index %s is corrupted") %
                                    self.indexfile)
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = {}
        # revlog header -> revlog compressor
        self._decompressors = {}

    @util.propertycache
    def _compressor(self):
        return util.compengines[self._compengine].revlogcompressor()

    def _indexfp(self, mode='r'):
        """file object for the revlog's index file"""
        args = {r'mode': mode}
        if mode != 'r':
            args[r'checkambig'] = self._checkambig
        if mode == 'w':
            args[r'atomictemp'] = True
        return self.opener(self.indexfile, **args)

    def _datafp(self, mode='r'):
        """file object for the revlog's data file"""
        return self.opener(self.datafile, mode=mode)

    @contextlib.contextmanager
    def _datareadfp(self, existingfp=None):
        """file object suitable to read data"""
        # Use explicit file handle, if given.
        if existingfp is not None:
            yield existingfp

        # Use a file handle being actively used for writes, if available.
        # There is some danger to doing this because reads will seek the
        # file. However, _writeentry() performs a SEEK_END before all writes,
        # so we should be safe.
        elif self._writinghandles:
            if self._inline:
                yield self._writinghandles[0]
            else:
                yield self._writinghandles[1]

        # Otherwise open a new file handle.
        else:
            if self._inline:
                func = self._indexfp
            else:
                func = self._datafp
            with func() as fp:
                yield fp
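    # Typical use is as a context manager (illustrative sketch):
    #
    #     with self._datareadfp() as fp:
    #         fp.seek(start)
    #         data = fp.read(length)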

    def tip(self):
        return self.node(len(self.index) - 1)
    def __contains__(self, rev):
        return 0 <= rev < len(self)
    def __len__(self):
        return len(self.index)
    def __iter__(self):
        return iter(pycompat.xrange(len(self)))
    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        return storageutil.iterrevs(len(self), start=start, stop=stop)

    @util.propertycache
    def nodemap(self):
        if self.index:
            # populate mapping down to the initial node
            node0 = self.index[0][7]  # get around changelog filtering
            self.rev(node0)
        return self._nodecache

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def candelta(self, baserev, rev):
        """whether two revisions (baserev, rev) can be delta-ed or not"""
589 # processor (ex. LFS). This is because such flag processor can alter
594 # processor (ex. LFS). This is because such flag processor can alter
590 # the rawtext content that the delta will be based on, and two clients
595 # the rawtext content that the delta will be based on, and two clients
591 # could have a same revlog node with different flags (i.e. different
596 # could have a same revlog node with different flags (i.e. different
592 # rawtext contents) and the delta could be incompatible.
597 # rawtext contents) and the delta could be incompatible.
593 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
598 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
594 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
599 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
595 return False
600 return False
596 return True
601 return True
597
602
598 def clearcaches(self):
603 def clearcaches(self):
599 self._revisioncache = None
604 self._revisioncache = None
600 self._chainbasecache.clear()
605 self._chainbasecache.clear()
601 self._chunkcache = (0, '')
606 self._chunkcache = (0, '')
602 self._pcache = {}
607 self._pcache = {}
603
608
604 try:
609 try:
605 self._nodecache.clearcaches()
610 self._nodecache.clearcaches()
606 except AttributeError:
611 except AttributeError:
607 self._nodecache = {nullid: nullrev}
612 self._nodecache = {nullid: nullrev}
608 self._nodepos = None
613 self._nodepos = None
609
614
610 def rev(self, node):
615 def rev(self, node):
611 try:
616 try:
612 return self._nodecache[node]
617 return self._nodecache[node]
613 except TypeError:
618 except TypeError:
614 raise
619 raise
615 except error.RevlogError:
620 except error.RevlogError:
616 # parsers.c radix tree lookup failed
621 # parsers.c radix tree lookup failed
617 if node == wdirid or node in wdirfilenodeids:
622 if node == wdirid or node in wdirfilenodeids:
618 raise error.WdirUnsupported
623 raise error.WdirUnsupported
619 raise error.LookupError(node, self.indexfile, _('no node'))
624 raise error.LookupError(node, self.indexfile, _('no node'))
620 except KeyError:
625 except KeyError:
621 # pure python cache lookup failed
626 # pure python cache lookup failed
622 n = self._nodecache
627 n = self._nodecache
623 i = self.index
628 i = self.index
624 p = self._nodepos
629 p = self._nodepos
625 if p is None:
630 if p is None:
626 p = len(i) - 1
631 p = len(i) - 1
627 else:
632 else:
628 assert p < len(i)
633 assert p < len(i)
629 for r in pycompat.xrange(p, -1, -1):
634 for r in pycompat.xrange(p, -1, -1):
630 v = i[r][7]
635 v = i[r][7]
631 n[v] = r
636 n[v] = r
632 if v == node:
637 if v == node:
633 self._nodepos = r - 1
638 self._nodepos = r - 1
634 return r
639 return r
635 if node == wdirid or node in wdirfilenodeids:
640 if node == wdirid or node in wdirfilenodeids:
636 raise error.WdirUnsupported
641 raise error.WdirUnsupported
637 raise error.LookupError(node, self.indexfile, _('no node'))
642 raise error.LookupError(node, self.indexfile, _('no node'))
638
643
639 # Accessors for index entries.
644 # Accessors for index entries.
640
645
641 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
646 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
642 # are flags.
647 # are flags.
643 def start(self, rev):
648 def start(self, rev):
644 return int(self.index[rev][0] >> 16)
649 return int(self.index[rev][0] >> 16)
645
650
646 def flags(self, rev):
651 def flags(self, rev):
647 return self.index[rev][0] & 0xFFFF
652 return self.index[rev][0] & 0xFFFF
648
653
649 def length(self, rev):
654 def length(self, rev):
650 return self.index[rev][1]
655 return self.index[rev][1]
651
656
652 def rawsize(self, rev):
657 def rawsize(self, rev):
653 """return the length of the uncompressed text for a given revision"""
658 """return the length of the uncompressed text for a given revision"""
654 l = self.index[rev][2]
659 l = self.index[rev][2]
655 if l >= 0:
660 if l >= 0:
656 return l
661 return l
657
662
658 t = self.revision(rev, raw=True)
663 t = self.revision(rev, raw=True)
659 return len(t)
664 return len(t)
660
665
661 def size(self, rev):
666 def size(self, rev):
662 """length of non-raw text (processed by a "read" flag processor)"""
667 """length of non-raw text (processed by a "read" flag processor)"""
663 # fast path: if no "read" flag processor could change the content,
668 # fast path: if no "read" flag processor could change the content,
664 # size is rawsize. note: ELLIPSIS is known to not change the content.
669 # size is rawsize. note: ELLIPSIS is known to not change the content.
665 flags = self.flags(rev)
670 flags = self.flags(rev)
666 if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
671 if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
667 return self.rawsize(rev)
672 return self.rawsize(rev)
668
673
669 return len(self.revision(rev, raw=False))
674 return len(self.revision(rev, raw=False))
670
675
671 def chainbase(self, rev):
676 def chainbase(self, rev):
672 base = self._chainbasecache.get(rev)
677 base = self._chainbasecache.get(rev)
673 if base is not None:
678 if base is not None:
674 return base
679 return base
675
680
676 index = self.index
681 index = self.index
677 iterrev = rev
682 iterrev = rev
678 base = index[iterrev][3]
683 base = index[iterrev][3]
679 while base != iterrev:
684 while base != iterrev:
680 iterrev = base
685 iterrev = base
681 base = index[iterrev][3]
686 base = index[iterrev][3]
682
687
683 self._chainbasecache[rev] = base
688 self._chainbasecache[rev] = base
684 return base
689 return base
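    # An entry whose base (entry[3]) equals its own rev stores a full
    # snapshot rather than a delta; that is what terminates the walk above.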

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            entry = self.index[rev]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

        return entry[5], entry[6]

    # fast parentrevs(rev) where rev isn't filtered
    _uncheckedparentrevs = parentrevs

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7]  # map revisions to nodes inline

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped
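    # Example (illustrative): a returned chain [2, 5, 9] means rev 9 is
    # rebuilt by taking the full text stored at rev 2 and applying the deltas
    # stored at revs 5 and then 9. Without generaldelta, deltas always chain
    # against the previous rev, so chains are contiguous runs.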

    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)
        # and we're sure ancestors aren't filtered as well

        if rustext is not None:
            lazyancestors = rustext.ancestor.LazyAncestors
            arg = self.index
        elif util.safehasattr(parsers, 'rustlazyancestors'):
            lazyancestors = ancestor.rustlazyancestors
            arg = self.index
        else:
            lazyancestors = ancestor.lazyancestors
            arg = self._uncheckedparentrevs
        return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)

    def descendants(self, revs):
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if not r in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        if rustext is not None:
            return rustext.ancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
920 if common is None:
923 if common is None:
921 common = [nullrev]
924 common = [nullrev]
922 if heads is None:
925 if heads is None:
923 heads = self.headrevs()
926 heads = self.headrevs()
924
927
925 inc = self.incrementalmissingrevs(common=common)
928 inc = self.incrementalmissingrevs(common=common)
926 return inc.missingancestors(heads)
929 return inc.missingancestors(heads)
927
930
928 def findmissing(self, common=None, heads=None):
931 def findmissing(self, common=None, heads=None):
929 """Return the ancestors of heads that are not ancestors of common.
932 """Return the ancestors of heads that are not ancestors of common.
930
933
931 More specifically, return a list of nodes N such that every N
934 More specifically, return a list of nodes N such that every N
932 satisfies the following constraints:
935 satisfies the following constraints:
933
936
934 1. N is an ancestor of some node in 'heads'
937 1. N is an ancestor of some node in 'heads'
935 2. N is not an ancestor of any node in 'common'
938 2. N is not an ancestor of any node in 'common'
936
939
937 The list is sorted by revision number, meaning it is
940 The list is sorted by revision number, meaning it is
938 topologically sorted.
941 topologically sorted.
939
942
940 'heads' and 'common' are both lists of node IDs. If heads is
943 'heads' and 'common' are both lists of node IDs. If heads is
941 not supplied, uses all of the revlog's heads. If common is not
944 not supplied, uses all of the revlog's heads. If common is not
942 supplied, uses nullid."""
945 supplied, uses nullid."""
943 if common is None:
946 if common is None:
944 common = [nullid]
947 common = [nullid]
945 if heads is None:
948 if heads is None:
946 heads = self.heads()
949 heads = self.heads()
947
950
948 common = [self.rev(n) for n in common]
951 common = [self.rev(n) for n in common]
949 heads = [self.rev(n) for n in heads]
952 heads = [self.rev(n) for n in heads]
950
953
951 inc = self.incrementalmissingrevs(common=common)
954 inc = self.incrementalmissingrevs(common=common)
952 return [self.node(r) for r in inc.missingancestors(heads)]
955 return [self.node(r) for r in inc.missingancestors(heads)]
953
956
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev: # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the descendants list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def headrevs(self, revs=None):
        if revs is None:
            try:
                return self.index.headrevs()
            except AttributeError:
                return self._headrevs()
        return dagop.headrevs(revs, self.parentrevs)

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so nobody is a head at the start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1 # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
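        # The array has count + 1 slots on purpose: a nullrev (-1) parent
        # indexes the extra final slot via negative indexing, so clearing it
        # never clobbers the entry of a real revision.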
        return [r for r, val in enumerate(ishead) if val]

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullrev
        else:
            start = self.rev(start)

        stoprevs = set(self.rev(n) for n in stop or [])

        revs = dagop.headrevssubset(self.revs, self.parentrevs, startrev=start,
                                    stoprevs=stoprevs)

        return [self.node(rev) for rev in revs]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError): # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        A revision is considered an ancestor of itself."""
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    def isancestorrev(self, a, b):
        """return True if revision a is an ancestor of revision b

        A revision is considered an ancestor of itself.

        The implementation of this is trivial but the use of
        commonancestorsheads is not."""
        if a == nullrev:
            return True
        elif a == b:
            return True
        elif a > b:
            return False
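        # Revision numbers are topologically ordered, so an ancestor always
        # has a smaller revision number than its descendants; a > b can
        # therefore be rejected without walking the graph.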
        return a in self._commonancestorsheads(a, b)

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except error.LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if "%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, error.LookupError):
                pass

    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids here, as they should always be
        # full hashes
        maybewdir = wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise error.RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise error.AmbiguousPrefixLookupError(
                    id, self.indexfile, _('ambiguous identifier'))
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if nullhex.startswith(id):
                    nl.append(nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.indexfile, _('ambiguous identifier'))
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise error.LookupError(id, self.indexfile, _('no match found'))

    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""
        def isvalid(prefix):
            try:
                node = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if node is None:
                raise error.LookupError(node, self.indexfile, _('no node'))
            return True

        def maybewdir(prefix):
            return all(c == 'f' for c in prefix)

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != wdirid:
                    raise error.LookupError(node, self.indexfile, _('no node'))
            except AttributeError:
                # Fall through to pure code
                pass

        if node == wdirid:
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, 41):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        p1, p2 = self.parents(node)
        return storageutil.hashrevisionsha1(text, p1, p2) != node

    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data
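        # A segment contiguous with the cached window extends it (up to
        # _chunksize); any non-contiguous segment simply replaces the window.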
    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.

        Raises if the requested number of bytes could not be read.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
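        # The bit masks round realoffset down and the end of the window up to
        # multiples of cachesize, which is why _chunkcachesize must be a
        # power of two for this arithmetic to work.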
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)

        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            startoffset = offset - realoffset
            if len(d) - startoffset < length:
                raise error.RevlogError(
                    _('partial read of revlog %s; expected %d bytes from '
                      'offset %d, got %d') %
                    (self.indexfile if self._inline else self.datafile,
                     length, realoffset, len(d) - startoffset))

            return util.buffer(d, startoffset, length)

        if len(d) < length:
            raise error.RevlogError(
                _('partial read of revlog %s; expected %d bytes from offset '
                  '%d, got %d') %
                (self.indexfile if self._inline else self.datafile,
                 length, offset, len(d)))

        return d

    def _getsegment(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._readsegment(offset, length, df=df)

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
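            # An inline revlog interleaves each revision's data with its
            # index entry, so the stored data offsets must be shifted past
            # the rev + 1 index entries preceding each revision's data.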
        length = end - start

        return start, self._getsegment(start, length, df=df)

    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])

    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = deltautil.slicechunk(self, revs,
                                                targetsize=targetsize)

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
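                # chunkstart is an absolute revlog offset; subtracting the
                # base offset of the segment fetched above yields the chunk's
                # position inside that single buffer.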

        return l

    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

    def issnapshot(self, rev):
        """tells whether rev is a snapshot
        """
        if not self._sparserevlog:
            return self.deltaparent(rev) == nullrev
        elif util.safehasattr(self.index, 'issnapshot'):
            # directly assign the method to cache the testing and access
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        base = entry[3]
        if base == rev:
            return True
        if base == nullrev:
            return True
        p1 = entry[5]
        p2 = entry[6]
        if base == p1 or base == p2:
            return False
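        # A delta against one of the revision's own parents is an ordinary
        # delta, not a snapshot; anything else is an intermediate snapshot
        # exactly when its delta base is itself a snapshot.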
        return self.issnapshot(base)

    def snapshotdepth(self, rev):
        """number of snapshots in the chain before this one"""
        if not self.issnapshot(rev):
            raise error.ProgrammingError('revision %d not a snapshot' % rev)
        return len(self._deltachain(rev)[0]) - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.revision(rev1, raw=True),
                              self.revision(rev2, raw=True))

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        cachedrev = None
        flags = None
        rawtext = None
        if node == nullid:
            return ""
        if self._revisioncache:
            if self._revisioncache[0] == node:
                # _cache only stores rawtext
                if raw:
                    return self._revisioncache[2]
                # duplicated, but good for perf
                if rev is None:
                    rev = self.rev(node)
                if flags is None:
                    flags = self.flags(rev)
                # no extra flags set, no flag processor runs, text = rawtext
                if flags == REVIDX_DEFAULT_FLAGS:
                    return self._revisioncache[2]
                # rawtext is reusable. need to run flag processor
                rawtext = self._revisioncache[2]

            cachedrev = self._revisioncache[1]

        # look up what we need to read
        if rawtext is None:
            if rev is None:
                rev = self.rev(node)

            chain, stopped = self._deltachain(rev, stoprev=cachedrev)
            if stopped:
                rawtext = self._revisioncache[2]

            # drop cache to save memory
            self._revisioncache = None

            targetsize = None
            rawsize = self.index[rev][2]
            if 0 <= rawsize:
                targetsize = 4 * rawsize
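            # The factor of four is a heuristic: it caps how much compressed
            # data a single sliced read may fetch, relative to the expected
            # size of the restored text.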

            bins = self._chunks(chain, df=_df, targetsize=targetsize)
            if rawtext is None:
                rawtext = bytes(bins[0])
                bins = bins[1:]

            rawtext = mdiff.patches(rawtext, bins)
            self._revisioncache = (node, rev, rawtext)

        if flags is None:
            if rev is None:
                rev = self.rev(node)
            flags = self.flags(rev)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text

    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return storageutil.hashrevisionsha1(text, p1, p2)

    def _processflags(self, text, flags, operation, raw=False):
        """Inspect revision data flags and apply transforms defined by
        registered flag processors.

        ``text`` - the revision data to process
        ``flags`` - the revision flags
        ``operation`` - the operation being performed (read or write)
        ``raw`` - an optional argument describing if the raw transform should be
        applied.

        This method processes the flags in the order (or reverse order if
        ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
        flag processors registered for present flags. The order of flags defined
        in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.

        Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
        processed text and ``validatehash`` is a bool indicating whether the
        returned text should be checked for hash integrity.

        Note: If the ``raw`` argument is set, it has precedence over the
        operation and will only update the value of ``validatehash``.
        """
        # fast path: no flag processors will run
        if flags == 0:
            return text, True
        if operation not in ('read', 'write'):
            raise error.ProgrammingError(_("invalid '%s' operation") %
                                         operation)
        # Check all flags are known.
        if flags & ~REVIDX_KNOWN_FLAGS:
            raise error.RevlogError(_("incompatible revision flag '%#x'") %
                                    (flags & ~REVIDX_KNOWN_FLAGS))
        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply the
            # related operation transform and update result tuple.
            if flag & flags:
                vhash = True

                if flag not in self._flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise error.RevlogError(message)

                processor = self._flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else: # write operation
                        text, vhash = writetransform(self, text)
                    validatehash = validatehash and vhash

        return text, validatehash

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if self._revisioncache and self._revisioncache[0] == node:
                    self._revisioncache = None

                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(_("integrity check failed on %s:%s")
                    % (self.indexfile, pycompat.bytestr(revornode)))
        except error.RevlogError:
            if self._censorable and storageutil.iscensoredtext(text):
                raise error.CensoredNodeError(self.indexfile, node, text)
            raise

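    # For reference, the default node hash verified above is the SHA-1 of
    # the two parent nodeids (sorted) followed by the text, conceptually:
    #
    #   h = hashlib.sha1()
    #   h.update(min(p1, p2))    # 20-byte binary nodeids
    #   h.update(max(p1, p2))
    #   h.update(text)
    #   node = h.digest()
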
    def _enforceinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        if (not self._inline or
            (self.start(tiprev) + self.length(tiprev)) < _maxinline):
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise error.RevlogError(_("%s not found in the transaction")
                                    % self.indexfile)

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(tiprev)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None

        with self._indexfp('r') as ifh, self._datafp('w') as dfh:
            for r in self:
                dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])

        with self._indexfp('w') as fp:
            self.version &= ~FLAG_INLINE_DATA
            self._inline = False
            io = self._io
            for i in self:
                e = io.packentry(self.index[i], self.node, self.version, i)
                fp.write(e)

            # the temp file replaces the real index when we exit the context
            # manager

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

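    # Rough sketch of the two on-disk layouts handled above (offsets are
    # illustrative):
    #
    #   inline:  foo.i = [entry0][data0][entry1][data1]...
    #   split:   foo.i = [entry0][entry1]...    foo.d = [data0][data1]...
    #
    # The conversion streams each revision's data chunk into foo.d, then
    # rewrites the index entries with FLAG_INLINE_DATA cleared.
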
    def _nodeduplicatecallback(self, transaction, node):
        """called when trying to add a node already stored.
        """

    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
                    node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2), however subclasses might
            use a different hashing method (and override checkhash() in that
            case)
        flags - the known flags to set on the revision
        deltacomputer - an optional deltacomputer instance shared between
            multiple calls
        """
        if link == nullrev:
            raise error.RevlogError(_("attempted to add linkrev -1 to %s")
                                    % self.indexfile)

        if flags:
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = self._processflags(text, flags, 'write')

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise error.RevlogError(
                _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
                % (self.indexfile, len(rawtext)))

        node = node or self.hash(rawtext, p1, p2)
        if node in self.nodemap:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
                                   flags, cachedelta=cachedelta,
                                   deltacomputer=deltacomputer)

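    # Hypothetical call site for addrevision() (names are illustrative):
    #
    #   with repo.transaction(b'example') as tr:
    #       node = rl.addrevision(text, tr, linkrev, p1node, p2node)
    #
    # When flags are set, the text first goes through the registered write
    # transforms before the resulting raw form is stored.
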
    def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
                       cachedelta=None, deltacomputer=None):
        """add a raw revision with known flags, node and parents

        useful when reusing a revision not stored in this revlog (e.g.
        received over the wire, or read from an external bundle).
        """
        dfh = None
        if not self._inline:
            dfh = self._datafp("a+")
        ifh = self._indexfp("a+")
        try:
            return self._addrevision(node, rawtext, transaction, link, p1, p2,
                                     flags, cachedelta, ifh, dfh,
                                     deltacomputer=deltacomputer)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def compress(self, data):
        """Generate a possibly-compressed representation of data."""
        if not data:
            return '', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return '', compressed

        if data[0:1] == '\0':
            return '', data
        return 'u', data

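    # Worked example of the return convention (zlib engine, values
    # illustrative):
    #
    #   compress(b'a' * 1000)  -> ('', 'x\x9c...')   # engine header embedded
    #   compress(b'\0binary')  -> ('', '\0binary')   # '\0' stored as-is
    #   compress(b'tiny')      -> ('u', 'tiny')      # incompressible, mark 'u'
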
    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely be at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == 'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        stringutil.forcebytestr(e))
        # '\0' is more common than 'u' so it goes first.
        elif t == '\0':
            return data
        elif t == 'u':
            return util.buffer(data, 1)

        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor()
                self._decompressors[t] = compressor
            except KeyError:
                raise error.RevlogError(_('unknown compression type %r') % t)

        return compressor.decompress(data)

    def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
                     cachedelta, ifh, dfh, alwayscache=False,
                     deltacomputer=None):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        if node == nullid:
            raise error.RevlogError(_("%s: attempt to add null revision") %
                                    self.indexfile)
        if node == wdirid or node in wdirfilenodeids:
            raise error.RevlogError(_("%s: attempt to add wdir revision") %
                                    self.indexfile)

        if self._inline:
            fh = ifh
        else:
            fh = dfh

        btext = [rawtext]

        curr = len(self)
        prev = curr - 1
        offset = self.end(prev)
        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
                                        cachedelta[1])
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            deltacomputer = deltautil.deltacomputer(self)

        revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)

        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

        e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
             deltainfo.base, link, p1r, p2r, node)
        self.index.append(e)
        self.nodemap[node] = curr

        # Reset the pure node cache start lookup offset to account for new
        # revision.
        if self._nodepos is not None:
            self._nodepos = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
                         link, offset)

        rawtext = btext[0]

        if alwayscache and rawtext is None:
            rawtext = deltacomputer.buildtext(revinfo, fh)

        if type(rawtext) == bytes: # only accept immutable objects
            self._revisioncache = (node, curr, rawtext)
        self._chainbasecache[curr] = deltainfo.chainbase
        return node

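    # Shape of the index entry appended above (conceptually, for a v1
    # revlog):
    #
    #   (offset_type(offset, flags),  # offset packed with 16-bit flags
    #    deltainfo.deltalen,          # length of the stored chunk on disk
    #    textlen,                     # uncompressed rawtext length
    #    deltainfo.base,              # rev the delta applies against
    #    link, p1r, p2r,              # linkrev and parent revs
    #    node)                        # 20-byte nodeid
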
    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        ifh.seek(0, os.SEEK_END)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self._enforceinlinesize(transaction, ifh)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        """
        add a delta group

        Given a set of deltas, add them to the revision log. The first delta
        is against its parent, which should be in our log; the rest are
        against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        if self._writinghandles:
            raise error.ProgrammingError('cannot nest addgroup() calls')

        nodes = []

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self._indexfp("a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self._datafp("a+")
        def flush():
            if dfh:
                dfh.flush()
            ifh.flush()

        self._writinghandles = (ifh, dfh)

        try:
            deltacomputer = deltautil.deltacomputer(self)
            # loop through our set of deltas
            for data in deltas:
                node, p1, p2, linknode, deltabase, delta, flags = data
                link = linkmapper(linknode)
                flags = flags or REVIDX_DEFAULT_FLAGS

                nodes.append(node)

                if node in self.nodemap:
                    self._nodeduplicatecallback(transaction, node)
                    # this can happen if two branches make the same change
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise error.LookupError(p, self.indexfile,
                                                _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise error.LookupError(deltabase, self.indexfile,
                                            _('unknown delta base'))

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(self.indexfile,
                                                      self.node(baserev))

                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                # We're only using addgroup() in the context of changegroup
                # generation so the revision data can always be handled as raw
                # by the flagprocessor.
                self._addrevision(node, None, transaction, link,
                                  p1, p2, flags, (baserev, delta),
                                  ifh, dfh,
                                  alwayscache=bool(addrevisioncb),
                                  deltacomputer=deltacomputer)

                if addrevisioncb:
                    addrevisioncb(self, node)

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self._datafp("a+")
                    ifh = self._indexfp("a+")
                    self._writinghandles = (ifh, dfh)
        finally:
            self._writinghandles = None

            if dfh:
                dfh.close()
            ifh.close()

        return nodes

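    # Each item consumed from ``deltas`` above is a 7-tuple:
    #
    #   (node, p1, p2, linknode, deltabase, delta, flags)
    #
    # A hypothetical caller (changegroup application is the real consumer):
    #
    #   def linkmapper(linknode):
    #       return cl.rev(linknode)
    #
    #   added = rl.addgroup(deltas, linkmapper, tr,
    #                       addrevisioncb=lambda rl, node: seen.append(node))
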
    def iscensored(self, rev):
        """Check if a file revision is censored."""
        if not self._censorable:
            return False

        return self.flags(rev) & REVIDX_ISCENSORED

    def _peek_iscensored(self, baserev, delta, flush):
        """Quickly check if a delta produces a censored revision."""
        if not self._censorable:
            return False

        return storageutil.deltaiscensored(delta, baserev, self.rawsize)

    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        return storageutil.resolvestripinfo(minlink, len(self) - 1,
                                            self.headrevs(),
                                            self.linkrev, self.parentrevs)

    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = {}
        self._chunkclear()
        for x in pycompat.xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]
        self._nodepos = None

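    # Worked example: with linkrevs [0, 1, 4, 2, 5], strip(3, tr) truncates
    # at rev 2, the first rev whose linkrev >= 3. Rev 3 (linkrev 2) is also
    # removed by the truncation even though its linkrev survives; the caller
    # is expected to have saved such revisions and to re-add them afterwards.
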
    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, 2)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res

    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
                      assumehaveparentrevisions=False,
                      deltamode=repository.CG_DELTAMODE_STD):
        if nodesorder not in ('nodes', 'storage', 'linear', None):
            raise error.ProgrammingError('unhandled value for nodesorder: %s' %
                                         nodesorder)

        if nodesorder is None and not self._generaldelta:
            nodesorder = 'storage'

        if (not self._storedeltachains and
                deltamode != repository.CG_DELTAMODE_PREV):
            deltamode = repository.CG_DELTAMODE_FULL

        return storageutil.emitrevisions(
            self, nodes, nodesorder, revlogrevisiondelta,
            deltaparentfn=self.deltaparent,
            candeltafn=self.candelta,
            rawsizefn=self.rawsize,
            revdifffn=self.revdiff,
            flagsfn=self.flags,
            deltamode=deltamode,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions)

    DELTAREUSEALWAYS = 'always'
    DELTAREUSESAMEREVS = 'samerevs'
    DELTAREUSENEVER = 'never'

    DELTAREUSEFULLADD = 'fulladd'

    DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}

    def clone(self, tr, destrevlog, addrevisioncb=None,
              deltareuse=DELTAREUSESAMEREVS, forcedeltabothparents=None):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
           Deltas will always be reused (if possible), even if the destination
           revlog would not select the same revisions for the delta. This is
           the fastest mode of operation.
        DELTAREUSESAMEREVS
           Deltas will be reused if the destination revlog would pick the same
           revisions for the delta. This mode strikes a balance between speed
           and optimization.
        DELTAREUSENEVER
           Deltas will never be reused. This is the slowest mode of execution.
           This mode can be used to recompute deltas (e.g. if the diff/delta
           algorithm changes).

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force computing deltas against both
        parents for merges. By default, the destination revlog's existing
        setting is kept.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)

        if len(destrevlog):
            raise ValueError(_('destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_('source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_('destination revlog has filtered revisions'))

        # lazydeltabase controls whether to reuse a cached delta, if possible.
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
                                                self.DELTAREUSESAMEREVS)

            deltacomputer = deltautil.deltacomputer(destrevlog)
            index = self.index
            for rev in self:
                entry = index[rev]

                # Some classes override linkrev to take filtered revs into
                # account. Use raw entry from index.
                flags = entry[0] & 0xffff
                linkrev = entry[4]
                p1 = index[entry[5]][7]
                p2 = index[entry[6]][7]
                node = entry[7]

                # (Possibly) reuse the delta from the revlog if allowed and
                # the revlog chunk is a delta.
                cachedelta = None
                rawtext = None
                if populatecachedelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                if not cachedelta:
                    rawtext = self.revision(rev, raw=True)

                if deltareuse == self.DELTAREUSEFULLADD:
                    destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
                                           cachedelta=cachedelta,
                                           node=node, flags=flags,
                                           deltacomputer=deltacomputer)
                else:
                    ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
                                            checkambig=False)
                    dfh = None
                    if not destrevlog._inline:
                        dfh = destrevlog.opener(destrevlog.datafile, 'a+')
                    try:
                        destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
                                                p2, flags, cachedelta, ifh, dfh,
                                                deltacomputer=deltacomputer)
                    finally:
                        if dfh:
                            dfh.close()
                        ifh.close()

                if addrevisioncb:
                    addrevisioncb(self, rev, node)
        finally:
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd

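    # Hypothetical use, e.g. while upgrading a repository's storage format:
    #
    #   with repo.transaction(b'upgrade') as tr:
    #       srcrevlog.clone(tr, dstrevlog,
    #                       deltareuse=srcrevlog.DELTAREUSENEVER)
    #
    # DELTAREUSENEVER forces every delta to be recomputed under the
    # destination revlog's own policy, at the cost of a much slower copy.
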
    def censorrevision(self, tr, censornode, tombstone=b''):
        if (self.version & 0xFFFF) == REVLOGV0:
            raise error.RevlogError(_('cannot censor with version %d revlogs') %
                                    self.version)

        censorrev = self.rev(censornode)
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        if len(tombstone) > self.rawsize(censorrev):
            raise error.Abort(_('censor tombstone must be no longer than '
                                'censored data'))

        # Rewriting the revlog in place is hard. Our strategy for censoring is
        # to create a new revlog, copy all revisions to it, then replace the
        # revlogs on transaction close.

        newindexfile = self.indexfile + b'.tmpcensored'
        newdatafile = self.datafile + b'.tmpcensored'

        # This is a bit dangerous. We could easily have a mismatch of state.
        newrl = revlog(self.opener, newindexfile, newdatafile,
                       censorable=True)
        newrl.version = self.version
        newrl._generaldelta = self._generaldelta
        newrl._io = self._io

        for rev in self.revs():
            node = self.node(rev)
            p1, p2 = self.parents(node)

            if rev == censorrev:
                newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev),
                                     p1, p2, censornode, REVIDX_ISCENSORED)

                if newrl.deltaparent(rev) != nullrev:
                    raise error.Abort(_('censored revision stored as delta; '
                                        'cannot censor'),
                                      hint=_('censoring of revlogs is not '
                                             'fully implemented; please report '
                                             'this bug'))
                continue

            if self.iscensored(rev):
                if self.deltaparent(rev) != nullrev:
                    raise error.Abort(_('cannot censor due to censored '
                                        'revision having delta stored'))
                rawtext = self._chunk(rev)
            else:
                rawtext = self.revision(rev, raw=True)

            newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node,
                                 self.flags(rev))

        tr.addbackup(self.indexfile, location='store')
        if not self._inline:
            tr.addbackup(self.datafile, location='store')

        self.opener.rename(newrl.indexfile, self.indexfile)
        if not self._inline:
            self.opener.rename(newrl.datafile, self.datafile)

        self.clearcaches()
        self._loadindex()

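    # Hypothetical call site (the censor extension is the real consumer):
    #
    #   with repo.transaction(b'censor') as tr:
    #       rl.censorrevision(tr, badnode, tombstone=b'reason for removal')
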
    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.
        """
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_('data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_('index contains %d extra bytes') % di)

        version = self.version & 0xFFFF

        # The verifier tells us what version revlog we should be.
        if version != state['expectedversion']:
            yield revlogproblem(
                warning=_("warning: '%s' uses revlog format %d; expected %d") %
                        (self.indexfile, version, state['expectedversion']))

        state['skipread'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta: file content starts with b'\1\n', the metadata
            #         header defined in filelog.py, but without a rename
            #   ext: content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            #  -------------------------------------------------------
            #  flags()              | 0      | 0      | 0     | not 0
            #  renamed()            | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "revision(rev, raw=True)". "text"
            # mentioned below is "revision(rev, raw=False)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see revlog.addflagprocessor).
            #
            #              | common | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()    | L1     | L1     | L1    | L1
            # size()       | L1     | L2-LM  | L1(*) | L1 (?)
            # len(rawtext) | L2     | L2     | L2    | L2
            # len(text)    | L2     | L2     | L2    | L3
            # len(read())  | L2     | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

            try:
                skipflags = state.get('skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                if skipflags:
                    state['skipread'].add(node)
                else:
                    # Side-effect: read content and verify hash.
                    self.revision(node)

                l1 = self.rawsize(rev)
                l2 = len(self.revision(node, raw=True))

                if l1 != l2:
                    yield revlogproblem(
                        error=_('unpacked size is %d, %d expected') % (l2, l1),
                        node=node)

            except error.CensoredNodeError:
                if state['erroroncensored']:
                    yield revlogproblem(error=_('censored file data'),
                                        node=node)
                state['skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_('unpacking %s: %s') % (short(node),
                                                   stringutil.forcebytestr(e)),
                    node=node)
                state['skipread'].add(node)

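    # A consumer sketch (``state`` keys mirror those read above; values are
    # illustrative):
    #
    #   state = {'expectedversion': 1, 'erroroncensored': True}
    #   for problem in rl.verifyintegrity(state):
    #       ui.warn(problem.error or problem.warning)
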
    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        d = {}

        if exclusivefiles:
            d['exclusivefiles'] = [(self.opener, self.indexfile)]
            if not self._inline:
                d['exclusivefiles'].append((self.opener, self.datafile))

        if sharedfiles:
            d['sharedfiles'] = []

        if revisionscount:
            d['revisionscount'] = len(self)

        if trackedsize:
            d['trackedsize'] = sum(map(self.rawsize, iter(self)))

        if storedsize:
            d['storedsize'] = sum(self.opener.stat(path).st_size
                                  for path in self.files())

        return d
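
    # A rough usage sketch (hypothetical caller; 'rl' stands for an already
    # opened revlog instance). Only the requested keys appear in the result:
    #
    #   info = rl.storageinfo(revisionscount=True, trackedsize=True)
    #   info['revisionscount']   # number of revisions in this revlog
    #   info['trackedsize']      # sum of rawsize() over all revisions
    #   'storedsize' in info     # False: it was not requested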
@@ -1,355 +1,356
# setdiscovery.py - improved discovery of common nodeset for mercurial
#
# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
# and Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
8 """
8 """
9 Algorithm works in the following way. You have two repository: local and
9 Algorithm works in the following way. You have two repository: local and
10 remote. They both contains a DAG of changelists.
10 remote. They both contains a DAG of changelists.
11
11
12 The goal of the discovery protocol is to find one set of node *common*,
12 The goal of the discovery protocol is to find one set of node *common*,
13 the set of nodes shared by local and remote.
13 the set of nodes shared by local and remote.
14
14
15 One of the issue with the original protocol was latency, it could
15 One of the issue with the original protocol was latency, it could
16 potentially require lots of roundtrips to discover that the local repo was a
16 potentially require lots of roundtrips to discover that the local repo was a
17 subset of remote (which is a very common case, you usually have few changes
17 subset of remote (which is a very common case, you usually have few changes
18 compared to upstream, while upstream probably had lots of development).
18 compared to upstream, while upstream probably had lots of development).
19
19
20 The new protocol only requires one interface for the remote repo: `known()`,
20 The new protocol only requires one interface for the remote repo: `known()`,
21 which given a set of changelists tells you if they are present in the DAG.
21 which given a set of changelists tells you if they are present in the DAG.
22
22
23 The algorithm then works as follow:
23 The algorithm then works as follow:
24
24
25 - We will be using three sets, `common`, `missing`, `unknown`. Originally
25 - We will be using three sets, `common`, `missing`, `unknown`. Originally
26 all nodes are in `unknown`.
26 all nodes are in `unknown`.
27 - Take a sample from `unknown`, call `remote.known(sample)`
27 - Take a sample from `unknown`, call `remote.known(sample)`
28 - For each node that remote knows, move it and all its ancestors to `common`
28 - For each node that remote knows, move it and all its ancestors to `common`
29 - For each node that remote doesn't know, move it and all its descendants
29 - For each node that remote doesn't know, move it and all its descendants
30 to `missing`
30 to `missing`
31 - Iterate until `unknown` is empty
31 - Iterate until `unknown` is empty
32
32
33 There are a couple optimizations, first is instead of starting with a random
33 There are a couple optimizations, first is instead of starting with a random
34 sample of missing, start by sending all heads, in the case where the local
34 sample of missing, start by sending all heads, in the case where the local
35 repo is a subset, you computed the answer in one round trip.
35 repo is a subset, you computed the answer in one round trip.
36
36
37 Then you can do something similar to the bisecting strategy used when
37 Then you can do something similar to the bisecting strategy used when
38 finding faulty changesets. Instead of random samples, you can try picking
38 finding faulty changesets. Instead of random samples, you can try picking
39 nodes that will maximize the number of nodes that will be
39 nodes that will maximize the number of nodes that will be
40 classified with it (since all ancestors or descendants will be marked as well).
40 classified with it (since all ancestors or descendants will be marked as well).
41 """
41 """

from __future__ import absolute_import

import collections
import random

from .i18n import _
from .node import (
    nullid,
    nullrev,
)
from . import (
    error,
    util,
)

def _updatesample(revs, heads, sample, parentfn, quicksamplesize=0):
    """update an existing sample to match the expected size

    The sample is updated with revs exponentially distant from each head of the
    <revs> set. (H~1, H~2, H~4, H~8, etc).

    If a target size is specified, the sampling will stop once this size is
    reached. Otherwise sampling will happen until roots of the <revs> set are
    reached.

    :revs:  set of revs we want to discover (if None, assume the whole dag)
    :heads: set of DAG head revs
    :sample: a sample to update
    :parentfn: a callable to resolve parents for a revision
    :quicksamplesize: optional target size of the sample"""
    dist = {}
    visit = collections.deque(heads)
    seen = set()
    factor = 1
    while visit:
        curr = visit.popleft()
        if curr in seen:
            continue
        d = dist.setdefault(curr, 1)
        # grow the sampling distance so that only revs whose distance from
        # a head is a power of two (1, 2, 4, 8, ...) enter the sample
        if d > factor:
            factor *= 2
        if d == factor:
            sample.add(curr)
            if quicksamplesize and (len(sample) >= quicksamplesize):
                return
        seen.add(curr)

        for p in parentfn(curr):
            if p != nullrev and (not revs or p in revs):
                dist.setdefault(p, d + 1)
                visit.append(p)

def _takequicksample(repo, headrevs, revs, size):
    """takes a quick sample of size <size>

    It is meant for initial sampling and focuses on querying heads and close
    ancestors of heads.

    :repo: a local repository object
    :headrevs: set of head revisions in local DAG to consider
    :revs: set of revs to discover
    :size: the maximum size of the sample"""
    if len(revs) <= size:
        return list(revs)
    sample = set(repo.revs('heads(%ld)', revs))

    if len(sample) >= size:
        return _limitsample(sample, size)

    _updatesample(None, headrevs, sample, repo.changelog.parentrevs,
                  quicksamplesize=size)
    return sample

def _takefullsample(repo, headrevs, revs, size):
    if len(revs) <= size:
        return list(revs)
    sample = set(repo.revs('heads(%ld)', revs))

    # update from heads
    revsheads = set(repo.revs('heads(%ld)', revs))
    _updatesample(revs, revsheads, sample, repo.changelog.parentrevs)

    # update from roots
    revsroots = set(repo.revs('roots(%ld)', revs))

    # _updatesample() essentially does iteration over revisions to look up
    # their children. This lookup is expensive and doing it in a loop is
    # quadratic. We precompute the children for all relevant revisions and
    # make the lookup in _updatesample() a simple dict lookup.
    #
    # Because this function can be called multiple times during discovery, we
    # may still perform redundant work and there is room to optimize this by
    # keeping a persistent cache of children across invocations.
    children = {}

    parentrevs = repo.changelog.parentrevs
    for rev in repo.changelog.revs(start=min(revsroots)):
        # Always ensure revision has an entry so we don't need to worry about
        # missing keys.
        children.setdefault(rev, [])

        for prev in parentrevs(rev):
            if prev == nullrev:
                continue

            children.setdefault(prev, []).append(rev)

    _updatesample(revs, revsroots, sample, children.__getitem__)
    assert sample
    sample = _limitsample(sample, size)
    if len(sample) < size:
        more = size - len(sample)
        sample.update(random.sample(list(revs - sample), more))
    return sample

def _limitsample(sample, desiredlen):
    """return a random subset of sample of at most desiredlen items"""
    if len(sample) > desiredlen:
        sample = set(random.sample(sample, desiredlen))
    return sample

class partialdiscovery(object):
    """an object representing ongoing discovery

    Fed with data from the remote repository, this object keeps track of the
    current set of changesets in various states:

    - common: revs also known remotely
    - undecided: revs we don't have information on yet
    - missing: revs missing remotely
    (all tracked revisions are known locally)
    """

    def __init__(self, repo, targetheads):
        self._repo = repo
        self._targetheads = targetheads
        self._common = repo.changelog.incrementalmissingrevs()
        self._undecided = None
        self.missing = set()

    def addcommons(self, commons):
        """register nodes known as common"""
        self._common.addbases(commons)
        if self._undecided is not None:
            self._common.removeancestorsfrom(self._undecided)

    def addmissings(self, missings):
        """register some nodes as missing"""
        newmissing = self._repo.revs('%ld::%ld', missings, self.undecided)
        if newmissing:
            self.missing.update(newmissing)
            self.undecided.difference_update(newmissing)

    def addinfo(self, sample):
        """consume an iterable of (rev, known) tuples"""
        common = set()
        missing = set()
        for rev, known in sample:
            if known:
                common.add(rev)
            else:
                missing.add(rev)
        if common:
            self.addcommons(common)
        if missing:
            self.addmissings(missing)

    def hasinfo(self):
        """return True if we have any clue about the remote state"""
        return self._common.hasbases()

    def iscomplete(self):
        """True if all the necessary data have been gathered"""
        return self._undecided is not None and not self._undecided

    @property
    def undecided(self):
        if self._undecided is not None:
            return self._undecided
        self._undecided = set(self._common.missingancestors(self._targetheads))
        return self._undecided

    def commonheads(self):
        """the heads of the known common set"""
        # heads(common) == heads(common.bases) since common represents
        # common.bases and all its ancestors
        return self._common.basesheads()

def findcommonheads(ui, local, remote,
                    initialsamplesize=100,
                    fullsamplesize=200,
                    abortwhenunrelated=True,
                    ancestorsof=None):
    '''Return a tuple (common, anyincoming, remoteheads) used to identify
    missing nodes from or in remote.
    '''
    start = util.timer()

    roundtrips = 0
    cl = local.changelog
    clnode = cl.node
    clrev = cl.rev

    if ancestorsof is not None:
        ownheads = [clrev(n) for n in ancestorsof]
    else:
        ownheads = [rev for rev in cl.headrevs() if rev != nullrev]

    # early exit if we know all the specified remote heads already
    ui.debug("query 1; heads\n")
    roundtrips += 1
    sample = _limitsample(ownheads, initialsamplesize)
    # indices between sample and externalized version must match
    sample = list(sample)

    with remote.commandexecutor() as e:
        fheads = e.callcommand('heads', {})
        fknown = e.callcommand('known', {
            'nodes': [clnode(r) for r in sample],
        })

    srvheadhashes, yesno = fheads.result(), fknown.result()

    if cl.tip() == nullid:
        if srvheadhashes != [nullid]:
            return [nullid], True, srvheadhashes
        return [nullid], False, []

    # start actual discovery (we note this before the next "if" for
    # compatibility reasons)
    ui.status(_("searching for changes\n"))

    srvheads = []
    for node in srvheadhashes:
        if node == nullid:
            continue

        try:
            srvheads.append(clrev(node))
        # Catches unknown and filtered nodes.
        except error.LookupError:
            continue

    if len(srvheads) == len(srvheadhashes):
        ui.debug("all remote heads known locally\n")
        return srvheadhashes, False, srvheadhashes

    if len(sample) == len(ownheads) and all(yesno):
        ui.note(_("all local heads known remotely\n"))
        ownheadhashes = [clnode(r) for r in ownheads]
        return ownheadhashes, True, srvheadhashes

    # full blown discovery

    disco = partialdiscovery(local, ownheads)
    # treat remote heads (and maybe own heads) as a first implicit sample
    # response
    disco.addcommons(srvheads)
    disco.addinfo(zip(sample, yesno))

    full = False
    progress = ui.makeprogress(_('searching'), unit=_('queries'))
    while not disco.iscomplete():

        if full or disco.hasinfo():
            if full:
                ui.note(_("sampling from both directions\n"))
            else:
                ui.debug("taking initial sample\n")
            samplefunc = _takefullsample
            targetsize = fullsamplesize
        else:
            # use even cheaper initial sample
            ui.debug("taking quick initial sample\n")
            samplefunc = _takequicksample
            targetsize = initialsamplesize
        sample = samplefunc(local, ownheads, disco.undecided, targetsize)

        roundtrips += 1
        progress.update(roundtrips)
        ui.debug("query %i; still undecided: %i, sample size is: %i\n"
                 % (roundtrips, len(disco.undecided), len(sample)))
        # indices between sample and externalized version must match
        sample = list(sample)

        with remote.commandexecutor() as e:
            yesno = e.callcommand('known', {
                'nodes': [clnode(r) for r in sample],
            }).result()

        full = True

        disco.addinfo(zip(sample, yesno))

    result = disco.commonheads()
    elapsed = util.timer() - start
    progress.complete()
    ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
    msg = ('found %d common and %d unknown server heads,'
           ' %d roundtrips in %.4fs\n')
    missing = set(result) - set(srvheads)
    ui.log('discovery', msg, len(result), len(missing), roundtrips,
           elapsed)

    if not result and srvheadhashes != [nullid]:
        if abortwhenunrelated:
            raise error.Abort(_("repository is unrelated"))
        else:
            ui.warn(_("warning: repository is unrelated\n"))
        return ({nullid}, True, srvheadhashes,)

    anyincoming = (srvheadhashes != [nullid])
    result = {clnode(r) for r in result}
    return result, anyincoming, srvheadhashes
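
As a rough illustration of the classification logic described in the module
docstring above, here is a standalone toy sketch (everything in it, the DAG,
remotehas, and both helpers, is invented for illustration; it is not
Mercurial API):

    # Toy DAG: 0 <- 1 <- 2 <- 3 (linear); the "remote" only has revs 0 and 1.
    parents = {0: [], 1: [0], 2: [1], 3: [2]}
    children = {0: [1], 1: [2], 2: [3], 3: []}
    remotehas = {0, 1}

    def ancestors(rev):
        out, stack = set(), [rev]
        while stack:
            r = stack.pop()
            if r not in out:
                out.add(r)
                stack.extend(parents[r])
        return out

    def descendants(rev):
        out, stack = set(), [rev]
        while stack:
            r = stack.pop()
            if r not in out:
                out.add(r)
                stack.extend(children[r])
        return out

    common, missing, undecided = set(), set(), {0, 1, 2, 3}
    while undecided:
        rev = max(undecided)          # stand-in for real sampling
        if rev in remotehas:          # stand-in for remote.known()
            moved = ancestors(rev)    # known: rev and ancestors are common
            common |= moved
        else:
            moved = descendants(rev)  # unknown: rev and descendants missing
            missing |= moved
        undecided -= moved

    assert common == {0, 1} and missing == {2, 3}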
@@ -1,2077 +1,2110
# ui.py - user interface bits for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import contextlib
import errno
import getpass
import inspect
import os
import re
import signal
import socket
import subprocess
import sys
import traceback

from .i18n import _
from .node import hex

from . import (
    color,
    config,
    configitems,
    encoding,
    error,
    formatter,
    loggingutil,
    progress,
    pycompat,
    rcutil,
    scmutil,
    util,
)
from .utils import (
    dateutil,
    procutil,
    stringutil,
)

urlreq = util.urlreq

# for use with str.translate(None, _keepalnum), to keep just alphanumerics
_keepalnum = ''.join(c for c in map(pycompat.bytechr, range(256))
                     if not c.isalnum())

# The config knobs that will be altered (if unset) by ui.tweakdefaults.
tweakrc = b"""
[ui]
# The rollback command is dangerous. As a rule, don't use it.
rollback = False
# Make `hg status` report copy information
statuscopies = yes
# Prefer curses UIs when available. Revert to plain-text with `text`.
interface = curses

[commands]
# Grep working directory by default.
grep.all-files = True
# Make `hg status` emit cwd-relative paths by default.
status.relative = yes
# Refuse to perform an `hg update` that would cause a file content merge
update.check = noconflict
# Show conflicts information in `hg status`
status.verbose = True

[diff]
git = 1
showfunc = 1
word-diff = 1
"""

samplehgrcs = {
    'user':
b"""# example user config (see 'hg help config' for more info)
[ui]
# name and email, e.g.
# username = Jane Doe <jdoe@example.com>
username =

# We recommend enabling tweakdefaults to get slight improvements to
# the UI over time. Make sure to set HGPLAIN in the environment when
# writing scripts!
# tweakdefaults = True

# uncomment to disable color in command output
# (see 'hg help color' for details)
# color = never

# uncomment to disable command output pagination
# (see 'hg help pager' for details)
# paginate = never

[extensions]
# uncomment these lines to enable some popular extensions
# (see 'hg help extensions' for more info)
#
# churn =
""",

    'cloned':
b"""# example repository config (see 'hg help config' for more info)
[paths]
default = %s

# path aliases to other clones of this repo in URLs or filesystem paths
# (see 'hg help config.paths' for more info)
#
# default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
# my-fork = ssh://jdoe@example.net/hg/jdoes-fork
# my-clone = /home/jdoe/jdoes-clone

[ui]
# name and email (local to this repository, optional), e.g.
# username = Jane Doe <jdoe@example.com>
""",

    'local':
b"""# example repository config (see 'hg help config' for more info)
[paths]
# path aliases to other clones of this repo in URLs or filesystem paths
# (see 'hg help config.paths' for more info)
#
# default = http://example.com/hg/example-repo
# default:pushurl = ssh://jdoe@example.net/hg/jdoes-fork
# my-fork = ssh://jdoe@example.net/hg/jdoes-fork
# my-clone = /home/jdoe/jdoes-clone

[ui]
# name and email (local to this repository, optional), e.g.
# username = Jane Doe <jdoe@example.com>
""",

    'global':
b"""# example system-wide hg config (see 'hg help config' for more info)

[ui]
# uncomment to disable color in command output
# (see 'hg help color' for details)
# color = never

# uncomment to disable command output pagination
# (see 'hg help pager' for details)
# paginate = never

[extensions]
# uncomment these lines to enable some popular extensions
# (see 'hg help extensions' for more info)
#
# blackbox =
# churn =
""",
}

def _maybestrurl(maybebytes):
    return pycompat.rapply(pycompat.strurl, maybebytes)

def _maybebytesurl(maybestr):
    return pycompat.rapply(pycompat.bytesurl, maybestr)

class httppasswordmgrdbproxy(object):
    """Delays loading urllib2 until it's needed."""
    def __init__(self):
        self._mgr = None

    def _get_mgr(self):
        if self._mgr is None:
            self._mgr = urlreq.httppasswordmgrwithdefaultrealm()
        return self._mgr

    def add_password(self, realm, uris, user, passwd):
        return self._get_mgr().add_password(
            _maybestrurl(realm), _maybestrurl(uris),
            _maybestrurl(user), _maybestrurl(passwd))

    def find_user_password(self, realm, uri):
        mgr = self._get_mgr()
        return _maybebytesurl(mgr.find_user_password(_maybestrurl(realm),
                                                     _maybestrurl(uri)))

def _catchterm(*args):
    raise error.SignalInterrupt

# unique object used to detect no default value has been provided when
# retrieving configuration value.
_unset = object()

# _reqexithandlers: callbacks run at the end of a request
_reqexithandlers = []

class ui(object):
    def __init__(self, src=None):
        """Create a fresh new ui object if no src given

        Use uimod.ui.load() to create a ui which knows global and user configs.
        In most cases, you should use ui.copy() to create a copy of an existing
        ui object.
        """
        # _buffers: used for temporary capture of output
        self._buffers = []
        # 3-tuple describing how each buffer in the stack behaves.
        # Values are (capture stderr, capture subprocesses, apply labels).
        self._bufferstates = []
        # When a buffer is active, defines whether we are expanding labels.
        # This exists to prevent an extra list lookup.
        self._bufferapplylabels = None
        self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
        self._reportuntrusted = True
        self._knownconfig = configitems.coreitems
        self._ocfg = config.config() # overlay
        self._tcfg = config.config() # trusted
        self._ucfg = config.config() # untrusted
        self._trustusers = set()
        self._trustgroups = set()
        self.callhooks = True
        # Insecure server connections requested.
        self.insecureconnections = False
        # Blocked time
        self.logblockedtimes = False
        # color mode: see mercurial/color.py for possible value
        self._colormode = None
        self._terminfoparams = {}
        self._styles = {}
        self._uninterruptible = False

        if src:
            self._fout = src._fout
            self._ferr = src._ferr
            self._fin = src._fin
            self._fmsg = src._fmsg
            self._fmsgout = src._fmsgout
            self._fmsgerr = src._fmsgerr
            self._finoutredirected = src._finoutredirected
            self._loggers = src._loggers.copy()
            self.pageractive = src.pageractive
            self._disablepager = src._disablepager
            self._tweaked = src._tweaked

            self._tcfg = src._tcfg.copy()
            self._ucfg = src._ucfg.copy()
            self._ocfg = src._ocfg.copy()
            self._trustusers = src._trustusers.copy()
            self._trustgroups = src._trustgroups.copy()
            self.environ = src.environ
            self.callhooks = src.callhooks
            self.insecureconnections = src.insecureconnections
            self._colormode = src._colormode
            self._terminfoparams = src._terminfoparams.copy()
            self._styles = src._styles.copy()

            self.fixconfig()

            self.httppasswordmgrdb = src.httppasswordmgrdb
            self._blockedtimes = src._blockedtimes
        else:
            self._fout = procutil.stdout
            self._ferr = procutil.stderr
            self._fin = procutil.stdin
            self._fmsg = None
            self._fmsgout = self.fout # configurable
            self._fmsgerr = self.ferr # configurable
            self._finoutredirected = False
            self._loggers = {}
            self.pageractive = False
            self._disablepager = False
            self._tweaked = False

            # shared read-only environment
            self.environ = encoding.environ

            self.httppasswordmgrdb = httppasswordmgrdbproxy()
            self._blockedtimes = collections.defaultdict(int)

        allowed = self.configlist('experimental', 'exportableenviron')
        if '*' in allowed:
            self._exportableenviron = self.environ
        else:
            self._exportableenviron = {}
            for k in allowed:
                if k in self.environ:
                    self._exportableenviron[k] = self.environ[k]

    @classmethod
    def load(cls):
        """Create a ui and load global and user configs"""
        u = cls()
        # we always trust global config files and environment variables
        for t, f in rcutil.rccomponents():
            if t == 'path':
                u.readconfig(f, trust=True)
            elif t == 'items':
                sections = set()
                for section, name, value, source in f:
                    # do not set u._ocfg
                    # XXX clean this up once immutable config object is a thing
                    u._tcfg.set(section, name, value, source)
                    u._ucfg.set(section, name, value, source)
                    sections.add(section)
                for section in sections:
                    u.fixconfig(section=section)
            else:
                raise error.ProgrammingError('unknown rctype: %s' % t)
        u._maybetweakdefaults()
        return u

    def _maybetweakdefaults(self):
        if not self.configbool('ui', 'tweakdefaults'):
            return
        if self._tweaked or self.plain('tweakdefaults'):
            return

        # Note: it is SUPER IMPORTANT that you set self._tweaked to
        # True *before* any calls to setconfig(), otherwise you'll get
        # infinite recursion between setconfig and this method.
        #
        # TODO: We should extract an inner method in setconfig() to
        # avoid this weirdness.
        self._tweaked = True
        tmpcfg = config.config()
        tmpcfg.parse('<tweakdefaults>', tweakrc)
        for section in tmpcfg:
            for name, value in tmpcfg.items(section):
                if not self.hasconfig(section, name):
                    self.setconfig(section, name, value, "<tweakdefaults>")

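    # These tweaks only apply when the user opts in. A minimal sketch of the
    # opt-in, valid in any hgrc file read by ui.load() (path up to the user):
    #
    #   [ui]
    #   tweakdefaults = True
    #
    # Because each knob is guarded by hasconfig() above, a value the user has
    # set explicitly is never overridden by a tweaked default.
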
    def copy(self):
        return self.__class__(self)

    def resetstate(self):
        """Clear internal state that shouldn't persist across commands"""
        if self._progbar:
            self._progbar.resetstate() # reset last-print time of progress bar
        self.httppasswordmgrdb = httppasswordmgrdbproxy()

    @contextlib.contextmanager
    def timeblockedsection(self, key):
        # this is open-coded below - search for timeblockedsection to find them
        starttime = util.timer()
        try:
            yield
        finally:
            self._blockedtimes[key + '_blocked'] += \
                (util.timer() - starttime) * 1000

    @contextlib.contextmanager
    def uninterruptible(self):
        """Mark an operation as unsafe.

        Most operations on a repository are safe to interrupt, but a
        few are risky (for example repair.strip). This context manager
        lets you advise Mercurial that something risky is happening so
        that control-C etc can be blocked if desired.
        """
        enabled = self.configbool('experimental', 'nointerrupt')
        if (enabled and
            self.configbool('experimental', 'nointerrupt-interactiveonly')):
            enabled = self.interactive()
        if self._uninterruptible or not enabled:
            # if nointerrupt support is turned off, the process isn't
            # interactive, or we're already in an uninterruptible
            # block, do nothing.
            yield
            return
        def warn():
            self.warn(_("shutting down cleanly\n"))
            self.warn(
                _("press ^C again to terminate immediately (dangerous)\n"))
            return True
        with procutil.uninterruptible(warn):
            try:
                self._uninterruptible = True
                yield
            finally:
                self._uninterruptible = False

380
380
381 def formatter(self, topic, opts):
381 def formatter(self, topic, opts):
382 return formatter.formatter(self, self, topic, opts)
382 return formatter.formatter(self, self, topic, opts)
383
383
384 def _trusted(self, fp, f):
384 def _trusted(self, fp, f):
385 st = util.fstat(fp)
385 st = util.fstat(fp)
386 if util.isowner(st):
386 if util.isowner(st):
387 return True
387 return True
388
388
389 tusers, tgroups = self._trustusers, self._trustgroups
389 tusers, tgroups = self._trustusers, self._trustgroups
390 if '*' in tusers or '*' in tgroups:
390 if '*' in tusers or '*' in tgroups:
391 return True
391 return True
392
392
393 user = util.username(st.st_uid)
393 user = util.username(st.st_uid)
394 group = util.groupname(st.st_gid)
394 group = util.groupname(st.st_gid)
395 if user in tusers or group in tgroups or user == util.username():
395 if user in tusers or group in tgroups or user == util.username():
396 return True
396 return True
397
397
398 if self._reportuntrusted:
398 if self._reportuntrusted:
399 self.warn(_('not trusting file %s from untrusted '
399 self.warn(_('not trusting file %s from untrusted '
400 'user %s, group %s\n') % (f, user, group))
400 'user %s, group %s\n') % (f, user, group))
401 return False
401 return False

    def readconfig(self, filename, root=None, trust=False,
                   sections=None, remap=None):
        try:
            fp = open(filename, r'rb')
        except IOError:
            if not sections: # ignore unless we were looking for something
                return
            raise

        cfg = config.config()
        trusted = sections or trust or self._trusted(fp, filename)

        try:
            cfg.read(filename, fp, sections=sections, remap=remap)
            fp.close()
        except error.ConfigError as inst:
            if trusted:
                raise
            self.warn(_("ignored: %s\n") % stringutil.forcebytestr(inst))

        if self.plain():
            for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
                      'logtemplate', 'message-output', 'statuscopies', 'style',
                      'traceback', 'verbose'):
                if k in cfg['ui']:
                    del cfg['ui'][k]
            for k, v in cfg.items('defaults'):
                del cfg['defaults'][k]
            for k, v in cfg.items('commands'):
                del cfg['commands'][k]
        # Don't remove aliases from the configuration if in the exceptionlist
        if self.plain('alias'):
            for k, v in cfg.items('alias'):
                del cfg['alias'][k]
        if self.plain('revsetalias'):
            for k, v in cfg.items('revsetalias'):
                del cfg['revsetalias'][k]
        if self.plain('templatealias'):
            for k, v in cfg.items('templatealias'):
                del cfg['templatealias'][k]

        if trusted:
            self._tcfg.update(cfg)
            self._tcfg.update(self._ocfg)
        self._ucfg.update(cfg)
        self._ucfg.update(self._ocfg)

        if root is None:
            root = os.path.expanduser('~')
        self.fixconfig(root=root)

    def fixconfig(self, root=None, section=None):
        if section in (None, 'paths'):
            # expand vars and ~
            # translate paths relative to root (or home) into absolute paths
            root = root or encoding.getcwd()
            for c in self._tcfg, self._ucfg, self._ocfg:
                for n, p in c.items('paths'):
                    # Ignore sub-options.
                    if ':' in n:
                        continue
                    if not p:
                        continue
                    if '%%' in p:
                        s = self.configsource('paths', n) or 'none'
                        self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
                                  % (n, p, s))
                        p = p.replace('%%', '%')
                    p = util.expandpath(p)
                    if not util.hasscheme(p) and not os.path.isabs(p):
                        p = os.path.normpath(os.path.join(root, p))
                    c.set("paths", n, p)

        if section in (None, 'ui'):
            # update ui options
            self._fmsgout, self._fmsgerr = _selectmsgdests(self)
            self.debugflag = self.configbool('ui', 'debug')
            self.verbose = self.debugflag or self.configbool('ui', 'verbose')
            self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
            if self.verbose and self.quiet:
                self.quiet = self.verbose = False
            self._reportuntrusted = self.debugflag or self.configbool("ui",
                "report_untrusted")
            self.tracebackflag = self.configbool('ui', 'traceback')
            self.logblockedtimes = self.configbool('ui', 'logblockedtimes')

        if section in (None, 'trusted'):
            # update trust information
            self._trustusers.update(self.configlist('trusted', 'users'))
            self._trustgroups.update(self.configlist('trusted', 'groups'))

        if section in (None, b'devel', b'ui') and self.debugflag:
            tracked = set()
            if self.configbool(b'devel', b'debug.extensions'):
                tracked.add(b'extension')
            if tracked:
                logger = loggingutil.fileobjectlogger(self._ferr, tracked)
                self.setlogger(b'debug', logger)

    def backupconfig(self, section, item):
        return (self._ocfg.backup(section, item),
                self._tcfg.backup(section, item),
                self._ucfg.backup(section, item),)
    def restoreconfig(self, data):
        self._ocfg.restore(data[0])
        self._tcfg.restore(data[1])
        self._ucfg.restore(data[2])

    def setconfig(self, section, name, value, source=''):
        for cfg in (self._ocfg, self._tcfg, self._ucfg):
            cfg.set(section, name, value, source)
        self.fixconfig(section=section)
        self._maybetweakdefaults()
516
516
    def _data(self, untrusted):
        return untrusted and self._ucfg or self._tcfg

    def configsource(self, section, name, untrusted=False):
        return self._data(untrusted).source(section, name)

    def config(self, section, name, default=_unset, untrusted=False):
        """return the plain string version of a config"""
        value = self._config(section, name, default=default,
                             untrusted=untrusted)
        if value is _unset:
            return None
        return value

    def _config(self, section, name, default=_unset, untrusted=False):
        value = itemdefault = default
        item = self._knownconfig.get(section, {}).get(name)
        alternates = [(section, name)]

        if item is not None:
            alternates.extend(item.alias)
            if callable(item.default):
                itemdefault = item.default()
            else:
                itemdefault = item.default
        else:
            msg = "accessing unregistered config item: '%s.%s'"
            msg %= (section, name)
            self.develwarn(msg, 2, 'warn-config-unknown')

        if default is _unset:
            if item is None:
                value = default
            elif item.default is configitems.dynamicdefault:
                value = None
                msg = "config item requires an explicit default value: '%s.%s'"
                msg %= (section, name)
                self.develwarn(msg, 2, 'warn-config-default')
            else:
                value = itemdefault
        elif (item is not None
              and item.default is not configitems.dynamicdefault
              and default != itemdefault):
            msg = ("specifying a mismatched default value for a registered "
                   "config item: '%s.%s' '%s'")
            msg %= (section, name, pycompat.bytestr(default))
            self.develwarn(msg, 2, 'warn-config-default')

        for s, n in alternates:
            candidate = self._data(untrusted).get(s, n, None)
            if candidate is not None:
                value = candidate
                section = s
                name = n
                break

        if self.debugflag and not untrusted and self._reportuntrusted:
            for s, n in alternates:
                uvalue = self._ucfg.get(s, n)
                if uvalue is not None and uvalue != value:
                    self.debug("ignoring untrusted configuration option "
                               "%s.%s = %s\n" % (s, n, uvalue))
        return value

    def configsuboptions(self, section, name, default=_unset, untrusted=False):
        """Get a config option and all sub-options.

        Some config options have sub-options that are declared with the
        format "key:opt = value". This method is used to return the main
        option and all its declared sub-options.

        Returns a 2-tuple of ``(option, sub-options)``, where ``sub-options``
        is a dict of defined sub-options where keys and values are strings.
        """
        main = self.config(section, name, default, untrusted=untrusted)
        data = self._data(untrusted)
        sub = {}
        prefix = '%s:' % name
        for k, v in data.items(section):
            if k.startswith(prefix):
                sub[k[len(prefix):]] = v

        if self.debugflag and not untrusted and self._reportuntrusted:
            for k, v in sub.items():
                uvalue = self._ucfg.get(section, '%s:%s' % (name, k))
                if uvalue is not None and uvalue != v:
                    self.debug('ignoring untrusted configuration option '
                               '%s:%s.%s = %s\n' % (section, name, k, uvalue))

        return main, sub

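    # Illustrative sketch (not part of the original module; the values below
    # are hypothetical): given
    #     [paths]
    #     default = https://example.org/repo
    #     default:pushurl = ssh://example.org/repo
    # a call such as
    #     main, sub = ui.configsuboptions('paths', 'default')
    # would return the main value plus {'pushurl': 'ssh://example.org/repo'}.
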
    def configpath(self, section, name, default=_unset, untrusted=False):
        'get a path config item, expanded relative to repo root or config file'
        v = self.config(section, name, default, untrusted)
        if v is None:
            return None
        if not os.path.isabs(v) or "://" not in v:
            src = self.configsource(section, name, untrusted)
            if ':' in src:
                base = os.path.dirname(src.rsplit(':')[0])
                v = os.path.join(base, os.path.expanduser(v))
        return v

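    # Illustrative sketch (hypothetical file and values, and assuming the
    # usual "path:line" form reported by configsource()): if
    # /etc/mercurial/hgrc sets "style = maps/default", then
    #     ui.configpath('web', 'style')
    # resolves the relative value against the defining file's directory,
    # yielding /etc/mercurial/maps/default.
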
    def configbool(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as a boolean

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'true', b'yes')
        >>> u.configbool(s, b'true')
        True
        >>> u.setconfig(s, b'false', b'no')
        >>> u.configbool(s, b'false')
        False
        >>> u.configbool(s, b'unknown')
        False
        >>> u.configbool(s, b'unknown', True)
        True
        >>> u.setconfig(s, b'invalid', b'somevalue')
        >>> u.configbool(s, b'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a boolean ('somevalue')
        """

        v = self._config(section, name, default, untrusted=untrusted)
        if v is None:
            return v
        if v is _unset:
            if default is _unset:
                return False
            return default
        if isinstance(v, bool):
            return v
        b = stringutil.parsebool(v)
        if b is None:
            raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
                                    % (section, name, v))
        return b

    def configwith(self, convert, section, name, default=_unset,
                   desc=None, untrusted=False):
        """parse a configuration element with a conversion function

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'float1', b'42')
        >>> u.configwith(float, s, b'float1')
        42.0
        >>> u.setconfig(s, b'float2', b'-4.25')
        >>> u.configwith(float, s, b'float2')
        -4.25
        >>> u.configwith(float, s, b'unknown', 7)
        7.0
        >>> u.setconfig(s, b'invalid', b'somevalue')
        >>> u.configwith(float, s, b'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a valid float ('somevalue')
        >>> u.configwith(float, s, b'invalid', desc=b'womble')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a valid womble ('somevalue')
        """

        v = self.config(section, name, default, untrusted)
        if v is None:
            return v  # do not attempt to convert None
        try:
            return convert(v)
        except (ValueError, error.ParseError):
            if desc is None:
                desc = pycompat.sysbytes(convert.__name__)
            raise error.ConfigError(_("%s.%s is not a valid %s ('%s')")
                                    % (section, name, desc, v))

    def configint(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as an integer

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'int1', b'42')
        >>> u.configint(s, b'int1')
        42
        >>> u.setconfig(s, b'int2', b'-42')
        >>> u.configint(s, b'int2')
        -42
        >>> u.configint(s, b'unknown', 7)
        7
        >>> u.setconfig(s, b'invalid', b'somevalue')
        >>> u.configint(s, b'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a valid integer ('somevalue')
        """

        return self.configwith(int, section, name, default, 'integer',
                               untrusted)

    def configbytes(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as a quantity in bytes

        Units can be specified as b (bytes), k or kb (kilobytes), m or
        mb (megabytes), g or gb (gigabytes).

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'val1', b'42')
        >>> u.configbytes(s, b'val1')
        42
        >>> u.setconfig(s, b'val2', b'42.5 kb')
        >>> u.configbytes(s, b'val2')
        43520
        >>> u.configbytes(s, b'unknown', b'7 MB')
        7340032
        >>> u.setconfig(s, b'invalid', b'somevalue')
        >>> u.configbytes(s, b'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a byte quantity ('somevalue')
        """

        value = self._config(section, name, default, untrusted)
        if value is _unset:
            if default is _unset:
                default = 0
            value = default
        if not isinstance(value, bytes):
            return value
        try:
            return util.sizetoint(value)
        except error.ParseError:
            raise error.ConfigError(_("%s.%s is not a byte quantity ('%s')")
                                    % (section, name, value))

    def configlist(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as a list of comma/space separated
        strings

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'list1', b'this,is "a small" ,test')
        >>> u.configlist(s, b'list1')
        ['this', 'is', 'a small', 'test']
        >>> u.setconfig(s, b'list2', b'this, is "a small" , test ')
        >>> u.configlist(s, b'list2')
        ['this', 'is', 'a small', 'test']
        """
        # default is not always a list
        v = self.configwith(config.parselist, section, name, default,
                            'list', untrusted)
        if isinstance(v, bytes):
            return config.parselist(v)
        elif v is None:
            return []
        return v

    def configdate(self, section, name, default=_unset, untrusted=False):
        """parse a configuration element as a tuple of ints

        >>> u = ui(); s = b'foo'
        >>> u.setconfig(s, b'date', b'0 0')
        >>> u.configdate(s, b'date')
        (0, 0)
        """
        if self.config(section, name, default, untrusted):
            return self.configwith(dateutil.parsedate, section, name, default,
                                   'date', untrusted)
        if default is _unset:
            return None
        return default

    def hasconfig(self, section, name, untrusted=False):
        return self._data(untrusted).hasitem(section, name)

    def has_section(self, section, untrusted=False):
        '''tell whether section exists in config.'''
        return section in self._data(untrusted)

    def configitems(self, section, untrusted=False, ignoresub=False):
        items = self._data(untrusted).items(section)
        if ignoresub:
            items = [i for i in items if ':' not in i[0]]
        if self.debugflag and not untrusted and self._reportuntrusted:
            for k, v in self._ucfg.items(section):
                if self._tcfg.get(section, k) != v:
                    self.debug("ignoring untrusted configuration option "
                               "%s.%s = %s\n" % (section, k, v))
        return items

    def walkconfig(self, untrusted=False):
        cfg = self._data(untrusted)
        for section in cfg.sections():
            for name, value in self.configitems(section, untrusted):
                yield section, name, value

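    # Illustrative usage sketch (not from the original module): dump every
    # effective setting, similar in spirit to `hg showconfig`:
    #     for section, name, value in ui.walkconfig():
    #         ui.write('%s.%s=%s\n' % (section, name, value))
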
    def plain(self, feature=None):
        '''is plain mode active?

        Plain mode means that all configuration variables which affect
        the behavior and output of Mercurial should be
        ignored. Additionally, the output should be stable,
        reproducible and suitable for use in scripts or applications.

        The only way to trigger plain mode is by setting either the
        `HGPLAIN' or `HGPLAINEXCEPT' environment variables.

        The return value can either be
        - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
        - False if feature is disabled by default and not included in HGPLAIN
        - True otherwise
        '''
        if ('HGPLAIN' not in encoding.environ and
            'HGPLAINEXCEPT' not in encoding.environ):
            return False
        exceptions = encoding.environ.get('HGPLAINEXCEPT',
                                          '').strip().split(',')
        # TODO: add support for HGPLAIN=+feature,-feature syntax
        if '+strictflags' not in encoding.environ.get('HGPLAIN', '').split(','):
            exceptions.append('strictflags')
        if feature and exceptions:
            return feature not in exceptions
        return True

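    # Illustrative sketch (assumed environment): with HGPLAIN=1 set,
    # plain('color') returns True; additionally setting HGPLAINEXCEPT=color
    # flips plain('color') to False while leaving other features plain.
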
    def username(self, acceptempty=False):
        """Return default username to be used in commits.

        Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL;
        the search stops at the first one that is set.
        If not found and acceptempty is True, returns None.
        If not found and ui.askusername is True, ask the user, else use
        ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
        If no username could be found, raise an Abort error.
        """
        user = encoding.environ.get("HGUSER")
        if user is None:
            user = self.config("ui", "username")
            if user is not None:
                user = os.path.expandvars(user)
        if user is None:
            user = encoding.environ.get("EMAIL")
        if user is None and acceptempty:
            return user
        if user is None and self.configbool("ui", "askusername"):
            user = self.prompt(_("enter a commit username:"), default=None)
        if user is None and not self.interactive():
            try:
                user = '%s@%s' % (procutil.getuser(),
                                  encoding.strtolocal(socket.getfqdn()))
                self.warn(_("no username found, using '%s' instead\n") % user)
            except KeyError:
                pass
        if not user:
            raise error.Abort(_('no username supplied'),
                              hint=_("use 'hg config --edit' "
                                     'to set your username'))
        if "\n" in user:
            raise error.Abort(_("username %r contains a newline\n")
                              % pycompat.bytestr(user))
        return user

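    # Illustrative sketch (hypothetical values): with $HGUSER unset and
    #     [ui]
    #     username = Jane Doe <jane@example.org>
    # in an hgrc, username() returns that string after expanding any
    # environment variables it contains.
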
    def shortuser(self, user):
        """Return a short representation of a user name or email address."""
        if not self.verbose:
            user = stringutil.shortuser(user)
        return user

    def expandpath(self, loc, default=None):
        """Return repository location relative to cwd or from [paths]"""
        try:
            p = self.paths.getpath(loc)
            if p:
                return p.rawloc
        except error.RepoError:
            pass

        if default:
            try:
                p = self.paths.getpath(default)
                if p:
                    return p.rawloc
            except error.RepoError:
                pass

        return loc

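    # Illustrative sketch (hypothetical alias): with
    #     [paths]
    #     upstream = https://example.org/hg/repo
    # defined, expandpath('upstream') resolves to the raw URL, while a
    # location matching no alias falls through and is returned unchanged.
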
    @util.propertycache
    def paths(self):
        return paths(self)

    @property
    def fout(self):
        return self._fout

    @fout.setter
    def fout(self, f):
        self._fout = f
        self._fmsgout, self._fmsgerr = _selectmsgdests(self)

    @property
    def ferr(self):
        return self._ferr

    @ferr.setter
    def ferr(self, f):
        self._ferr = f
        self._fmsgout, self._fmsgerr = _selectmsgdests(self)

    @property
    def fin(self):
        return self._fin

    @fin.setter
    def fin(self, f):
        self._fin = f

    @property
    def fmsg(self):
        """Stream dedicated for status/error messages; may be None if
        fout/ferr are used"""
        return self._fmsg

    @fmsg.setter
    def fmsg(self, f):
        self._fmsg = f
        self._fmsgout, self._fmsgerr = _selectmsgdests(self)

    def pushbuffer(self, error=False, subproc=False, labeled=False):
        """install a buffer to capture standard output of the ui object

        If error is True, the error output will be captured too.

        If subproc is True, output from subprocesses (typically hooks) will be
        captured too.

        If labeled is True, any labels associated with buffered
        output will be handled. By default, this has no effect
        on the output returned, but extensions and GUI tools may
        handle this argument and return styled output. If output
        is being buffered so it can be captured and parsed or
        processed, labeled should not be set to True.
        """
        self._buffers.append([])
        self._bufferstates.append((error, subproc, labeled))
        self._bufferapplylabels = labeled

    def popbuffer(self):
        '''pop the last buffer and return the buffered output'''
        self._bufferstates.pop()
        if self._bufferstates:
            self._bufferapplylabels = self._bufferstates[-1][2]
        else:
            self._bufferapplylabels = None

        return "".join(self._buffers.pop())

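    # Illustrative usage sketch (not from the original module): capture
    # output instead of printing it:
    #     ui.pushbuffer()
    #     ui.write('hello\n')
    #     captured = ui.popbuffer()    # 'hello\n'
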
    def _isbuffered(self, dest):
        if dest is self._fout:
            return bool(self._buffers)
        if dest is self._ferr:
            return bool(self._bufferstates and self._bufferstates[-1][0])
        return False

    def canwritewithoutlabels(self):
        '''check if write skips the label'''
        if self._buffers and not self._bufferapplylabels:
            return True
        return self._colormode is None

    def canbatchlabeledwrites(self):
        '''check if write calls with labels are batchable'''
        # Windows color printing is special, see ``write``.
        return self._colormode != 'win32'

    def write(self, *args, **opts):
        '''write args to output

        By default, this method simply writes to the buffer or stdout.
        Color mode can be set on the UI class to have the output decorated
        with a color modifier before being written to stdout.

        The color used is controlled by an optional keyword argument, "label".
        This should be a string containing label names separated by space.
        Label names take the form of "topic.type". For example, ui.debug()
        issues a label of "ui.debug".

        When labeling output for a specific command, a label of
        "cmdname.type" is recommended. For example, status issues
        a label of "status.modified" for modified files.
        '''
        dest = self._fout

        # inlined _write() for speed
        if self._buffers:
            label = opts.get(r'label', '')
            if label and self._bufferapplylabels:
                self._buffers[-1].extend(self.label(a, label) for a in args)
            else:
                self._buffers[-1].extend(args)
            return

        # inlined _writenobuf() for speed
        self._progclear()
        msg = b''.join(args)

        # opencode timeblockedsection because this is a critical path
        starttime = util.timer()
        try:
            if self._colormode == 'win32':
                # windows color printing is its own can of crab, defer to
                # the color module and that is it.
                color.win32print(self, dest.write, msg, **opts)
            else:
                if self._colormode is not None:
                    label = opts.get(r'label', '')
                    msg = self.label(msg, label)
                dest.write(msg)
        except IOError as err:
            raise error.StdioError(err)
        finally:
            self._blockedtimes['stdio_blocked'] += \
                (util.timer() - starttime) * 1000

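    # Illustrative usage sketch: emit labeled output through write(); the
    # label only affects rendering when a color mode is active:
    #     ui.write('M file.txt\n', label='status.modified')
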
    def write_err(self, *args, **opts):
        self._write(self._ferr, *args, **opts)

    def _write(self, dest, *args, **opts):
        # update write() as well if you touch this code
        if self._isbuffered(dest):
            label = opts.get(r'label', '')
            if label and self._bufferapplylabels:
                self._buffers[-1].extend(self.label(a, label) for a in args)
            else:
                self._buffers[-1].extend(args)
        else:
            self._writenobuf(dest, *args, **opts)

    def _writenobuf(self, dest, *args, **opts):
        # update write() as well if you touch this code
        self._progclear()
        msg = b''.join(args)

        # opencode timeblockedsection because this is a critical path
        starttime = util.timer()
        try:
            if dest is self._ferr and not getattr(self._fout, 'closed', False):
                self._fout.flush()
            if getattr(dest, 'structured', False):
                # channel for machine-readable output with metadata, where
                # no extra colorization is necessary.
                dest.write(msg, **opts)
            elif self._colormode == 'win32':
                # windows color printing is its own can of crab, defer to
                # the color module and that is it.
                color.win32print(self, dest.write, msg, **opts)
            else:
                if self._colormode is not None:
                    label = opts.get(r'label', '')
                    msg = self.label(msg, label)
                dest.write(msg)
            # stderr may be buffered under win32 when redirected to files,
            # including stdout.
            if dest is self._ferr and not getattr(self._ferr, 'closed', False):
                dest.flush()
        except IOError as err:
            if (dest is self._ferr
                and err.errno in (errno.EPIPE, errno.EIO, errno.EBADF)):
                # no way to report the error, so ignore it
                return
            raise error.StdioError(err)
        finally:
            self._blockedtimes['stdio_blocked'] += \
                (util.timer() - starttime) * 1000

    def _writemsg(self, dest, *args, **opts):
        _writemsgwith(self._write, dest, *args, **opts)

    def _writemsgnobuf(self, dest, *args, **opts):
        _writemsgwith(self._writenobuf, dest, *args, **opts)

    def flush(self):
        # opencode timeblockedsection because this is a critical path
        starttime = util.timer()
        try:
            try:
                self._fout.flush()
            except IOError as err:
                if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
                    raise error.StdioError(err)
            finally:
                try:
                    self._ferr.flush()
                except IOError as err:
                    if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
                        raise error.StdioError(err)
        finally:
            self._blockedtimes['stdio_blocked'] += \
                (util.timer() - starttime) * 1000

    def _isatty(self, fh):
        if self.configbool('ui', 'nontty'):
            return False
        return procutil.isatty(fh)

    def protectfinout(self):
        """Duplicate ui streams and redirect original if they are stdio

        Returns (fin, fout) which point to the original ui fds, but may be
        copies of them. The returned streams can be considered "owned" in
        that print(), exec(), etc. never reach them.
        """
        if self._finoutredirected:
            # if already redirected, protectstdio() would just create another
            # nullfd pair, which is equivalent to returning self._fin/_fout.
            return self._fin, self._fout
        fin, fout = procutil.protectstdio(self._fin, self._fout)
        self._finoutredirected = (fin, fout) != (self._fin, self._fout)
        return fin, fout

    def restorefinout(self, fin, fout):
        """Restore ui streams from possibly duplicated (fin, fout)"""
        if (fin, fout) == (self._fin, self._fout):
            return
        procutil.restorestdio(self._fin, self._fout, fin, fout)
        # protectfinout() won't create more than one duplicated stream,
        # so we can just turn the redirection flag off.
        self._finoutredirected = False

    @contextlib.contextmanager
    def protectedfinout(self):
        """Run code block with protected standard streams"""
        fin, fout = self.protectfinout()
        try:
            yield fin, fout
        finally:
            self.restorefinout(fin, fout)

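    # Illustrative usage sketch (run_child is a hypothetical helper): execute
    # code with stdio protected from stray writes:
    #     with ui.protectedfinout() as (fin, fout):
    #         run_child(fin, fout)
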
    def disablepager(self):
        self._disablepager = True

    def pager(self, command):
        """Start a pager for subsequent command output.

        Commands which produce a long stream of output should call
        this function to activate the user's preferred pagination
        mechanism (which may be no pager). Calling this function
        precludes any future use of interactive functionality, such as
        prompting the user or activating curses.

        Args:
          command: The full, non-aliased name of the command. That is, "log"
                   not "history", "summary" not "summ", etc.
        """
        if (self._disablepager
            or self.pageractive):
            # the pager behavior has already been determined
            return

        if not command.startswith('internal-always-') and (
            # explicit --pager=on (= 'internal-always-' prefix) should
            # take precedence over disabling factors below
            command in self.configlist('pager', 'ignore')
            or not self.configbool('ui', 'paginate')
            or not self.configbool('pager', 'attend-' + command, True)
            or encoding.environ.get('TERM') == 'dumb'
            # TODO: if we want to allow HGPLAINEXCEPT=pager,
            # formatted() will need some adjustment.
            or not self.formatted()
            or self.plain()
            or self._buffers
            # TODO: expose debugger-enabled on the UI object
            or '--debugger' in pycompat.sysargv):
            # We only want to paginate if the ui appears to be
            # interactive, the user didn't say HGPLAIN or
            # HGPLAINEXCEPT=pager, and the user didn't specify --debug.
            return

        pagercmd = self.config('pager', 'pager', rcutil.fallbackpager)
        if not pagercmd:
            return

        pagerenv = {}
        for name, value in rcutil.defaultpagerenv().items():
            if name not in encoding.environ:
                pagerenv[name] = value

        self.debug('starting pager for command %s\n' %
                   stringutil.pprint(command))
        self.flush()

        wasformatted = self.formatted()
        if util.safehasattr(signal, "SIGPIPE"):
            signal.signal(signal.SIGPIPE, _catchterm)
        if self._runpager(pagercmd, pagerenv):
            self.pageractive = True
            # Preserve the formatted-ness of the UI. This is important
            # because we mess with stdout, which might confuse
            # auto-detection of things being formatted.
            self.setconfig('ui', 'formatted', wasformatted, 'pager')
            self.setconfig('ui', 'interactive', False, 'pager')

            # If pagermode differs from color.mode, reconfigure color now that
            # pageractive is set.
            cm = self._colormode
            if cm != self.config('color', 'pagermode', cm):
                color.setup(self)
        else:
            # If the pager can't be spawned in dispatch when --pager=on is
            # given, don't try again when the command runs, to avoid a
            # duplicate warning about a missing pager command.
            self.disablepager()

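    # Illustrative configuration sketch (assumed values): the checks above
    # honor settings such as
    #     [pager]
    #     pager = less -FRX
    #     ignore = version, help
    #     attend-log = true
    # together with ui.paginate and the HGPLAIN rules.
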
    def _runpager(self, command, env=None):
        """Actually start the pager and set up file descriptors.

        This is separate in part so that extensions (like chg) can
        override how a pager is invoked.
        """
        if command == 'cat':
            # Save ourselves some work.
            return False
        # If the command doesn't contain any of these characters, we
        # assume it's a binary and exec it directly. This means for
        # simple pager command configurations, we can degrade
        # gracefully and tell the user about their broken pager.
        shell = any(c in command for c in "|&;<>()$`\\\"' \t\n*?[#~=%")

        if pycompat.iswindows and not shell:
            # Windows' built-in `more` cannot be invoked with shell=False, but
            # its `more.com` can. Hide this implementation detail from the
            # user so we can also get sane bad PAGER behavior. MSYS has
            # `more.exe`, so do a cmd.exe style resolution of the executable to
            # determine which one to use.
            fullcmd = procutil.findexe(command)
            if not fullcmd:
                self.warn(_("missing pager command '%s', skipping pager\n")
                          % command)
                return False

            command = fullcmd

        try:
            pager = subprocess.Popen(
                procutil.tonativestr(command), shell=shell, bufsize=-1,
                close_fds=procutil.closefds, stdin=subprocess.PIPE,
                stdout=procutil.stdout, stderr=procutil.stderr,
                env=procutil.tonativeenv(procutil.shellenviron(env)))
        except OSError as e:
            if e.errno == errno.ENOENT and not shell:
                self.warn(_("missing pager command '%s', skipping pager\n")
                          % command)
                return False
            raise

        # back up original file descriptors
        stdoutfd = os.dup(procutil.stdout.fileno())
        stderrfd = os.dup(procutil.stderr.fileno())

        os.dup2(pager.stdin.fileno(), procutil.stdout.fileno())
        if self._isatty(procutil.stderr):
            os.dup2(pager.stdin.fileno(), procutil.stderr.fileno())

        @self.atexit
        def killpager():
            if util.safehasattr(signal, "SIGINT"):
                signal.signal(signal.SIGINT, signal.SIG_IGN)
            # restore original fds, closing pager.stdin copies in the process
            os.dup2(stdoutfd, procutil.stdout.fileno())
            os.dup2(stderrfd, procutil.stderr.fileno())
            pager.stdin.close()
            pager.wait()

        return True

    @property
    def _exithandlers(self):
        return _reqexithandlers

    def atexit(self, func, *args, **kwargs):
        '''register a function to run after dispatching a request

        Handlers do not stay registered across request boundaries.'''
        self._exithandlers.append((func, args, kwargs))
        return func

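    # Illustrative usage sketch (cleanup and remove_tempdir are hypothetical):
    # atexit() can be used as a decorator, as killpager() in _runpager() does:
    #     @ui.atexit
    #     def cleanup():
    #         remove_tempdir()
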
    def interface(self, feature):
        """what interface to use for interactive console features?

        The interface is controlled by the value of `ui.interface` but also by
        the value of feature-specific configuration. For example:

        ui.interface.histedit = text
        ui.interface.chunkselector = curses

        Here the features are "histedit" and "chunkselector".

        The configuration above means that the default interface for commands
        is curses, the interface for histedit is text and the interface for
        selecting chunks is crecord (the best curses interface available).

        Consider the following example:
        ui.interface = curses
        ui.interface.histedit = text

        Then histedit will use the text interface and chunkselector will use
        the default curses interface (crecord at the moment).
        """
        alldefaults = frozenset(["text", "curses"])

        featureinterfaces = {
            "chunkselector": [
                "text",
                "curses",
            ],
            "histedit": [
                "text",
                "curses",
            ],
        }

        # Feature-specific interface
        if feature not in featureinterfaces.keys():
            # Programming error, not user error
            raise ValueError("Unknown feature requested %s" % feature)

        availableinterfaces = frozenset(featureinterfaces[feature])
        if alldefaults > availableinterfaces:
            # Programming error, not user error. We need a use case to
            # define the right thing to do here.
            raise ValueError(
                "Feature %s does not handle all default interfaces" %
                feature)

        if self.plain() or encoding.environ.get('TERM') == 'dumb':
            return "text"

        # Default interface for all the features
        defaultinterface = "text"
        i = self.config("ui", "interface")
        if i in alldefaults:
            defaultinterface = i

        choseninterface = defaultinterface
        f = self.config("ui", "interface.%s" % feature)
        if f in availableinterfaces:
            choseninterface = f

        if i is not None and defaultinterface != i:
            if f is not None:
                self.warn(_("invalid value for ui.interface: %s\n") %
                          (i,))
            else:
                self.warn(_("invalid value for ui.interface: %s (using %s)\n") %
                          (i, choseninterface))
        if f is not None and choseninterface != f:
            self.warn(_("invalid value for ui.interface.%s: %s (using %s)\n") %
                      (feature, f, choseninterface))

        return choseninterface

1339 def interactive(self):
1372 def interactive(self):
1340 '''is interactive input allowed?
1373 '''is interactive input allowed?
1341
1374
1342 An interactive session is a session where input can be reasonably read
1375 An interactive session is a session where input can be reasonably read
1343 from `sys.stdin'. If this function returns false, any attempt to read
1376 from `sys.stdin'. If this function returns false, any attempt to read
1344 from stdin should fail with an error, unless a sensible default has been
1377 from stdin should fail with an error, unless a sensible default has been
1345 specified.
1378 specified.
1346
1379
1347 Interactiveness is triggered by the value of the `ui.interactive'
1380 Interactiveness is triggered by the value of the `ui.interactive'
1348 configuration variable or - if it is unset - when `sys.stdin' points
1381 configuration variable or - if it is unset - when `sys.stdin' points
1349 to a terminal device.
1382 to a terminal device.
1350
1383
1351 This function refers to input only; for output, see `ui.formatted()'.
1384 This function refers to input only; for output, see `ui.formatted()'.
1352 '''
1385 '''
1353 i = self.configbool("ui", "interactive")
1386 i = self.configbool("ui", "interactive")
1354 if i is None:
1387 if i is None:
1355 # some environments replace stdin without implementing isatty
1388 # some environments replace stdin without implementing isatty
1356 # usually those are non-interactive
1389 # usually those are non-interactive
1357 return self._isatty(self._fin)
1390 return self._isatty(self._fin)
1358
1391
1359 return i
1392 return i

    def termwidth(self):
        '''how wide is the terminal in columns?
        '''
        if 'COLUMNS' in encoding.environ:
            try:
                return int(encoding.environ['COLUMNS'])
            except ValueError:
                pass
        return scmutil.termsize(self)[0]

    def formatted(self):
        '''should formatted output be used?

        It is often desirable to format the output to suit the output medium.
        Examples of this are truncating long lines or colorizing messages.
        However, this is often not desirable when piping output into other
        utilities, e.g. `grep'.

        Formatted output is triggered by the value of the `ui.formatted'
        configuration variable or - if it is unset - when `sys.stdout' points
        to a terminal device. Please note that `ui.formatted' should be
        considered an implementation detail; it is not intended for use outside
        Mercurial or its extensions.

        This function refers to output only; for input, see `ui.interactive()'.
        This function always returns false when in plain mode, see `ui.plain()'.
        '''
        if self.plain():
            return False

        i = self.configbool("ui", "formatted")
        if i is None:
            # some environments replace stdout without implementing isatty
            # usually those are non-interactive
            return self._isatty(self._fout)

        return i
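
    # A small usage sketch (assumed caller code, not from this file):
    # formatting decisions such as truncation should be gated on
    # formatted() so that piped output, e.g. to `grep', stays intact:
    #
    #   line = b'...'
    #   if ui.formatted():
    #       line = line[:ui.termwidth()]
    #   ui.write(line)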

    def _readline(self):
        # Replacing stdin/stdout temporarily is a hard problem on Python 3
        # because they have to be text streams with *no buffering*. Instead,
        # we use rawinput() only if call_readline() will be invoked by
        # PyOS_Readline(), so no I/O will be made at the Python layer.
        usereadline = (self._isatty(self._fin) and self._isatty(self._fout)
                       and procutil.isstdin(self._fin)
                       and procutil.isstdout(self._fout))
        if usereadline:
            try:
                # magically add command line editing support, where
                # available
                import readline
                # force demandimport to really load the module
                readline.read_history_file
                # windows sometimes raises something other than ImportError
            except Exception:
                usereadline = False

        # prompt ' ' must exist; otherwise readline may delete entire line
        # - http://bugs.python.org/issue12833
        with self.timeblockedsection('stdio'):
            if usereadline:
                line = encoding.strtolocal(pycompat.rawinput(r' '))
                # When stdin is in binary mode on Windows, it can cause
                # raw_input() to emit an extra trailing carriage return
                if pycompat.oslinesep == b'\r\n' and line.endswith(b'\r'):
                    line = line[:-1]
            else:
                self._fout.write(b' ')
                self._fout.flush()
                line = self._fin.readline()
                if not line:
                    raise EOFError
                line = line.rstrip(pycompat.oslinesep)

        return line

    def prompt(self, msg, default="y"):
        """Prompt user with msg, read response.
        If ui is not interactive, the default is returned.
        """
        return self._prompt(msg, default=default)

    def _prompt(self, msg, **opts):
        default = opts[r'default']
        if not self.interactive():
            self._writemsg(self._fmsgout, msg, ' ', type='prompt', **opts)
            self._writemsg(self._fmsgout, default or '', "\n",
                           type='promptecho')
            return default
        self._writemsgnobuf(self._fmsgout, msg, type='prompt', **opts)
        self.flush()
        try:
            r = self._readline()
            if not r:
                r = default
            if self.configbool('ui', 'promptecho'):
                self._writemsg(self._fmsgout, r, "\n", type='promptecho')
            return r
        except EOFError:
            raise error.ResponseExpected()

    @staticmethod
    def extractchoices(prompt):
        """Extract prompt message and list of choices from specified prompt.

        This returns tuple "(message, choices)", and "choices" is the
        list of tuple "(response character, text without &)".

        >>> ui.extractchoices(b"awake? $$ &Yes $$ &No")
        ('awake? ', [('y', 'Yes'), ('n', 'No')])
        >>> ui.extractchoices(b"line\\nbreak? $$ &Yes $$ &No")
        ('line\\nbreak? ', [('y', 'Yes'), ('n', 'No')])
        >>> ui.extractchoices(b"want lots of $$money$$?$$Ye&s$$N&o")
        ('want lots of $$money$$?', [('s', 'Yes'), ('o', 'No')])
        """

        # Sadly, the prompt string may have been built with a filename
        # containing "$$" so let's try to find the first valid-looking
        # prompt to start parsing. Sadly, we also can't rely on
        # choices containing spaces, ASCII, or basically anything
        # except an ampersand followed by a character.
        m = re.match(br'(?s)(.+?)\$\$([^\$]*&[^ \$].*)', prompt)
        msg = m.group(1)
        choices = [p.strip(' ') for p in m.group(2).split('$$')]
        def choicetuple(s):
            ampidx = s.index('&')
            return s[ampidx + 1:ampidx + 2].lower(), s.replace('&', '', 1)
        return (msg, [choicetuple(s) for s in choices])

    def promptchoice(self, prompt, default=0):
        """Prompt user with a message, read response, and ensure it matches
        one of the provided choices. The prompt is formatted as follows:

           "would you like fries with that (Yn)? $$ &Yes $$ &No"

        The index of the choice is returned. Responses are case
        insensitive. If ui is not interactive, the default is
        returned.
        """

        msg, choices = self.extractchoices(prompt)
        resps = [r for r, t in choices]
        while True:
            r = self._prompt(msg, default=resps[default], choices=choices)
            if r.lower() in resps:
                return resps.index(r.lower())
            # TODO: shouldn't it be a warning?
            self._writemsg(self._fmsgout, _("unrecognized response\n"))
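
    # Example call, following the prompt format described above (the
    # surrounding code is assumed):
    #
    #   idx = ui.promptchoice(_("keep changes (Yn)? $$ &Yes $$ &No"))
    #   # idx is 0 for "Yes" (the default) and 1 for "No"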

    def getpass(self, prompt=None, default=None):
        if not self.interactive():
            return default
        try:
            self._writemsg(self._fmsgerr, prompt or _('password: '),
                           type='prompt', password=True)
            # disable getpass() only if explicitly specified. it's still valid
            # to interact with tty even if fin is not a tty.
            with self.timeblockedsection('stdio'):
                if self.configbool('ui', 'nontty'):
                    l = self._fin.readline()
                    if not l:
                        raise EOFError
                    return l.rstrip('\n')
                else:
                    return getpass.getpass('')
        except EOFError:
            raise error.ResponseExpected()

    def status(self, *msg, **opts):
        '''write status message to output (if ui.quiet is False)

        This adds an output label of "ui.status".
        '''
        if not self.quiet:
            self._writemsg(self._fmsgout, type='status', *msg, **opts)

    def warn(self, *msg, **opts):
        '''write warning message to output (stderr)

        This adds an output label of "ui.warning".
        '''
        self._writemsg(self._fmsgerr, type='warning', *msg, **opts)

    def error(self, *msg, **opts):
        '''write error message to output (stderr)

        This adds an output label of "ui.error".
        '''
        self._writemsg(self._fmsgerr, type='error', *msg, **opts)

    def note(self, *msg, **opts):
        '''write note to output (if ui.verbose is True)

        This adds an output label of "ui.note".
        '''
        if self.verbose:
            self._writemsg(self._fmsgout, type='note', *msg, **opts)

    def debug(self, *msg, **opts):
        '''write debug message to output (if ui.debugflag is True)

        This adds an output label of "ui.debug".
        '''
        if self.debugflag:
            self._writemsg(self._fmsgout, type='debug', *msg, **opts)
            self.log(b'debug', b'%s', b''.join(msg))

    def edit(self, text, user, extra=None, editform=None, pending=None,
             repopath=None, action=None):
        if action is None:
            self.develwarn('action is None but will soon be a required '
                           'parameter to ui.edit()')
        extra_defaults = {
            'prefix': 'editor',
            'suffix': '.txt',
        }
        if extra is not None:
            if extra.get('suffix') is not None:
                self.develwarn('extra.suffix is not None but will soon be '
                               'ignored by ui.edit()')
            extra_defaults.update(extra)
        extra = extra_defaults

        if action == 'diff':
            suffix = '.diff'
        elif action:
            suffix = '.%s.hg.txt' % action
        else:
            suffix = extra['suffix']

        rdir = None
        if self.configbool('experimental', 'editortmpinhg'):
            rdir = repopath
        (fd, name) = pycompat.mkstemp(prefix='hg-' + extra['prefix'] + '-',
                                      suffix=suffix,
                                      dir=rdir)
        try:
            f = os.fdopen(fd, r'wb')
            f.write(util.tonativeeol(text))
            f.close()

            environ = {'HGUSER': user}
            if 'transplant_source' in extra:
                environ.update({'HGREVISION': hex(extra['transplant_source'])})
            for label in ('intermediate-source', 'source', 'rebase_source'):
                if label in extra:
                    environ.update({'HGREVISION': extra[label]})
                    break
            if editform:
                environ.update({'HGEDITFORM': editform})
            if pending:
                environ.update({'HG_PENDING': pending})

            editor = self.geteditor()

            self.system("%s \"%s\"" % (editor, name),
                        environ=environ,
                        onerr=error.Abort, errprefix=_("edit failed"),
                        blockedtag='editor')

            f = open(name, r'rb')
            t = util.fromnativeeol(f.read())
            f.close()
        finally:
            os.unlink(name)

        return t
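
    # Typical invocation (a sketch; the variable names are assumptions,
    # loosely modeled on how commit-message editing calls this):
    #
    #   text = ui.edit(committext, ctx.user(), editform='commit.normal',
    #                  repopath=repo.path, action='commit')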

    def system(self, cmd, environ=None, cwd=None, onerr=None, errprefix=None,
               blockedtag=None):
        '''execute shell command with appropriate output stream. command
        output will be redirected if fout is not stdout.

        if command fails and onerr is None, return status, else raise onerr
        object as exception.
        '''
        if blockedtag is None:
            # Long cmds tend to be because of an absolute path on cmd. Keep
            # the tail end instead
            cmdsuffix = cmd.translate(None, _keepalnum)[-85:]
            blockedtag = 'unknown_system_' + cmdsuffix
        out = self._fout
        if any(s[1] for s in self._bufferstates):
            out = self
        with self.timeblockedsection(blockedtag):
            rc = self._runsystem(cmd, environ=environ, cwd=cwd, out=out)
        if rc and onerr:
            errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
                                procutil.explainexit(rc))
            if errprefix:
                errmsg = '%s: %s' % (errprefix, errmsg)
            raise onerr(errmsg)
        return rc

    def _runsystem(self, cmd, environ, cwd, out):
        """actually execute the given shell command (can be overridden by
        extensions like chg)"""
        return procutil.system(cmd, environ=environ, cwd=cwd, out=out)

    def traceback(self, exc=None, force=False):
        '''print exception traceback if traceback printing enabled or forced.
        only to call in exception handler. returns true if traceback
        printed.'''
        if self.tracebackflag or force:
            if exc is None:
                exc = sys.exc_info()
            cause = getattr(exc[1], 'cause', None)

            if cause is not None:
                causetb = traceback.format_tb(cause[2])
                exctb = traceback.format_tb(exc[2])
                exconly = traceback.format_exception_only(cause[0], cause[1])

                # exclude frame where 'exc' was chained and rethrown from exctb
                self.write_err('Traceback (most recent call last):\n',
                               ''.join(exctb[:-1]),
                               ''.join(causetb),
                               ''.join(exconly))
            else:
                output = traceback.format_exception(exc[0], exc[1], exc[2])
                self.write_err(encoding.strtolocal(r''.join(output)))
        return self.tracebackflag or force

    def geteditor(self):
        '''return editor to use'''
        if pycompat.sysplatform == 'plan9':
            # vi is the MIPS instruction simulator on Plan 9. We
            # instead default to E to plumb commit messages to
            # avoid confusion.
            editor = 'E'
        else:
            editor = 'vi'
        return (encoding.environ.get("HGEDITOR") or
                self.config("ui", "editor", editor))

    @util.propertycache
    def _progbar(self):
        """setup the progbar singleton to the ui object"""
        if (self.quiet or self.debugflag
            or self.configbool('progress', 'disable')
            or not progress.shouldprint(self)):
            return None
        return getprogbar(self)

    def _progclear(self):
        """clear progress bar output if any. use it before any output"""
        if not haveprogbar(): # nothing loaded yet
            return
        if self._progbar is not None and self._progbar.printed:
            self._progbar.clear()

    def progress(self, topic, pos, item="", unit="", total=None):
        '''show a progress message

        By default a textual progress bar will be displayed if an operation
        takes too long. 'topic' is the current operation, 'item' is a
        non-numeric marker of the current position (i.e. the currently
        in-process file), 'pos' is the current numeric position (i.e.
        revision, bytes, etc.), unit is a corresponding unit label,
        and total is the highest expected pos.

        Multiple nested topics may be active at a time.

        All topics should be marked closed by setting pos to None at
        termination.
        '''
        self.deprecwarn("use ui.makeprogress() instead of ui.progress()",
                        "5.1")
        progress = self.makeprogress(topic, unit, total)
        if pos is not None:
            progress.update(pos, item=item)
        else:
            progress.complete()

    def makeprogress(self, topic, unit="", total=None):
        """Create a progress helper for the specified topic"""
        if getattr(self._fmsgerr, 'structured', False):
            # channel for machine-readable output with metadata, just send
            # raw information
            # TODO: consider porting some useful information (e.g. estimated
            # time) from progbar. we might want to support update delay to
            # reduce the cost of transferring progress messages.
            def updatebar(topic, pos, item, unit, total):
                self._fmsgerr.write(None, type=b'progress', topic=topic,
                                    pos=pos, item=item, unit=unit, total=total)
        elif self._progbar is not None:
            updatebar = self._progbar.progress
        else:
            def updatebar(topic, pos, item, unit, total):
                pass
        return scmutil.progress(self, updatebar, topic, unit, total)
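
    # The returned helper is driven with update()/complete(), mirroring the
    # calls in ui.progress() above (``items`` is an assumed iterable):
    #
    #   progress = ui.makeprogress(_('scanning'), unit=_('files'),
    #                              total=len(items))
    #   for i, item in enumerate(items):
    #       progress.update(i, item=item)
    #   progress.complete()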

    def getlogger(self, name):
        """Returns a logger of the given name; or None if not registered"""
        return self._loggers.get(name)

    def setlogger(self, name, logger):
        """Install logger which can be identified later by the given name

        More than one logger can be registered. Use extension or module
        name to uniquely identify the logger instance.
        """
        self._loggers[name] = logger

    def log(self, event, msgfmt, *msgargs, **opts):
        '''hook for logging facility extensions

        event should be a readily-identifiable subsystem, which will
        allow filtering.

        msgfmt should be a newline-terminated format string to log, and
        *msgargs are %-formatted into it.

        **opts currently has no defined meanings.
        '''
        if not self._loggers:
            return
        activeloggers = [l for l in self._loggers.itervalues()
                         if l.tracked(event)]
        if not activeloggers:
            return
        msg = msgfmt % msgargs
        opts = pycompat.byteskwargs(opts)
        # guard against recursion from e.g. ui.debug()
        registeredloggers = self._loggers
        self._loggers = {}
        try:
            for logger in activeloggers:
                logger.log(self, event, msg, opts)
        finally:
            self._loggers = registeredloggers
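
    # Minimal sketch of the logger protocol consumed above: a registered
    # logger only needs tracked() and log() (the example class name is an
    # assumption; see the blackbox extension for a real implementation):
    #
    #   class stderrlogger(object):
    #       def tracked(self, event):
    #           return event == b'develwarn'
    #       def log(self, ui, event, msg, opts):
    #           ui.ferr.write(b'[%s] %s' % (event, msg))
    #
    #   ui.setlogger(b'myext', stderrlogger())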

    def label(self, msg, label):
        '''style msg based on supplied label

        If some color mode is enabled, this will add the necessary control
        characters to apply such color. In addition, 'debug' color mode adds
        markup showing which label affects a piece of text.

        ui.write(s, 'label') is equivalent to
        ui.write(ui.label(s, 'label')).
        '''
        if self._colormode is not None:
            return color.colorlabel(self, msg, label)
        return msg

    def develwarn(self, msg, stacklevel=1, config=None):
        """issue a developer warning message

        Use 'stacklevel' to report the offender some layers further up in the
        stack.
        """
        if not self.configbool('devel', 'all-warnings'):
            if config is None or not self.configbool('devel', config):
                return
        msg = 'devel-warn: ' + msg
        stacklevel += 1 # get in develwarn
        if self.tracebackflag:
            util.debugstacktrace(msg, stacklevel, self._ferr, self._fout)
            self.log('develwarn', '%s at:\n%s' %
                     (msg, ''.join(util.getstackframes(stacklevel))))
        else:
            curframe = inspect.currentframe()
            calframe = inspect.getouterframes(curframe, 2)
            fname, lineno, fmsg = calframe[stacklevel][1:4]
            fname, fmsg = pycompat.sysbytes(fname), pycompat.sysbytes(fmsg)
            self.write_err('%s at: %s:%d (%s)\n'
                           % (msg, fname, lineno, fmsg))
            self.log('develwarn', '%s at: %s:%d (%s)\n',
                     msg, fname, lineno, fmsg)
            curframe = calframe = None # avoid cycles

    def deprecwarn(self, msg, version, stacklevel=2):
        """issue a deprecation warning

        - msg: message explaining what is deprecated and how to upgrade,
        - version: last version where the API will be supported,
        """
        if not (self.configbool('devel', 'all-warnings')
                or self.configbool('devel', 'deprec-warn')):
            return
        msg += ("\n(compatibility will be dropped after Mercurial-%s,"
                " update your code.)") % version
        self.develwarn(msg, stacklevel=stacklevel, config='deprec-warn')

    def exportableenviron(self):
        """The environment variables that are safe to export, e.g. through
        hgweb.
        """
        return self._exportableenviron

    @contextlib.contextmanager
    def configoverride(self, overrides, source=""):
        """Context manager for temporary config overrides
        `overrides` must be a dict of the following structure:
        {(section, name) : value}"""
        backups = {}
        try:
            for (section, name), value in overrides.items():
                backups[(section, name)] = self.backupconfig(section, name)
                self.setconfig(section, name, value, source)
            yield
        finally:
            for __, backup in backups.items():
                self.restoreconfig(backup)
            # just restoring ui.quiet config to the previous value is not enough
            # as it does not update ui.quiet class member
            if ('ui', 'quiet') in overrides:
                self.fixconfig(section='ui')
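
    # Usage sketch (the override dict shape matches the docstring; the
    # source tag 'myext' is an assumption):
    #
    #   with ui.configoverride({('ui', 'quiet'): True}, 'myext'):
    #       ...  # overridden values are restored when the block exits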

class paths(dict):
    """Represents a collection of paths and their configs.

    Data is initially derived from ui instances and the config files they have
    loaded.
    """
    def __init__(self, ui):
        dict.__init__(self)

        for name, loc in ui.configitems('paths', ignoresub=True):
            # No location is the same as not existing.
            if not loc:
                continue
            loc, sub = ui.configsuboptions('paths', name)
            self[name] = path(ui, name, rawloc=loc, suboptions=sub)

    def getpath(self, name, default=None):
        """Return a ``path`` from a string, falling back to default.

        ``name`` can be a named path or a location. Locations are filesystem
        paths or URIs.

        Returns None if ``name`` is not a registered path, a URI, or a local
        path to a repo.
        """
        # Only fall back to default if no path was requested.
        if name is None:
            if not default:
                default = ()
            elif not isinstance(default, (tuple, list)):
                default = (default,)
            for k in default:
                try:
                    return self[k]
                except KeyError:
                    continue
            return None

        # Most likely empty string.
        # This may need to raise in the future.
        if not name:
            return None

        try:
            return self[name]
        except KeyError:
            # Try to resolve as a local path or URI.
            try:
                # We don't pass sub-options in, so no need to pass ui instance.
                return path(None, None, rawloc=name)
            except ValueError:
                raise error.RepoError(_('repository %s does not exist') %
                                      name)

_pathsuboptions = {}

def pathsuboption(option, attr):
    """Decorator used to declare a path sub-option.

    Arguments are the sub-option name and the attribute it should set on
    ``path`` instances.

    The decorated function will receive as arguments a ``ui`` instance,
    ``path`` instance, and the string value of this option from the config.
    The function should return the value that will be set on the ``path``
    instance.

    This decorator can be used to perform additional verification of
    sub-options and to change the type of sub-options.
    """
    def register(func):
        _pathsuboptions[option] = (attr, func)
        return func
    return register

@pathsuboption('pushurl', 'pushloc')
def pushurlpathoption(ui, path, value):
    u = util.url(value)
    # Actually require a URL.
    if not u.scheme:
        ui.warn(_('(paths.%s:pushurl not a URL; ignoring)\n') % path.name)
        return None

    # Don't support the #foo syntax in the push URL to declare branch to
    # push.
    if u.fragment:
        ui.warn(_('("#fragment" in paths.%s:pushurl not supported; '
                  'ignoring)\n') % path.name)
        u.fragment = None

    return bytes(u)

@pathsuboption('pushrev', 'pushrev')
def pushrevpathoption(ui, path, value):
    return value

class path(object):
    """Represents an individual path and its configuration."""

    def __init__(self, ui, name, rawloc=None, suboptions=None):
        """Construct a path from its config options.

        ``ui`` is the ``ui`` instance the path is coming from.
        ``name`` is the symbolic name of the path.
        ``rawloc`` is the raw location, as defined in the config.
        ``pushloc`` is the raw location pushes should be made to.

        If ``name`` is not defined, we require that the location be a) a local
        filesystem path with a .hg directory or b) a URL. If not,
        ``ValueError`` is raised.
        """
        if not rawloc:
            raise ValueError('rawloc must be defined')

        # Locations may define branches via syntax <base>#<branch>.
        u = util.url(rawloc)
        branch = None
        if u.fragment:
            branch = u.fragment
            u.fragment = None

        self.url = u
        self.branch = branch

        self.name = name
        self.rawloc = rawloc
        self.loc = '%s' % u

        # When given a raw location but not a symbolic name, validate the
        # location is valid.
        if not name and not u.scheme and not self._isvalidlocalpath(self.loc):
            raise ValueError('location is not a URL or path to a local '
                             'repo: %s' % rawloc)

        suboptions = suboptions or {}

        # Now process the sub-options. If a sub-option is registered, its
        # attribute will always be present. The value will be None if there
        # was no valid sub-option.
        for suboption, (attr, func) in _pathsuboptions.iteritems():
            if suboption not in suboptions:
                setattr(self, attr, None)
                continue

            value = func(ui, self, suboptions[suboption])
            setattr(self, attr, value)

    def _isvalidlocalpath(self, path):
        """Returns True if the given path is a potentially valid repository.
        This is its own function so that extensions can change the definition of
        'valid' in this case (like when pulling from a git repo into a hg
        one)."""
        return os.path.isdir(os.path.join(path, '.hg'))

    @property
    def suboptions(self):
        """Return sub-options and their values for this path.

        This is intended to be used for presentation purposes.
        """
        d = {}
        for subopt, (attr, _func) in _pathsuboptions.iteritems():
            value = getattr(self, attr)
            if value is not None:
                d[subopt] = value
        return d

# we instantiate one globally shared progress bar to avoid
# competing progress bars when multiple UI objects get created
_progresssingleton = None

def getprogbar(ui):
    global _progresssingleton
    if _progresssingleton is None:
        # passing 'ui' object to the singleton is fishy,
        # this is how the extension used to work but feel free to rework it.
        _progresssingleton = progress.progbar(ui)
    return _progresssingleton

def haveprogbar():
    return _progresssingleton is not None

def _selectmsgdests(ui):
    name = ui.config(b'ui', b'message-output')
    if name == b'channel':
        if ui.fmsg:
            return ui.fmsg, ui.fmsg
        else:
            # fall back to ferr if channel isn't ready so that status/error
            # messages can be printed
            return ui.ferr, ui.ferr
    if name == b'stdio':
        return ui.fout, ui.ferr
    if name == b'stderr':
        return ui.ferr, ui.ferr
    raise error.Abort(b'invalid ui.message-output destination: %s' % name)

def _writemsgwith(write, dest, *args, **opts):
    """Write ui message with the given ui._write*() function

    The specified message type is translated to 'ui.<type>' label if the dest
    isn't a structured channel, so that the message will be colorized.
    """
    # TODO: maybe change 'type' to a mandatory option
    if r'type' in opts and not getattr(dest, 'structured', False):
        opts[r'label'] = opts.get(r'label', '') + ' ui.%s' % opts.pop(r'type')
    write(dest, *args, **opts)
@@ -1,30 +1,37 @@
 // Copyright 2018 Georges Racinet <gracinet@anybox.fr>
 //
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 mod ancestors;
 pub mod dagops;
 pub use ancestors::{AncestorsIterator, LazyAncestors, MissingAncestors};
 #[cfg(test)]
 pub mod testing;
 
 /// Mercurial revision numbers
 ///
 /// As noted in revlog.c, revision numbers are actually encoded in
 /// 4 bytes, and are liberally converted to ints, whence the i32
 pub type Revision = i32;
 
 pub const NULL_REVISION: Revision = -1;
 
+/// Same as `mercurial.node.wdirrev`
+///
+/// This is also equal to `i32::max_value()`, but it's better to spell
+/// it out explicitly, same as in `mercurial.node`
+pub const WORKING_DIRECTORY_REVISION: Revision = 0x7fffffff;
+
 /// The simplest expression of what we need of Mercurial DAGs.
 pub trait Graph {
     /// Return the two parents of the given `Revision`.
     ///
     /// Each of the parents can be independently `NULL_REVISION`
     fn parents(&self, Revision) -> Result<[Revision; 2], GraphError>;
 }
 
 #[derive(Clone, Debug, PartialEq)]
 pub enum GraphError {
     ParentOutOfRange(Revision),
+    WorkingDirectoryUnsupported,
 }
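
// A hedged sketch of implementing the `Graph` trait for an in-memory DAG
// (the `SampleGraph` name is an assumption; the `testing` module above
// provides a similar helper for tests):
//
//   struct SampleGraph;
//
//   impl Graph for SampleGraph {
//       fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
//           match rev {
//               0 => Ok([NULL_REVISION, NULL_REVISION]),
//               1 | 2 => Ok([0, NULL_REVISION]),
//               3 => Ok([1, 2]),
//               _ => Err(GraphError::ParentOutOfRange(rev)),
//           }
//       }
//   }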
@@ -1,130 +1,133
1 // cindex.rs
1 // cindex.rs
2 //
2 //
3 // Copyright 2018 Georges Racinet <gracinet@anybox.fr>
3 // Copyright 2018 Georges Racinet <gracinet@anybox.fr>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 //! Bindings to use the Index defined by the parsers C extension
8 //! Bindings to use the Index defined by the parsers C extension
9 //!
9 //!
10 //! Ideally, we should use an Index entirely implemented in Rust,
10 //! Ideally, we should use an Index entirely implemented in Rust,
11 //! but this will take some time to get there.
11 //! but this will take some time to get there.
12 #[cfg(feature = "python27")]
12 #[cfg(feature = "python27")]
13 extern crate python27_sys as python_sys;
13 extern crate python27_sys as python_sys;
14 #[cfg(feature = "python3")]
14 #[cfg(feature = "python3")]
15 extern crate python3_sys as python_sys;
15 extern crate python3_sys as python_sys;
16
16
17 use self::python_sys::PyCapsule_Import;
17 use self::python_sys::PyCapsule_Import;
18 use cpython::{PyClone, PyErr, PyObject, PyResult, Python};
18 use cpython::{PyClone, PyErr, PyObject, PyResult, Python};
19 use hg::{Graph, GraphError, Revision};
19 use hg::{Graph, GraphError, Revision, WORKING_DIRECTORY_REVISION};
20 use libc::c_int;
20 use libc::c_int;
21 use std::ffi::CStr;
21 use std::ffi::CStr;
22 use std::mem::transmute;
22 use std::mem::transmute;
23
23
24 type IndexParentsFn = unsafe extern "C" fn(
24 type IndexParentsFn = unsafe extern "C" fn(
25 index: *mut python_sys::PyObject,
25 index: *mut python_sys::PyObject,
26 rev: c_int,
26 rev: c_int,
27 ps: *mut [c_int; 2],
27 ps: *mut [c_int; 2],
28 ) -> c_int;
28 ) -> c_int;

/// A `Graph` backed by objects and functions from revlog.c
///
/// This implementation of the `Graph` trait relies on (pointers to)
/// - the C index object (`index` member)
/// - the `index_get_parents()` function (`parents` member)
///
/// # Safety
///
/// The C index itself is mutable, and this Rust binding is **not
/// protected by the GIL**, meaning that this construct isn't safe with respect
/// to Python threads.
///
/// All callers of this `Index` must acquire the GIL and must not release it
/// while working.
///
/// # TODO find a solution to make it GIL safe again.
///
/// This is non-trivial, and can wait until we have a clearer picture with
/// more Rust Mercurial constructs.
///
/// One possibility would be to introduce a `GILProtectedIndex` wrapper
/// enclosing a `Python<'p>` marker and have it be the one implementing the
/// `Graph` trait, but this would mean the `Graph` implementor would be
/// likely to change between subsequent method invocations of the `hg-core`
/// objects (a serious change of the `hg-core` API):
/// either exposing ways to mutate the `Graph`, or making it a non-persistent
/// parameter in the relevant methods that need one.
///
/// Another possibility would be to introduce an abstract lock handle into
/// the core API, that would be tied to `GILGuard` / `Python<'p>`
/// in the case of the `cpython` crate bindings, yet could leave room for
/// other mechanisms in other contexts.
pub struct Index {
    index: PyObject,
    parents: IndexParentsFn,
}

impl Index {
    pub fn new(py: Python, index: PyObject) -> PyResult<Self> {
        Ok(Index {
            index: index,
            parents: decapsule_parents_fn(py)?,
        })
    }
}

impl Clone for Index {
    fn clone(&self) -> Self {
        let guard = Python::acquire_gil();
        Index {
            index: self.index.clone_ref(guard.python()),
            parents: self.parents.clone(),
        }
    }
}

impl Graph for Index {
    /// wrap a call to the C extern parents function
    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
+        if rev == WORKING_DIRECTORY_REVISION {
+            return Err(GraphError::WorkingDirectoryUnsupported);
+        }
        let mut res: [c_int; 2] = [0; 2];
        let code = unsafe {
            (self.parents)(
                self.index.as_ptr(),
                rev as c_int,
                &mut res as *mut [c_int; 2],
            )
        };
        match code {
            0 => Ok(res),
            _ => Err(GraphError::ParentOutOfRange(rev)),
        }
    }
}
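
Since the `# Safety` contract above lives only in prose, a brief usage sketch may help. The function name and error handling here are invented for illustration and assume the `Index`, `Graph`, and `GraphError` items shown in this file:

// Illustration only: honoring the GIL contract documented on `Index`.
fn first_parent(index_object: PyObject) -> Result<Revision, GraphError> {
    // Acquire the GIL and keep the guard alive for the duration of
    // every call into the C index, as the `# Safety` section requires.
    let guard = Python::acquire_gil();
    let py = guard.python();
    // Sketch: a real caller would propagate the PyErr instead.
    let index = Index::new(py, index_object).expect("capsule import failed");
    let ps = index.parents(0)?; // needs the `Graph` trait in scope
    Ok(ps[0])
    // `guard` is dropped here, releasing the GIL after the calls finish.
}
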

/// Return the `index_get_parents` function of the parsers C Extension module.
///
/// A pointer to the function is stored in the `parsers` module as a
/// standard [Python capsule](https://docs.python.org/2/c-api/capsule.html).
///
/// This function retrieves the capsule and casts the function pointer.
///
/// Casting function pointers is one of the rare legitimate use cases of
/// `mem::transmute()` (see
/// https://doc.rust-lang.org/std/mem/fn.transmute.html).
/// It is inappropriate for architectures where function and data pointer
/// sizes differ (so-called "Harvard architectures"), but these are nowadays
/// mostly DSPs and microcontrollers, hence out of our scope.
fn decapsule_parents_fn(py: Python) -> PyResult<IndexParentsFn> {
    unsafe {
        let caps_name = CStr::from_bytes_with_nul_unchecked(
            b"mercurial.cext.parsers.index_get_parents_CAPI\0",
        );
        let from_caps = PyCapsule_Import(caps_name.as_ptr(), 0);
        if from_caps.is_null() {
            return Err(PyErr::fetch(py));
        }
        Ok(transmute(from_caps))
    }
}
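
To unpack the comment about `transmute`: `PyCapsule_Import` yields a data pointer (`*mut c_void`), and Rust offers no `as` cast from a data pointer to a function pointer, so the reinterpretation has to go through `transmute`. A hedged sketch of the same pattern with a simpler, hypothetical signature:

// Illustration only: the general capsule-to-function-pointer pattern.
use std::mem::transmute;
use std::os::raw::{c_int, c_void};

type AddFn = unsafe extern "C" fn(c_int, c_int) -> c_int;

/// Safety: the caller must guarantee the payload really is a function
/// with the `AddFn` signature; `transmute` performs no checking at all.
unsafe fn fn_from_payload(payload: *mut c_void) -> Option<AddFn> {
    if payload.is_null() {
        return None; // e.g. the capsule import failed and set a Python error
    }
    // `payload as AddFn` would not compile: data-to-function pointer
    // casts must go through `transmute`, as in `decapsule_parents_fn`.
    Some(transmute(payload))
}
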
@@ -1,27 +1,38 @@
// ancestors.rs
//
// Copyright 2018 Georges Racinet <gracinet@anybox.fr>
//
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.

//! Bindings for Rust errors
//!
//! [`GraphError`] exposes `hg::GraphError` as a subclass of `ValueError`,
+//! but some variants of `hg::GraphError` can be converted directly to other
+//! existing Python exceptions if appropriate.
//!
//! [`GraphError`]: struct.GraphError.html
use cpython::exc::ValueError;
use cpython::{PyErr, Python};
use hg;

py_exception!(rustext, GraphError, ValueError);

impl GraphError {
    pub fn pynew(py: Python, inner: hg::GraphError) -> PyErr {
        match inner {
            hg::GraphError::ParentOutOfRange(r) => {
                GraphError::new(py, ("ParentOutOfRange", r))
            }
+            hg::GraphError::WorkingDirectoryUnsupported => {
+                match py
+                    .import("mercurial.error")
+                    .and_then(|m| m.get(py, "WdirUnsupported"))
+                {
+                    Err(e) => e,
+                    Ok(cls) => PyErr::from_instance(py, cls),
+                }
+            }
        }
    }
}
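
For context, a hedged sketch of how a binding function would typically use `pynew`; the wrapper name and signature are invented for illustration and assume the `Index` type from cindex.rs above:

// Illustration only: surfacing hg::GraphError as a Python exception.
fn parents_py(py: Python, index_obj: PyObject, rev: i32) -> PyResult<Vec<i32>> {
    let index = Index::new(py, index_obj)?;
    match index.parents(rev) {
        // Success: return the two parents as a Python-convertible Vec.
        Ok(ps) => Ok(ps.to_vec()),
        // ParentOutOfRange becomes the ValueError subclass defined by
        // py_exception!, while WorkingDirectoryUnsupported is translated
        // to mercurial.error.WdirUnsupported by `pynew`.
        Err(e) => Err(GraphError::pynew(py, e)),
    }
}
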
@@ -1,1290 +1,1293 @@
#testcases sshv1 sshv2

#if sshv2
$ cat >> $HGRCPATH << EOF
> [experimental]
> sshpeer.advertise-v2 = true
> sshserver.support-v2 = true
> EOF
#endif

Prepare repo a:

$ hg init a
$ cd a
$ echo a > a
$ hg add a
$ hg commit -m test
$ echo first line > b
$ hg add b

Create a non-inlined filelog:

$ "$PYTHON" -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
$ for j in 0 1 2 3 4 5 6 7 8 9; do
> cat data1 >> b
> hg commit -m test
> done

List files in store/data (should show a 'b.d'):

#if reporevlogstore
$ for i in .hg/store/data/*; do
> echo $i
> done
.hg/store/data/a.i
.hg/store/data/b.d
.hg/store/data/b.i
#endif

Trigger branchcache creation:

$ hg branches
default 10:a7949464abda
$ ls .hg/cache
branch2-served
manifestfulltextcache (reporevlogstore !)
rbc-names-v1
rbc-revs-v1

Default operation:

$ hg clone . ../b
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cd ../b

Ensure branchcache got copied over:

$ ls .hg/cache
branch2-served
rbc-names-v1
rbc-revs-v1

$ cat a
a
$ hg verify
checking changesets
checking manifests
crosschecking files in changesets and manifests
checking files
checked 11 changesets with 11 changes to 2 files

Invalid dest '' must abort:

$ hg clone . ''
abort: empty destination path is not valid
[255]

No update, with debug option:

#if hardlink
$ hg --debug clone -U . ../c --config progress.debug=true
linking: 1 files
linking: 2 files
linking: 3 files
linking: 4 files
linking: 5 files
linking: 6 files
linking: 7 files
linking: 8 files
linked 8 files (reporevlogstore !)
linking: 9 files (reposimplestore !)
linking: 10 files (reposimplestore !)
linking: 11 files (reposimplestore !)
linking: 12 files (reposimplestore !)
linking: 13 files (reposimplestore !)
linking: 14 files (reposimplestore !)
linking: 15 files (reposimplestore !)
linking: 16 files (reposimplestore !)
linking: 17 files (reposimplestore !)
linking: 18 files (reposimplestore !)
linked 18 files (reposimplestore !)
#else
$ hg --debug clone -U . ../c --config progress.debug=true
linking: 1 files
copying: 2 files
copying: 3 files
copying: 4 files
copying: 5 files
copying: 6 files
copying: 7 files
copying: 8 files
copied 8 files (reporevlogstore !)
copying: 9 files (reposimplestore !)
copying: 10 files (reposimplestore !)
copying: 11 files (reposimplestore !)
copying: 12 files (reposimplestore !)
copying: 13 files (reposimplestore !)
copying: 14 files (reposimplestore !)
copying: 15 files (reposimplestore !)
copying: 16 files (reposimplestore !)
copying: 17 files (reposimplestore !)
copying: 18 files (reposimplestore !)
copied 18 files (reposimplestore !)
#endif
$ cd ../c

Ensure branchcache got copied over:

$ ls .hg/cache
branch2-served
rbc-names-v1
rbc-revs-v1

$ cat a 2>/dev/null || echo "a not present"
a not present
$ hg verify
checking changesets
checking manifests
crosschecking files in changesets and manifests
checking files
checked 11 changesets with 11 changes to 2 files

Default destination:

$ mkdir ../d
$ cd ../d
$ hg clone ../a
destination directory: a
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cd a
$ hg cat a
a
$ cd ../..

Check that we drop the 'file:' from the path before writing the .hgrc:

$ hg clone file:a e
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ grep 'file:' e/.hg/hgrc
[1]

Check that path aliases are expanded:

$ hg clone -q -U --config 'paths.foobar=a#0' foobar f
$ hg -R f showconfig paths.default
$TESTTMP/a#0

Use --pull:

$ hg clone --pull a g
requesting all changes
adding changesets
adding manifests
adding file changes
added 11 changesets with 11 changes to 2 files
new changesets acb14030fe0a:a7949464abda
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg -R g verify
checking changesets
checking manifests
crosschecking files in changesets and manifests
checking files
checked 11 changesets with 11 changes to 2 files

Invalid dest '' with --pull must abort (issue2528):

$ hg clone --pull a ''
abort: empty destination path is not valid
[255]

Clone to '.':

$ mkdir h
$ cd h
$ hg clone ../a .
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cd ..


*** Tests for option -u ***

Adding some more history to repo a:

$ cd a
$ hg tag ref1
$ echo the quick brown fox >a
$ hg ci -m "hacked default"
$ hg up ref1
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg branch stable
marked working directory as branch stable
(branches are permanent and global, did you want a bookmark?)
$ echo some text >a
$ hg ci -m "starting branch stable"
$ hg tag ref2
$ echo some more text >a
$ hg ci -m "another change for branch stable"
$ hg up ref2
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg parents
changeset: 13:e8ece76546a6
branch: stable
tag: ref2
parent: 10:a7949464abda
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: starting branch stable


Repo a has two heads:

$ hg heads
changeset: 15:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: hacked default


$ cd ..


Testing --noupdate with --updaterev (must abort):

$ hg clone --noupdate --updaterev 1 a ua
abort: cannot specify both --noupdate and --updaterev
[255]


Testing clone -u:

$ hg clone -u . a ua
updating to branch stable
2 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has both heads:

$ hg -R ua heads
changeset: 15:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: hacked default


Same revision checked out in repo a and ua:

$ hg -R a parents --template "{node|short}\n"
e8ece76546a6
$ hg -R ua parents --template "{node|short}\n"
e8ece76546a6

$ rm -r ua


Testing clone --pull -u:

$ hg clone --pull -u . a ua
requesting all changes
adding changesets
adding manifests
adding file changes
added 16 changesets with 16 changes to 3 files (+1 heads)
new changesets acb14030fe0a:0aae7cf88f0d
updating to branch stable
2 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has both heads:

$ hg -R ua heads
changeset: 15:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: hacked default


Same revision checked out in repo a and ua:

$ hg -R a parents --template "{node|short}\n"
e8ece76546a6
$ hg -R ua parents --template "{node|short}\n"
e8ece76546a6

$ rm -r ua


Testing clone -u <branch>:

$ hg clone -u stable a ua
updating to branch stable
3 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has both heads:

$ hg -R ua heads
changeset: 15:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: hacked default


Branch 'stable' is checked out:

$ hg -R ua parents
changeset: 15:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable


$ rm -r ua


Testing default checkout:

$ hg clone a ua
updating to branch default
3 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has both heads:

$ hg -R ua heads
changeset: 15:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: hacked default


Branch 'default' is checked out:

$ hg -R ua parents
changeset: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: hacked default

Test clone with a branch named "@" (issue3677)

$ hg -R ua branch @
marked working directory as branch @
$ hg -R ua commit -m 'created branch @'
$ hg clone ua atbranch
updating to branch default
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg -R atbranch heads
changeset: 16:798b6d97153e
branch: @
tag: tip
parent: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: created branch @

changeset: 15:0aae7cf88f0d
branch: stable
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: hacked default

$ hg -R atbranch parents
changeset: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: hacked default


$ rm -r ua atbranch


Testing #<branch>:

$ hg clone -u . a#stable ua
adding changesets
adding manifests
adding file changes
added 14 changesets with 14 changes to 3 files
new changesets acb14030fe0a:0aae7cf88f0d
updating to branch stable
2 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):

$ hg -R ua heads
changeset: 13:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 10:a7949464abda
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: test


Same revision checked out in repo a and ua:

$ hg -R a parents --template "{node|short}\n"
e8ece76546a6
$ hg -R ua parents --template "{node|short}\n"
e8ece76546a6

$ rm -r ua


Testing -u -r <branch>:

$ hg clone -u . -r stable a ua
adding changesets
adding manifests
adding file changes
added 14 changesets with 14 changes to 3 files
new changesets acb14030fe0a:0aae7cf88f0d
updating to branch stable
2 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):

$ hg -R ua heads
changeset: 13:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 10:a7949464abda
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: test


Same revision checked out in repo a and ua:

$ hg -R a parents --template "{node|short}\n"
e8ece76546a6
$ hg -R ua parents --template "{node|short}\n"
e8ece76546a6

$ rm -r ua


Testing -r <branch>:

$ hg clone -r stable a ua
adding changesets
adding manifests
adding file changes
added 14 changesets with 14 changes to 3 files
new changesets acb14030fe0a:0aae7cf88f0d
updating to branch stable
3 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):

$ hg -R ua heads
changeset: 13:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 10:a7949464abda
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: test


Branch 'stable' is checked out:

$ hg -R ua parents
changeset: 13:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable


$ rm -r ua


Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
iterable in addbranchrevs()

$ cat <<EOF > simpleclone.py
> from mercurial import hg, ui as uimod
> myui = uimod.ui.load()
> repo = hg.repository(myui, b'a')
> hg.clone(myui, {}, repo, dest=b"ua")
> EOF

$ "$PYTHON" simpleclone.py
updating to branch default
3 files updated, 0 files merged, 0 files removed, 0 files unresolved

$ rm -r ua

$ cat <<EOF > branchclone.py
> from mercurial import extensions, hg, ui as uimod
> myui = uimod.ui.load()
> extensions.loadall(myui)
> extensions.populateui(myui)
> repo = hg.repository(myui, b'a')
> hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable",])
> EOF

$ "$PYTHON" branchclone.py
adding changesets
adding manifests
adding file changes
added 14 changesets with 14 changes to 3 files
new changesets acb14030fe0a:0aae7cf88f0d
updating to branch stable
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ rm -r ua


Test clone with special '@' bookmark:
$ cd a
$ hg bookmark -r a7949464abda @ # branch point of stable from default
$ hg clone . ../i
updating to bookmark @
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg id -i ../i
a7949464abda
$ rm -r ../i

$ hg bookmark -f -r stable @
$ hg bookmarks
@ 15:0aae7cf88f0d
$ hg clone . ../i
updating to bookmark @ on branch stable
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg id -i ../i
0aae7cf88f0d
$ cd "$TESTTMP"


Testing failures:

$ mkdir fail
$ cd fail

No local source

$ hg clone a b
abort: repository a not found!
[255]

No remote source

#if windows
$ hg clone http://$LOCALIP:3121/a b
abort: error: * (glob)
[255]
#else
$ hg clone http://$LOCALIP:3121/a b
abort: error: *refused* (glob)
[255]
#endif
$ rm -rf b # work around bug with http clone


#if unix-permissions no-root

Inaccessible source

$ mkdir a
$ chmod 000 a
$ hg clone a b
abort: Permission denied: *$TESTTMP/fail/a/.hg* (glob)
[255]

Inaccessible destination

$ hg init b
$ cd b
$ hg clone . ../a
abort: Permission denied: *../a* (glob)
[255]
$ cd ..
$ chmod 700 a
$ rm -r a b

#endif


#if fifo

Source of wrong type

$ mkfifo a
$ hg clone a b
abort: $ENOTDIR$: *$TESTTMP/fail/a/.hg* (glob)
[255]
$ rm a

#endif

Default destination, same directory

$ hg init q
$ hg clone q
destination directory: q
abort: destination 'q' is not empty
[255]

destination directory not empty

$ mkdir a
$ echo stuff > a/a
$ hg clone q a
abort: destination 'a' is not empty
[255]


#if unix-permissions no-root

leave existing directory in place after clone failure

$ hg init c
$ cd c
$ echo c > c
$ hg commit -A -m test
adding c
$ chmod -rx .hg/store/data
$ cd ..
$ mkdir d
$ hg clone c d 2> err
[255]
$ test -d d
$ test -d d/.hg
[1]

re-enable perm to allow deletion

$ chmod +rx c/.hg/store/data

#endif

$ cd ..

Test clone from the repository in (emulated) revlog format 0 (issue4203):

$ mkdir issue4203
$ mkdir -p src/.hg
$ echo foo > src/foo
$ hg -R src add src/foo
$ hg -R src commit -m '#0'
$ hg -R src log -q
0:e1bab28bca43
+$ hg -R src debugrevlog -c | egrep 'format|flags'
+format : 0
+flags : (none)
$ hg clone -U -q src dst
$ hg -R dst log -q
0:e1bab28bca43

724 Create repositories to test auto sharing functionality
727 Create repositories to test auto sharing functionality
725
728
726 $ cat >> $HGRCPATH << EOF
729 $ cat >> $HGRCPATH << EOF
727 > [extensions]
730 > [extensions]
728 > share=
731 > share=
729 > EOF
732 > EOF
730
733
731 $ hg init empty
734 $ hg init empty
732 $ hg init source1a
735 $ hg init source1a
733 $ cd source1a
736 $ cd source1a
734 $ echo initial1 > foo
737 $ echo initial1 > foo
735 $ hg -q commit -A -m initial
738 $ hg -q commit -A -m initial
736 $ echo second > foo
739 $ echo second > foo
737 $ hg commit -m second
740 $ hg commit -m second
738 $ cd ..
741 $ cd ..
739
742
740 $ hg init filteredrev0
743 $ hg init filteredrev0
741 $ cd filteredrev0
744 $ cd filteredrev0
742 $ cat >> .hg/hgrc << EOF
745 $ cat >> .hg/hgrc << EOF
743 > [experimental]
746 > [experimental]
744 > evolution.createmarkers=True
747 > evolution.createmarkers=True
745 > EOF
748 > EOF
746 $ echo initial1 > foo
749 $ echo initial1 > foo
747 $ hg -q commit -A -m initial0
750 $ hg -q commit -A -m initial0
748 $ hg -q up -r null
751 $ hg -q up -r null
749 $ echo initial2 > foo
752 $ echo initial2 > foo
750 $ hg -q commit -A -m initial1
753 $ hg -q commit -A -m initial1
751 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
754 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
752 obsoleted 1 changesets
755 obsoleted 1 changesets
753 $ cd ..
756 $ cd ..
754
757
755 $ hg -q clone --pull source1a source1b
758 $ hg -q clone --pull source1a source1b
756 $ cd source1a
759 $ cd source1a
757 $ hg bookmark bookA
760 $ hg bookmark bookA
758 $ echo 1a > foo
761 $ echo 1a > foo
759 $ hg commit -m 1a
762 $ hg commit -m 1a
760 $ cd ../source1b
763 $ cd ../source1b
761 $ hg -q up -r 0
764 $ hg -q up -r 0
762 $ echo head1 > foo
765 $ echo head1 > foo
763 $ hg commit -m head1
766 $ hg commit -m head1
764 created new head
767 created new head
765 $ hg bookmark head1
768 $ hg bookmark head1
766 $ hg -q up -r 0
769 $ hg -q up -r 0
767 $ echo head2 > foo
770 $ echo head2 > foo
768 $ hg commit -m head2
771 $ hg commit -m head2
769 created new head
772 created new head
770 $ hg bookmark head2
773 $ hg bookmark head2
771 $ hg -q up -r 0
774 $ hg -q up -r 0
772 $ hg branch branch1
775 $ hg branch branch1
773 marked working directory as branch branch1
776 marked working directory as branch branch1
774 (branches are permanent and global, did you want a bookmark?)
777 (branches are permanent and global, did you want a bookmark?)
775 $ echo branch1 > foo
778 $ echo branch1 > foo
776 $ hg commit -m branch1
779 $ hg commit -m branch1
777 $ hg -q up -r 0
780 $ hg -q up -r 0
778 $ hg branch branch2
781 $ hg branch branch2
779 marked working directory as branch branch2
782 marked working directory as branch branch2
780 $ echo branch2 > foo
783 $ echo branch2 > foo
781 $ hg commit -m branch2
784 $ hg commit -m branch2
782 $ cd ..
785 $ cd ..
783 $ hg init source2
786 $ hg init source2
784 $ cd source2
787 $ cd source2
785 $ echo initial2 > foo
788 $ echo initial2 > foo
786 $ hg -q commit -A -m initial2
789 $ hg -q commit -A -m initial2
787 $ echo second > foo
790 $ echo second > foo
788 $ hg commit -m second
791 $ hg commit -m second
789 $ cd ..
792 $ cd ..
790
793
791 Clone with auto share from an empty repo should not result in share
794 Clone with auto share from an empty repo should not result in share
792
795
793 $ mkdir share
796 $ mkdir share
794 $ hg --config share.pool=share clone empty share-empty
797 $ hg --config share.pool=share clone empty share-empty
795 (not using pooled storage: remote appears to be empty)
798 (not using pooled storage: remote appears to be empty)
796 updating to branch default
799 updating to branch default
797 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
800 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
798 $ ls share
801 $ ls share
799 $ test -d share-empty/.hg/store
802 $ test -d share-empty/.hg/store
800 $ test -f share-empty/.hg/sharedpath
803 $ test -f share-empty/.hg/sharedpath
801 [1]
804 [1]
802
805
803 Clone with auto share from a repo with filtered revision 0 should not result in share
806 Clone with auto share from a repo with filtered revision 0 should not result in share
804
807
805 $ hg --config share.pool=share clone filteredrev0 share-filtered
808 $ hg --config share.pool=share clone filteredrev0 share-filtered
806 (not using pooled storage: unable to resolve identity of remote)
809 (not using pooled storage: unable to resolve identity of remote)
807 requesting all changes
810 requesting all changes
808 adding changesets
811 adding changesets
809 adding manifests
812 adding manifests
810 adding file changes
813 adding file changes
811 added 1 changesets with 1 changes to 1 files
814 added 1 changesets with 1 changes to 1 files
812 new changesets e082c1832e09
815 new changesets e082c1832e09
813 updating to branch default
816 updating to branch default
814 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
817 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
815
818
816 Clone from repo with content should result in shared store being created
819 Clone from repo with content should result in shared store being created
817
820
818 $ hg --config share.pool=share clone source1a share-dest1a
821 $ hg --config share.pool=share clone source1a share-dest1a
819 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
822 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
820 requesting all changes
823 requesting all changes
821 adding changesets
824 adding changesets
822 adding manifests
825 adding manifests
823 adding file changes
826 adding file changes
824 added 3 changesets with 3 changes to 1 files
827 added 3 changesets with 3 changes to 1 files
825 new changesets b5f04eac9d8f:e5bfe23c0b47
828 new changesets b5f04eac9d8f:e5bfe23c0b47
826 searching for changes
829 searching for changes
827 no changes found
830 no changes found
828 adding remote bookmark bookA
831 adding remote bookmark bookA
829 updating working directory
832 updating working directory
830 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
833 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
831
834
832 The shared repo should have been created
835 The shared repo should have been created
833
836
834 $ ls share
837 $ ls share
835 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
838 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
836
839
837 The destination should point to it
840 The destination should point to it
838
841
839 $ cat share-dest1a/.hg/sharedpath; echo
842 $ cat share-dest1a/.hg/sharedpath; echo
840 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
843 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
841
844
842 The destination should have bookmarks
845 The destination should have bookmarks
843
846
844 $ hg -R share-dest1a bookmarks
847 $ hg -R share-dest1a bookmarks
845 bookA 2:e5bfe23c0b47
848 bookA 2:e5bfe23c0b47
846
849
847 The default path should be the remote, not the share
850 The default path should be the remote, not the share
848
851
849 $ hg -R share-dest1a config paths.default
852 $ hg -R share-dest1a config paths.default
850 $TESTTMP/source1a
853 $TESTTMP/source1a
851
854
852 Clone with existing share dir should result in pull + share
855 Clone with existing share dir should result in pull + share
853
856
854 $ hg --config share.pool=share clone source1b share-dest1b
857 $ hg --config share.pool=share clone source1b share-dest1b
855 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
858 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
856 searching for changes
859 searching for changes
857 adding changesets
860 adding changesets
858 adding manifests
861 adding manifests
859 adding file changes
862 adding file changes
860 added 4 changesets with 4 changes to 1 files (+4 heads)
863 added 4 changesets with 4 changes to 1 files (+4 heads)
861 adding remote bookmark head1
864 adding remote bookmark head1
862 adding remote bookmark head2
865 adding remote bookmark head2
863 new changesets 4a8dc1ab4c13:6bacf4683960
866 new changesets 4a8dc1ab4c13:6bacf4683960
864 updating working directory
867 updating working directory
865 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
868 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
866
869
867 $ ls share
870 $ ls share
868 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
871 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
869
872
870 $ cat share-dest1b/.hg/sharedpath; echo
873 $ cat share-dest1b/.hg/sharedpath; echo
871 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
874 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
872
875
873 We only get bookmarks from the remote, not everything in the share
876 We only get bookmarks from the remote, not everything in the share
874
877
875 $ hg -R share-dest1b bookmarks
878 $ hg -R share-dest1b bookmarks
876 head1 3:4a8dc1ab4c13
879 head1 3:4a8dc1ab4c13
877 head2 4:99f71071f117
880 head2 4:99f71071f117
878
881
879 Default path should be source, not share.
882 Default path should be source, not share.
880
883
881 $ hg -R share-dest1b config paths.default
884 $ hg -R share-dest1b config paths.default
882 $TESTTMP/source1b
885 $TESTTMP/source1b
883
886
884 Checked out revision should be head of default branch
887 Checked out revision should be head of default branch
885
888
886 $ hg -R share-dest1b log -r .
889 $ hg -R share-dest1b log -r .
887 changeset: 4:99f71071f117
890 changeset: 4:99f71071f117
888 bookmark: head2
891 bookmark: head2
889 parent: 0:b5f04eac9d8f
892 parent: 0:b5f04eac9d8f
890 user: test
893 user: test
891 date: Thu Jan 01 00:00:00 1970 +0000
894 date: Thu Jan 01 00:00:00 1970 +0000
892 summary: head2
895 summary: head2
893
896
894
897
895 Clone from unrelated repo should result in new share
898 Clone from unrelated repo should result in new share
896
899
897 $ hg --config share.pool=share clone source2 share-dest2
900 $ hg --config share.pool=share clone source2 share-dest2
898 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
901 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
899 requesting all changes
902 requesting all changes
900 adding changesets
903 adding changesets
901 adding manifests
904 adding manifests
902 adding file changes
905 adding file changes
903 added 2 changesets with 2 changes to 1 files
906 added 2 changesets with 2 changes to 1 files
904 new changesets 22aeff664783:63cf6c3dba4a
907 new changesets 22aeff664783:63cf6c3dba4a
905 searching for changes
908 searching for changes
906 no changes found
909 no changes found
907 updating working directory
910 updating working directory
908 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
911 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
909
912
910 $ ls share
913 $ ls share
911 22aeff664783fd44c6d9b435618173c118c3448e
914 22aeff664783fd44c6d9b435618173c118c3448e
912 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
915 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
913
916
914 remote naming mode works as advertised
917 remote naming mode works as advertised
915
918
916 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
919 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
917 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
920 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
918 requesting all changes
921 requesting all changes
919 adding changesets
922 adding changesets
920 adding manifests
923 adding manifests
921 adding file changes
924 adding file changes
922 added 3 changesets with 3 changes to 1 files
925 added 3 changesets with 3 changes to 1 files
923 new changesets b5f04eac9d8f:e5bfe23c0b47
926 new changesets b5f04eac9d8f:e5bfe23c0b47
924 searching for changes
927 searching for changes
925 no changes found
928 no changes found
926 adding remote bookmark bookA
929 adding remote bookmark bookA
927 updating working directory
930 updating working directory
928 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
931 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
929
932
930 $ ls shareremote
933 $ ls shareremote
931 195bb1fcdb595c14a6c13e0269129ed78f6debde
934 195bb1fcdb595c14a6c13e0269129ed78f6debde
932
935
933 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
936 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
934 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
937 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
935 requesting all changes
938 requesting all changes
936 adding changesets
939 adding changesets
937 adding manifests
940 adding manifests
938 adding file changes
941 adding file changes
939 added 6 changesets with 6 changes to 1 files (+4 heads)
942 added 6 changesets with 6 changes to 1 files (+4 heads)
940 new changesets b5f04eac9d8f:6bacf4683960
943 new changesets b5f04eac9d8f:6bacf4683960
941 searching for changes
944 searching for changes
942 no changes found
945 no changes found
943 adding remote bookmark head1
946 adding remote bookmark head1
944 adding remote bookmark head2
947 adding remote bookmark head2
945 updating working directory
948 updating working directory
946 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
949 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
947
950
948 $ ls shareremote
951 $ ls shareremote
949 195bb1fcdb595c14a6c13e0269129ed78f6debde
952 195bb1fcdb595c14a6c13e0269129ed78f6debde
950 c0d4f83847ca2a873741feb7048a45085fd47c46
953 c0d4f83847ca2a873741feb7048a45085fd47c46
951
954
request to clone a single revision is respected in sharing mode

  $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
  (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 1 files
  new changesets b5f04eac9d8f:4a8dc1ab4c13
  no changes found
  adding remote bookmark head1
  updating working directory
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ hg -R share-1arev log -G
  @  changeset:   1:4a8dc1ab4c13
  |  bookmark:    head1
  |  tag:         tip
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     head1
  |
  o  changeset:   0:b5f04eac9d8f
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     initial


making another clone should only pull down requested rev

  $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
  (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files (+1 heads)
  adding remote bookmark head1
  adding remote bookmark head2
  new changesets 99f71071f117
  updating working directory
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ hg -R share-1brev log -G
  @  changeset:   2:99f71071f117
  |  bookmark:    head2
  |  tag:         tip
  |  parent:      0:b5f04eac9d8f
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     head2
  |
  | o  changeset:   1:4a8dc1ab4c13
  |/   bookmark:    head1
  |    user:        test
  |    date:        Thu Jan 01 00:00:00 1970 +0000
  |    summary:     head1
  |
  o  changeset:   0:b5f04eac9d8f
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     initial


Request to clone a single branch is respected in sharing mode

  $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
  (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 1 files
  new changesets b5f04eac9d8f:5f92a6c1a1b1
  no changes found
  updating working directory
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ hg -R share-1bbranch1 log -G
  o  changeset:   1:5f92a6c1a1b1
  |  branch:      branch1
  |  tag:         tip
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     branch1
  |
  @  changeset:   0:b5f04eac9d8f
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     initial


  $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
  (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files (+1 heads)
  new changesets 6bacf4683960
  updating working directory
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ hg -R share-1bbranch2 log -G
  o  changeset:   2:6bacf4683960
  |  branch:      branch2
  |  tag:         tip
  |  parent:      0:b5f04eac9d8f
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     branch2
  |
  | o  changeset:   1:5f92a6c1a1b1
  |/   branch:      branch1
  |    user:        test
  |    date:        Thu Jan 01 00:00:00 1970 +0000
  |    summary:     branch1
  |
  @  changeset:   0:b5f04eac9d8f
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     initial


-U is respected in share clone mode

  $ hg --config share.pool=share clone -U source1a share-1anowc
  (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
  searching for changes
  no changes found
  adding remote bookmark bookA

  $ ls share-1anowc

Test that auto sharing doesn't cause failure of "hg clone local remote"

  $ cd $TESTTMP
  $ hg -R a id -r 0
  acb14030fe0a
  $ hg id -R remote -r 0
  abort: repository remote not found!
  [255]
  $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
  $ hg -R remote id -r 0
  acb14030fe0a

Cloning into pooled storage doesn't race (issue5104)

  $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
  $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
  $ wait

  $ hg -R share-destrace1 log -r tip
  changeset:   2:e5bfe23c0b47
  bookmark:    bookA
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1a


  $ hg -R share-destrace2 log -r tip
  changeset:   2:e5bfe23c0b47
  bookmark:    bookA
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1a

One repo should be new, the other should be shared from the pool. We
don't care which is which, so we just make sure we always print the
one containing "new pooled" first, then the one containing "existing
pooled".

  $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
  (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 3 changesets with 3 changes to 1 files
  new changesets b5f04eac9d8f:e5bfe23c0b47
  searching for changes
  no changes found
  adding remote bookmark bookA
  updating working directory
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
  (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
  searching for changes
  no changes found
  adding remote bookmark bookA
  updating working directory
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

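The race check above drives two clones through the lockdelay.py test
extension, using the HGPRELOCKDELAY/HGPOSTLOCKDELAY environment variables
to stretch the window in which both processes contend for the pool lock.
A rough sketch of the idea, as a wrapper around lock acquisition (an
illustration only, not the actual extension code):

    import os
    import time

    def delayedlock(orig, *args, **kwargs):
        # Sleep before taking the lock so the competing clone can get in
        # first, then after, so the other side observes the held lock.
        time.sleep(float(os.environ.get('HGPRELOCKDELAY', '0')))
        ret = orig(*args, **kwargs)
        time.sleep(float(os.environ.get('HGPOSTLOCKDELAY', '0')))
        return ret
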
SEC: check for unsafe ssh url

  $ cat >> $HGRCPATH << EOF
  > [ui]
  > ssh = sh -c "read l; read l; read l"
  > EOF

  $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
  abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
  [255]
  $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
  abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
  [255]
  $ hg clone 'ssh://fakehost|touch%20owned/path'
  abort: no suitable response from remote hg!
  [255]
  $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
  abort: no suitable response from remote hg!
  [255]

  $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
  abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
  [255]

#if windows
  $ hg clone "ssh://%26touch%20owned%20/" --debug
  running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
  sending hello command
  sending between command
  abort: no suitable response from remote hg!
  [255]
  $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
  running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
  sending hello command
  sending between command
  abort: no suitable response from remote hg!
  [255]
#else
  $ hg clone "ssh://%3btouch%20owned%20/" --debug
  running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
  sending hello command
  sending between command
  abort: no suitable response from remote hg!
  [255]
  $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
  running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
  sending hello command
  sending between command
  abort: no suitable response from remote hg!
  [255]
#endif

  $ hg clone "ssh://v-alid.example.com/" --debug
  running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
  sending hello command
  sending between command
  abort: no suitable response from remote hg!
  [255]

We should not have created a file named owned - if it exists, the
attack succeeded.
  $ if test -f owned; then echo 'you got owned'; fi

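What these cases guard against: ssh(1) treats a leading dash in the user
or host as a command-line option, so a URL like ssh://-oProxyCommand=...
would run an attacker-chosen command. Mercurial rejects such URLs before
ever invoking ssh, percent-decoding first so %2D cannot hide the dash.
A minimal sketch of that kind of check (a hypothetical helper, not
Mercurial's actual implementation):

    try:
        from urllib.parse import unquote  # Python 3
    except ImportError:
        from urllib import unquote  # Python 2

    def is_potentially_unsafe_ssh_url(url):
        # Decode %-escapes first: 'ssh://%2Do...' must be treated like
        # 'ssh://-o...'.
        decoded = unquote(url)
        # A user/host starting with '-' would be parsed by ssh as an
        # option (e.g. -oProxyCommand=...), so refuse it outright.
        return decoded.startswith('ssh://-')
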
Cloning without fsmonitor enabled does not print a warning for small repos

  $ hg clone a fsmonitor-default
  updating to bookmark @ on branch stable
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved

Lower the warning threshold to simulate a large repo

  $ cat >> $HGRCPATH << EOF
  > [fsmonitor]
  > warn_update_file_count = 2
  > EOF

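The config above only lowers the threshold so that this three-file
working copy counts as "large". The decision the next tests exercise
boils down to something like the following (the names and default
threshold are assumptions about fsmonitor's internals, not its code):

    def should_warn(updated_file_count, fsmonitor_active,
                    warn_update_file_count=50000, warn_when_unused=True):
        # Warn only when a big update runs without fsmonitor helping.
        return (warn_when_unused
                and not fsmonitor_active
                and updated_file_count >= warn_update_file_count)
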
We should see a warning about no fsmonitor on supported platforms

#if linuxormacos no-fsmonitor
  $ hg clone a nofsmonitor
  updating to bookmark @ on branch stable
  (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
#else
  $ hg clone a nofsmonitor
  updating to bookmark @ on branch stable
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif

We should not see a warning about fsmonitor when it is enabled

#if fsmonitor
  $ hg clone a fsmonitor-enabled
  updating to bookmark @ on branch stable
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif

We can disable the fsmonitor warning

  $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
  updating to bookmark @ on branch stable
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved

Loaded fsmonitor but disabled in config should still print the warning

#if linuxormacos fsmonitor
  $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
  updating to bookmark @ on branch stable
  (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif

Warning not printed if the working directory isn't empty

  $ hg -q clone a fsmonitor-update
  (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
  $ cd fsmonitor-update
  $ hg up acb14030fe0a
  1 files updated, 0 files merged, 2 files removed, 0 files unresolved
  (leaving bookmark @)
  $ hg up cf0fe1914066
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

`hg update` from the null revision also prints the warning

  $ hg up null
  0 files updated, 0 files merged, 2 files removed, 0 files unresolved

#if linuxormacos no-fsmonitor
  $ hg up cf0fe1914066
  (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
#else
  $ hg up cf0fe1914066
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif

  $ cd ..

@@ -1,159 +1,170
from __future__ import absolute_import
import sys
import unittest

from mercurial import (
    error,
    node,
)

try:
    from mercurial import rustext
    rustext.__name__ # trigger immediate actual import
except ImportError:
    rustext = None
else:
    # this would fail already without appropriate ancestor.__package__
    from mercurial.rustext.ancestor import (
        AncestorsIterator,
        LazyAncestors,
        MissingAncestors,
    )

try:
    from mercurial.cext import parsers as cparsers
except ImportError:
    cparsers = None

# picked from test-parse-index2, copied rather than imported
# so that it stays stable even if test-parse-index2 changes or disappears.
data_non_inlined = (
    b'\x00\x00\x00\x01\x00\x00\x00\x00\x00\x01D\x19'
    b'\x00\x07e\x12\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff'
    b'\xff\xff\xff\xff\xd1\xf4\xbb\xb0\xbe\xfc\x13\xbd\x8c\xd3\x9d'
    b'\x0f\xcd\xd9;\x8c\x07\x8cJ/\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    b'\x00\x00\x00\x00\x00\x00\x01D\x19\x00\x00\x00\x00\x00\xdf\x00'
    b'\x00\x01q\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff'
    b'\xff\xff\xff\xc1\x12\xb9\x04\x96\xa4Z1t\x91\xdfsJ\x90\xf0\x9bh'
    b'\x07l&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    b'\x00\x01D\xf8\x00\x00\x00\x00\x01\x1b\x00\x00\x01\xb8\x00\x00'
    b'\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\x02\n'
    b'\x0e\xc6&\xa1\x92\xae6\x0b\x02i\xfe-\xe5\xbao\x05\xd1\xe7\x00'
    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01F'
    b'\x13\x00\x00\x00\x00\x01\xec\x00\x00\x03\x06\x00\x00\x00\x01'
    b'\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x12\xcb\xeby1'
    b'\xb6\r\x98B\xcb\x07\xbd`\x8f\x92\xd9\xc4\x84\xbdK\x00\x00\x00'
    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00'
)
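# Each entry of the parsed index is a tuple; the tests below rely only on
# positions 5 and 6, which hold the two parent revisions. The data above
# encodes a simple linear history: 0 <- 1 <- 2 <- 3.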


@unittest.skipIf(rustext is None or cparsers is None,
                 "rustext or the C Extension parsers module "
                 "ancestor relies on is not available")
class rustancestorstest(unittest.TestCase):
    """Test the correctness of binding to Rust code.

    This test is merely for the binding to Rust itself: extraction of
    Python variables, giving back the results, etc.

    It is not meant to test the algorithmic correctness of the operations
    on ancestors it provides. Hence the very simple embedded index data is
    good enough.

    Algorithmic correctness is asserted by the Rust unit tests.
    """

    def parseindex(self):
        return cparsers.parse_index2(data_non_inlined, False)[0]

    def testiteratorrevlist(self):
        idx = self.parseindex()
        # checking test assumption about the index binary data:
        self.assertEqual({i: (r[5], r[6]) for i, r in enumerate(idx)},
                         {0: (-1, -1),
                          1: (0, -1),
                          2: (1, -1),
                          3: (2, -1)})
        ait = AncestorsIterator(idx, [3], 0, True)
        self.assertEqual([r for r in ait], [3, 2, 1, 0])

        ait = AncestorsIterator(idx, [3], 0, False)
        self.assertEqual([r for r in ait], [2, 1, 0])

    def testlazyancestors(self):
        idx = self.parseindex()
        start_count = sys.getrefcount(idx) # should be 2 (see Python doc)
        self.assertEqual({i: (r[5], r[6]) for i, r in enumerate(idx)},
                         {0: (-1, -1),
                          1: (0, -1),
                          2: (1, -1),
                          3: (2, -1)})
        lazy = LazyAncestors(idx, [3], 0, True)
        # we have two more references to the index:
        # - in its inner iterator for __contains__ and __bool__
        # - in the LazyAncestors instance itself (to spawn new iterators)
        self.assertEqual(sys.getrefcount(idx), start_count + 2)

        self.assertTrue(2 in lazy)
        self.assertTrue(bool(lazy))
        self.assertEqual(list(lazy), [3, 2, 1, 0])
        # a second time to validate that we spawn new iterators
        self.assertEqual(list(lazy), [3, 2, 1, 0])

        # now let's watch the refcounts closer
        ait = iter(lazy)
        self.assertEqual(sys.getrefcount(idx), start_count + 3)
        del ait
        self.assertEqual(sys.getrefcount(idx), start_count + 2)
        del lazy
        self.assertEqual(sys.getrefcount(idx), start_count)

        # let's check bool for an empty one
        self.assertFalse(LazyAncestors(idx, [0], 0, False))

    def testmissingancestors(self):
        idx = self.parseindex()
        missanc = MissingAncestors(idx, [1])
        self.assertTrue(missanc.hasbases())
        self.assertEqual(missanc.missingancestors([3]), [2, 3])
        missanc.addbases({2})
        self.assertEqual(missanc.bases(), {1, 2})
        self.assertEqual(missanc.missingancestors([3]), [3])
        self.assertEqual(missanc.basesheads(), {2})

    def testmissingancestorsremove(self):
        idx = self.parseindex()
        missanc = MissingAncestors(idx, [1])
        revs = {0, 1, 2, 3}
        missanc.removeancestorsfrom(revs)
        self.assertEqual(revs, {2, 3})

    def testrefcount(self):
        idx = self.parseindex()
        start_count = sys.getrefcount(idx)

        # refcount increases upon iterator init...
        ait = AncestorsIterator(idx, [3], 0, True)
        self.assertEqual(sys.getrefcount(idx), start_count + 1)
        self.assertEqual(next(ait), 3)

        # and decreases once the iterator is removed
        del ait
        self.assertEqual(sys.getrefcount(idx), start_count)

        # and removing ref to the index after iterator init is no issue
        ait = AncestorsIterator(idx, [3], 0, True)
        del idx
        self.assertEqual(list(ait), [3, 2, 1, 0])

    def testgrapherror(self):
        data = (data_non_inlined[:64 + 27] +
                b'\xf2' +
                data_non_inlined[64 + 28:])
        idx = cparsers.parse_index2(data, False)[0]
        with self.assertRaises(rustext.GraphError) as arc:
            AncestorsIterator(idx, [1], -1, False)
        exc = arc.exception
        self.assertIsInstance(exc, ValueError)
        # rust-cpython issues appropriate str instances for Python 2 and 3
        self.assertEqual(exc.args, ('ParentOutOfRange', 1))

    def testwdirunsupported(self):
        # trying to access ancestors of the working directory raises
        # WdirUnsupported directly
        idx = self.parseindex()
        with self.assertRaises(error.WdirUnsupported):
            list(AncestorsIterator(idx, [node.wdirrev], -1, False))

if __name__ == '__main__':
    import silenttestrunner
    silenttestrunner.main(__name__)
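
As with the other unittest-based files in Mercurial's tests directory,
this one can be run directly (python test-rust-ancestor.py) or through
the harness (./run-tests.py test-rust-ancestor.py); the skipIf decorator
above turns it into a no-op when the Rust extension or the C parsers
module is not built.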