revlog: fix resolution of revlog version 0...
Yuya Nishihara
r41355:c953c2a9 stable
@@ -1,2643 +1,2648 @@
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

from __future__ import absolute_import

import collections
import contextlib
import errno
import os
import struct
import zlib

# import stuff from node for others to import from revlog
from .node import (
    bin,
    hex,
    nullhex,
    nullid,
    nullrev,
    short,
    wdirfilenodeids,
    wdirhex,
    wdirid,
    wdirrev,
)
from .i18n import _
from .revlogutils.constants import (
    FLAG_GENERALDELTA,
    FLAG_INLINE_DATA,
    REVIDX_DEFAULT_FLAGS,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
    REVIDX_FLAGS_ORDER,
    REVIDX_ISCENSORED,
    REVIDX_KNOWN_FLAGS,
    REVIDX_RAWTEXT_CHANGING_FLAGS,
    REVLOGV0,
    REVLOGV1,
    REVLOGV1_FLAGS,
    REVLOGV2,
    REVLOGV2_FLAGS,
    REVLOG_DEFAULT_FLAGS,
    REVLOG_DEFAULT_FORMAT,
    REVLOG_DEFAULT_VERSION,
)
from .thirdparty import (
    attr,
)
from . import (
    ancestor,
    dagop,
    error,
    mdiff,
    policy,
    pycompat,
    repository,
    templatefilters,
    util,
)
from .revlogutils import (
    deltas as deltautil,
)
from .utils import (
    interfaceutil,
    storageutil,
    stringutil,
)

# blanked usage of all the names to prevent pyflakes constraints
# We need these names available in the module for extensions.
REVLOGV0
REVLOGV1
REVLOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
REVLOG_DEFAULT_FORMAT
REVLOG_DEFAULT_VERSION
REVLOGV1_FLAGS
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_KNOWN_FLAGS
REVIDX_RAWTEXT_CHANGING_FLAGS

parsers = policy.importmod(r'parsers')
try:
    from . import rustext
    rustext.__name__ # force actual import (see hgdemandimport)
except ImportError:
    rustext = None

# Aliased for performance.
_zlibdecompress = zlib.decompress

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

# Store flag processors (cf. 'addflagprocessor()' to register)
_flagprocessors = {
    REVIDX_ISCENSORED: None,
}

# Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    return text, False

def ellipsiswriteprocessor(rl, text):
    return text, False

def ellipsisrawprocessor(rl, text):
    return False

ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)

def addflagprocessor(flag, processor):
    """Register a flag processor on a revision data flag.

    Invariant:
    - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
      and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
    - Only one flag processor can be registered on a specific flag.
    - flagprocessors must be 3-tuples of functions (read, write, raw) with the
      following signatures:
          - (read)  f(self, rawtext) -> text, bool
          - (write) f(self, text) -> rawtext, bool
          - (raw)   f(self, rawtext) -> bool
      "text" is presented to the user. "rawtext" is stored in revlog data, not
      directly visible to the user.
      The boolean returned by these transforms is used to determine whether
      the returned text can be used for hash integrity checking. For example,
      if "write" returns False, then "text" is used to generate hash. If
      "write" returns True, that basically means "rawtext" returned by "write"
      should be used to generate hash. Usually, "write" and "read" return
      different booleans. And "raw" returns the same boolean as "write".

    Note: The 'raw' transform is used for changegroup generation and in some
    debug commands. In this case the transform only indicates whether the
    contents can be used for hash integrity checks.
    """
    _insertflagprocessor(flag, processor, _flagprocessors)

def _insertflagprocessor(flag, processor, flagprocessors):
    if not flag & REVIDX_KNOWN_FLAGS:
        msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
        raise error.ProgrammingError(msg)
    if flag not in REVIDX_FLAGS_ORDER:
        msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
        raise error.ProgrammingError(msg)
    if flag in flagprocessors:
        msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
        raise error.Abort(msg)
    flagprocessors[flag] = processor

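# An illustrative sketch, not upstream code: how an extension might wire a
# processor into addflagprocessor() above. REVIDX_EXTSTORED is a real flag,
# but these callbacks and the _decode/_encode helpers are hypothetical.
#
#   def _readproc(rl, rawtext):
#       return _decode(rawtext), True   # returned text usable for hashing
#   def _writeproc(rl, text):
#       return _encode(text), False     # hash is computed from "text"
#   def _rawproc(rl, rawtext):
#       return False                    # same boolean as "write"
#
#   addflagprocessor(REVIDX_EXTSTORED, (_readproc, _writeproc, _rawproc))
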
def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    if (type & ~REVIDX_KNOWN_FLAGS) != 0:
        raise ValueError('unknown revlog index flags')
    return int(int(offset) << 16 | type)

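# A worked example, not upstream code: the first index field packs the
# data-file offset into the high bits and a 16-bit flag field into the
# low bits, so the helpers above round-trip as:
#
#   offset_type(1024, 0) == 1024 << 16 == 0x4000000
#   getoffset(0x4000000) == 1024
#   gettype(0x4000000)   == 0
#
# offset_type() rejects any flag bits outside REVIDX_KNOWN_FLAGS.
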
@attr.s(slots=True, frozen=True)
class _revisioninfo(object):
    """Information about a revision that allows building its fulltext
    node: expected hash of the revision
    p1, p2: parent revs of the revision
    btext: built text cache consisting of a one-element list
    cachedelta: (baserev, uncompressed_delta) or None
    flags: flags associated to the revision storage

    One of btext[0] or cachedelta must be set.
    """
    node = attr.ib()
    p1 = attr.ib()
    p2 = attr.ib()
    btext = attr.ib()
    textlen = attr.ib()
    cachedelta = attr.ib()
    flags = attr.ib()

@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)

@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)

# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = struct.Struct(">4l20s20s20s")
indexformatv0_pack = indexformatv0.pack
indexformatv0_unpack = indexformatv0.unpack

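# An illustrative sketch, not upstream code: a v0 index entry is 76 bytes
# (indexformatv0.size), so given bytes 'data' from a v0 index file:
#
#   offset, clen, base, link, p1, p2, node = indexformatv0_unpack(
#       data[:indexformatv0.size])
#
# Unlike the v1 format, the parents are stored as 20-byte nodeids rather
# than revision numbers, which is why parseindex() below translates them
# through its growing 'nodemap'.
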
class revlogoldindex(list):
    def __getitem__(self, i):
        if i == -1:
            return (0, 0, 0, -1, -1, -1, -1, nullid)
        return list.__getitem__(self, i)

class revlogoldio(object):
    def __init__(self):
        self.size = indexformatv0.size

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = indexformatv0_unpack(cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        return revlogoldindex(index), nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise error.RevlogError(_('index entry flags need revlog '
                                      'version 1'))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return indexformatv0_pack(*e2)

# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = struct.Struct(">Qiiiiii20s12x")
indexformatng_pack = indexformatng.pack
versionformat = struct.Struct(">I")
versionformat_pack = versionformat.pack
versionformat_unpack = versionformat.unpack

# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7fffffff

class revlogio(object):
    def __init__(self):
        self.size = indexformatng.size

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache

    def packentry(self, entry, node, version, rev):
        p = indexformatng_pack(*entry)
        if rev == 0:
            p = versionformat_pack(version) + p[4:]
        return p

class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.

    If mmaplargeindex is True, and an mmapindexthreshold is set, the
    index will be mmapped rather than read if it is larger than the
    configured threshold.

    If censorable is True, the revlog can have censored revisions.
    """
    def __init__(self, opener, indexfile, datafile=None, checkambig=False,
                 mmaplargeindex=False, censorable=False):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = datafile or (indexfile[:-2] + ".d")
        self.opener = opener
        # When True, indexfile is opened with checkambig=True at writing, to
        # avoid file stat ambiguity.
        self._checkambig = checkambig
        self._mmaplargeindex = mmaplargeindex
        self._censorable = censorable
        # 3-tuple of (node, rev, text) for a raw revision.
        self._revisioncache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, '')
        # How much data to read and cache into the raw revlog data cache.
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._deltabothparents = True
        self.index = []
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        self._nodecache = {nullid: nullrev}
        self._nodepos = None
        self._compengine = 'zlib'
        self._maxdeltachainspan = -1
        self._withsparseread = False
        self._sparserevlog = False
        self._srdensitythreshold = 0.50
        self._srmingapsize = 262144

        # Make copy of flag processors so each revlog instance can support
        # custom flags.
        self._flagprocessors = dict(_flagprocessors)

        # 2-tuple of file handles being used for active writing.
        self._writinghandles = None

        self._loadindex()

    def _loadindex(self):
        mmapindexthreshold = None
        opts = getattr(self.opener, 'options', {}) or {}

        if 'revlogv2' in opts:
            newversionflags = REVLOGV2 | FLAG_INLINE_DATA
        elif 'revlogv1' in opts:
            newversionflags = REVLOGV1 | FLAG_INLINE_DATA
            if 'generaldelta' in opts:
                newversionflags |= FLAG_GENERALDELTA
+        elif getattr(self.opener, 'options', None) is not None:
+            # If options provided but no 'revlog*' found, the repository
+            # would have no 'requires' file in it, which means we have to
+            # stick to the old format.
+            newversionflags = REVLOGV0
        else:
            newversionflags = REVLOG_DEFAULT_VERSION

        if 'chunkcachesize' in opts:
            self._chunkcachesize = opts['chunkcachesize']
        if 'maxchainlen' in opts:
            self._maxchainlen = opts['maxchainlen']
        if 'deltabothparents' in opts:
            self._deltabothparents = opts['deltabothparents']
        self._lazydeltabase = bool(opts.get('lazydeltabase', False))
        if 'compengine' in opts:
            self._compengine = opts['compengine']
        if 'maxdeltachainspan' in opts:
            self._maxdeltachainspan = opts['maxdeltachainspan']
        if self._mmaplargeindex and 'mmapindexthreshold' in opts:
            mmapindexthreshold = opts['mmapindexthreshold']
        self._sparserevlog = bool(opts.get('sparse-revlog', False))
        withsparseread = bool(opts.get('with-sparse-read', False))
        # sparse-revlog forces sparse-read
        self._withsparseread = self._sparserevlog or withsparseread
        if 'sparse-read-density-threshold' in opts:
            self._srdensitythreshold = opts['sparse-read-density-threshold']
        if 'sparse-read-min-gap-size' in opts:
            self._srmingapsize = opts['sparse-read-min-gap-size']
        if opts.get('enableellipsis'):
            self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor

        # revlog v0 doesn't have flag processors
        for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
            _insertflagprocessor(flag, processor, self._flagprocessors)

        if self._chunkcachesize <= 0:
            raise error.RevlogError(_('revlog chunk cache size %r is not '
                                      'greater than 0') % self._chunkcachesize)
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise error.RevlogError(_('revlog chunk cache size %r is not a '
                                      'power of 2') % self._chunkcachesize)

        indexdata = ''
        self._initempty = True
        try:
            with self._indexfp() as f:
                if (mmapindexthreshold is not None and
                    self.opener.fstat(f).st_size >= mmapindexthreshold):
                    # TODO: should .close() to release resources without
                    # relying on Python GC
                    indexdata = util.buffer(util.mmapread(f))
                else:
                    indexdata = f.read()
            if len(indexdata) > 0:
                versionflags = versionformat_unpack(indexdata[:4])[0]
                self._initempty = False
            else:
                versionflags = newversionflags
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

            versionflags = newversionflags

        self.version = versionflags

        flags = versionflags & ~0xFFFF
        fmt = versionflags & 0xFFFF

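        # A worked example, not upstream code: with the default header
        # REVLOGV1 | FLAG_INLINE_DATA (0x10001), the split above gives
        # fmt == 1 (REVLOGV1) and flags == 0x10000 (FLAG_INLINE_DATA).
        # A non-empty v0 index starts with its first entry's zero offset
        # field, so both fmt and flags resolve to 0 (REVLOGV0).
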
464 if fmt == REVLOGV0:
469 if fmt == REVLOGV0:
465 if flags:
470 if flags:
466 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
471 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
467 'revlog %s') %
472 'revlog %s') %
468 (flags >> 16, fmt, self.indexfile))
473 (flags >> 16, fmt, self.indexfile))
469
474
470 self._inline = False
475 self._inline = False
471 self._generaldelta = False
476 self._generaldelta = False
472
477
473 elif fmt == REVLOGV1:
478 elif fmt == REVLOGV1:
474 if flags & ~REVLOGV1_FLAGS:
479 if flags & ~REVLOGV1_FLAGS:
475 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
480 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
476 'revlog %s') %
481 'revlog %s') %
477 (flags >> 16, fmt, self.indexfile))
482 (flags >> 16, fmt, self.indexfile))
478
483
479 self._inline = versionflags & FLAG_INLINE_DATA
484 self._inline = versionflags & FLAG_INLINE_DATA
480 self._generaldelta = versionflags & FLAG_GENERALDELTA
485 self._generaldelta = versionflags & FLAG_GENERALDELTA
481
486
482 elif fmt == REVLOGV2:
487 elif fmt == REVLOGV2:
483 if flags & ~REVLOGV2_FLAGS:
488 if flags & ~REVLOGV2_FLAGS:
484 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
489 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
485 'revlog %s') %
490 'revlog %s') %
486 (flags >> 16, fmt, self.indexfile))
491 (flags >> 16, fmt, self.indexfile))
487
492
488 self._inline = versionflags & FLAG_INLINE_DATA
493 self._inline = versionflags & FLAG_INLINE_DATA
489 # generaldelta implied by version 2 revlogs.
494 # generaldelta implied by version 2 revlogs.
490 self._generaldelta = True
495 self._generaldelta = True
491
496
492 else:
497 else:
493 raise error.RevlogError(_('unknown version (%d) in revlog %s') %
498 raise error.RevlogError(_('unknown version (%d) in revlog %s') %
494 (fmt, self.indexfile))
499 (fmt, self.indexfile))
495
500
496 self._storedeltachains = True
501 self._storedeltachains = True
497
502
498 self._io = revlogio()
503 self._io = revlogio()
499 if self.version == REVLOGV0:
504 if self.version == REVLOGV0:
500 self._io = revlogoldio()
505 self._io = revlogoldio()
501 try:
506 try:
502 d = self._io.parseindex(indexdata, self._inline)
507 d = self._io.parseindex(indexdata, self._inline)
503 except (ValueError, IndexError):
508 except (ValueError, IndexError):
504 raise error.RevlogError(_("index %s is corrupted") %
509 raise error.RevlogError(_("index %s is corrupted") %
505 self.indexfile)
510 self.indexfile)
506 self.index, nodemap, self._chunkcache = d
511 self.index, nodemap, self._chunkcache = d
507 if nodemap is not None:
512 if nodemap is not None:
508 self.nodemap = self._nodecache = nodemap
513 self.nodemap = self._nodecache = nodemap
509 if not self._chunkcache:
514 if not self._chunkcache:
510 self._chunkclear()
515 self._chunkclear()
511 # revnum -> (chain-length, sum-delta-length)
516 # revnum -> (chain-length, sum-delta-length)
512 self._chaininfocache = {}
517 self._chaininfocache = {}
513 # revlog header -> revlog compressor
518 # revlog header -> revlog compressor
514 self._decompressors = {}
519 self._decompressors = {}
515
520
516 @util.propertycache
521 @util.propertycache
517 def _compressor(self):
522 def _compressor(self):
518 return util.compengines[self._compengine].revlogcompressor()
523 return util.compengines[self._compengine].revlogcompressor()
519
524
520 def _indexfp(self, mode='r'):
525 def _indexfp(self, mode='r'):
521 """file object for the revlog's index file"""
526 """file object for the revlog's index file"""
522 args = {r'mode': mode}
527 args = {r'mode': mode}
523 if mode != 'r':
528 if mode != 'r':
524 args[r'checkambig'] = self._checkambig
529 args[r'checkambig'] = self._checkambig
525 if mode == 'w':
530 if mode == 'w':
526 args[r'atomictemp'] = True
531 args[r'atomictemp'] = True
527 return self.opener(self.indexfile, **args)
532 return self.opener(self.indexfile, **args)
528
533
529 def _datafp(self, mode='r'):
534 def _datafp(self, mode='r'):
530 """file object for the revlog's data file"""
535 """file object for the revlog's data file"""
531 return self.opener(self.datafile, mode=mode)
536 return self.opener(self.datafile, mode=mode)
532
537
533 @contextlib.contextmanager
538 @contextlib.contextmanager
534 def _datareadfp(self, existingfp=None):
539 def _datareadfp(self, existingfp=None):
535 """file object suitable to read data"""
540 """file object suitable to read data"""
536 # Use explicit file handle, if given.
541 # Use explicit file handle, if given.
537 if existingfp is not None:
542 if existingfp is not None:
538 yield existingfp
543 yield existingfp
539
544
540 # Use a file handle being actively used for writes, if available.
545 # Use a file handle being actively used for writes, if available.
541 # There is some danger to doing this because reads will seek the
546 # There is some danger to doing this because reads will seek the
542 # file. However, _writeentry() performs a SEEK_END before all writes,
547 # file. However, _writeentry() performs a SEEK_END before all writes,
543 # so we should be safe.
548 # so we should be safe.
544 elif self._writinghandles:
549 elif self._writinghandles:
545 if self._inline:
550 if self._inline:
546 yield self._writinghandles[0]
551 yield self._writinghandles[0]
547 else:
552 else:
548 yield self._writinghandles[1]
553 yield self._writinghandles[1]
549
554
550 # Otherwise open a new file handle.
555 # Otherwise open a new file handle.
551 else:
556 else:
552 if self._inline:
557 if self._inline:
553 func = self._indexfp
558 func = self._indexfp
554 else:
559 else:
555 func = self._datafp
560 func = self._datafp
556 with func() as fp:
561 with func() as fp:
557 yield fp
562 yield fp
558
563
559 def tip(self):
564 def tip(self):
560 return self.node(len(self.index) - 1)
565 return self.node(len(self.index) - 1)
561 def __contains__(self, rev):
566 def __contains__(self, rev):
562 return 0 <= rev < len(self)
567 return 0 <= rev < len(self)
563 def __len__(self):
568 def __len__(self):
564 return len(self.index)
569 return len(self.index)
565 def __iter__(self):
570 def __iter__(self):
566 return iter(pycompat.xrange(len(self)))
571 return iter(pycompat.xrange(len(self)))
567 def revs(self, start=0, stop=None):
572 def revs(self, start=0, stop=None):
568 """iterate over all rev in this revlog (from start to stop)"""
573 """iterate over all rev in this revlog (from start to stop)"""
569 return storageutil.iterrevs(len(self), start=start, stop=stop)
574 return storageutil.iterrevs(len(self), start=start, stop=stop)
570
575
571 @util.propertycache
576 @util.propertycache
572 def nodemap(self):
577 def nodemap(self):
573 if self.index:
578 if self.index:
574 # populate mapping down to the initial node
579 # populate mapping down to the initial node
575 node0 = self.index[0][7] # get around changelog filtering
580 node0 = self.index[0][7] # get around changelog filtering
576 self.rev(node0)
581 self.rev(node0)
577 return self._nodecache
582 return self._nodecache
578
583
579 def hasnode(self, node):
584 def hasnode(self, node):
580 try:
585 try:
581 self.rev(node)
586 self.rev(node)
582 return True
587 return True
583 except KeyError:
588 except KeyError:
584 return False
589 return False
585
590
586 def candelta(self, baserev, rev):
591 def candelta(self, baserev, rev):
587 """whether two revisions (baserev, rev) can be delta-ed or not"""
592 """whether two revisions (baserev, rev) can be delta-ed or not"""
588 # Disable delta if either rev requires a content-changing flag
593 # Disable delta if either rev requires a content-changing flag
589 # processor (ex. LFS). This is because such flag processor can alter
594 # processor (ex. LFS). This is because such flag processor can alter
590 # the rawtext content that the delta will be based on, and two clients
595 # the rawtext content that the delta will be based on, and two clients
591 # could have a same revlog node with different flags (i.e. different
596 # could have a same revlog node with different flags (i.e. different
592 # rawtext contents) and the delta could be incompatible.
597 # rawtext contents) and the delta could be incompatible.
593 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
598 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
594 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
599 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
595 return False
600 return False
596 return True
601 return True
597
602
598 def clearcaches(self):
603 def clearcaches(self):
599 self._revisioncache = None
604 self._revisioncache = None
600 self._chainbasecache.clear()
605 self._chainbasecache.clear()
601 self._chunkcache = (0, '')
606 self._chunkcache = (0, '')
602 self._pcache = {}
607 self._pcache = {}
603
608
604 try:
609 try:
605 self._nodecache.clearcaches()
610 self._nodecache.clearcaches()
606 except AttributeError:
611 except AttributeError:
607 self._nodecache = {nullid: nullrev}
612 self._nodecache = {nullid: nullrev}
608 self._nodepos = None
613 self._nodepos = None
609
614
610 def rev(self, node):
615 def rev(self, node):
611 try:
616 try:
612 return self._nodecache[node]
617 return self._nodecache[node]
613 except TypeError:
618 except TypeError:
614 raise
619 raise
615 except error.RevlogError:
620 except error.RevlogError:
616 # parsers.c radix tree lookup failed
621 # parsers.c radix tree lookup failed
617 if node == wdirid or node in wdirfilenodeids:
622 if node == wdirid or node in wdirfilenodeids:
618 raise error.WdirUnsupported
623 raise error.WdirUnsupported
619 raise error.LookupError(node, self.indexfile, _('no node'))
624 raise error.LookupError(node, self.indexfile, _('no node'))
620 except KeyError:
625 except KeyError:
621 # pure python cache lookup failed
626 # pure python cache lookup failed
622 n = self._nodecache
627 n = self._nodecache
623 i = self.index
628 i = self.index
624 p = self._nodepos
629 p = self._nodepos
625 if p is None:
630 if p is None:
626 p = len(i) - 1
631 p = len(i) - 1
627 else:
632 else:
628 assert p < len(i)
633 assert p < len(i)
629 for r in pycompat.xrange(p, -1, -1):
634 for r in pycompat.xrange(p, -1, -1):
630 v = i[r][7]
635 v = i[r][7]
631 n[v] = r
636 n[v] = r
632 if v == node:
637 if v == node:
633 self._nodepos = r - 1
638 self._nodepos = r - 1
634 return r
639 return r
635 if node == wdirid or node in wdirfilenodeids:
640 if node == wdirid or node in wdirfilenodeids:
636 raise error.WdirUnsupported
641 raise error.WdirUnsupported
637 raise error.LookupError(node, self.indexfile, _('no node'))
642 raise error.LookupError(node, self.indexfile, _('no node'))
638
643
639 # Accessors for index entries.
644 # Accessors for index entries.
640
645
641 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
646 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
642 # are flags.
647 # are flags.
643 def start(self, rev):
648 def start(self, rev):
644 return int(self.index[rev][0] >> 16)
649 return int(self.index[rev][0] >> 16)
645
650
646 def flags(self, rev):
651 def flags(self, rev):
647 return self.index[rev][0] & 0xFFFF
652 return self.index[rev][0] & 0xFFFF
648
653
649 def length(self, rev):
654 def length(self, rev):
650 return self.index[rev][1]
655 return self.index[rev][1]
651
656
652 def rawsize(self, rev):
657 def rawsize(self, rev):
653 """return the length of the uncompressed text for a given revision"""
658 """return the length of the uncompressed text for a given revision"""
654 l = self.index[rev][2]
659 l = self.index[rev][2]
655 if l >= 0:
660 if l >= 0:
656 return l
661 return l
657
662
658 t = self.revision(rev, raw=True)
663 t = self.revision(rev, raw=True)
659 return len(t)
664 return len(t)
660
665
661 def size(self, rev):
666 def size(self, rev):
662 """length of non-raw text (processed by a "read" flag processor)"""
667 """length of non-raw text (processed by a "read" flag processor)"""
663 # fast path: if no "read" flag processor could change the content,
668 # fast path: if no "read" flag processor could change the content,
664 # size is rawsize. note: ELLIPSIS is known to not change the content.
669 # size is rawsize. note: ELLIPSIS is known to not change the content.
665 flags = self.flags(rev)
670 flags = self.flags(rev)
666 if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
671 if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
667 return self.rawsize(rev)
672 return self.rawsize(rev)
668
673
669 return len(self.revision(rev, raw=False))
674 return len(self.revision(rev, raw=False))
670
675
671 def chainbase(self, rev):
676 def chainbase(self, rev):
672 base = self._chainbasecache.get(rev)
677 base = self._chainbasecache.get(rev)
673 if base is not None:
678 if base is not None:
674 return base
679 return base
675
680
676 index = self.index
681 index = self.index
677 iterrev = rev
682 iterrev = rev
678 base = index[iterrev][3]
683 base = index[iterrev][3]
679 while base != iterrev:
684 while base != iterrev:
680 iterrev = base
685 iterrev = base
681 base = index[iterrev][3]
686 base = index[iterrev][3]
682
687
683 self._chainbasecache[rev] = base
688 self._chainbasecache[rev] = base
684 return base
689 return base
685
690
686 def linkrev(self, rev):
691 def linkrev(self, rev):
687 return self.index[rev][4]
692 return self.index[rev][4]
688
693
689 def parentrevs(self, rev):
694 def parentrevs(self, rev):
690 try:
695 try:
691 entry = self.index[rev]
696 entry = self.index[rev]
692 except IndexError:
697 except IndexError:
693 if rev == wdirrev:
698 if rev == wdirrev:
694 raise error.WdirUnsupported
699 raise error.WdirUnsupported
695 raise
700 raise
696
701
697 return entry[5], entry[6]
702 return entry[5], entry[6]
698
703
699 # fast parentrevs(rev) where rev isn't filtered
704 # fast parentrevs(rev) where rev isn't filtered
700 _uncheckedparentrevs = parentrevs
705 _uncheckedparentrevs = parentrevs
701
706
702 def node(self, rev):
707 def node(self, rev):
703 try:
708 try:
704 return self.index[rev][7]
709 return self.index[rev][7]
705 except IndexError:
710 except IndexError:
706 if rev == wdirrev:
711 if rev == wdirrev:
707 raise error.WdirUnsupported
712 raise error.WdirUnsupported
708 raise
713 raise
709
714
710 # Derived from index values.
715 # Derived from index values.
711
716
712 def end(self, rev):
717 def end(self, rev):
713 return self.start(rev) + self.length(rev)
718 return self.start(rev) + self.length(rev)
714
719
715 def parents(self, node):
720 def parents(self, node):
716 i = self.index
721 i = self.index
717 d = i[self.rev(node)]
722 d = i[self.rev(node)]
718 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
723 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
719
724
720 def chainlen(self, rev):
725 def chainlen(self, rev):
721 return self._chaininfo(rev)[0]
726 return self._chaininfo(rev)[0]
722
727
723 def _chaininfo(self, rev):
728 def _chaininfo(self, rev):
724 chaininfocache = self._chaininfocache
729 chaininfocache = self._chaininfocache
725 if rev in chaininfocache:
730 if rev in chaininfocache:
726 return chaininfocache[rev]
731 return chaininfocache[rev]
727 index = self.index
732 index = self.index
728 generaldelta = self._generaldelta
733 generaldelta = self._generaldelta
729 iterrev = rev
734 iterrev = rev
730 e = index[iterrev]
735 e = index[iterrev]
731 clen = 0
736 clen = 0
732 compresseddeltalen = 0
737 compresseddeltalen = 0
733 while iterrev != e[3]:
738 while iterrev != e[3]:
734 clen += 1
739 clen += 1
735 compresseddeltalen += e[1]
740 compresseddeltalen += e[1]
736 if generaldelta:
741 if generaldelta:
737 iterrev = e[3]
742 iterrev = e[3]
738 else:
743 else:
739 iterrev -= 1
744 iterrev -= 1
740 if iterrev in chaininfocache:
745 if iterrev in chaininfocache:
741 t = chaininfocache[iterrev]
746 t = chaininfocache[iterrev]
742 clen += t[0]
747 clen += t[0]
743 compresseddeltalen += t[1]
748 compresseddeltalen += t[1]
744 break
749 break
745 e = index[iterrev]
750 e = index[iterrev]
746 else:
751 else:
747 # Add text length of base since decompressing that also takes
752 # Add text length of base since decompressing that also takes
748 # work. For cache hits the length is already included.
753 # work. For cache hits the length is already included.
749 compresseddeltalen += e[1]
754 compresseddeltalen += e[1]
750 r = (clen, compresseddeltalen)
755 r = (clen, compresseddeltalen)
751 chaininfocache[rev] = r
756 chaininfocache[rev] = r
752 return r
757 return r
753
758
754 def _deltachain(self, rev, stoprev=None):
759 def _deltachain(self, rev, stoprev=None):
755 """Obtain the delta chain for a revision.
760 """Obtain the delta chain for a revision.
756
761
757 ``stoprev`` specifies a revision to stop at. If not specified, we
762 ``stoprev`` specifies a revision to stop at. If not specified, we
758 stop at the base of the chain.
763 stop at the base of the chain.
759
764
760 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
765 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
761 revs in ascending order and ``stopped`` is a bool indicating whether
766 revs in ascending order and ``stopped`` is a bool indicating whether
762 ``stoprev`` was hit.
767 ``stoprev`` was hit.
763 """
768 """
764 # Try C implementation.
769 # Try C implementation.
765 try:
770 try:
766 return self.index.deltachain(rev, stoprev, self._generaldelta)
771 return self.index.deltachain(rev, stoprev, self._generaldelta)
767 except AttributeError:
772 except AttributeError:
768 pass
773 pass
769
774
770 chain = []
775 chain = []
771
776
772 # Alias to prevent attribute lookup in tight loop.
777 # Alias to prevent attribute lookup in tight loop.
773 index = self.index
778 index = self.index
774 generaldelta = self._generaldelta
779 generaldelta = self._generaldelta
775
780
776 iterrev = rev
781 iterrev = rev
777 e = index[iterrev]
782 e = index[iterrev]
778 while iterrev != e[3] and iterrev != stoprev:
783 while iterrev != e[3] and iterrev != stoprev:
779 chain.append(iterrev)
784 chain.append(iterrev)
780 if generaldelta:
785 if generaldelta:
781 iterrev = e[3]
786 iterrev = e[3]
782 else:
787 else:
783 iterrev -= 1
788 iterrev -= 1
784 e = index[iterrev]
789 e = index[iterrev]
785
790
786 if iterrev == stoprev:
791 if iterrev == stoprev:
787 stopped = True
792 stopped = True
788 else:
793 else:
789 chain.append(iterrev)
794 chain.append(iterrev)
790 stopped = False
795 stopped = False
791
796
792 chain.reverse()
797 chain.reverse()
793 return chain, stopped
798 return chain, stopped
794
799
795 def ancestors(self, revs, stoprev=0, inclusive=False):
800 def ancestors(self, revs, stoprev=0, inclusive=False):
796 """Generate the ancestors of 'revs' in reverse revision order.
801 """Generate the ancestors of 'revs' in reverse revision order.
797 Does not generate revs lower than stoprev.
802 Does not generate revs lower than stoprev.
798
803
799 See the documentation for ancestor.lazyancestors for more details."""
804 See the documentation for ancestor.lazyancestors for more details."""
800
805
801 # first, make sure start revisions aren't filtered
806 # first, make sure start revisions aren't filtered
802 revs = list(revs)
807 revs = list(revs)
803 checkrev = self.node
808 checkrev = self.node
804 for r in revs:
809 for r in revs:
805 checkrev(r)
810 checkrev(r)
806 # and we're sure ancestors aren't filtered as well
811 # and we're sure ancestors aren't filtered as well
807
812
808 if rustext is not None:
813 if rustext is not None:
809 lazyancestors = rustext.ancestor.LazyAncestors
814 lazyancestors = rustext.ancestor.LazyAncestors
810 arg = self.index
815 arg = self.index
811 elif util.safehasattr(parsers, 'rustlazyancestors'):
816 elif util.safehasattr(parsers, 'rustlazyancestors'):
812 lazyancestors = ancestor.rustlazyancestors
817 lazyancestors = ancestor.rustlazyancestors
813 arg = self.index
818 arg = self.index
814 else:
819 else:
815 lazyancestors = ancestor.lazyancestors
820 lazyancestors = ancestor.lazyancestors
816 arg = self._uncheckedparentrevs
821 arg = self._uncheckedparentrevs
817 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
822 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
818
823
819 def descendants(self, revs):
824 def descendants(self, revs):
820 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
825 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
821
826
822 def findcommonmissing(self, common=None, heads=None):
827 def findcommonmissing(self, common=None, heads=None):
823 """Return a tuple of the ancestors of common and the ancestors of heads
828 """Return a tuple of the ancestors of common and the ancestors of heads
824 that are not ancestors of common. In revset terminology, we return the
829 that are not ancestors of common. In revset terminology, we return the
825 tuple:
830 tuple:
826
831
827 ::common, (::heads) - (::common)
832 ::common, (::heads) - (::common)
828
833
829 The list is sorted by revision number, meaning it is
834 The list is sorted by revision number, meaning it is
830 topologically sorted.
835 topologically sorted.
831
836
832 'heads' and 'common' are both lists of node IDs. If heads is
837 'heads' and 'common' are both lists of node IDs. If heads is
833 not supplied, uses all of the revlog's heads. If common is not
838 not supplied, uses all of the revlog's heads. If common is not
834 supplied, uses nullid."""
839 supplied, uses nullid."""
835 if common is None:
840 if common is None:
836 common = [nullid]
841 common = [nullid]
837 if heads is None:
842 if heads is None:
838 heads = self.heads()
843 heads = self.heads()
839
844
840 common = [self.rev(n) for n in common]
845 common = [self.rev(n) for n in common]
841 heads = [self.rev(n) for n in heads]
846 heads = [self.rev(n) for n in heads]
842
847
843 # we want the ancestors, but inclusive
848 # we want the ancestors, but inclusive
844 class lazyset(object):
849 class lazyset(object):
845 def __init__(self, lazyvalues):
850 def __init__(self, lazyvalues):
846 self.addedvalues = set()
851 self.addedvalues = set()
847 self.lazyvalues = lazyvalues
852 self.lazyvalues = lazyvalues
848
853
849 def __contains__(self, value):
854 def __contains__(self, value):
850 return value in self.addedvalues or value in self.lazyvalues
855 return value in self.addedvalues or value in self.lazyvalues
851
856
852 def __iter__(self):
857 def __iter__(self):
853 added = self.addedvalues
858 added = self.addedvalues
854 for r in added:
859 for r in added:
855 yield r
860 yield r
856 for r in self.lazyvalues:
861 for r in self.lazyvalues:
857 if not r in added:
862 if not r in added:
858 yield r
863 yield r
859
864
860 def add(self, value):
865 def add(self, value):
861 self.addedvalues.add(value)
866 self.addedvalues.add(value)
862
867
863 def update(self, values):
868 def update(self, values):
864 self.addedvalues.update(values)
869 self.addedvalues.update(values)
865
870
866 has = lazyset(self.ancestors(common))
871 has = lazyset(self.ancestors(common))
867 has.add(nullrev)
872 has.add(nullrev)
868 has.update(common)
873 has.update(common)
869
874
870 # take all ancestors from heads that aren't in has
875 # take all ancestors from heads that aren't in has
871 missing = set()
876 missing = set()
872 visit = collections.deque(r for r in heads if r not in has)
877 visit = collections.deque(r for r in heads if r not in has)
873 while visit:
878 while visit:
874 r = visit.popleft()
879 r = visit.popleft()
875 if r in missing:
880 if r in missing:
876 continue
881 continue
877 else:
882 else:
878 missing.add(r)
883 missing.add(r)
879 for p in self.parentrevs(r):
884 for p in self.parentrevs(r):
880 if p not in has:
885 if p not in has:
881 visit.append(p)
886 visit.append(p)
882 missing = list(missing)
887 missing = list(missing)
883 missing.sort()
888 missing.sort()
884 return has, [self.node(miss) for miss in missing]
889 return has, [self.node(miss) for miss in missing]
885
890
886 def incrementalmissingrevs(self, common=None):
891 def incrementalmissingrevs(self, common=None):
887 """Return an object that can be used to incrementally compute the
892 """Return an object that can be used to incrementally compute the
888 revision numbers of the ancestors of arbitrary sets that are not
893 revision numbers of the ancestors of arbitrary sets that are not
889 ancestors of common. This is an ancestor.incrementalmissingancestors
894 ancestors of common. This is an ancestor.incrementalmissingancestors
890 object.
895 object.
891
896
892 'common' is a list of revision numbers. If common is not supplied, uses
897 'common' is a list of revision numbers. If common is not supplied, uses
893 nullrev.
898 nullrev.
894 """
899 """
895 if common is None:
900 if common is None:
896 common = [nullrev]
901 common = [nullrev]
897
902
898 if rustext is not None:
903 if rustext is not None:
899 # TODO: WdirUnsupported should be raised instead of GraphError
904 # TODO: WdirUnsupported should be raised instead of GraphError
900 # if common includes wdirrev
905 # if common includes wdirrev
901 return rustext.ancestor.MissingAncestors(self.index, common)
906 return rustext.ancestor.MissingAncestors(self.index, common)
902 return ancestor.incrementalmissingancestors(self.parentrevs, common)
907 return ancestor.incrementalmissingancestors(self.parentrevs, common)
903
908
    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

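    # Sketch (``rl`` is a hypothetical instance, not a name from this file):
    # findmissingrevs() and findmissing() are the revision-number and node-ID
    # flavors of the same "ancestors of heads, minus ancestors of common"
    # query:
    #
    #     revs = rl.findmissingrevs(common=[0])
    #     nodes = rl.findmissing(common=[rl.node(0)])  # same set, as node IDs
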
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'.  Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs.  If 'roots' is
        unspecified, uses nullid as the only root.  If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev: # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

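    # Hedged usage sketch (``rl``, rootnode and headnode are hypothetical):
    # nodesbetween() answers "what lies on a path from these roots to these
    # heads", returning the path plus the reachable subsets of each input:
    #
    #     nodes, outroots, outheads = rl.nodesbetween([rootnode], [headnode])
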
    def headrevs(self, revs=None):
        if revs is None:
            try:
                return self.index.headrevs()
            except AttributeError:
                return self._headrevs()
        return dagop.headrevs(revs, self.parentrevs)

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1 # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
        return [r for r, val in enumerate(ishead) if val]

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullrev
        else:
            start = self.rev(start)

        stoprevs = set(self.rev(n) for n in stop or [])

        revs = dagop.headrevssubset(self.revs, self.parentrevs, startrev=start,
                                    stoprevs=stoprevs)

        return [self.node(rev) for rev in revs]

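    # Minimal sketch (hypothetical ``rl``; not in the original file): the
    # start/stop arguments narrow the usual "nodes with no children" query.
    #
    #     allheads = rl.heads()
    #     subheads = rl.heads(start=somenode)   # heads descending from it
    #     capped = rl.heads(stop=[othernode])   # treat othernode as childless
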
    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError): # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        A revision is considered an ancestor of itself."""
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    def isancestorrev(self, a, b):
        """return True if revision a is an ancestor of revision b

        A revision is considered an ancestor of itself.

        The implementation of this is trivial but the use of
        commonancestorsheads is not."""
        if a == nullrev:
            return True
        elif a == b:
            return True
        elif a > b:
            return False
        return a in self._commonancestorsheads(a, b)

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

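    # Sketch of the ancestry helpers above (``rl`` and the node variables
    # are hypothetical):
    #
    #     rl.isancestor(na, nb)            # node-level reachability test
    #     rl.commonancestorsheads(na, nb)  # every "best" candidate
    #     rl.ancestor(na, nb)              # one deterministic winner (min)
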
    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except error.LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if "%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, error.LookupError):
                pass

    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids, as they should always be
        # full hashes
        maybewdir = wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise error.RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise error.AmbiguousPrefixLookupError(
                    id, self.indexfile, _('ambiguous identifier'))
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if nullhex.startswith(id):
                    nl.append(nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.indexfile, _('ambiguous identifier'))
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise error.LookupError(id, self.indexfile, _('no match found'))

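    # Hedged example of the lookup cascade (``rl`` is hypothetical): ints and
    # full hashes are resolved by _match(); anything shorter falls through to
    # the prefix logic in _partialmatch().
    #
    #     rl.lookup(5)        # revision number
    #     rl.lookup('5')      # str(revision number)
    #     rl.lookup('1f0e')   # hex prefix; raises AmbiguousPrefixLookupError
    #                         # when more than one node matches
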
    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""
        def isvalid(prefix):
            try:
                node = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if node is None:
                raise error.LookupError(node, self.indexfile, _('no node'))
            return True

        def maybewdir(prefix):
            return all(c == 'f' for c in prefix)

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != wdirid:
                    raise error.LookupError(node, self.indexfile, _('no node'))
            except AttributeError:
                # Fall through to pure code
                pass

        if node == wdirid:
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, 41):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)

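    # Usage sketch (hypothetical ``rl``): shortest() grows the prefix until
    # it is unambiguous and cannot be confused with the all-'f' wdir id; it
    # is what short-hash display at the template level builds on.
    #
    #     prefix = rl.shortest(somenode, minlength=4)
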
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return storageutil.hashrevisionsha1(text, p1, p2) != node

    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.

        Raises if the requested number of bytes could not be read.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)

        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            startoffset = offset - realoffset
            if len(d) - startoffset < length:
                raise error.RevlogError(
                    _('partial read of revlog %s; expected %d bytes from '
                      'offset %d, got %d') %
                    (self.indexfile if self._inline else self.datafile,
                     length, realoffset, len(d) - startoffset))

            return util.buffer(d, startoffset, length)

        if len(d) < length:
            raise error.RevlogError(
                _('partial read of revlog %s; expected %d bytes from offset '
                  '%d, got %d') %
                (self.indexfile if self._inline else self.datafile,
                 length, offset, len(d)))

        return d

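    # Worked example of the alignment math above (illustrative values): with
    # cachesize = 65536, a request for offset=70000, length=100 gives
    #
    #     realoffset = 70000 & ~65535 = 65536
    #     reallength = ((70000 + 100 + 65536) & ~65535) - 65536
    #                = 131072 - 65536 = 65536
    #
    # so one aligned window around the request is read and cached, and
    # util.buffer() slices the 100 requested bytes back out of it.
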
    def _getsegment(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._readsegment(offset, length, df=df)

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)

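    # Worked example of the inline adjustment above (numbers illustrative):
    # in an inline revlog, index entries and data chunks are interleaved in
    # one file, so rev r's data sits after r + 1 index records. With a
    # 64-byte index entry (the v1 format) and start(3) == 500:
    #
    #     physical offset of rev 3's chunk = 500 + (3 + 1) * 64 = 756
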
    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])

    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = deltautil.slicechunk(self, revs,
                                                targetsize=targetsize)

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l

    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

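    # Illustrative contrast (revision numbers hypothetical): with
    # generaldelta the delta base is stored explicitly in index[rev][3], so
    # deltaparent(7) might be 2; without it the base is implicit and
    # deltaparent(7) is always 6 (or nullrev when rev 7 is stored as a full
    # snapshot, i.e. base == rev).
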
    def issnapshot(self, rev):
        """tells whether rev is a snapshot
        """
        if not self._sparserevlog:
            return self.deltaparent(rev) == nullrev
        elif util.safehasattr(self.index, 'issnapshot'):
            # directly assign the method to cache the testing and access
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        base = entry[3]
        if base == rev:
            return True
        if base == nullrev:
            return True
        p1 = entry[5]
        p2 = entry[6]
        if base == p1 or base == p2:
            return False
        return self.issnapshot(base)

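    # Sketch of the recursion above (hypothetical chain): if rev 9 deltas
    # against rev 4 and rev 4 is an intermediate snapshot (its own base is
    # nullrev), then issnapshot(9) is True only when 4 is not one of 9's
    # parents; a delta against a parent is an ordinary delta, not a
    # snapshot.
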
    def snapshotdepth(self, rev):
        """number of snapshots in the chain before this one"""
        if not self.issnapshot(rev):
            raise error.ProgrammingError('revision %d not a snapshot' % rev)
        return len(self._deltachain(rev)[0]) - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.revision(rev1, raw=True),
                              self.revision(rev2, raw=True))

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        cachedrev = None
        flags = None
        rawtext = None
        if node == nullid:
            return ""
        if self._revisioncache:
            if self._revisioncache[0] == node:
                # _cache only stores rawtext
                if raw:
                    return self._revisioncache[2]
                # duplicated, but good for perf
                if rev is None:
                    rev = self.rev(node)
                if flags is None:
                    flags = self.flags(rev)
                # no extra flags set, no flag processor runs, text = rawtext
                if flags == REVIDX_DEFAULT_FLAGS:
                    return self._revisioncache[2]
                # rawtext is reusable. need to run flag processor
                rawtext = self._revisioncache[2]

            cachedrev = self._revisioncache[1]

        # look up what we need to read
        if rawtext is None:
            if rev is None:
                rev = self.rev(node)

            chain, stopped = self._deltachain(rev, stoprev=cachedrev)
            if stopped:
                rawtext = self._revisioncache[2]

            # drop cache to save memory
            self._revisioncache = None

            targetsize = None
            rawsize = self.index[rev][2]
            if 0 <= rawsize:
                targetsize = 4 * rawsize

            bins = self._chunks(chain, df=_df, targetsize=targetsize)
            if rawtext is None:
                rawtext = bytes(bins[0])
                bins = bins[1:]

            rawtext = mdiff.patches(rawtext, bins)
            self._revisioncache = (node, rev, rawtext)

        if flags is None:
            if rev is None:
                rev = self.rev(node)
            flags = self.flags(rev)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text

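    # Hedged usage sketch (``rl`` hypothetical): raw=True returns the stored
    # bytes with no flag transforms applied, which is what changegroup
    # generation wants; the default runs registered flag processors first.
    #
    #     text = rl.revision(somenode)              # fulltext, transformed
    #     stored = rl.revision(somenode, raw=True)  # exact stored rawtext
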
    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return storageutil.hashrevisionsha1(text, p1, p2)

    def _processflags(self, text, flags, operation, raw=False):
        """Inspect revision data flags and apply transforms defined by
        registered flag processors.

        ``text`` - the revision data to process
        ``flags`` - the revision flags
        ``operation`` - the operation being performed (read or write)
        ``raw`` - an optional argument describing if the raw transform should be
        applied.

        This method processes the flags in the order (or reverse order if
        ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
        flag processors registered for present flags. The order of flags defined
        in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.

        Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
        processed text and ``validatehash`` is a bool indicating whether the
        returned text should be checked for hash integrity.

        Note: If the ``raw`` argument is set, it has precedence over the
        operation and will only update the value of ``validatehash``.
        """
        # fast path: no flag processors will run
        if flags == 0:
            return text, True
        if operation not in ('read', 'write'):
            raise error.ProgrammingError(_("invalid '%s' operation") %
                                         operation)
        # Check all flags are known.
        if flags & ~REVIDX_KNOWN_FLAGS:
            raise error.RevlogError(_("incompatible revision flag '%#x'") %
                                    (flags & ~REVIDX_KNOWN_FLAGS))
        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply the
            # related operation transform and update result tuple.
            if flag & flags:
                vhash = True

                if flag not in self._flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise error.RevlogError(message)

                processor = self._flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else: # write operation
                        text, vhash = writetransform(self, text)
                    validatehash = validatehash and vhash

        return text, validatehash

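    # Sketch of a flag processor as consumed above (names illustrative, not
    # an API excerpt): each registered processor is a (readtransform,
    # writetransform, rawtransform) triple, where the read/write transforms
    # return (text, vhash) and the raw transform returns only vhash:
    #
    #     def _readexample(rl, text):
    #         return text, True
    #
    # 'read' applies the first element, 'write' the second in reversed
    # REVIDX_FLAGS_ORDER, and raw=True only consults the third.
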
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if self._revisioncache and self._revisioncache[0] == node:
                    self._revisioncache = None

                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(_("integrity check failed on %s:%s")
                    % (self.indexfile, pycompat.bytestr(revornode)))
        except error.RevlogError:
            if self._censorable and storageutil.iscensoredtext(text):
                raise error.CensoredNodeError(self.indexfile, node, text)
            raise

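    # For SHA-1 revlogs the expected node is computed over the sorted parent
    # nodes followed by the text. A sketch of the scheme (see
    # storageutil.hashrevisionsha1 for the canonical implementation):
    #
    #   import hashlib
    #   def hashrevision(text, p1, p2):
    #       s = hashlib.sha1(min(p1, p2) + max(p1, p2))
    #       s.update(text)
    #       return s.digest()
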
    def _enforceinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        if (not self._inline or
            (self.start(tiprev) + self.length(tiprev)) < _maxinline):
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise error.RevlogError(_("%s not found in the transaction")
                                    % self.indexfile)

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(tiprev)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None

        with self._indexfp('r') as ifh, self._datafp('w') as dfh:
            for r in self:
                dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])

        with self._indexfp('w') as fp:
            self.version &= ~FLAG_INLINE_DATA
            self._inline = False
            io = self._io
            for i in self:
                e = io.packentry(self.index[i], self.node, self.version, i)
                fp.write(e)

            # the temp file replaces the real index when we exit the context
            # manager

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

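    # Layout reminder: an inline revlog interleaves each index entry with its
    # revision chunk in the single .i file; once that file outgrows
    # _maxinline, the conversion above splits the chunks out into a
    # standalone .d data file.
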
    def _nodeduplicatecallback(self, transaction, node):
        """called when trying to add a node already stored.
        """

    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
                    node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2), however subclasses might
            use a different hashing method (and override checkhash() in that
            case)
        flags - the known flags to set on the revision
        deltacomputer - an optional deltacomputer instance shared between
            multiple calls
        """
        if link == nullrev:
            raise error.RevlogError(_("attempted to add linkrev -1 to %s")
                                    % self.indexfile)

        if flags:
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = self._processflags(text, flags, 'write')

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise error.RevlogError(
                _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
                % (self.indexfile, len(rawtext)))

        node = node or self.hash(rawtext, p1, p2)
        if node in self.nodemap:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
                                   flags, cachedelta=cachedelta,
                                   deltacomputer=deltacomputer)

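    # Typical call shape for addrevision() (a hedged sketch; ``rl`` is this
    # revlog, ``tr`` a live transaction and ``p1node`` a node already stored):
    #
    #   node = rl.addrevision(b'new file content', tr, linkrev, p1node, nullid)
    #
    # Unless ``node`` is passed explicitly, the returned node is
    # hash(text, p1, p2); adding an already-stored node is a no-op that
    # returns the existing node.
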
    def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
                       cachedelta=None, deltacomputer=None):
        """add a raw revision with known flags, node and parents
        useful when reusing a revision not stored in this revlog (ex: received
        over wire, or read from an external bundle).
        """
        dfh = None
        if not self._inline:
            dfh = self._datafp("a+")
        ifh = self._indexfp("a+")
        try:
            return self._addrevision(node, rawtext, transaction, link, p1, p2,
                                     flags, cachedelta, ifh, dfh,
                                     deltacomputer=deltacomputer)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def compress(self, data):
        """Generate a possibly-compressed representation of data."""
        if not data:
            return '', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return '', compressed

        if data[0:1] == '\0':
            return '', data
        return 'u', data

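    # The (header, payload) pair returned by compress() separates an optional
    # one-byte type marker from the data, so callers can write
    # ``header + payload`` to storage. Sketch (``rl`` is a revlog instance):
    #
    #   header, payload = rl.compress(b'some revision data')
    #   chunk = header + payload    # what decompress() later receives
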
    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely be at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == 'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        stringutil.forcebytestr(e))
        # '\0' is more common than 'u' so it goes first.
        elif t == '\0':
            return data
        elif t == 'u':
            return util.buffer(data, 1)

        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor()
                self._decompressors[t] = compressor
            except KeyError:
                raise error.RevlogError(_('unknown compression type %r') % t)

        return compressor.decompress(data)

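    # Summary of the header-byte routing above (first byte of a chunk):
    #
    #   'x'   -> zlib stream (zlib output starts with 0x78, i.e. 'x')
    #   '\0'  -> raw data that happens to start with a NUL byte
    #   'u'   -> uncompressed data; the marker byte is stripped
    #   other -> resolved through util.compengines.forrevlogheader()
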
    def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
                     cachedelta, ifh, dfh, alwayscache=False,
                     deltacomputer=None):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        if node == nullid:
            raise error.RevlogError(_("%s: attempt to add null revision") %
                                    self.indexfile)
        if node == wdirid or node in wdirfilenodeids:
            raise error.RevlogError(_("%s: attempt to add wdir revision") %
                                    self.indexfile)

        if self._inline:
            fh = ifh
        else:
            fh = dfh

        btext = [rawtext]

        curr = len(self)
        prev = curr - 1
        offset = self.end(prev)
        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
                                        cachedelta[1])
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            deltacomputer = deltautil.deltacomputer(self)

        revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)

        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

        e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
             deltainfo.base, link, p1r, p2r, node)
        self.index.append(e)
        self.nodemap[node] = curr

        # Reset the pure node cache start lookup offset to account for new
        # revision.
        if self._nodepos is not None:
            self._nodepos = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
                         link, offset)

        rawtext = btext[0]

        if alwayscache and rawtext is None:
            rawtext = deltacomputer.buildtext(revinfo, fh)

        if type(rawtext) == bytes: # only accept immutable objects
            self._revisioncache = (node, curr, rawtext)
        self._chainbasecache[curr] = deltainfo.chainbase
        return node

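    # For reference, the 8-tuple appended to the index above is:
    #   (offset_type(offset, flags), compressed delta length, rawtext length,
    #    delta base rev, linkrev, p1 rev, p2 rev, node)
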
    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        ifh.seek(0, os.SEEK_END)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self._enforceinlinesize(transaction, ifh)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        if self._writinghandles:
            raise error.ProgrammingError('cannot nest addgroup() calls')

        nodes = []

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self._indexfp("a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self._datafp("a+")
        def flush():
            if dfh:
                dfh.flush()
            ifh.flush()

        self._writinghandles = (ifh, dfh)

        try:
            deltacomputer = deltautil.deltacomputer(self)
            # loop through our set of deltas
            for data in deltas:
                node, p1, p2, linknode, deltabase, delta, flags = data
                link = linkmapper(linknode)
                flags = flags or REVIDX_DEFAULT_FLAGS

                nodes.append(node)

                if node in self.nodemap:
                    self._nodeduplicatecallback(transaction, node)
                    # this can happen if two branches make the same change
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise error.LookupError(p, self.indexfile,
                                                _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise error.LookupError(deltabase, self.indexfile,
                                            _('unknown delta base'))

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(self.indexfile,
                                                      self.node(baserev))

                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                # We're only using addgroup() in the context of changegroup
                # generation so the revision data can always be handled as raw
                # by the flagprocessor.
                self._addrevision(node, None, transaction, link,
                                  p1, p2, flags, (baserev, delta),
                                  ifh, dfh,
                                  alwayscache=bool(addrevisioncb),
                                  deltacomputer=deltacomputer)

                if addrevisioncb:
                    addrevisioncb(self, node)

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self._datafp("a+")
                    ifh = self._indexfp("a+")
                    self._writinghandles = (ifh, dfh)
        finally:
            self._writinghandles = None

            if dfh:
                dfh.close()
            ifh.close()

        return nodes

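    # Each item in ``deltas`` is expected to be a 7-tuple, matching the
    # unpacking in addgroup() above:
    #
    #   (node, p1, p2, linknode, deltabase, delta, flags)
    #
    # where ``delta`` is the binary patch that, applied to the revision named
    # by ``deltabase``, reproduces this revision's raw text.
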
    def iscensored(self, rev):
        """Check if a file revision is censored."""
        if not self._censorable:
            return False

        return self.flags(rev) & REVIDX_ISCENSORED

    def _peek_iscensored(self, baserev, delta, flush):
        """Quickly check if a delta produces a censored revision."""
        if not self._censorable:
            return False

        return storageutil.deltaiscensored(delta, baserev, self.rawsize)

    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        return storageutil.resolvestripinfo(minlink, len(self) - 1,
                                            self.headrevs(),
                                            self.linkrev, self.parentrevs)

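    # Sketch of the contract (``rl`` is a revlog instance):
    #
    #   striprev, brokenset = rl.getstrippoint(minlink)
    #
    # ``striprev`` is the first rev strip() would truncate at; ``brokenset``
    # holds the revs whose linkrevs the strip would leave dangling.
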
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = {}
        self._chunkclear()
        for x in pycompat.xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        # the in-memory index keeps a trailing sentinel entry, hence [rev:-1]
        del self.index[rev:-1]
        self._nodepos = None

    def checksize(self):
        """Compare on-disk file sizes against what the index implies.

        Returns a (dd, di) pair: the number of bytes by which the data file
        and the index file, respectively, differ from the sizes implied by
        the index entries. Both are 0 for a consistent revlog.
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, 2)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res

    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
                      assumehaveparentrevisions=False,
                      deltamode=repository.CG_DELTAMODE_STD):
        if nodesorder not in ('nodes', 'storage', 'linear', None):
            raise error.ProgrammingError('unhandled value for nodesorder: %s' %
                                         nodesorder)

        if nodesorder is None and not self._generaldelta:
            nodesorder = 'storage'

        if (not self._storedeltachains and
                deltamode != repository.CG_DELTAMODE_PREV):
            deltamode = repository.CG_DELTAMODE_FULL

        return storageutil.emitrevisions(
            self, nodes, nodesorder, revlogrevisiondelta,
            deltaparentfn=self.deltaparent,
            candeltafn=self.candelta,
            rawsizefn=self.rawsize,
            revdifffn=self.revdiff,
            flagsfn=self.flags,
            deltamode=deltamode,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions)

    DELTAREUSEALWAYS = 'always'
    DELTAREUSESAMEREVS = 'samerevs'
    DELTAREUSENEVER = 'never'

    DELTAREUSEFULLADD = 'fulladd'

    DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}

    def clone(self, tr, destrevlog, addrevisioncb=None,
              deltareuse=DELTAREUSESAMEREVS, forcedeltabothparents=None):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
            Deltas will always be reused (if possible), even if the destination
            revlog would not select the same revisions for the delta. This is
            the fastest mode of operation.
        DELTAREUSESAMEREVS
            Deltas will be reused if the destination revlog would pick the same
            revisions for the delta. This mode strikes a balance between speed
            and optimization.
        DELTAREUSENEVER
            Deltas will never be reused. This is the slowest mode of execution.
            This mode can be used to recompute deltas (e.g. if the diff/delta
            algorithm changes).

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force compute deltas against both parents
        for merges. When ``None``, the destination revlog's existing setting
        is left in effect.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)

        if len(destrevlog):
            raise ValueError(_('destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_('source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_('destination revlog has filtered revisions'))

        # lazydeltabase controls whether to reuse a cached delta, if possible.
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
                                                self.DELTAREUSESAMEREVS)

            deltacomputer = deltautil.deltacomputer(destrevlog)
            index = self.index
            for rev in self:
                entry = index[rev]

                # Some classes override linkrev to take filtered revs into
                # account. Use raw entry from index.
                flags = entry[0] & 0xffff
                linkrev = entry[4]
                p1 = index[entry[5]][7]
                p2 = index[entry[6]][7]
                node = entry[7]

                # (Possibly) reuse the delta from the revlog if allowed and
                # the revlog chunk is a delta.
                cachedelta = None
                rawtext = None
                if populatecachedelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                if not cachedelta:
                    rawtext = self.revision(rev, raw=True)

                if deltareuse == self.DELTAREUSEFULLADD:
                    destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
                                           cachedelta=cachedelta,
                                           node=node, flags=flags,
                                           deltacomputer=deltacomputer)
                else:
                    ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
                                            checkambig=False)
                    dfh = None
                    if not destrevlog._inline:
                        dfh = destrevlog.opener(destrevlog.datafile, 'a+')
                    try:
                        destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
                                                p2, flags, cachedelta, ifh, dfh,
                                                deltacomputer=deltacomputer)
                    finally:
                        if dfh:
                            dfh.close()
                        ifh.close()

                if addrevisioncb:
                    addrevisioncb(self, rev, node)
        finally:
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd

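    # Hedged usage sketch for clone() (``src`` and ``dest`` are open revlogs
    # and ``tr`` a live transaction; DELTAREUSENEVER forces every delta to be
    # recomputed by the destination):
    #
    #   src.clone(tr, dest, deltareuse=src.DELTAREUSENEVER)
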
    def censorrevision(self, tr, censornode, tombstone=b''):
        if (self.version & 0xFFFF) == REVLOGV0:
            raise error.RevlogError(_('cannot censor with version %d revlogs') %
                                    self.version)

        censorrev = self.rev(censornode)
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        if len(tombstone) > self.rawsize(censorrev):
            raise error.Abort(_('censor tombstone must be no longer than '
                                'censored data'))

        # Rewriting the revlog in place is hard. Our strategy for censoring is
        # to create a new revlog, copy all revisions to it, then replace the
        # revlogs on transaction close.

        newindexfile = self.indexfile + b'.tmpcensored'
        newdatafile = self.datafile + b'.tmpcensored'

        # This is a bit dangerous. We could easily have a mismatch of state.
        newrl = revlog(self.opener, newindexfile, newdatafile,
                       censorable=True)
        newrl.version = self.version
        newrl._generaldelta = self._generaldelta
        newrl._io = self._io

        for rev in self.revs():
            node = self.node(rev)
            p1, p2 = self.parents(node)

            if rev == censorrev:
                newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev),
                                     p1, p2, censornode, REVIDX_ISCENSORED)

                if newrl.deltaparent(rev) != nullrev:
                    raise error.Abort(_('censored revision stored as delta; '
                                        'cannot censor'),
                                      hint=_('censoring of revlogs is not '
                                             'fully implemented; please report '
                                             'this bug'))
                continue

            if self.iscensored(rev):
                if self.deltaparent(rev) != nullrev:
                    raise error.Abort(_('cannot censor due to censored '
                                        'revision having delta stored'))
                rawtext = self._chunk(rev)
            else:
                rawtext = self.revision(rev, raw=True)

            newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node,
                                 self.flags(rev))

        tr.addbackup(self.indexfile, location='store')
        if not self._inline:
            tr.addbackup(self.datafile, location='store')

        self.opener.rename(newrl.indexfile, self.indexfile)
        if not self._inline:
            self.opener.rename(newrl.datafile, self.datafile)

        self.clearcaches()
        self._loadindex()

    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.
        """
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_('data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_('index contains %d extra bytes') % di)

        version = self.version & 0xFFFF

        # The verifier tells us what version revlog we should be.
        if version != state['expectedversion']:
            yield revlogproblem(
                warning=_("warning: '%s' uses revlog format %d; expected %d") %
                        (self.indexfile, version, state['expectedversion']))

        state['skipread'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta: file content starts with b'\1\n', the metadata
            #         header defined in filelog.py, but without a rename
            #   ext: content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            #  -------------------------------------------------------
            #  flags()              | 0      | 0      | 0     | not 0
            #  renamed()            | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "revision(rev, raw=True)". "text"
            # mentioned below is "revision(rev, raw=False)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see revlog.addflagprocessor).
            #
            #              | common | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()    | L1     | L1     | L1    | L1
            # size()       | L1     | L2-LM  | L1(*) | L1 (?)
            # len(rawtext) | L2     | L2     | L2    | L2
            # len(text)    | L2     | L2     | L2    | L3
            # len(read())  | L2     | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

            try:
                skipflags = state.get('skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                if skipflags:
                    state['skipread'].add(node)
                else:
                    # Side-effect: read content and verify hash.
                    self.revision(node)

                l1 = self.rawsize(rev)
                l2 = len(self.revision(node, raw=True))

                if l1 != l2:
                    yield revlogproblem(
                        error=_('unpacked size is %d, %d expected') % (l2, l1),
                        node=node)

            except error.CensoredNodeError:
                if state['erroroncensored']:
                    yield revlogproblem(error=_('censored file data'),
                                        node=node)
                state['skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_('unpacking %s: %s') % (short(node),
                                                   stringutil.forcebytestr(e)),
                    node=node)
                state['skipread'].add(node)

    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        d = {}

        if exclusivefiles:
            d['exclusivefiles'] = [(self.opener, self.indexfile)]
            if not self._inline:
                d['exclusivefiles'].append((self.opener, self.datafile))

        if sharedfiles:
            d['sharedfiles'] = []

        if revisionscount:
            d['revisionscount'] = len(self)

        if trackedsize:
            d['trackedsize'] = sum(map(self.rawsize, iter(self)))

        if storedsize:
            d['storedsize'] = sum(self.opener.stat(path).st_size
                                  for path in self.files())

        return d
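
For orientation, a minimal usage sketch of the method above, assuming an
already-constructed revlog instance named rl (a hypothetical name); each
returned key mirrors the keyword argument that requested it:

    info = rl.storageinfo(revisionscount=True, trackedsize=True,
                          storedsize=True)
    info['revisionscount']  # len(rl), the number of revisions
    info['trackedsize']     # sum of rawsize() across all revisions
    info['storedsize']      # on-disk size of the revlog's files
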
@@ -1,1290 +1,1293 b''
#testcases sshv1 sshv2

#if sshv2
$ cat >> $HGRCPATH << EOF
> [experimental]
> sshpeer.advertise-v2 = true
> sshserver.support-v2 = true
> EOF
#endif

Prepare repo a:

$ hg init a
$ cd a
$ echo a > a
$ hg add a
$ hg commit -m test
$ echo first line > b
$ hg add b

Create a non-inlined filelog:

$ "$PYTHON" -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
$ for j in 0 1 2 3 4 5 6 7 8 9; do
> cat data1 >> b
> hg commit -m test
> done

List files in store/data (should show a 'b.d'):

#if reporevlogstore
$ for i in .hg/store/data/*; do
> echo $i
> done
.hg/store/data/a.i
.hg/store/data/b.d
.hg/store/data/b.i
#endif

Trigger branchcache creation:

$ hg branches
default 10:a7949464abda
$ ls .hg/cache
branch2-served
manifestfulltextcache (reporevlogstore !)
rbc-names-v1
rbc-revs-v1

Default operation:

$ hg clone . ../b
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cd ../b

Ensure branchcache got copied over:

$ ls .hg/cache
branch2-served
rbc-names-v1
rbc-revs-v1

$ cat a
a
$ hg verify
checking changesets
checking manifests
crosschecking files in changesets and manifests
checking files
checked 11 changesets with 11 changes to 2 files

Invalid dest '' must abort:

$ hg clone . ''
abort: empty destination path is not valid
[255]

No update, with debug option:

#if hardlink
$ hg --debug clone -U . ../c --config progress.debug=true
linking: 1 files
linking: 2 files
linking: 3 files
linking: 4 files
linking: 5 files
linking: 6 files
linking: 7 files
linking: 8 files
linked 8 files (reporevlogstore !)
linking: 9 files (reposimplestore !)
linking: 10 files (reposimplestore !)
linking: 11 files (reposimplestore !)
linking: 12 files (reposimplestore !)
linking: 13 files (reposimplestore !)
linking: 14 files (reposimplestore !)
linking: 15 files (reposimplestore !)
linking: 16 files (reposimplestore !)
linking: 17 files (reposimplestore !)
linking: 18 files (reposimplestore !)
linked 18 files (reposimplestore !)
#else
$ hg --debug clone -U . ../c --config progress.debug=true
linking: 1 files
copying: 2 files
copying: 3 files
copying: 4 files
copying: 5 files
copying: 6 files
copying: 7 files
copying: 8 files
copied 8 files (reporevlogstore !)
copying: 9 files (reposimplestore !)
copying: 10 files (reposimplestore !)
copying: 11 files (reposimplestore !)
copying: 12 files (reposimplestore !)
copying: 13 files (reposimplestore !)
copying: 14 files (reposimplestore !)
copying: 15 files (reposimplestore !)
copying: 16 files (reposimplestore !)
copying: 17 files (reposimplestore !)
copying: 18 files (reposimplestore !)
copied 18 files (reposimplestore !)
#endif
$ cd ../c

Ensure branchcache got copied over:

$ ls .hg/cache
branch2-served
rbc-names-v1
rbc-revs-v1

$ cat a 2>/dev/null || echo "a not present"
a not present
$ hg verify
checking changesets
checking manifests
crosschecking files in changesets and manifests
checking files
checked 11 changesets with 11 changes to 2 files

Default destination:

$ mkdir ../d
$ cd ../d
$ hg clone ../a
destination directory: a
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cd a
$ hg cat a
a
$ cd ../..

Check that we drop the 'file:' from the path before writing the .hgrc:

$ hg clone file:a e
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ grep 'file:' e/.hg/hgrc
[1]

Check that path aliases are expanded:

$ hg clone -q -U --config 'paths.foobar=a#0' foobar f
$ hg -R f showconfig paths.default
$TESTTMP/a#0

Use --pull:

$ hg clone --pull a g
requesting all changes
adding changesets
adding manifests
adding file changes
added 11 changesets with 11 changes to 2 files
new changesets acb14030fe0a:a7949464abda
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg -R g verify
checking changesets
checking manifests
crosschecking files in changesets and manifests
checking files
checked 11 changesets with 11 changes to 2 files

Invalid dest '' with --pull must abort (issue2528):

$ hg clone --pull a ''
abort: empty destination path is not valid
[255]

Clone to '.':

$ mkdir h
$ cd h
$ hg clone ../a .
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ cd ..


*** Tests for option -u ***

Adding some more history to repo a:

$ cd a
$ hg tag ref1
$ echo the quick brown fox >a
$ hg ci -m "hacked default"
$ hg up ref1
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg branch stable
marked working directory as branch stable
(branches are permanent and global, did you want a bookmark?)
$ echo some text >a
$ hg ci -m "starting branch stable"
$ hg tag ref2
$ echo some more text >a
$ hg ci -m "another change for branch stable"
$ hg up ref2
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg parents
changeset: 13:e8ece76546a6
branch: stable
tag: ref2
parent: 10:a7949464abda
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: starting branch stable


Repo a has two heads:

$ hg heads
changeset: 15:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: hacked default


$ cd ..


Testing --noupdate with --updaterev (must abort):

$ hg clone --noupdate --updaterev 1 a ua
abort: cannot specify both --noupdate and --updaterev
[255]


Testing clone -u:

$ hg clone -u . a ua
updating to branch stable
2 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has both heads:

$ hg -R ua heads
changeset: 15:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: hacked default


Same revision checked out in repo a and ua:

$ hg -R a parents --template "{node|short}\n"
e8ece76546a6
$ hg -R ua parents --template "{node|short}\n"
e8ece76546a6

$ rm -r ua


Testing clone --pull -u:

$ hg clone --pull -u . a ua
requesting all changes
adding changesets
adding manifests
adding file changes
added 16 changesets with 16 changes to 3 files (+1 heads)
new changesets acb14030fe0a:0aae7cf88f0d
updating to branch stable
2 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has both heads:

$ hg -R ua heads
changeset: 15:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: hacked default


Same revision checked out in repo a and ua:

$ hg -R a parents --template "{node|short}\n"
e8ece76546a6
$ hg -R ua parents --template "{node|short}\n"
e8ece76546a6

$ rm -r ua


Testing clone -u <branch>:

$ hg clone -u stable a ua
updating to branch stable
3 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has both heads:

$ hg -R ua heads
changeset: 15:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: hacked default


Branch 'stable' is checked out:

$ hg -R ua parents
changeset: 15:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable


$ rm -r ua


Testing default checkout:

$ hg clone a ua
updating to branch default
3 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has both heads:

$ hg -R ua heads
changeset: 15:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: hacked default


Branch 'default' is checked out:

$ hg -R ua parents
changeset: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: hacked default

Test clone with a branch named "@" (issue3677)

$ hg -R ua branch @
marked working directory as branch @
$ hg -R ua commit -m 'created branch @'
$ hg clone ua atbranch
updating to branch default
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg -R atbranch heads
changeset: 16:798b6d97153e
branch: @
tag: tip
parent: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: created branch @

changeset: 15:0aae7cf88f0d
branch: stable
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: hacked default

$ hg -R atbranch parents
changeset: 12:f21241060d6a
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: hacked default


$ rm -r ua atbranch


Testing #<branch>:

$ hg clone -u . a#stable ua
adding changesets
adding manifests
adding file changes
added 14 changesets with 14 changes to 3 files
new changesets acb14030fe0a:0aae7cf88f0d
updating to branch stable
2 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has branches 'stable' and 'default' (was changed in fd511e9eeea6):

$ hg -R ua heads
changeset: 13:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 10:a7949464abda
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: test


Same revision checked out in repo a and ua:

$ hg -R a parents --template "{node|short}\n"
e8ece76546a6
$ hg -R ua parents --template "{node|short}\n"
e8ece76546a6

$ rm -r ua


Testing -u -r <branch>:

$ hg clone -u . -r stable a ua
adding changesets
adding manifests
adding file changes
added 14 changesets with 14 changes to 3 files
new changesets acb14030fe0a:0aae7cf88f0d
updating to branch stable
2 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has branches 'stable' and 'default' (was changed in fd511e9eeea6):

$ hg -R ua heads
changeset: 13:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 10:a7949464abda
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: test


Same revision checked out in repo a and ua:

$ hg -R a parents --template "{node|short}\n"
e8ece76546a6
$ hg -R ua parents --template "{node|short}\n"
e8ece76546a6

$ rm -r ua


Testing -r <branch>:

$ hg clone -r stable a ua
adding changesets
adding manifests
adding file changes
added 14 changesets with 14 changes to 3 files
new changesets acb14030fe0a:0aae7cf88f0d
updating to branch stable
3 files updated, 0 files merged, 0 files removed, 0 files unresolved

Repo ua has branches 'stable' and 'default' (was changed in fd511e9eeea6):

$ hg -R ua heads
changeset: 13:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable

changeset: 10:a7949464abda
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: test


Branch 'stable' is checked out:

$ hg -R ua parents
changeset: 13:0aae7cf88f0d
branch: stable
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: another change for branch stable


$ rm -r ua


Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
iterable in addbranchrevs()

$ cat <<EOF > simpleclone.py
> from mercurial import hg, ui as uimod
> myui = uimod.ui.load()
> repo = hg.repository(myui, b'a')
> hg.clone(myui, {}, repo, dest=b"ua")
> EOF

$ "$PYTHON" simpleclone.py
updating to branch default
3 files updated, 0 files merged, 0 files removed, 0 files unresolved

$ rm -r ua

$ cat <<EOF > branchclone.py
> from mercurial import extensions, hg, ui as uimod
> myui = uimod.ui.load()
> extensions.loadall(myui)
> extensions.populateui(myui)
> repo = hg.repository(myui, b'a')
> hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable",])
> EOF

$ "$PYTHON" branchclone.py
adding changesets
adding manifests
adding file changes
added 14 changesets with 14 changes to 3 files
new changesets acb14030fe0a:0aae7cf88f0d
updating to branch stable
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ rm -r ua


Test clone with special '@' bookmark:
$ cd a
$ hg bookmark -r a7949464abda @ # branch point of stable from default
$ hg clone . ../i
updating to bookmark @
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg id -i ../i
a7949464abda
$ rm -r ../i

$ hg bookmark -f -r stable @
$ hg bookmarks
@ 15:0aae7cf88f0d
$ hg clone . ../i
updating to bookmark @ on branch stable
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg id -i ../i
0aae7cf88f0d
$ cd "$TESTTMP"


Testing failures:

$ mkdir fail
$ cd fail

No local source

$ hg clone a b
abort: repository a not found!
[255]

No remote source

#if windows
$ hg clone http://$LOCALIP:3121/a b
abort: error: * (glob)
[255]
#else
$ hg clone http://$LOCALIP:3121/a b
abort: error: *refused* (glob)
[255]
#endif
$ rm -rf b # work around bug with http clone


#if unix-permissions no-root

Inaccessible source

$ mkdir a
$ chmod 000 a
$ hg clone a b
abort: Permission denied: *$TESTTMP/fail/a/.hg* (glob)
[255]

Inaccessible destination

$ hg init b
$ cd b
$ hg clone . ../a
abort: Permission denied: *../a* (glob)
[255]
$ cd ..
$ chmod 700 a
$ rm -r a b

#endif


#if fifo

Source of wrong type

$ mkfifo a
$ hg clone a b
abort: $ENOTDIR$: *$TESTTMP/fail/a/.hg* (glob)
[255]
$ rm a

#endif

Default destination, same directory

$ hg init q
$ hg clone q
destination directory: q
abort: destination 'q' is not empty
[255]

destination directory not empty

$ mkdir a
$ echo stuff > a/a
$ hg clone q a
abort: destination 'a' is not empty
[255]


#if unix-permissions no-root

leave existing directory in place after clone failure

$ hg init c
$ cd c
$ echo c > c
$ hg commit -A -m test
adding c
$ chmod -rx .hg/store/data
$ cd ..
$ mkdir d
$ hg clone c d 2> err
[255]
$ test -d d
$ test -d d/.hg
[1]

re-enable perm to allow deletion

$ chmod +rx c/.hg/store/data

#endif

$ cd ..

Test clone from a repository in (emulated) revlog format 0 (issue4203):

$ mkdir issue4203
$ mkdir -p src/.hg
$ echo foo > src/foo
$ hg -R src add src/foo
$ hg -R src commit -m '#0'
$ hg -R src log -q
0:e1bab28bca43
$ hg -R src debugrevlog -c | egrep 'format|flags'
format : 0
flags : (none)
$ hg clone -U -q src dst
$ hg -R dst log -q
0:e1bab28bca43

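The debugrevlog check added above confirms the changelog really is
version 0. For background, a simplified sketch of how a revlog index
header is interpreted, based on the version/flag constants imported at
the top of revlog.py (illustrative only, not the exact code touched by
this change):

    import struct

    def parse_revlog_header(index_data):
        # The first 4 bytes of a revlog index form a big-endian uint32:
        # the low 16 bits carry the format version (0, 1 or 2), the
        # high 16 bits carry feature flags such as FLAG_INLINE_DATA
        # and FLAG_GENERALDELTA.
        v = struct.unpack(">I", index_data[:4])[0]
        version = v & 0xFFFF
        flags = v & ~0xFFFF
        return version, flags

For the emulated version-0 repository above this yields (0, 0),
matching the 'format : 0' and 'flags : (none)' output.
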
Create repositories to test auto sharing functionality

$ cat >> $HGRCPATH << EOF
> [extensions]
> share=
> EOF

$ hg init empty
$ hg init source1a
$ cd source1a
$ echo initial1 > foo
$ hg -q commit -A -m initial
$ echo second > foo
$ hg commit -m second
$ cd ..

$ hg init filteredrev0
$ cd filteredrev0
$ cat >> .hg/hgrc << EOF
> [experimental]
> evolution.createmarkers=True
> EOF
$ echo initial1 > foo
$ hg -q commit -A -m initial0
$ hg -q up -r null
$ echo initial2 > foo
$ hg -q commit -A -m initial1
$ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
obsoleted 1 changesets
$ cd ..

$ hg -q clone --pull source1a source1b
$ cd source1a
$ hg bookmark bookA
$ echo 1a > foo
$ hg commit -m 1a
$ cd ../source1b
$ hg -q up -r 0
$ echo head1 > foo
$ hg commit -m head1
created new head
$ hg bookmark head1
$ hg -q up -r 0
$ echo head2 > foo
$ hg commit -m head2
created new head
$ hg bookmark head2
$ hg -q up -r 0
$ hg branch branch1
marked working directory as branch branch1
(branches are permanent and global, did you want a bookmark?)
$ echo branch1 > foo
$ hg commit -m branch1
$ hg -q up -r 0
$ hg branch branch2
marked working directory as branch branch2
$ echo branch2 > foo
$ hg commit -m branch2
$ cd ..
$ hg init source2
$ cd source2
$ echo initial2 > foo
$ hg -q commit -A -m initial2
$ echo second > foo
$ hg commit -m second
$ cd ..

Clone with auto share from an empty repo should not result in share

$ mkdir share
$ hg --config share.pool=share clone empty share-empty
(not using pooled storage: remote appears to be empty)
updating to branch default
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ ls share
$ test -d share-empty/.hg/store
$ test -f share-empty/.hg/sharedpath
[1]

Clone with auto share from a repo with filtered revision 0 should not result in share

$ hg --config share.pool=share clone filteredrev0 share-filtered
(not using pooled storage: unable to resolve identity of remote)
requesting all changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files
new changesets e082c1832e09
updating to branch default
1 files updated, 0 files merged, 0 files removed, 0 files unresolved

Clone from repo with content should result in shared store being created

$ hg --config share.pool=share clone source1a share-dest1a
(sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
requesting all changes
adding changesets
adding manifests
adding file changes
added 3 changesets with 3 changes to 1 files
new changesets b5f04eac9d8f:e5bfe23c0b47
searching for changes
no changes found
adding remote bookmark bookA
updating working directory
1 files updated, 0 files merged, 0 files removed, 0 files unresolved

The shared repo should have been created

$ ls share
b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1

The destination should point to it

$ cat share-dest1a/.hg/sharedpath; echo
$TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg

The destination should have bookmarks

$ hg -R share-dest1a bookmarks
bookA 2:e5bfe23c0b47

The default path should be the remote, not the share

$ hg -R share-dest1a config paths.default
$TESTTMP/source1a

Clone with existing share dir should result in pull + share

$ hg --config share.pool=share clone source1b share-dest1b
(sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
searching for changes
adding changesets
adding manifests
adding file changes
added 4 changesets with 4 changes to 1 files (+4 heads)
adding remote bookmark head1
adding remote bookmark head2
new changesets 4a8dc1ab4c13:6bacf4683960
updating working directory
1 files updated, 0 files merged, 0 files removed, 0 files unresolved

$ ls share
b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1

$ cat share-dest1b/.hg/sharedpath; echo
$TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg

We only get bookmarks from the remote, not everything in the share

$ hg -R share-dest1b bookmarks
head1 3:4a8dc1ab4c13
head2 4:99f71071f117

Default path should be source, not share.

$ hg -R share-dest1b config paths.default
$TESTTMP/source1b

Checked out revision should be head of default branch

$ hg -R share-dest1b log -r .
changeset: 4:99f71071f117
bookmark: head2
parent: 0:b5f04eac9d8f
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: head2


Clone from unrelated repo should result in new share

$ hg --config share.pool=share clone source2 share-dest2
(sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
requesting all changes
adding changesets
adding manifests
adding file changes
added 2 changesets with 2 changes to 1 files
new changesets 22aeff664783:63cf6c3dba4a
searching for changes
no changes found
updating working directory
1 files updated, 0 files merged, 0 files removed, 0 files unresolved

$ ls share
22aeff664783fd44c6d9b435618173c118c3448e
b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1

remote naming mode works as advertised

$ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
(sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
requesting all changes
adding changesets
adding manifests
adding file changes
added 3 changesets with 3 changes to 1 files
new changesets b5f04eac9d8f:e5bfe23c0b47
searching for changes
no changes found
adding remote bookmark bookA
updating working directory
1 files updated, 0 files merged, 0 files removed, 0 files unresolved

$ ls shareremote
195bb1fcdb595c14a6c13e0269129ed78f6debde

$ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
(sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
requesting all changes
adding changesets
adding manifests
adding file changes
added 6 changesets with 6 changes to 1 files (+4 heads)
new changesets b5f04eac9d8f:6bacf4683960
searching for changes
no changes found
adding remote bookmark head1
adding remote bookmark head2
updating working directory
1 files updated, 0 files merged, 0 files removed, 0 files unresolved

$ ls shareremote
195bb1fcdb595c14a6c13e0269129ed78f6debde
c0d4f83847ca2a873741feb7048a45085fd47c46

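The two naming modes visible above differ only in how the pool
directory name is derived. A sketch of the general idea, assuming the
'remote' mode hashes the clone source with SHA-1 (the exact source
normalization is internal to the share extension), while the default
'identity' mode uses the root changeset ID, which is why the related
clones of source1a and source1b above land in the same b5f04eac...
store:

    import hashlib

    def pool_key(source, root_node_hex, naming=b'identity'):
        # 'identity': one pooled store per root changeset, shared by
        # all related clones; 'remote': one pooled store per source.
        if naming == b'remote':
            return hashlib.sha1(source).hexdigest()
        return root_node_hex
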
952 request to clone a single revision is respected in sharing mode
955 request to clone a single revision is respected in sharing mode
953
956
954 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
957 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
955 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
958 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
956 adding changesets
959 adding changesets
957 adding manifests
960 adding manifests
958 adding file changes
961 adding file changes
959 added 2 changesets with 2 changes to 1 files
962 added 2 changesets with 2 changes to 1 files
960 new changesets b5f04eac9d8f:4a8dc1ab4c13
963 new changesets b5f04eac9d8f:4a8dc1ab4c13
961 no changes found
964 no changes found
962 adding remote bookmark head1
965 adding remote bookmark head1
963 updating working directory
966 updating working directory
964 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
967 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
965
968
966 $ hg -R share-1arev log -G
969 $ hg -R share-1arev log -G
967 @ changeset: 1:4a8dc1ab4c13
970 @ changeset: 1:4a8dc1ab4c13
968 | bookmark: head1
971 | bookmark: head1
969 | tag: tip
972 | tag: tip
970 | user: test
973 | user: test
971 | date: Thu Jan 01 00:00:00 1970 +0000
974 | date: Thu Jan 01 00:00:00 1970 +0000
972 | summary: head1
975 | summary: head1
973 |
976 |
974 o changeset: 0:b5f04eac9d8f
977 o changeset: 0:b5f04eac9d8f
975 user: test
978 user: test
976 date: Thu Jan 01 00:00:00 1970 +0000
979 date: Thu Jan 01 00:00:00 1970 +0000
977 summary: initial
980 summary: initial
978
981
979
982
980 making another clone should only pull down requested rev
983 making another clone should only pull down requested rev
981
984
982 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
985 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
983 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
986 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
984 searching for changes
987 searching for changes
985 adding changesets
988 adding changesets
986 adding manifests
989 adding manifests
987 adding file changes
990 adding file changes
988 added 1 changesets with 1 changes to 1 files (+1 heads)
991 added 1 changesets with 1 changes to 1 files (+1 heads)
989 adding remote bookmark head1
992 adding remote bookmark head1
990 adding remote bookmark head2
993 adding remote bookmark head2
991 new changesets 99f71071f117
994 new changesets 99f71071f117
992 updating working directory
995 updating working directory
993 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
996 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
994
997
995 $ hg -R share-1brev log -G
998 $ hg -R share-1brev log -G
996 @ changeset: 2:99f71071f117
999 @ changeset: 2:99f71071f117
997 | bookmark: head2
1000 | bookmark: head2
998 | tag: tip
1001 | tag: tip
999 | parent: 0:b5f04eac9d8f
1002 | parent: 0:b5f04eac9d8f
1000 | user: test
1003 | user: test
1001 | date: Thu Jan 01 00:00:00 1970 +0000
1004 | date: Thu Jan 01 00:00:00 1970 +0000
1002 | summary: head2
1005 | summary: head2
1003 |
1006 |
1004 | o changeset: 1:4a8dc1ab4c13
1007 | o changeset: 1:4a8dc1ab4c13
1005 |/ bookmark: head1
1008 |/ bookmark: head1
1006 | user: test
1009 | user: test
1007 | date: Thu Jan 01 00:00:00 1970 +0000
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     head1
  |
  o  changeset:   0:b5f04eac9d8f
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     initial


Request to clone a single branch is respected in sharing mode

  $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
  (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 1 files
  new changesets b5f04eac9d8f:5f92a6c1a1b1
  no changes found
  updating working directory
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ hg -R share-1bbranch1 log -G
  o  changeset:   1:5f92a6c1a1b1
  |  branch:      branch1
  |  tag:         tip
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     branch1
  |
  @  changeset:   0:b5f04eac9d8f
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     initial


  $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
  (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files (+1 heads)
  new changesets 6bacf4683960
  updating working directory
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ hg -R share-1bbranch2 log -G
  o  changeset:   2:6bacf4683960
  |  branch:      branch2
  |  tag:         tip
  |  parent:      0:b5f04eac9d8f
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     branch2
  |
  | o  changeset:   1:5f92a6c1a1b1
  |/   branch:      branch1
  |    user:        test
  |    date:        Thu Jan 01 00:00:00 1970 +0000
  |    summary:     branch1
  |
  @  changeset:   0:b5f04eac9d8f
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     initial

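Both clones above are backed by a single repository in the sharebranch pool,
named after the full hash of the root changeset. As an illustrative check
(not part of the original test; the exact listing is an assumption), the
pool directory would be expected to contain just that one repository:

  $ ls sharebranch
  b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
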
-U is respected in share clone mode

  $ hg --config share.pool=share clone -U source1a share-1anowc
  (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
  searching for changes
  no changes found
  adding remote bookmark bookA

  $ ls share-1anowc

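The empty listing confirms that -U skipped the checkout, but the history
was still pulled into the shared store. A hypothetical follow-up check
(not in the original test) could confirm the tip is present:

  $ hg -R share-1anowc log -r tip -T '{node|short}\n'
  e5bfe23c0b47
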
Test that auto sharing doesn't cause failure of "hg clone local remote"

  $ cd $TESTTMP
  $ hg -R a id -r 0
  acb14030fe0a
  $ hg id -R remote -r 0
  abort: repository remote not found!
  [255]
  $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
  $ hg -R remote id -r 0
  acb14030fe0a

Cloning into pooled storage doesn't race (issue5104). The lockdelay
extension used below inserts artificial delays around lock acquisition
(controlled by HGPRELOCKDELAY and HGPOSTLOCKDELAY), forcing the two
concurrent clones to overlap.

  $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
  $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
  $ wait

  $ hg -R share-destrace1 log -r tip
  changeset:   2:e5bfe23c0b47
  bookmark:    bookA
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1a


  $ hg -R share-destrace2 log -r tip
  changeset:   2:e5bfe23c0b47
  bookmark:    bookA
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     1a

One repo should be new, the other should be shared from the pool. We
don't care which is which, so we just make sure we always print the
one containing "new pooled" first, then the one containing "existing
pooled".

  $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
  (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 3 changesets with 3 changes to 1 files
  new changesets b5f04eac9d8f:e5bfe23c0b47
  searching for changes
  no changes found
  adding remote bookmark bookA
  updating working directory
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
  (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
  searching for changes
  no changes found
  adding remote bookmark bookA
  updating working directory
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

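Both destinations end up shared from the same pooled store. As an
illustrative check (hypothetical; the exact path shown is an assumption),
each repo's .hg/sharedpath would point into the pool:

  $ cat share-destrace1/.hg/sharedpath
  $TESTTMP/racepool/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
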
SEC: check for unsafe ssh url

  $ cat >> $HGRCPATH << EOF
  > [ui]
  > ssh = sh -c "read l; read l; read l"
  > EOF

(The stub ssh command consumes the handshake lines Mercurial sends and
then exits, so any URL that gets past the safety checks fails with "no
suitable response from remote hg" rather than reaching a real remote.)

  $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
  abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
  [255]
  $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
  abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
  [255]
  $ hg clone 'ssh://fakehost|touch%20owned/path'
  abort: no suitable response from remote hg!
  [255]
  $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
  abort: no suitable response from remote hg!
  [255]

  $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
  abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
  [255]

#if windows
  $ hg clone "ssh://%26touch%20owned%20/" --debug
  running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
  sending hello command
  sending between command
  abort: no suitable response from remote hg!
  [255]
  $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
  running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
  sending hello command
  sending between command
  abort: no suitable response from remote hg!
  [255]
#else
  $ hg clone "ssh://%3btouch%20owned%20/" --debug
  running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
  sending hello command
  sending between command
  abort: no suitable response from remote hg!
  [255]
  $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
  running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
  sending hello command
  sending between command
  abort: no suitable response from remote hg!
  [255]
#endif

  $ hg clone "ssh://v-alid.example.com/" --debug
  running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
  sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
  sending hello command
  sending between command
  abort: no suitable response from remote hg!
  [255]

We should not have created a file named owned - if it exists, the
attack succeeded.
  $ if test -f owned; then echo 'you got owned'; fi

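For context on the attack class: ssh treats a leading - in its first
argument as an option, so an unvalidated host could smuggle in
-oProxyCommand, which runs an arbitrary shell command before any
connection is attempted. A hypothetical unsanitized invocation
(illustrative only, never actually produced by Mercurial) would look
like:

  ssh '-oProxyCommand=touch owned' 'hg -R path serve --stdio'

Mercurial blocks this by aborting on hosts, usernames, or paths that
begin with - (the "potentially unsafe url" aborts above) and, as the
"running sh -c ..." lines show, by passing hostile components to the
shell as single quoted arguments.
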
Cloning without fsmonitor enabled does not print a warning for small repos

  $ hg clone a fsmonitor-default
  updating to bookmark @ on branch stable
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved

Lower the warning threshold to simulate a large repo

  $ cat >> $HGRCPATH << EOF
  > [fsmonitor]
  > warn_update_file_count = 2
  > EOF

We should see a warning about no fsmonitor on supported platforms

#if linuxormacos no-fsmonitor
  $ hg clone a nofsmonitor
  updating to bookmark @ on branch stable
  (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
#else
  $ hg clone a nofsmonitor
  updating to bookmark @ on branch stable
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif

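The warning's suggested remedy is to enable the bundled fsmonitor
extension, which requires Watchman to be installed. A minimal sketch of
doing so (deliberately not executed in this test, since it would change
the output of the remaining steps):

  $ cat >> $HGRCPATH << EOF
  > [extensions]
  > fsmonitor =
  > EOF
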
We should not see warning about fsmonitor when it is enabled

#if fsmonitor
  $ hg clone a fsmonitor-enabled
  updating to bookmark @ on branch stable
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif

We can disable the fsmonitor warning

  $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
  updating to bookmark @ on branch stable
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved

Loaded fsmonitor but disabled in config should still print warning

#if linuxormacos fsmonitor
  $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
  updating to bookmark @ on branch stable
  (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif

Warning not printed if working directory isn't empty

  $ hg -q clone a fsmonitor-update
  (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
  $ cd fsmonitor-update
  $ hg up acb14030fe0a
  1 files updated, 0 files merged, 2 files removed, 0 files unresolved
  (leaving bookmark @)
  $ hg up cf0fe1914066
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

`hg update` from the null revision also prints the warning, since the
whole working directory has to be repopulated and the file count
crosses the lowered threshold

  $ hg up null
  0 files updated, 0 files merged, 2 files removed, 0 files unresolved

#if linuxormacos no-fsmonitor
  $ hg up cf0fe1914066
  (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
#else
  $ hg up cf0fe1914066
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif

  $ cd ..