##// END OF EJS Templates
changelog: never inline changelog...
marmoute -
r52074:dcaa2df1 default
parent child Browse files
Show More

The requested changes are too big and content was truncated. Show full diff

@@ -1,500 +1,506 b''
1 1 # changelog.py - changelog class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 from .i18n import _
10 10 from .node import (
11 11 bin,
12 12 hex,
13 13 )
14 14 from .thirdparty import attr
15 15
16 16 from . import (
17 17 encoding,
18 18 error,
19 19 metadata,
20 20 pycompat,
21 21 revlog,
22 22 )
23 23 from .utils import (
24 24 dateutil,
25 25 stringutil,
26 26 )
27 27 from .revlogutils import (
28 28 constants as revlog_constants,
29 29 flagutil,
30 30 )
31 31
32 32 _defaultextra = {b'branch': b'default'}
33 33
34 34
35 35 def _string_escape(text):
36 36 """
37 37 >>> from .pycompat import bytechr as chr
38 38 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
39 39 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
40 40 >>> s
41 41 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
42 42 >>> res = _string_escape(s)
43 43 >>> s == _string_unescape(res)
44 44 True
45 45 """
46 46 # subset of the string_escape codec
47 47 text = (
48 48 text.replace(b'\\', b'\\\\')
49 49 .replace(b'\n', b'\\n')
50 50 .replace(b'\r', b'\\r')
51 51 )
52 52 return text.replace(b'\0', b'\\0')
53 53
54 54
def _string_unescape(text):
    """Reverse ``_string_escape``; ``stringutil.unescapestr`` handles the
    common escapes, NUL needs special care."""
    if b'\\0' in text:
        # Tag every literal double-backslash with a newline marker so that
        # an escaped NUL (b'\\0') can be restored without also touching a
        # backslash that merely precedes the digit zero (b'\\\\0').
        text = text.replace(b'\\\\', b'\\\\\n').replace(b'\\0', b'\0')
        text = text.replace(b'\n', b'')
    return stringutil.unescapestr(text)
62 62
63 63
def decodeextra(text):
    """
    >>> from .pycompat import bytechr as chr
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
    >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
    ...                                 b'baz': chr(92) + chr(0) + b'2'})
    ...                    ).items())
    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
    """
    # start from the defaults so absent keys (e.g. b'branch') resolve
    extra = _defaultextra.copy()
    for entry in text.split(b'\0'):
        if not entry:
            continue
        key, value = _string_unescape(entry).split(b':', 1)
        extra[key] = value
    return extra
81 81
82 82
def encodeextra(d):
    """Encode an extra mapping into its \\0-separated changelog form.

    Keys are sorted so the same mapping always yields the same bytes — the
    changelog entry must be deterministic.
    """
    escaped = []
    for key in sorted(d):
        escaped.append(_string_escape(b'%s:%s' % (key, d[key])))
    return b"\0".join(escaped)
87 87
88 88
def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    cleaned = []
    for line in desc.splitlines():
        cleaned.append(line.rstrip())
    return b'\n'.join(cleaned).strip(b'\n')
92 92
93 93
@attr.s
class _changelogrevision:
    """Plain value object exposing the same attributes as
    ``changelogrevision``.

    ``changelogrevision.__new__`` returns one of these for an empty/missing
    revision text, where there is nothing to parse lazily.
    """

    # Extensions might modify _defaultextra, so let the constructor below pass
    # it in
    extra = attr.ib()
    # manifest node (binary)
    manifest = attr.ib()
    user = attr.ib(default=b'')
    # (time, timezone) pair
    date = attr.ib(default=(0, 0))
    files = attr.ib(default=attr.Factory(list))
    filesadded = attr.ib(default=None)
    filesremoved = attr.ib(default=None)
    p1copies = attr.ib(default=None)
    p2copies = attr.ib(default=None)
    description = attr.ib(default=b'')
    # (branch name, is-closed) — mirrors changelogrevision.branchinfo
    branchinfo = attr.ib(default=(_defaultextra[b'branch'], False))
109 109
110 110
class changelogrevision:
    """Holds results of a parsed changelog revision.

    Changelog revisions consist of multiple pieces of data, including
    the manifest node, user, and date. This object exposes a view into
    the parsed object: only the newline offsets are computed up front and
    each field is sliced out of the raw text lazily by a property.
    """

    __slots__ = (
        '_offsets',
        '_text',
        '_sidedata',
        '_cpsd',
        '_changes',
    )

    def __new__(cls, cl, text, sidedata, cpsd):
        # An empty text means there is nothing to parse: hand back the plain
        # attrs-based value object with default values instead.
        if not text:
            return _changelogrevision(extra=_defaultextra, manifest=cl.nullid)

        self = super(changelogrevision, cls).__new__(cls)
        # We could return here and implement the following as an __init__.
        # But doing it here is equivalent and saves an extra function call.

        # format used:
        # nodeid\n        : manifest node in ascii
        # user\n          : user, no \n or \r allowed
        # time tz extra\n : date (time is int or float, timezone is int)
        #                 : extra is metadata, encoded and separated by '\0'
        #                 : older versions ignore it
        # files\n\n       : files modified by the cset, no \n or \r allowed
        # (.*)            : comment (free text, ideally utf-8)
        #
        # changelog v0 doesn't use extra

        nl1 = text.index(b'\n')
        nl2 = text.index(b'\n', nl1 + 1)
        nl3 = text.index(b'\n', nl2 + 1)

        # The list of files may be empty. Which means nl3 is the first of the
        # double newline that precedes the description.
        if text[nl3 + 1 : nl3 + 2] == b'\n':
            doublenl = nl3
        else:
            doublenl = text.index(b'\n\n', nl3 + 1)

        # Only offsets are stored; properties below slice ``_text`` on demand.
        self._offsets = (nl1, nl2, nl3, doublenl)
        self._text = text
        self._sidedata = sidedata
        # _cpsd: whether copy/file metadata lives in sidedata rather than in
        # the 'extra' dict (see the callers passing 'changeset-sidedata')
        self._cpsd = cpsd
        # lazily-built files/copies information, cached by ``changes``
        self._changes = None

        return self

    @property
    def manifest(self):
        """manifest node as binary (first line of the entry, hex-decoded)"""
        return bin(self._text[0 : self._offsets[0]])

    @property
    def user(self):
        """committer/user string, converted to the local encoding"""
        off = self._offsets
        return encoding.tolocal(self._text[off[0] + 1 : off[1]])

    @property
    def _rawdate(self):
        # first two space-separated fields of the date line: time and tz
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        return dateextra.split(b' ', 2)[0:2]

    @property
    def _rawextra(self):
        # optional third field of the date line: the encoded 'extra' blob
        off = self._offsets
        dateextra = self._text[off[1] + 1 : off[2]]
        fields = dateextra.split(b' ', 2)
        if len(fields) != 3:
            return None

        return fields[2]

    @property
    def date(self):
        """(time, timezone) pair; timezone falls back to 0 on bad data"""
        raw = self._rawdate
        time = float(raw[0])
        # Various tools did silly things with the timezone.
        try:
            timezone = int(raw[1])
        except ValueError:
            timezone = 0

        return time, timezone

    @property
    def extra(self):
        """decoded 'extra' metadata dict; defaults when the field is absent"""
        raw = self._rawextra
        if raw is None:
            return _defaultextra

        return decodeextra(raw)

    @property
    def changes(self):
        """files/copies information, decoded once and cached"""
        if self._changes is not None:
            return self._changes
        if self._cpsd:
            # copy/file info is stored in sidedata
            changes = metadata.decode_files_sidedata(self._sidedata)
        else:
            # rebuild it from the legacy fields carried in text + extra
            changes = metadata.ChangingFiles(
                touched=self.files or (),
                added=self.filesadded or (),
                removed=self.filesremoved or (),
                p1_copies=self.p1copies or {},
                p2_copies=self.p2copies or {},
            )
        self._changes = changes
        return changes

    @property
    def files(self):
        """list of files touched by this changeset"""
        if self._cpsd:
            return sorted(self.changes.touched)
        off = self._offsets
        # equal offsets mean the files section is empty
        if off[2] == off[3]:
            return []

        return self._text[off[2] + 1 : off[3]].split(b'\n')

    @property
    def filesadded(self):
        """files added by this changeset, or None when not recorded"""
        if self._cpsd:
            return self.changes.added
        else:
            rawindices = self.extra.get(b'filesadded')
            if rawindices is None:
                return None
            return metadata.decodefileindices(self.files, rawindices)

    @property
    def filesremoved(self):
        """files removed by this changeset, or None when not recorded"""
        if self._cpsd:
            return self.changes.removed
        else:
            rawindices = self.extra.get(b'filesremoved')
            if rawindices is None:
                return None
            return metadata.decodefileindices(self.files, rawindices)

    @property
    def p1copies(self):
        """copies against p1, or None when not recorded"""
        if self._cpsd:
            return self.changes.copied_from_p1
        else:
            rawcopies = self.extra.get(b'p1copies')
            if rawcopies is None:
                return None
            return metadata.decodecopies(self.files, rawcopies)

    @property
    def p2copies(self):
        """copies against p2, or None when not recorded"""
        if self._cpsd:
            return self.changes.copied_from_p2
        else:
            rawcopies = self.extra.get(b'p2copies')
            if rawcopies is None:
                return None
            return metadata.decodecopies(self.files, rawcopies)

    @property
    def description(self):
        """commit message in the local encoding (text after the blank line;
        +2 skips the two newlines of the separator)"""
        return encoding.tolocal(self._text[self._offsets[3] + 2 :])

    @property
    def branchinfo(self):
        """(branch name, is-closed) pair derived from the extra dict"""
        extra = self.extra
        return encoding.tolocal(extra.get(b"branch")), b'close' in extra
285 285
286 286
class changelog(revlog.revlog):
    """Revlog subclass storing the changeset entries (``00changelog``)."""

    def __init__(self, opener, trypending=False, concurrencychecker=None):
        """Load a changelog revlog using an opener.

        If ``trypending`` is true, we attempt to load the index from a
        ``00changelog.i.a`` file instead of the default ``00changelog.i``.
        The ``00changelog.i.a`` file contains index (and possibly inline
        revision) data for a transaction that hasn't been finalized yet.
        It exists in a separate file to facilitate readers (such as
        hooks processes) accessing data before a transaction is finalized.

        ``concurrencychecker`` will be passed to the revlog init function, see
        the documentation there.
        """
        revlog.revlog.__init__(
            self,
            opener,
            target=(revlog_constants.KIND_CHANGELOG, None),
            radix=b'00changelog',
            checkambig=True,
            mmaplargeindex=True,
            persistentnodemap=opener.options.get(b'persistent-nodemap', False),
            concurrencychecker=concurrencychecker,
            trypending=trypending,
            # new changelogs never store revision data inline in the index
            may_inline=False,
        )

        if self._initempty and (self._format_version == revlog.REVLOGV1):
            # changelogs don't benefit from generaldelta.

            self._format_flags &= ~revlog.FLAG_GENERALDELTA
            self.delta_config.general_delta = False

        # Delta chains for changelogs tend to be very small because entries
        # tend to be small and don't delta well with each other. So disable
        # delta chains.
        self._storedeltachains = False

        # True while a revlog-v2 docket write is being delayed
        self._v2_delayed = False
        self._filteredrevs = frozenset()
        self._filteredrevs_hashcache = {}
        self._copiesstorage = opener.options.get(b'copies-storage')

    @property
    def filteredrevs(self):
        """frozenset of filtered revision numbers (see the setter)"""
        return self._filteredrevs

    @filteredrevs.setter
    def filteredrevs(self, val):
        # Ensure all updates go through this function
        assert isinstance(val, frozenset)
        self._filteredrevs = val
        # the hash cache is keyed on the filtered set, so invalidate it
        self._filteredrevs_hashcache = {}

    def _write_docket(self, tr):
        # while delaying, the docket write is postponed to
        # _finalize/_writepending
        if not self._v2_delayed:
            super(changelog, self)._write_docket(tr)

    def delayupdate(self, tr):
        """delay visibility of index updates to other readers"""
        assert not self._inner.is_open
        assert not self._may_inline
        # enforce that older changelog that are still inline are split at the
        # first opportunity.
        if self._inline:
            self._enforceinlinesize(tr)
        if self._docket is not None:
            # docket-based (v2) revlogs delay by postponing the docket write
            self._v2_delayed = True
        else:
            new_index = self._inner.delay()
            if new_index is not None:
                self._indexfile = new_index
                tr.registertmp(new_index)
        tr.addpending(b'cl-%i' % id(self), self._writepending)
        tr.addfinalize(b'cl-%i' % id(self), self._finalize)

    def _finalize(self, tr):
        """finalize index updates"""
        assert not self._inner.is_open
        if self._docket is not None:
            self._docket.write(tr)
            self._v2_delayed = False
        else:
            new_index_file = self._inner.finalize_pending()
            self._indexfile = new_index_file
            # split when we're done
            self._enforceinlinesize(tr, side_write=False)

    def _writepending(self, tr):
        """create a file containing the unfinalized state for
        pretxnchangegroup"""
        assert not self._inner.is_open
        if self._docket:
            any_pending = self._docket.write(tr, pending=True)
            self._v2_delayed = False
        else:
            new_index, any_pending = self._inner.write_pending()
            if new_index is not None:
                self._indexfile = new_index
                tr.registertmp(new_index)
        return any_pending

    def _enforceinlinesize(self, tr, side_write=True):
        # skip the inline-size check while writes are being delayed; the
        # split happens once the delay is resolved (see _finalize)
        if not self.is_delaying:
            revlog.revlog._enforceinlinesize(self, tr, side_write=side_write)

    def read(self, nodeorrev):
        """Obtain data from a parsed changelog revision.

        Returns a 6-tuple of:

           - manifest node in binary
           - author/user as a localstr
           - date as a 2-tuple of (time, timezone)
           - list of files
           - commit message as a localstr
           - dict of extra metadata

        Unless you need to access all fields, consider calling
        ``changelogrevision`` instead, as it is faster for partial object
        access.
        """
        d = self._revisiondata(nodeorrev)
        sidedata = self.sidedata(nodeorrev)
        copy_sd = self._copiesstorage == b'changeset-sidedata'
        c = changelogrevision(self, d, sidedata, copy_sd)
        return (c.manifest, c.user, c.date, c.files, c.description, c.extra)

    def changelogrevision(self, nodeorrev):
        """Obtain a ``changelogrevision`` for a node or revision."""
        text = self._revisiondata(nodeorrev)
        sidedata = self.sidedata(nodeorrev)
        return changelogrevision(
            self, text, sidedata, self._copiesstorage == b'changeset-sidedata'
        )

    def readfiles(self, nodeorrev):
        """
        short version of read that only returns the files modified by the cset
        """
        text = self.revision(nodeorrev)
        if not text:
            return []
        # first three lines are manifest node, user and date; everything up
        # to the blank line after that is the file list
        last = text.index(b"\n\n")
        l = text[:last].split(b'\n')
        return l[3:]

    def add(
        self,
        manifest,
        files,
        desc,
        transaction,
        p1,
        p2,
        user,
        date=None,
        extra=None,
    ):
        """Add a changeset revision to the changelog, return its node.

        ``files`` carries the changed-files metadata (its ``touched`` and
        copy information are read here).  ``date`` defaults to the current
        time when not provided.  Raises StorageError for invalid user names
        or reserved branch names.
        """
        # Convert to UTF-8 encoded bytestrings as the very first
        # thing: calling any method on a localstr object will turn it
        # into a str object and the cached UTF-8 string is thus lost.
        user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

        user = user.strip()
        # An empty username or a username with a "\n" will make the
        # revision text contain two "\n\n" sequences -> corrupt
        # repository since read cannot unpack the revision.
        if not user:
            raise error.StorageError(_(b"empty username"))
        if b"\n" in user:
            raise error.StorageError(
                _(b"username %r contains a newline") % pycompat.bytestr(user)
            )

        desc = stripdesc(desc)

        if date:
            parseddate = b"%d %d" % dateutil.parsedate(date)
        else:
            parseddate = b"%d %d" % dateutil.makedate()
        if extra:
            branch = extra.get(b"branch")
            if branch in (b"default", b""):
                # the default branch is implicit; don't store it
                del extra[b"branch"]
            elif branch in (b".", b"null", b"tip"):
                raise error.StorageError(
                    _(b'the name \'%s\' is reserved') % branch
                )
        sortedfiles = sorted(files.touched)
        flags = 0
        sidedata = None
        if self._copiesstorage == b'changeset-sidedata':
            if files.has_copies_info:
                flags |= flagutil.REVIDX_HASCOPIESINFO
            sidedata = metadata.encode_files_sidedata(files)

        if extra:
            # the encoded extras ride on the date line, space-separated
            extra = encodeextra(extra)
            parseddate = b"%s %s" % (parseddate, extra)
        l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
        text = b"\n".join(l)
        rev = self.addrevision(
            text, transaction, len(self), p1, p2, sidedata=sidedata, flags=flags
        )
        return self.node(rev)

    def branchinfo(self, rev):
        """return the branch name and open/close state of a revision

        This function exists because creating a changectx object
        just to access this is costly."""
        return self.changelogrevision(rev).branchinfo

    def _nodeduplicatecallback(self, transaction, rev):
        # keep track of revisions that got "re-added", e.g. unbundle of a
        # known rev.
        #
        # We track them in a list to preserve their order from the source bundle
        duplicates = transaction.changes.setdefault(b'revduplicates', [])
        duplicates.append(rev)
@@ -1,4246 +1,4062 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 # coding: utf8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Storage back-end for Mercurial.
10 10
11 11 This provides efficient delta storage with O(1) retrieve and append
12 12 and O(changes) merge between branches.
13 13 """
14 14
15 15
16 16 import binascii
17 17 import collections
18 18 import contextlib
19 19 import io
20 20 import os
21 21 import struct
22 22 import weakref
23 23 import zlib
24 24
25 25 # import stuff from node for others to import from revlog
26 26 from .node import (
27 27 bin,
28 28 hex,
29 29 nullrev,
30 30 sha1nodeconstants,
31 31 short,
32 32 wdirrev,
33 33 )
34 34 from .i18n import _
35 35 from .revlogutils.constants import (
36 36 ALL_KINDS,
37 37 CHANGELOGV2,
38 38 COMP_MODE_DEFAULT,
39 39 COMP_MODE_INLINE,
40 40 COMP_MODE_PLAIN,
41 41 DELTA_BASE_REUSE_NO,
42 42 DELTA_BASE_REUSE_TRY,
43 43 ENTRY_RANK,
44 44 FEATURES_BY_VERSION,
45 45 FLAG_GENERALDELTA,
46 46 FLAG_INLINE_DATA,
47 47 INDEX_HEADER,
48 48 KIND_CHANGELOG,
49 49 KIND_FILELOG,
50 50 RANK_UNKNOWN,
51 51 REVLOGV0,
52 52 REVLOGV1,
53 53 REVLOGV1_FLAGS,
54 54 REVLOGV2,
55 55 REVLOGV2_FLAGS,
56 56 REVLOG_DEFAULT_FLAGS,
57 57 REVLOG_DEFAULT_FORMAT,
58 58 REVLOG_DEFAULT_VERSION,
59 59 SUPPORTED_FLAGS,
60 60 )
61 61 from .revlogutils.flagutil import (
62 62 REVIDX_DEFAULT_FLAGS,
63 63 REVIDX_ELLIPSIS,
64 64 REVIDX_EXTSTORED,
65 65 REVIDX_FLAGS_ORDER,
66 66 REVIDX_HASCOPIESINFO,
67 67 REVIDX_ISCENSORED,
68 68 REVIDX_RAWTEXT_CHANGING_FLAGS,
69 69 )
70 70 from .thirdparty import attr
71 71 from . import (
72 72 ancestor,
73 73 dagop,
74 74 error,
75 75 mdiff,
76 76 policy,
77 77 pycompat,
78 78 revlogutils,
79 79 templatefilters,
80 80 util,
81 81 )
82 82 from .interfaces import (
83 83 repository,
84 84 util as interfaceutil,
85 85 )
86 86 from .revlogutils import (
87 87 deltas as deltautil,
88 88 docket as docketutil,
89 89 flagutil,
90 90 nodemap as nodemaputil,
91 91 randomaccessfile,
92 92 revlogv0,
93 93 rewrite,
94 94 sidedata as sidedatautil,
95 95 )
96 96 from .utils import (
97 97 storageutil,
98 98 stringutil,
99 99 )
100 100
101 101 # blanked usage of all the name to prevent pyflakes constraints
102 102 # We need these name available in the module for extensions.
103 103
104 104 REVLOGV0
105 105 REVLOGV1
106 106 REVLOGV2
107 107 CHANGELOGV2
108 108 FLAG_INLINE_DATA
109 109 FLAG_GENERALDELTA
110 110 REVLOG_DEFAULT_FLAGS
111 111 REVLOG_DEFAULT_FORMAT
112 112 REVLOG_DEFAULT_VERSION
113 113 REVLOGV1_FLAGS
114 114 REVLOGV2_FLAGS
115 115 REVIDX_ISCENSORED
116 116 REVIDX_ELLIPSIS
117 117 REVIDX_HASCOPIESINFO
118 118 REVIDX_EXTSTORED
119 119 REVIDX_DEFAULT_FLAGS
120 120 REVIDX_FLAGS_ORDER
121 121 REVIDX_RAWTEXT_CHANGING_FLAGS
122 122
123 123 parsers = policy.importmod('parsers')
124 124 rustancestor = policy.importrust('ancestor')
125 125 rustdagop = policy.importrust('dagop')
126 126 rustrevlog = policy.importrust('revlog')
127 127
128 128 # Aliased for performance.
129 129 _zlibdecompress = zlib.decompress
130 130
131 131 # max size of inline data embedded into a revlog
132 132 _maxinline = 131072
133 133
134 134 # Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    """Read-side flag processor for REVIDX_ELLIPSIS: text passes through,
    and the raw-text-validity flag is False."""
    return (text, False)
137 137
138 138
def ellipsiswriteprocessor(rl, text):
    """Write-side flag processor for REVIDX_ELLIPSIS: text passes through,
    and the raw-text-validity flag is False."""
    return (text, False)
141 141
142 142
def ellipsisrawprocessor(rl, text):
    """Raw flag processor for REVIDX_ELLIPSIS: the raw text of an ellipsis
    revision is never considered valid for hashing."""
    return False
145 145
146 146
147 147 ellipsisprocessor = (
148 148 ellipsisreadprocessor,
149 149 ellipsiswriteprocessor,
150 150 ellipsisrawprocessor,
151 151 )
152 152
153 153
154 154 def _verify_revision(rl, skipflags, state, node):
155 155 """Verify the integrity of the given revlog ``node`` while providing a hook
156 156 point for extensions to influence the operation."""
157 157 if skipflags:
158 158 state[b'skipread'].add(node)
159 159 else:
160 160 # Side-effect: read content and verify hash.
161 161 rl.revision(node)
162 162
163 163
164 164 # True if a fast implementation for persistent-nodemap is available
165 165 #
166 166 # We also consider we have a "fast" implementation in "pure" python because
167 167 # people using pure don't really have performance consideration (and a
168 168 # wheelbarrow of other slowness source)
169 169 HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or hasattr(
170 170 parsers, 'BaseIndexObject'
171 171 )
172 172
173 173
174 174 @interfaceutil.implementer(repository.irevisiondelta)
175 175 @attr.s(slots=True)
176 176 class revlogrevisiondelta:
177 177 node = attr.ib()
178 178 p1node = attr.ib()
179 179 p2node = attr.ib()
180 180 basenode = attr.ib()
181 181 flags = attr.ib()
182 182 baserevisionsize = attr.ib()
183 183 revision = attr.ib()
184 184 delta = attr.ib()
185 185 sidedata = attr.ib()
186 186 protocol_flags = attr.ib()
187 187 linknode = attr.ib(default=None)
188 188
189 189
190 190 @interfaceutil.implementer(repository.iverifyproblem)
191 191 @attr.s(frozen=True)
192 192 class revlogproblem:
193 193 warning = attr.ib(default=None)
194 194 error = attr.ib(default=None)
195 195 node = attr.ib(default=None)
196 196
197 197
def parse_index_v1(data, inline):
    """Parse a v1 revlog index with the C parser; returns (index, cache)."""
    return parsers.parse_index2(data, inline)
202 202
203 203
def parse_index_v2(data, inline):
    """Parse a v2 revlog index with the C parser; returns (index, cache)."""
    return parsers.parse_index2(data, inline, format=REVLOGV2)
208 208
209 209
def parse_index_cl_v2(data, inline):
    """Parse a changelog-v2 index with the C parser; returns (index, cache)."""
    return parsers.parse_index2(data, inline, format=CHANGELOGV2)
214 214
215 215
if hasattr(parsers, 'parse_index_devel_nodemap'):

    def parse_index_v1_nodemap(data, inline):
        # development variant of the C parser that also exercises the
        # persistent-nodemap code path
        index, cache = parsers.parse_index_devel_nodemap(data, inline)
        return index, cache


else:
    # C module built without the devel nodemap parser
    parse_index_v1_nodemap = None
225 225
226 226
def parse_index_v1_mixed(data, inline):
    # parse with the C parser, then wrap the result in the Rust MixedIndex
    # adapter so Rust code can use it too
    index, cache = parse_index_v1(data, inline)
    return rustrevlog.MixedIndex(index), cache
230 230
231 231
232 232 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
233 233 # signed integer)
234 234 _maxentrysize = 0x7FFFFFFF
235 235
236 236 FILE_TOO_SHORT_MSG = _(
237 237 b'cannot read from revlog %s;'
238 238 b' expected %d bytes from offset %d, data size is %d'
239 239 )
240 240
241 241 hexdigits = b'0123456789abcdefABCDEF'
242 242
243 243
244 244 class _Config:
245 245 def copy(self):
246 246 return self.__class__(**self.__dict__)
247 247
248 248
@attr.s()
class FeatureConfig(_Config):
    """Hold configuration values about the available revlog features"""

    # the default compression engine
    compression_engine = attr.ib(default=b'zlib')
    # compression engines options
    compression_engine_options = attr.ib(default=attr.Factory(dict))

    # can we use censor on this revlog
    censorable = attr.ib(default=False)
    # does this revlog use the "side data" feature
    has_side_data = attr.ib(default=False)
    # might remove rank configuration once the computation has no impact
    compute_rank = attr.ib(default=False)
    # parent order is supposed to be semantically irrelevant, so we
    # normally resort parents to ensure that the first parent is non-null,
    # if there is a non-null parent at all.
    # filelog abuses the parent order as flag to mark some instances of
    # meta-encoded files, so allow it to disable this behavior.
    canonical_parent_order = attr.ib(default=False)
    # can ellipsis commit be used
    enable_ellipsis = attr.ib(default=False)

    def copy(self):
        """Copy the config; the options dict is duplicated so the copy can
        be mutated independently."""
        new = super().copy()
        new.compression_engine_options = self.compression_engine_options.copy()
        return new
277 277
278 278
@attr.s()
class DataConfig(_Config):
    """Hold configuration value about how the revlog data are read"""

    # should we try to open the "pending" version of the revlog
    try_pending = attr.ib(default=False)
    # should we try to open the "split" version of the revlog
    try_split = attr.ib(default=False)
    #  When True, indexfile should be opened with checkambig=True at writing,
    #  to avoid file stat ambiguity.
    check_ambig = attr.ib(default=False)

    # If true, use mmap instead of reading to deal with large index
    mmap_large_index = attr.ib(default=False)
    # how much data is considered large
    mmap_index_threshold = attr.ib(default=None)
    # How much data to read and cache into the raw revlog data cache.
    chunk_cache_size = attr.ib(default=65536)

    # The size of the uncompressed cache compared to the largest revision seen.
    uncompressed_cache_factor = attr.ib(default=None)

    # The number of chunk cached
    uncompressed_cache_count = attr.ib(default=None)

    # Allow sparse reading of the revlog data
    with_sparse_read = attr.ib(default=False)
    # minimal density of a sparse read chunk
    sr_density_threshold = attr.ib(default=0.50)
    # minimal size of data we skip when performing sparse read
    sr_min_gap_size = attr.ib(default=262144)

    # are delta encoded against arbitrary bases.
    generaldelta = attr.ib(default=False)
313 313
314 314
@attr.s()
class DeltaConfig(_Config):
    """Hold configuration value about how new delta are computed

    Some attributes are duplicated from DataConfig to help having each object
    self contained.
    """

    # can delta be encoded against arbitrary bases.
    general_delta = attr.ib(default=False)
    # Allow sparse writing of the revlog data
    sparse_revlog = attr.ib(default=False)
    # maximum length of a delta chain
    max_chain_len = attr.ib(default=None)
    # Maximum distance between delta chain base start and end
    max_deltachain_span = attr.ib(default=-1)
    # If `upper_bound_comp` is not None, this is the expected maximal gain from
    # compression for the data content.
    upper_bound_comp = attr.ib(default=None)
    # Should we try a delta against both parent
    delta_both_parents = attr.ib(default=True)
    # Test delta base candidate group by chunk of this maximal size.
    candidate_group_chunk_size = attr.ib(default=0)
    # Should we display debug information about delta computation
    debug_delta = attr.ib(default=False)
    # trust incoming delta by default
    lazy_delta = attr.ib(default=True)
    # trust the base of incoming delta by default
    lazy_delta_base = attr.ib(default=False)
344 344
345 345
346 346 class _InnerRevlog:
347 347 """An inner layer of the revlog object
348 348
349 349 That layer exist to be able to delegate some operation to Rust, its
350 350 boundaries are arbitrary and based on what we can delegate to Rust.
351 351 """
352 352
    def __init__(
        self,
        opener,
        index,
        index_file,
        data_file,
        sidedata_file,
        inline,
        data_config,
        delta_config,
        feature_config,
        chunk_cache,
        default_compression_header,
    ):
        """Wire up the inner revlog state: file names, config objects and
        the segment-file readers for data and sidedata."""
        self.opener = opener
        self.index = index

        # stored name-mangled behind the ``index_file`` property so the
        # setter can keep the inline segment file's name in sync
        self.__index_file = index_file
        self.data_file = data_file
        self.sidedata_file = sidedata_file
        self.inline = inline
        self.data_config = data_config
        self.delta_config = delta_config
        self.feature_config = feature_config

        # used during diverted write.
        self._orig_index_file = None

        self._default_compression_header = default_compression_header

        # index

        # 3-tuple of file handles being used for active writing.
        self._writinghandles = None

        # inline revlogs keep their data inside the index file itself
        self._segmentfile = randomaccessfile.randomaccessfile(
            self.opener,
            (self.index_file if self.inline else self.data_file),
            self.data_config.chunk_cache_size,
            chunk_cache,
        )
        self._segmentfile_sidedata = randomaccessfile.randomaccessfile(
            self.opener,
            self.sidedata_file,
            self.data_config.chunk_cache_size,
        )

        # revlog header -> revlog compressor
        self._decompressors = {}
        # 3-tuple of (node, rev, text) for a raw revision.
        self._revisioncache = None

        # cache some uncompressed chunks
        # rev → uncompressed_chunk
        #
        # the max cost is dynamically updated to be proportional to the
        # size of revision we actually encounter.
        self._uncompressed_chunk_cache = None
        if self.data_config.uncompressed_cache_factor is not None:
            self._uncompressed_chunk_cache = util.lrucachedict(
                self.data_config.uncompressed_cache_count,
                maxcost=65536,  # some arbitrary initial value
            )

        # in-memory buffer used while delaying index writes
        # (see ``is_delaying``)
        self._delay_buffer = None
418 418
    @property
    def index_file(self):
        """name/path of the index file this revlog reads and writes"""
        return self.__index_file
422 422
    @index_file.setter
    def index_file(self, new_index_file):
        self.__index_file = new_index_file
        if self.inline:
            # inline revlogs store data in the index file, so the segment
            # reader must follow the name change
            self._segmentfile.filename = new_index_file
428 428
    def __len__(self):
        """number of revisions, i.e. the length of the index"""
        return len(self.index)
431 431
    def clear_cache(self):
        """Drop the revision, uncompressed-chunk and segment-file caches."""
        # clearing caches while delaying would lose buffered writes
        assert not self.is_delaying
        self._revisioncache = None
        if self._uncompressed_chunk_cache is not None:
            self._uncompressed_chunk_cache.clear()
        self._segmentfile.clear_cache()
        self._segmentfile_sidedata.clear_cache()
439 439
440 440 @property
441 441 def canonical_index_file(self):
442 442 if self._orig_index_file is not None:
443 443 return self._orig_index_file
444 444 return self.index_file
445 445
    @property
    def is_delaying(self):
        """is the revlog currently delaying the visibility of written data?

        The delaying mechanism can be either in-memory or written on disk in a
        side-file."""
        return (self._delay_buffer is not None) or (
            self._orig_index_file is not None
        )
455 455
456 456 # Derived from index values.
457 457
458 458 def start(self, rev):
459 459 """the offset of the data chunk for this revision"""
460 460 return int(self.index[rev][0] >> 16)
461 461
462 462 def length(self, rev):
463 463 """the length of the data chunk for this revision"""
464 464 return self.index[rev][1]
465 465
466 466 def end(self, rev):
467 467 """the end of the data chunk for this revision"""
468 468 return self.start(rev) + self.length(rev)
469 469
470 470 def deltaparent(self, rev):
471 471 """return deltaparent of the given revision"""
472 472 base = self.index[rev][3]
473 473 if base == rev:
474 474 return nullrev
475 475 elif self.delta_config.general_delta:
476 476 return base
477 477 else:
478 478 return rev - 1
479 479
    def issnapshot(self, rev):
        """tells whether rev is a snapshot (full text or intermediate base)"""
        if not self.delta_config.sparse_revlog:
            # without sparse-revlog only full texts are snapshots
            return self.deltaparent(rev) == nullrev
        elif hasattr(self.index, 'issnapshot'):
            # directly assign the method to cache the testing and access
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        base = entry[3]
        if base == rev:
            # delta base is itself: a stored full text
            return True
        if base == nullrev:
            return True
        # walk past parents whose stored delta is empty, since such a parent
        # shares its content with its own delta base
        p1 = entry[5]
        while self.length(p1) == 0:
            b = self.deltaparent(p1)
            if b == p1:
                break
            p1 = b
        p2 = entry[6]
        while self.length(p2) == 0:
            b = self.deltaparent(p2)
            if b == p2:
                break
            p2 = b
        # a delta against a parent is a regular delta, not a snapshot
        if base == p1 or base == p2:
            return False
        # otherwise rev is a snapshot only if its base is one too
        return self.issnapshot(base)
511 511
    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        generaldelta = self.delta_config.general_delta
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index

        iterrev = rev
        e = index[iterrev]
        # walk backwards until the chain base (an entry whose delta base is
        # itself) or the requested stop revision
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                # without general-delta each rev deltas against rev - 1
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            # the chain base itself belongs to the chain
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped
552 552
    @util.propertycache
    def _compressor(self):
        # lazily built compressor for the configured engine
        engine = util.compengines[self.feature_config.compression_engine]
        return engine.revlogcompressor(
            self.feature_config.compression_engine_options
        )

    @util.propertycache
    def _decompressor(self):
        """the default decompressor

        None when the revlog declares no default compression header;
        otherwise the bound ``decompress`` method of the matching engine."""
        if self._default_compression_header is None:
            return None
        t = self._default_compression_header
        c = self._get_decompressor(t)
        return c.decompress
568 568
    def _get_decompressor(self, t):
        """return (and memoize) the decompressor for header byte ``t``

        Raises RevlogError when no compression engine is registered for
        that header."""
        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor(
                    self.feature_config.compression_engine_options
                )
                # cache for subsequent chunks using the same header
                self._decompressors[t] = compressor
            except KeyError:
                raise error.RevlogError(
                    _(b'unknown compression type %s') % binascii.hexlify(t)
                )
        return compressor
584 584
585 585 def compress(self, data):
586 586 """Generate a possibly-compressed representation of data."""
587 587 if not data:
588 588 return b'', data
589 589
590 590 compressed = self._compressor.compress(data)
591 591
592 592 if compressed:
593 593 # The revlog compressor added the header in the returned data.
594 594 return b'', compressed
595 595
596 596 if data[0:1] == b'\0':
597 597 return b'', data
598 598 return b'u', data
599 599
    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.

        Returns the uncompressed payload (a bytes-like object).
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely by at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == b'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise error.RevlogError(
                    _(b'revlog decompress error: %s')
                    % stringutil.forcebytestr(e)
                )
        # '\0' is more common than 'u' so it goes first.
        elif t == b'\0':
            return data
        elif t == b'u':
            # 'u' flags raw data: strip the one-byte header
            return util.buffer(data, 1)

        # uncommon header: route through the registered engines
        compressor = self._get_decompressor(t)

        return compressor.decompress(data)
649 649
    @contextlib.contextmanager
    def reading(self):
        """Context manager that keeps data and sidedata files open for reading"""
        if len(self.index) == 0:
            yield  # nothing to be read
        else:
            with self._segmentfile.reading():
                with self._segmentfile_sidedata.reading():
                    yield
659 659
    @property
    def is_writing(self):
        """True if a writing context is open"""
        return self._writinghandles is not None

    @property
    def is_open(self):
        """True if any file handle is being held

        Used for assert and debug in the python code"""
        return self._segmentfile.is_open or self._segmentfile_sidedata.is_open
671 671
    @contextlib.contextmanager
    def writing(self, transaction, data_end=None, sidedata_end=None):
        """Open the revlog files for writing

        Add content to a revlog should be done within such context.

        ``data_end`` and ``sidedata_end`` give explicit resume offsets;
        when None the data file is positioned at its physical end.
        """
        if self.is_writing:
            # already within a writing context: nest transparently
            yield
        else:
            ifh = dfh = sdfh = None
            try:
                r = len(self.index)
                # opening the data file.
                dsize = 0
                if r:
                    dsize = self.end(r - 1)
                dfh = None
                if not self.inline:
                    try:
                        dfh = self.opener(self.data_file, mode=b"r+")
                        if data_end is None:
                            dfh.seek(0, os.SEEK_END)
                        else:
                            dfh.seek(data_end, os.SEEK_SET)
                    except FileNotFoundError:
                        dfh = self.opener(self.data_file, mode=b"w+")
                    transaction.add(self.data_file, dsize)
                if self.sidedata_file is not None:
                    assert sidedata_end is not None
                    # revlog-v2 does not inline, help Pytype
                    assert dfh is not None
                    try:
                        sdfh = self.opener(self.sidedata_file, mode=b"r+")
                        # NOTE(review): this seeks the *data* handle with the
                        # sidedata offset — confirm it should not be
                        # ``sdfh.seek(sidedata_end, os.SEEK_SET)``.
                        dfh.seek(sidedata_end, os.SEEK_SET)
                    except FileNotFoundError:
                        sdfh = self.opener(self.sidedata_file, mode=b"w+")
                        transaction.add(self.sidedata_file, sidedata_end)

                # opening the index file.
                isize = r * self.index.entry_size
                ifh = self.__index_write_fp()
                if self.inline:
                    transaction.add(self.index_file, dsize + isize)
                else:
                    transaction.add(self.index_file, isize)
                # exposing all file handle for writing.
                self._writinghandles = (ifh, dfh, sdfh)
                self._segmentfile.writing_handle = ifh if self.inline else dfh
                self._segmentfile_sidedata.writing_handle = sdfh
                yield
            finally:
                self._writinghandles = None
                self._segmentfile.writing_handle = None
                self._segmentfile_sidedata.writing_handle = None
                if dfh is not None:
                    dfh.close()
                if sdfh is not None:
                    sdfh.close()
                # closing the index file last to avoid exposing referent to
                # potential unflushed data content.
                if ifh is not None:
                    ifh.close()
734 734
    def __index_write_fp(self, index_end=None):
        """internal method to open the index file for writing

        You should not use this directly and use `_writing` instead

        ``index_end`` gives an explicit offset to resume writing at; when
        None the file is positioned at its physical end.
        """
        try:
            if self._delay_buffer is None:
                f = self.opener(
                    self.index_file,
                    mode=b"r+",
                    checkambig=self.data_config.check_ambig,
                )
            else:
                # check_ambig affects the way we open a file for writing,
                # however here we do not actually open a file for writing as
                # writes will be appended to a delay_buffer. So check_ambig
                # is not meaningful and unneeded here.
                f = randomaccessfile.appender(
                    self.opener, self.index_file, b"r+", self._delay_buffer
                )
            if index_end is None:
                f.seek(0, os.SEEK_END)
            else:
                f.seek(index_end, os.SEEK_SET)
            return f
        except FileNotFoundError:
            # no index yet: create it
            if self._delay_buffer is None:
                return self.opener(
                    self.index_file,
                    mode=b"w+",
                    checkambig=self.data_config.check_ambig,
                )
            else:
                return randomaccessfile.appender(
                    self.opener, self.index_file, b"w+", self._delay_buffer
                )
771 771
    def __index_new_fp(self):
        """internal method to create a new index file for writing

        You should not use this unless you are upgrading from inline revlog

        The file is opened atomically: content replaces the old index only
        when the handle is closed successfully."""
        return self.opener(
            self.index_file,
            mode=b"w",
            checkambig=self.data_config.check_ambig,
            atomictemp=True,
        )
783 783
    def split_inline(self, tr, header, new_index_file_path=None):
        """split the data of an inline revlog into an index and a data file

        Returns the (possibly new) index file path. ``header`` is the new
        index header to write (with the inline flag cleared by the caller).
        """
        assert self._delay_buffer is None
        existing_handles = False
        if self._writinghandles is not None:
            existing_handles = True
            fp = self._writinghandles[0]
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None
            self._segmentfile.writing_handle = None
            # No need to deal with sidedata writing handle as it is only
            # relevant with revlog-v2 which is never inline, not reaching
            # this code

        new_dfh = self.opener(self.data_file, mode=b"w+")
        new_dfh.truncate(0)  # drop any potentially existing data
        try:
            with self.reading():
                # copy every revision's raw data segment to the data file
                for r in range(len(self.index)):
                    new_dfh.write(self.get_segment_for_revs(r, r)[1])
                new_dfh.flush()

            if new_index_file_path is not None:
                self.index_file = new_index_file_path
            with self.__index_new_fp() as fp:
                self.inline = False
                # rewrite every index entry; the first one carries the new
                # (non-inline) header
                for i in range(len(self.index)):
                    e = self.index.entry_binary(i)
                    if i == 0:
                        packed_header = self.index.pack_header(header)
                        e = packed_header + e
                    fp.write(e)

                # If we don't use side-write, the temp file replace the real
                # index when we exit the context manager

            # reset the segment reader onto the stand-alone data file
            self._segmentfile = randomaccessfile.randomaccessfile(
                self.opener,
                self.data_file,
                self.data_config.chunk_cache_size,
            )

            if existing_handles:
                # switched from inline to conventional reopen the index
                ifh = self.__index_write_fp()
                self._writinghandles = (ifh, new_dfh, None)
                self._segmentfile.writing_handle = new_dfh
                new_dfh = None
                # No need to deal with sidedata writing handle as it is only
                # relevant with revlog-v2 which is never inline, not reaching
                # this code
        finally:
            if new_dfh is not None:
                new_dfh.close()
        return self.index_file
842 842
843 843 def get_segment_for_revs(self, startrev, endrev):
844 844 """Obtain a segment of raw data corresponding to a range of revisions.
845 845
846 846 Accepts the start and end revisions and an optional already-open
847 847 file handle to be used for reading. If the file handle is read, its
848 848 seek position will not be preserved.
849 849
850 850 Requests for data may be satisfied by a cache.
851 851
852 852 Returns a 2-tuple of (offset, data) for the requested range of
853 853 revisions. Offset is the integer offset from the beginning of the
854 854 revlog and data is a str or buffer of the raw byte data.
855 855
856 856 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
857 857 to determine where each revision's data begins and ends.
858 858
859 859 API: we should consider making this a private part of the InnerRevlog
860 860 at some point.
861 861 """
862 862 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
863 863 # (functions are expensive).
864 864 index = self.index
865 865 istart = index[startrev]
866 866 start = int(istart[0] >> 16)
867 867 if startrev == endrev:
868 868 end = start + istart[1]
869 869 else:
870 870 iend = index[endrev]
871 871 end = int(iend[0] >> 16) + iend[1]
872 872
873 873 if self.inline:
874 874 start += (startrev + 1) * self.index.entry_size
875 875 end += (endrev + 1) * self.index.entry_size
876 876 length = end - start
877 877
878 878 return start, self._segmentfile.read_chunk(start, length)
879 879
    def _chunk(self, rev):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        if self._uncompressed_chunk_cache is not None:
            uncomp = self._uncompressed_chunk_cache.get(rev)
            if uncomp is not None:
                return uncomp

        # entry field 10 stores the chunk's compression mode
        compression_mode = self.index[rev][10]
        data = self.get_segment_for_revs(rev, rev)[1]
        if compression_mode == COMP_MODE_PLAIN:
            uncomp = data
        elif compression_mode == COMP_MODE_DEFAULT:
            # revlog-wide default engine (no per-chunk header)
            uncomp = self._decompressor(data)
        elif compression_mode == COMP_MODE_INLINE:
            # engine identified by the chunk's own header byte
            uncomp = self.decompress(data)
        else:
            msg = b'unknown compression mode %d'
            msg %= compression_mode
            raise error.RevlogError(msg)
        if self._uncompressed_chunk_cache is not None:
            self._uncompressed_chunk_cache.insert(rev, uncomp, cost=len(uncomp))
        return uncomp
909 909
910 910 def _chunks(self, revs, targetsize=None):
911 911 """Obtain decompressed chunks for the specified revisions.
912 912
913 913 Accepts an iterable of numeric revisions that are assumed to be in
914 914 ascending order. Also accepts an optional already-open file handle
915 915 to be used for reading. If used, the seek position of the file will
916 916 not be preserved.
917 917
918 918 This function is similar to calling ``self._chunk()`` multiple times,
919 919 but is faster.
920 920
921 921 Returns a list with decompressed data for each requested revision.
922 922 """
923 923 if not revs:
924 924 return []
925 925 start = self.start
926 926 length = self.length
927 927 inline = self.inline
928 928 iosize = self.index.entry_size
929 929 buffer = util.buffer
930 930
931 931 fetched_revs = []
932 932 fadd = fetched_revs.append
933 933
934 934 chunks = []
935 935 ladd = chunks.append
936 936
937 937 if self._uncompressed_chunk_cache is None:
938 938 fetched_revs = revs
939 939 else:
940 940 for rev in revs:
941 941 cached_value = self._uncompressed_chunk_cache.get(rev)
942 942 if cached_value is None:
943 943 fadd(rev)
944 944 else:
945 945 ladd((rev, cached_value))
946 946
947 947 if not fetched_revs:
948 948 slicedchunks = ()
949 949 elif not self.data_config.with_sparse_read:
950 950 slicedchunks = (fetched_revs,)
951 951 else:
952 952 slicedchunks = deltautil.slicechunk(
953 953 self,
954 954 fetched_revs,
955 955 targetsize=targetsize,
956 956 )
957 957
958 958 for revschunk in slicedchunks:
959 959 firstrev = revschunk[0]
960 960 # Skip trailing revisions with empty diff
961 961 for lastrev in revschunk[::-1]:
962 962 if length(lastrev) != 0:
963 963 break
964 964
965 965 try:
966 966 offset, data = self.get_segment_for_revs(firstrev, lastrev)
967 967 except OverflowError:
968 968 # issue4215 - we can't cache a run of chunks greater than
969 969 # 2G on Windows
970 970 for rev in revschunk:
971 971 ladd((rev, self._chunk(rev)))
972 972
973 973 decomp = self.decompress
974 974 # self._decompressor might be None, but will not be used in that case
975 975 def_decomp = self._decompressor
976 976 for rev in revschunk:
977 977 chunkstart = start(rev)
978 978 if inline:
979 979 chunkstart += (rev + 1) * iosize
980 980 chunklength = length(rev)
981 981 comp_mode = self.index[rev][10]
982 982 c = buffer(data, chunkstart - offset, chunklength)
983 983 if comp_mode == COMP_MODE_PLAIN:
984 984 c = c
985 985 elif comp_mode == COMP_MODE_INLINE:
986 986 c = decomp(c)
987 987 elif comp_mode == COMP_MODE_DEFAULT:
988 988 c = def_decomp(c)
989 989 else:
990 990 msg = b'unknown compression mode %d'
991 991 msg %= comp_mode
992 992 raise error.RevlogError(msg)
993 993 ladd((rev, c))
994 994 if self._uncompressed_chunk_cache is not None:
995 995 self._uncompressed_chunk_cache.insert(rev, c, len(c))
996 996
997 997 chunks.sort()
998 998 return [x[1] for x in chunks]
999 999
    def raw_text(self, node, rev):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)
        """

        # revision in the cache (could be useful to apply delta)
        cachedrev = None
        # An intermediate text to apply deltas to
        basetext = None

        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._revisioncache:
            cachedrev = self._revisioncache[1]

        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
        if stopped:
            # the chain walk stopped at the cached revision: start patching
            # from its cached text instead of the chain base
            basetext = self._revisioncache[2]

        # drop cache to save memory, the caller is expected to
        # update self._inner._revisioncache after validating the text
        self._revisioncache = None

        targetsize = None
        rawsize = self.index[rev][2]
        if 0 <= rawsize:
            # bound used when slicing the read (negative means unknown size)
            targetsize = 4 * rawsize

        if self._uncompressed_chunk_cache is not None:
            # dynamically update the uncompressed_chunk_cache size to the
            # largest revision we saw in this revlog.
            factor = self.data_config.uncompressed_cache_factor
            candidate_size = rawsize * factor
            if candidate_size > self._uncompressed_chunk_cache.maxcost:
                self._uncompressed_chunk_cache.maxcost = candidate_size

        bins = self._chunks(chain, targetsize=targetsize)
        if basetext is None:
            # no cached base: the first chunk of the chain is the full text
            basetext = bytes(bins[0])
            bins = bins[1:]

        rawtext = mdiff.patches(basetext, bins)
        del basetext  # let us have a chance to free memory early
        return (rev, rawtext, False)
1045 1045
    def sidedata(self, rev, sidedata_end):
        """Return the sidedata for a given revision number.

        ``sidedata_end`` is the known valid end of the sidedata file; data
        past it is rejected as truncated. Returns a dict (empty when the
        revision stores no sidedata)."""
        index_entry = self.index[rev]
        sidedata_offset = index_entry[8]
        sidedata_size = index_entry[9]

        if self.inline:
            # skip the index records interleaved with inline data
            sidedata_offset += self.index.entry_size * (1 + rev)
        if sidedata_size == 0:
            return {}

        if sidedata_end < sidedata_offset + sidedata_size:
            filename = self.sidedata_file
            end = sidedata_end
            offset = sidedata_offset
            length = sidedata_size
            m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
            raise error.RevlogError(m)

        comp_segment = self._segmentfile_sidedata.read_chunk(
            sidedata_offset, sidedata_size
        )

        # entry field 11 stores the sidedata compression mode
        comp = self.index[rev][11]
        if comp == COMP_MODE_PLAIN:
            segment = comp_segment
        elif comp == COMP_MODE_DEFAULT:
            segment = self._decompressor(comp_segment)
        elif comp == COMP_MODE_INLINE:
            segment = self.decompress(comp_segment)
        else:
            msg = b'unknown compression mode %d'
            msg %= comp
            raise error.RevlogError(msg)

        sidedata = sidedatautil.deserialize_sidedata(segment)
        return sidedata
1083 1083
    def write_entry(
        self,
        transaction,
        entry,
        data,
        link,
        offset,
        sidedata,
        sidedata_offset,
        index_end,
        data_end,
        sidedata_end,
    ):
        """append one revision: index record, data chunk and sidedata

        ``entry`` is the packed index record; ``data`` is a (header, body)
        pair of the compressed chunk. ``link`` is accepted for interface
        compatibility but not used in this method. Returns the resulting
        (index_end, data_end, sidedata_end) file positions, with None for
        files not in use. Must be called inside a ``writing`` context.
        """
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)
        ifh, dfh, sdfh = self._writinghandles
        if index_end is None:
            ifh.seek(0, os.SEEK_END)
        else:
            ifh.seek(index_end, os.SEEK_SET)
        if dfh:
            if data_end is None:
                dfh.seek(0, os.SEEK_END)
            else:
                dfh.seek(data_end, os.SEEK_SET)
        if sdfh:
            sdfh.seek(sidedata_end, os.SEEK_SET)

        curr = len(self.index) - 1
        if not self.inline:
            transaction.add(self.data_file, offset)
            if self.sidedata_file:
                transaction.add(self.sidedata_file, sidedata_offset)
            transaction.add(self.canonical_index_file, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            if sidedata:
                sdfh.write(sidedata)
            if self._delay_buffer is None:
                ifh.write(entry)
            else:
                # index writes are being delayed; buffer instead of writing
                self._delay_buffer.append(entry)
        else:
            # inline: index record and data interleave in the index file
            offset += curr * self.index.entry_size
            transaction.add(self.canonical_index_file, offset)
            assert not sidedata
            if self._delay_buffer is None:
                ifh.write(entry)
                ifh.write(data[0])
                ifh.write(data[1])
            else:
                self._delay_buffer.append(entry)
                self._delay_buffer.append(data[0])
                self._delay_buffer.append(data[1])
        return (
            ifh.tell(),
            dfh.tell() if dfh else None,
            sdfh.tell() if sdfh else None,
        )
1157 1157
    def _divert_index(self):
        # name of the side-file used when diverting index writes
        return self.index_file + b'.a'

    def delay(self):
        """delay the visibility of newly written index data

        Returns the divert file path when diverting (empty revlog), None
        when buffering new entries in memory instead."""
        assert not self.is_open
        if self._delay_buffer is not None or self._orig_index_file is not None:
            # delay or divert already in place
            return None
        elif len(self.index) == 0:
            # empty revlog: divert all index writes to a side-file
            self._orig_index_file = self.index_file
            self.index_file = self._divert_index()
            assert self._orig_index_file is not None
            assert self.index_file is not None
            if self.opener.exists(self.index_file):
                self.opener.unlink(self.index_file)
            return self.index_file
        else:
            # existing data: buffer the new entries in memory
            self._delay_buffer = []
            if self.inline:
                self._segmentfile._delay_buffer = self._delay_buffer
            return None
1179 1179
    def write_pending(self):
        """write delayed data visible to external readers as a ".a" file

        Returns a (pending_index_file, any_pending) pair; the path is None
        when writes were already diverted."""
        assert not self.is_open
        if self._orig_index_file is not None:
            # already diverting to a side-file: nothing to materialize
            return None, True
        any_pending = False
        pending_index_file = self._divert_index()
        if self.opener.exists(pending_index_file):
            self.opener.unlink(pending_index_file)
        # start from a copy of the current index and append the buffer
        util.copyfile(
            self.opener.join(self.index_file),
            self.opener.join(pending_index_file),
        )
        if self._delay_buffer:
            with self.opener(pending_index_file, b'r+') as ifh:
                ifh.seek(0, os.SEEK_END)
                ifh.write(b"".join(self._delay_buffer))
            any_pending = True
        self._delay_buffer = None
        if self.inline:
            # keep the segment reader's buffer reference in sync (now None)
            self._segmentfile._delay_buffer = self._delay_buffer
        else:
            assert self._segmentfile._delay_buffer is None
        # switch to divert mode on the pending file
        self._orig_index_file = self.index_file
        self.index_file = pending_index_file
        return self.index_file, any_pending
1205 1205
    def finalize_pending(self):
        """make delayed/diverted data definitively visible

        Flushes the in-memory buffer or renames the divert file back in
        place. Returns the canonical index file path."""
        assert not self.is_open

        delay = self._delay_buffer is not None
        divert = self._orig_index_file is not None

        if delay and divert:
            # delay() establishes exactly one of the two modes
            assert False, "unreachable"
        elif delay:
            # append the buffered entries to the real index
            if self._delay_buffer:
                with self.opener(self.index_file, b'r+') as ifh:
                    ifh.seek(0, os.SEEK_END)
                    ifh.write(b"".join(self._delay_buffer))
            self._segmentfile._delay_buffer = self._delay_buffer = None
        elif divert:
            # promote the side-file to be the real index
            if self.opener.exists(self.index_file):
                self.opener.rename(
                    self.index_file,
                    self._orig_index_file,
                    checkambig=True,
                )
            self.index_file = self._orig_index_file
            self._orig_index_file = None
        else:
            msg = b"not delay or divert found on this revlog"
            raise error.ProgrammingError(msg)
        return self.canonical_index_file
1233 1233
1234 1234
1235 1235 class revlog:
1236 1236 """
1237 1237 the underlying revision storage object
1238 1238
1239 1239 A revlog consists of two parts, an index and the revision data.
1240 1240
1241 1241 The index is a file with a fixed record size containing
1242 1242 information on each revision, including its nodeid (hash), the
1243 1243 nodeids of its parents, the position and offset of its data within
1244 1244 the data file, and the revision it's based on. Finally, each entry
1245 1245 contains a linkrev entry that can serve as a pointer to external
1246 1246 data.
1247 1247
1248 1248 The revision data itself is a linear collection of data chunks.
1249 1249 Each chunk represents a revision and is usually represented as a
1250 1250 delta against the previous chunk. To bound lookup time, runs of
1251 1251 deltas are limited to about 2 times the length of the original
1252 1252 version data. This makes retrieval of a version proportional to
1253 1253 its size, or O(1) relative to the number of revisions.
1254 1254
1255 1255 Both pieces of the revlog are written to in an append-only
1256 1256 fashion, which means we never need to rewrite a file to insert or
1257 1257 remove data, and can use some simple techniques to avoid the need
1258 1258 for locking while reading.
1259 1259
1260 1260 If checkambig, indexfile is opened with checkambig=True at
1261 1261 writing, to avoid file stat ambiguity.
1262 1262
1263 1263 If mmaplargeindex is True, and an mmapindexthreshold is set, the
1264 1264 index will be mmapped rather than read if it is larger than the
1265 1265 configured threshold.
1266 1266
1267 1267 If censorable is True, the revlog can have censored revisions.
1268 1268
1269 1269 If `upperboundcomp` is not None, this is the expected maximal gain from
1270 1270 compression for the data content.
1271 1271
1272 1272 `concurrencychecker` is an optional function that receives 3 arguments: a
1273 1273 file handle, a filename, and an expected position. It should check whether
1274 1274 the current position in the file handle is valid, and log/warn/fail (by
1275 1275 raising).
1276 1276
    See mercurial/revlogutils/constants.py for details about the content of an
1278 1278 index entry.
1279 1279 """
1280 1280
1281 1281 _flagserrorclass = error.RevlogError
1282 1282
1283 1283 @staticmethod
1284 1284 def is_inline_index(header_bytes):
1285 1285 """Determine if a revlog is inline from the initial bytes of the index"""
1286 1286 if len(header_bytes) == 0:
1287 1287 return True
1288 1288
1289 1289 header = INDEX_HEADER.unpack(header_bytes)[0]
1290 1290
1291 1291 _format_flags = header & ~0xFFFF
1292 1292 _format_version = header & 0xFFFF
1293 1293
1294 1294 features = FEATURES_BY_VERSION[_format_version]
1295 1295 return features[b'inline'](_format_flags)
1296 1296
    def __init__(
        self,
        opener,
        target,
        radix,
        postfix=None,  # only exist for `tmpcensored` now
        checkambig=False,
        mmaplargeindex=False,
        censorable=False,
        upperboundcomp=None,
        persistentnodemap=False,
        concurrencychecker=None,
        trypending=False,
        try_split=False,
        canonical_parent_order=True,
        data_config=None,
        delta_config=None,
        feature_config=None,
        may_inline=True,  # may inline new revlog
    ):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.

        `target`: a (KIND, ID) tuple that identify the content stored in
        this revlog. It help the rest of the code to understand what the revlog
        is about without having to resort to heuristic and index filename
        analysis. Note: this must reliably be set by normal code, but
        that test, debug, or performance measurement code might not set this to
        accurate value.
        """

        self.radix = radix

        self._docket_file = None
        self._indexfile = None
        self._datafile = None
        self._sidedatafile = None
        self._nodemap_file = None
        self.postfix = postfix
        self._trypending = trypending
        self._try_split = try_split
        self._may_inline = may_inline
        self.opener = opener
        if persistentnodemap:
            self._nodemap_file = nodemaputil.get_nodemap_file(self)

        assert target[0] in ALL_KINDS
        assert len(target) == 2
        self.target = target
        # each of the three config objects can come (in order of precedence)
        # from an explicit argument, from the opener options, or defaults
        if feature_config is not None:
            self.feature_config = feature_config.copy()
        elif b'feature-config' in self.opener.options:
            self.feature_config = self.opener.options[b'feature-config'].copy()
        else:
            self.feature_config = FeatureConfig()
        self.feature_config.censorable = censorable
        self.feature_config.canonical_parent_order = canonical_parent_order
        if data_config is not None:
            self.data_config = data_config.copy()
        elif b'data-config' in self.opener.options:
            self.data_config = self.opener.options[b'data-config'].copy()
        else:
            self.data_config = DataConfig()
        self.data_config.check_ambig = checkambig
        self.data_config.mmap_large_index = mmaplargeindex
        if delta_config is not None:
            self.delta_config = delta_config.copy()
        elif b'delta-config' in self.opener.options:
            self.delta_config = self.opener.options[b'delta-config'].copy()
        else:
            self.delta_config = DeltaConfig()
        self.delta_config.upper_bound_comp = upperboundcomp

        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)

        self.index = None
        self._docket = None
        self._nodemap_docket = None
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}

        # other optional features

        # Make copy of flag processors so each revlog instance can support
        # custom flags.
        self._flagprocessors = dict(flagutil.flagprocessors)
        # prevent nesting of addgroup
        self._adding_group = None

        chunk_cache = self._loadindex()
        self._load_inner(chunk_cache)
        self._concurrencychecker = concurrencychecker
1393 1393
1394 @property
1395 def _generaldelta(self):
1396 """temporary compatibility proxy"""
1397 util.nouideprecwarn(
1398 b"use revlog.delta_config.general_delta", b"6.6", stacklevel=2
1399 )
1400 return self.delta_config.general_delta
1401
1402 @property
1403 def _checkambig(self):
1404 """temporary compatibility proxy"""
1405 util.nouideprecwarn(
1406 b"use revlog.data_config.checkambig", b"6.6", stacklevel=2
1407 )
1408 return self.data_config.check_ambig
1409
1410 @property
1411 def _mmaplargeindex(self):
1412 """temporary compatibility proxy"""
1413 util.nouideprecwarn(
1414 b"use revlog.data_config.mmap_large_index", b"6.6", stacklevel=2
1415 )
1416 return self.data_config.mmap_large_index
1417
1418 @property
1419 def _censorable(self):
1420 """temporary compatibility proxy"""
1421 util.nouideprecwarn(
1422 b"use revlog.feature_config.censorable", b"6.6", stacklevel=2
1423 )
1424 return self.feature_config.censorable
1425
1426 @property
1427 def _chunkcachesize(self):
1428 """temporary compatibility proxy"""
1429 util.nouideprecwarn(
1430 b"use revlog.data_config.chunk_cache_size", b"6.6", stacklevel=2
1431 )
1432 return self.data_config.chunk_cache_size
1433
1434 @property
1435 def _maxchainlen(self):
1436 """temporary compatibility proxy"""
1437 util.nouideprecwarn(
1438 b"use revlog.delta_config.max_chain_len", b"6.6", stacklevel=2
1439 )
1440 return self.delta_config.max_chain_len
1441
1442 @property
1443 def _deltabothparents(self):
1444 """temporary compatibility proxy"""
1445 util.nouideprecwarn(
1446 b"use revlog.delta_config.delta_both_parents", b"6.6", stacklevel=2
1447 )
1448 return self.delta_config.delta_both_parents
1449
1450 @property
1451 def _candidate_group_chunk_size(self):
1452 """temporary compatibility proxy"""
1453 util.nouideprecwarn(
1454 b"use revlog.delta_config.candidate_group_chunk_size",
1455 b"6.6",
1456 stacklevel=2,
1457 )
1458 return self.delta_config.candidate_group_chunk_size
1459
1460 @property
1461 def _debug_delta(self):
1462 """temporary compatibility proxy"""
1463 util.nouideprecwarn(
1464 b"use revlog.delta_config.debug_delta", b"6.6", stacklevel=2
1465 )
1466 return self.delta_config.debug_delta
1467
1468 @property
1469 def _compengine(self):
1470 """temporary compatibility proxy"""
1471 util.nouideprecwarn(
1472 b"use revlog.feature_config.compression_engine",
1473 b"6.6",
1474 stacklevel=2,
1475 )
1476 return self.feature_config.compression_engine
1477
1478 @property
1479 def upperboundcomp(self):
1480 """temporary compatibility proxy"""
1481 util.nouideprecwarn(
1482 b"use revlog.delta_config.upper_bound_comp",
1483 b"6.6",
1484 stacklevel=2,
1485 )
1486 return self.delta_config.upper_bound_comp
1487
1488 @property
1489 def _compengineopts(self):
1490 """temporary compatibility proxy"""
1491 util.nouideprecwarn(
1492 b"use revlog.feature_config.compression_engine_options",
1493 b"6.6",
1494 stacklevel=2,
1495 )
1496 return self.feature_config.compression_engine_options
1497
1498 @property
1499 def _maxdeltachainspan(self):
1500 """temporary compatibility proxy"""
1501 util.nouideprecwarn(
1502 b"use revlog.delta_config.max_deltachain_span", b"6.6", stacklevel=2
1503 )
1504 return self.delta_config.max_deltachain_span
1505
1506 @property
1507 def _withsparseread(self):
1508 """temporary compatibility proxy"""
1509 util.nouideprecwarn(
1510 b"use revlog.data_config.with_sparse_read", b"6.6", stacklevel=2
1511 )
1512 return self.data_config.with_sparse_read
1513
1514 @property
1515 def _sparserevlog(self):
1516 """temporary compatibility proxy"""
1517 util.nouideprecwarn(
1518 b"use revlog.delta_config.sparse_revlog", b"6.6", stacklevel=2
1519 )
1520 return self.delta_config.sparse_revlog
1521
1522 @property
1523 def hassidedata(self):
1524 """temporary compatibility proxy"""
1525 util.nouideprecwarn(
1526 b"use revlog.feature_config.has_side_data", b"6.6", stacklevel=2
1527 )
1528 return self.feature_config.has_side_data
1529
1530 @property
1531 def _srdensitythreshold(self):
1532 """temporary compatibility proxy"""
1533 util.nouideprecwarn(
1534 b"use revlog.data_config.sr_density_threshold",
1535 b"6.6",
1536 stacklevel=2,
1537 )
1538 return self.data_config.sr_density_threshold
1539
1540 @property
1541 def _srmingapsize(self):
1542 """temporary compatibility proxy"""
1543 util.nouideprecwarn(
1544 b"use revlog.data_config.sr_min_gap_size", b"6.6", stacklevel=2
1545 )
1546 return self.data_config.sr_min_gap_size
1547
1548 @property
1549 def _compute_rank(self):
1550 """temporary compatibility proxy"""
1551 util.nouideprecwarn(
1552 b"use revlog.feature_config.compute_rank", b"6.6", stacklevel=2
1553 )
1554 return self.feature_config.compute_rank
1555
1556 @property
1557 def canonical_parent_order(self):
1558 """temporary compatibility proxy"""
1559 util.nouideprecwarn(
1560 b"use revlog.feature_config.canonical_parent_order",
1561 b"6.6",
1562 stacklevel=2,
1563 )
1564 return self.feature_config.canonical_parent_order
1565
1566 @property
1567 def _lazydelta(self):
1568 """temporary compatibility proxy"""
1569 util.nouideprecwarn(
1570 b"use revlog.delta_config.lazy_delta", b"6.6", stacklevel=2
1571 )
1572 return self.delta_config.lazy_delta
1573
1574 @property
1575 def _lazydeltabase(self):
1576 """temporary compatibility proxy"""
1577 util.nouideprecwarn(
1578 b"use revlog.delta_config.lazy_delta_base", b"6.6", stacklevel=2
1579 )
1580 return self.delta_config.lazy_delta_base
1581
1582 1394 def _init_opts(self):
1583 1395 """process options (from above/config) to setup associated default revlog mode
1584 1396
1585 1397 These values might be affected when actually reading on disk information.
1586 1398
1587 1399 The relevant values are returned for use in _loadindex().
1588 1400
1589 1401 * newversionflags:
1590 1402 version header to use if we need to create a new revlog
1591 1403
1592 1404 * mmapindexthreshold:
1593 1405 minimal index size for start to use mmap
1594 1406
1595 1407 * force_nodemap:
1596 1408 force the usage of a "development" version of the nodemap code
1597 1409 """
1598 1410 opts = self.opener.options
1599 1411
1600 1412 if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
1601 1413 new_header = CHANGELOGV2
1602 1414 compute_rank = opts.get(b'changelogv2.compute-rank', True)
1603 1415 self.feature_config.compute_rank = compute_rank
1604 1416 elif b'revlogv2' in opts:
1605 1417 new_header = REVLOGV2
1606 1418 elif b'revlogv1' in opts:
1607 1419 new_header = REVLOGV1
1608 1420 if self._may_inline:
1609 1421 new_header |= FLAG_INLINE_DATA
1610 1422 if b'generaldelta' in opts:
1611 1423 new_header |= FLAG_GENERALDELTA
1612 1424 elif b'revlogv0' in self.opener.options:
1613 1425 new_header = REVLOGV0
1614 1426 else:
1615 1427 new_header = REVLOG_DEFAULT_VERSION
1616 1428
1617 1429 mmapindexthreshold = None
1618 1430 if self.data_config.mmap_large_index:
1619 1431 mmapindexthreshold = self.data_config.mmap_index_threshold
1620 1432 if self.feature_config.enable_ellipsis:
1621 1433 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
1622 1434
1623 1435 # revlog v0 doesn't have flag processors
1624 1436 for flag, processor in opts.get(b'flagprocessors', {}).items():
1625 1437 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
1626 1438
1627 1439 chunk_cache_size = self.data_config.chunk_cache_size
1628 1440 if chunk_cache_size <= 0:
1629 1441 raise error.RevlogError(
1630 1442 _(b'revlog chunk cache size %r is not greater than 0')
1631 1443 % chunk_cache_size
1632 1444 )
1633 1445 elif chunk_cache_size & (chunk_cache_size - 1):
1634 1446 raise error.RevlogError(
1635 1447 _(b'revlog chunk cache size %r is not a power of 2')
1636 1448 % chunk_cache_size
1637 1449 )
1638 1450 force_nodemap = opts.get(b'devel-force-nodemap', False)
1639 1451 return new_header, mmapindexthreshold, force_nodemap
1640 1452
1641 1453 def _get_data(self, filepath, mmap_threshold, size=None):
1642 1454 """return a file content with or without mmap
1643 1455
1644 1456 If the file is missing return the empty string"""
1645 1457 try:
1646 1458 with self.opener(filepath) as fp:
1647 1459 if mmap_threshold is not None:
1648 1460 file_size = self.opener.fstat(fp).st_size
1649 1461 if file_size >= mmap_threshold:
1650 1462 if size is not None:
1651 1463 # avoid potentiel mmap crash
1652 1464 size = min(file_size, size)
1653 1465 # TODO: should .close() to release resources without
1654 1466 # relying on Python GC
1655 1467 if size is None:
1656 1468 return util.buffer(util.mmapread(fp))
1657 1469 else:
1658 1470 return util.buffer(util.mmapread(fp, size))
1659 1471 if size is None:
1660 1472 return fp.read()
1661 1473 else:
1662 1474 return fp.read(size)
1663 1475 except FileNotFoundError:
1664 1476 return b''
1665 1477
    def get_streams(self, max_linkrev, force_inline=False):
        """return a list of streams that represent this revlog

        This is used by stream-clone to do bytes to bytes copies of a repository.

        This streams data for all revisions that refer to a changelog revision up
        to `max_linkrev`.

        If `force_inline` is set, it enforces that the stream will represent an inline revlog.

        It returns a list of three-tuples:

        [
            (filename, bytes_stream, stream_size),
        ]
        """
        n = len(self)
        index = self.index
        # walk back from the tip until we find a revision whose linkrev
        # (entry[4]) is inside the requested boundary
        while n > 0:
            linkrev = index[n - 1][4]
            if linkrev < max_linkrev:
                break
            # note: this loop will rarely go through multiple iterations, since
            # it only traverses commits created during the current streaming
            # pull operation.
            #
            # If this become a problem, using a binary search should cap the
            # runtime of this.
            n = n - 1
        if n == 0:
            # no data to send
            return []
        index_size = n * index.entry_size
        data_size = self.end(n - 1)

        # XXX we might have been split (or stripped) since the object
        # initialization, We need to close this race too, but having a way to
        # pre-open the file we feed to the revlog and never closing them before
        # we are done streaming.

        if self._inline:
            # inline revlog: index and data are interleaved in a single file,
            # so a single stream over the index file covers everything

            def get_stream():
                with self.opener(self._indexfile, mode=b"r") as fp:
                    yield None
                    size = index_size + data_size
                    if size <= 65536:
                        yield fp.read(size)
                    else:
                        yield from util.filechunkiter(fp, limit=size)

            inline_stream = get_stream()
            next(inline_stream)
            return [
                (self._indexfile, inline_stream, index_size + data_size),
            ]
        elif force_inline:
            # split revlog, but the receiver wants an inline one: re-serialize
            # each index entry followed by its data segment

            def get_stream():
                with self.reading():
                    yield None

                    for rev in range(n):
                        idx = self.index.entry_binary(rev)
                        if rev == 0 and self._docket is None:
                            # re-inject the inline flag
                            header = self._format_flags
                            header |= self._format_version
                            header |= FLAG_INLINE_DATA
                            header = self.index.pack_header(header)
                            idx = header + idx
                        yield idx
                        yield self._inner.get_segment_for_revs(rev, rev)[1]

            inline_stream = get_stream()
            next(inline_stream)
            return [
                (self._indexfile, inline_stream, index_size + data_size),
            ]
        else:
            # split revlog: stream the data file and the index file separately

            def get_index_stream():
                with self.opener(self._indexfile, mode=b"r") as fp:
                    yield None
                    if index_size <= 65536:
                        yield fp.read(index_size)
                    else:
                        yield from util.filechunkiter(fp, limit=index_size)

            def get_data_stream():
                with self._datafp() as fp:
                    yield None
                    if data_size <= 65536:
                        yield fp.read(data_size)
                    else:
                        yield from util.filechunkiter(fp, limit=data_size)

            index_stream = get_index_stream()
            next(index_stream)
            data_stream = get_data_stream()
            next(data_stream)
            return [
                (self._datafile, data_stream, data_size),
                (self._indexfile, index_stream, index_size),
            ]
1772 1584
    def _loadindex(self, docket=None):
        """(re)load index data from disk and pick the matching index parser

        Resolves the on-disk entry point (plain ``.i``, pending ``.i.a``,
        split index, or docket), decodes the format header, then parses the
        index and returns the chunk cache produced by the parser.
        """

        new_header, mmapindexthreshold, force_nodemap = self._init_opts()

        # locate the file that serves as entry point for this revlog
        if self.postfix is not None:
            entry_point = b'%s.i.%s' % (self.radix, self.postfix)
        elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
            entry_point = b'%s.i.a' % self.radix
        elif self._try_split and self.opener.exists(self._split_index_file):
            entry_point = self._split_index_file
        else:
            entry_point = b'%s.i' % self.radix

        if docket is not None:
            # caller already parsed the docket (reload case): trust it and
            # skip header decoding
            self._docket = docket
            self._docket_file = entry_point
        else:
            self._initempty = True
            entry_data = self._get_data(entry_point, mmapindexthreshold)
            if len(entry_data) > 0:
                header = INDEX_HEADER.unpack(entry_data[:4])[0]
                self._initempty = False
            else:
                header = new_header

            # split the 32-bit header into feature flags and format version
            self._format_flags = header & ~0xFFFF
            self._format_version = header & 0xFFFF

            supported_flags = SUPPORTED_FLAGS.get(self._format_version)
            if supported_flags is None:
                msg = _(b'unknown version (%d) in revlog %s')
                msg %= (self._format_version, self.display_id)
                raise error.RevlogError(msg)
            elif self._format_flags & ~supported_flags:
                msg = _(b'unknown flags (%#04x) in version %d revlog %s')
                display_flag = self._format_flags >> 16
                msg %= (display_flag, self._format_version, self.display_id)
                raise error.RevlogError(msg)

            features = FEATURES_BY_VERSION[self._format_version]
            self._inline = features[b'inline'](self._format_flags)
            self.delta_config.general_delta = features[b'generaldelta'](
                self._format_flags
            )
            self.feature_config.has_side_data = features[b'sidedata']

            if not features[b'docket']:
                self._indexfile = entry_point
                index_data = entry_data
            else:
                self._docket_file = entry_point
                if self._initempty:
                    self._docket = docketutil.default_docket(self, header)
                else:
                    self._docket = docketutil.parse_docket(
                        self, entry_data, use_pending=self._trypending
                    )

        if self._docket is not None:
            # docket-based formats: the index lives in its own file, read it
            # now (bounded by the docket's recorded end offset)
            self._indexfile = self._docket.index_filepath()
            index_data = b''
            index_size = self._docket.index_end
            if index_size > 0:
                index_data = self._get_data(
                    self._indexfile, mmapindexthreshold, size=index_size
                )
                if len(index_data) < index_size:
                    msg = _(b'too few index data for %s: got %d, expected %d')
                    msg %= (self.display_id, len(index_data), index_size)
                    raise error.RevlogError(msg)

            self._inline = False
            # generaldelta implied by version 2 revlogs.
            self.delta_config.general_delta = True
            # the logic for persistent nodemap will be dealt with within the
            # main docket, so disable it for now.
            self._nodemap_file = None

        if self._docket is not None:
            self._datafile = self._docket.data_filepath()
            self._sidedatafile = self._docket.sidedata_filepath()
        elif self.postfix is None:
            self._datafile = b'%s.d' % self.radix
        else:
            self._datafile = b'%s.d.%s' % (self.radix, self.postfix)

        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid

        # sparse-revlog can't be on without general-delta (issue6056)
        if not self.delta_config.general_delta:
            self.delta_config.sparse_revlog = False

        self._storedeltachains = True

        devel_nodemap = (
            self._nodemap_file
            and force_nodemap
            and parse_index_v1_nodemap is not None
        )

        use_rust_index = False
        if rustrevlog is not None:
            if self._nodemap_file is not None:
                use_rust_index = True
            else:
                use_rust_index = self.opener.options.get(b'rust.index')

        # select the parser matching the detected format version
        self._parse_index = parse_index_v1
        if self._format_version == REVLOGV0:
            self._parse_index = revlogv0.parse_index_v0
        elif self._format_version == REVLOGV2:
            self._parse_index = parse_index_v2
        elif self._format_version == CHANGELOGV2:
            self._parse_index = parse_index_cl_v2
        elif devel_nodemap:
            self._parse_index = parse_index_v1_nodemap
        elif use_rust_index:
            self._parse_index = parse_index_v1_mixed
        try:
            d = self._parse_index(index_data, self._inline)
            index, chunkcache = d
            use_nodemap = (
                not self._inline
                and self._nodemap_file is not None
                and hasattr(index, 'update_nodemap_data')
            )
            if use_nodemap:
                nodemap_data = nodemaputil.persisted_data(self)
                if nodemap_data is not None:
                    docket = nodemap_data[0]
                    if (
                        len(d[0]) > docket.tip_rev
                        and d[0][docket.tip_rev][7] == docket.tip_node
                    ):
                        # no changelog tampering
                        self._nodemap_docket = docket
                        index.update_nodemap_data(*nodemap_data)
        except (ValueError, IndexError):
            raise error.RevlogError(
                _(b"index %s is corrupted") % self.display_id
            )
        self.index = index
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = util.lrucachedict(500)

        return chunkcache
1920 1732
1921 1733 def _load_inner(self, chunk_cache):
1922 1734 if self._docket is None:
1923 1735 default_compression_header = None
1924 1736 else:
1925 1737 default_compression_header = self._docket.default_compression_header
1926 1738
1927 1739 self._inner = _InnerRevlog(
1928 1740 opener=self.opener,
1929 1741 index=self.index,
1930 1742 index_file=self._indexfile,
1931 1743 data_file=self._datafile,
1932 1744 sidedata_file=self._sidedatafile,
1933 1745 inline=self._inline,
1934 1746 data_config=self.data_config,
1935 1747 delta_config=self.delta_config,
1936 1748 feature_config=self.feature_config,
1937 1749 chunk_cache=chunk_cache,
1938 1750 default_compression_header=default_compression_header,
1939 1751 )
1940 1752
    def get_revlog(self):
        """simple function to mirror API of other not-really-revlog API"""
        # a revlog is its own backing revlog, so just return self
        return self
1944 1756
    @util.propertycache
    def revlog_kind(self):
        """the KIND_* constant of this revlog (first item of ``target``)"""
        return self.target[0]
1948 1760
1949 1761 @util.propertycache
1950 1762 def display_id(self):
1951 1763 """The public facing "ID" of the revlog that we use in message"""
1952 1764 if self.revlog_kind == KIND_FILELOG:
1953 1765 # Reference the file without the "data/" prefix, so it is familiar
1954 1766 # to the user.
1955 1767 return self.target[1]
1956 1768 else:
1957 1769 return self.radix
1958 1770
    def _datafp(self, mode=b'r'):
        """file object for the revlog's data file"""
        # ``mode`` is handed straight to the vfs opener (read-only by default)
        return self.opener(self._datafile, mode=mode)
1962 1774
    def tiprev(self):
        """revision number of the tip (most recently added) revision"""
        return len(self.index) - 1
1965 1777
    def tip(self):
        """node id of the tip revision"""
        return self.node(self.tiprev())
1968 1780
    def __contains__(self, rev):
        # membership is defined over revision *numbers*, not node ids
        return 0 <= rev < len(self)
1971 1783
    def __len__(self):
        """number of revisions stored in this revlog"""
        return len(self.index)
1974 1786
    def __iter__(self):
        """iterate over all revision numbers, in ascending order"""
        return iter(range(len(self)))
1977 1789
    def revs(self, start=0, stop=None):
        """iterate over all rev in this revlog (from start to stop)"""
        # delegate so the iteration rules stay shared with other storage
        return storageutil.iterrevs(len(self), start=start, stop=stop)
1981 1793
1982 1794 def hasnode(self, node):
1983 1795 try:
1984 1796 self.rev(node)
1985 1797 return True
1986 1798 except KeyError:
1987 1799 return False
1988 1800
1989 1801 def _candelta(self, baserev, rev):
1990 1802 """whether two revisions (baserev, rev) can be delta-ed or not"""
1991 1803 # Disable delta if either rev requires a content-changing flag
1992 1804 # processor (ex. LFS). This is because such flag processor can alter
1993 1805 # the rawtext content that the delta will be based on, and two clients
1994 1806 # could have a same revlog node with different flags (i.e. different
1995 1807 # rawtext contents) and the delta could be incompatible.
1996 1808 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
1997 1809 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
1998 1810 ):
1999 1811 return False
2000 1812 return True
2001 1813
2002 1814 def update_caches(self, transaction):
2003 1815 """update on disk cache
2004 1816
2005 1817 If a transaction is passed, the update may be delayed to transaction
2006 1818 commit."""
2007 1819 if self._nodemap_file is not None:
2008 1820 if transaction is None:
2009 1821 nodemaputil.update_persistent_nodemap(self)
2010 1822 else:
2011 1823 nodemaputil.setup_persistent_nodemap(transaction, self)
2012 1824
    def clearcaches(self):
        """Clear in-memory caches"""
        self._chainbasecache.clear()
        self._inner.clear_cache()
        # cache of partial node-id → full node resolutions
        self._pcache = {}
        self._nodemap_docket = None
        self.index.clearcaches()
        # The python code is the one responsible for validating the docket, we
        # end up having to refresh it here.
        use_nodemap = (
            not self._inline
            and self._nodemap_file is not None
            and hasattr(self.index, 'update_nodemap_data')
        )
        if use_nodemap:
            nodemap_data = nodemaputil.persisted_data(self)
            if nodemap_data is not None:
                self._nodemap_docket = nodemap_data[0]
                self.index.update_nodemap_data(*nodemap_data)
2032 1844
    def rev(self, node):
        """return the revision number associated with a <nodeid>"""
        try:
            return self.index.rev(node)
        except TypeError:
            # a non-bytes argument is a programming error: propagate as-is
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if (
                node == self.nodeconstants.wdirid
                or node in self.nodeconstants.wdirfilenodeids
            ):
                # the working-directory pseudo node has no revision number
                raise error.WdirUnsupported
            raise error.LookupError(node, self.display_id, _(b'no node'))
2047 1859
2048 1860 # Accessors for index entries.
2049 1861
2050 1862 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
2051 1863 # are flags.
2052 1864 def start(self, rev):
2053 1865 return int(self.index[rev][0] >> 16)
2054 1866
2055 1867 def sidedata_cut_off(self, rev):
2056 1868 sd_cut_off = self.index[rev][8]
2057 1869 if sd_cut_off != 0:
2058 1870 return sd_cut_off
2059 1871 # This is some annoying dance, because entries without sidedata
2060 1872 # currently use 0 as their ofsset. (instead of previous-offset +
2061 1873 # previous-size)
2062 1874 #
2063 1875 # We should reconsider this sidedata → 0 sidata_offset policy.
2064 1876 # In the meantime, we need this.
2065 1877 while 0 <= rev:
2066 1878 e = self.index[rev]
2067 1879 if e[9] != 0:
2068 1880 return e[8] + e[9]
2069 1881 rev -= 1
2070 1882 return 0
2071 1883
    def flags(self, rev):
        """revision flags of ``rev`` (low 16 bits of the first index field)"""
        return self.index[rev][0] & 0xFFFF
2074 1886
    def length(self, rev):
        """length of the stored (possibly compressed) chunk for ``rev``"""
        return self.index[rev][1]
2077 1889
2078 1890 def sidedata_length(self, rev):
2079 1891 if not self.feature_config.has_side_data:
2080 1892 return 0
2081 1893 return self.index[rev][9]
2082 1894
2083 1895 def rawsize(self, rev):
2084 1896 """return the length of the uncompressed text for a given revision"""
2085 1897 l = self.index[rev][2]
2086 1898 if l >= 0:
2087 1899 return l
2088 1900
2089 1901 t = self.rawdata(rev)
2090 1902 return len(t)
2091 1903
2092 1904 def size(self, rev):
2093 1905 """length of non-raw text (processed by a "read" flag processor)"""
2094 1906 # fast path: if no "read" flag processor could change the content,
2095 1907 # size is rawsize. note: ELLIPSIS is known to not change the content.
2096 1908 flags = self.flags(rev)
2097 1909 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
2098 1910 return self.rawsize(rev)
2099 1911
2100 1912 return len(self.revision(rev))
2101 1913
    def fast_rank(self, rev):
        """Return the rank of a revision if already known, or None otherwise.

        The rank of a revision is the size of the sub-graph it defines as a
        head. Equivalently, the rank of a revision `r` is the size of the set
        `ancestors(r)`, `r` included.

        This method returns the rank retrieved from the revlog in constant
        time. It makes no attempt at computing unknown values for versions of
        the revlog which do not persist the rank.
        """
        rank = self.index[rev][ENTRY_RANK]
        # only CHANGELOGV2 persists a rank; any other format (or the
        # RANK_UNKNOWN sentinel) means the value is not available
        if self._format_version != CHANGELOGV2 or rank == RANK_UNKNOWN:
            return None
        if rev == nullrev:
            return 0  # convention
        return rank
2119 1931
2120 1932 def chainbase(self, rev):
2121 1933 base = self._chainbasecache.get(rev)
2122 1934 if base is not None:
2123 1935 return base
2124 1936
2125 1937 index = self.index
2126 1938 iterrev = rev
2127 1939 base = index[iterrev][3]
2128 1940 while base != iterrev:
2129 1941 iterrev = base
2130 1942 base = index[iterrev][3]
2131 1943
2132 1944 self._chainbasecache[rev] = base
2133 1945 return base
2134 1946
    def linkrev(self, rev):
        """changelog revision number that ``rev`` is linked to"""
        return self.index[rev][4]
2137 1949
2138 1950 def parentrevs(self, rev):
2139 1951 try:
2140 1952 entry = self.index[rev]
2141 1953 except IndexError:
2142 1954 if rev == wdirrev:
2143 1955 raise error.WdirUnsupported
2144 1956 raise
2145 1957
2146 1958 if self.feature_config.canonical_parent_order and entry[5] == nullrev:
2147 1959 return entry[6], entry[5]
2148 1960 else:
2149 1961 return entry[5], entry[6]
2150 1962
2151 1963 # fast parentrevs(rev) where rev isn't filtered
2152 1964 _uncheckedparentrevs = parentrevs
2153 1965
2154 1966 def node(self, rev):
2155 1967 try:
2156 1968 return self.index[rev][7]
2157 1969 except IndexError:
2158 1970 if rev == wdirrev:
2159 1971 raise error.WdirUnsupported
2160 1972 raise
2161 1973
2162 1974 # Derived from index values.
2163 1975
    def end(self, rev):
        """end offset of ``rev``'s data in the data file (start + length)"""
        return self.start(rev) + self.length(rev)
2166 1978
2167 1979 def parents(self, node):
2168 1980 i = self.index
2169 1981 d = i[self.rev(node)]
2170 1982 # inline node() to avoid function call overhead
2171 1983 if self.feature_config.canonical_parent_order and d[5] == self.nullid:
2172 1984 return i[d[6]][7], i[d[5]][7]
2173 1985 else:
2174 1986 return i[d[5]][7], i[d[6]][7]
2175 1987
    def chainlen(self, rev):
        """length of the delta chain for ``rev``"""
        return self._chaininfo(rev)[0]
2178 1990
    def _chaininfo(self, rev):
        """return ``(chain-length, sum-of-compressed-delta-lengths)`` for ``rev``

        Results are memoized in ``_chaininfocache``; the walk also stops early
        when it reaches an already-cached revision of the chain.
        """
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self.delta_config.general_delta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        # walk toward the chain base; entry[3] is the delta base, entry[1]
        # the stored chunk length
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                # without general delta, the base is always the previous rev
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r
2209 2021
    def _deltachain(self, rev, stoprev=None):
        # delegates to the inner revlog, which owns delta/chunk access
        return self._inner._deltachain(rev, stoprev=stoprev)
2212 2024
2213 2025 def ancestors(self, revs, stoprev=0, inclusive=False):
2214 2026 """Generate the ancestors of 'revs' in reverse revision order.
2215 2027 Does not generate revs lower than stoprev.
2216 2028
2217 2029 See the documentation for ancestor.lazyancestors for more details."""
2218 2030
2219 2031 # first, make sure start revisions aren't filtered
2220 2032 revs = list(revs)
2221 2033 checkrev = self.node
2222 2034 for r in revs:
2223 2035 checkrev(r)
2224 2036 # and we're sure ancestors aren't filtered as well
2225 2037
2226 2038 if rustancestor is not None and self.index.rust_ext_compat:
2227 2039 lazyancestors = rustancestor.LazyAncestors
2228 2040 arg = self.index
2229 2041 else:
2230 2042 lazyancestors = ancestor.lazyancestors
2231 2043 arg = self._uncheckedparentrevs
2232 2044 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
2233 2045
    def descendants(self, revs):
        """generate the descendant revisions of ``revs``"""
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)
2236 2048
    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

        ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset:
            # set-like view over the lazily-iterated ``lazyvalues`` plus any
            # explicitly added revisions
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if not r in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        # (breadth-first walk through the parent links)
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
            for p in self.parentrevs(r):
                if p not in has:
                    visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]
2300 2112
2301 2113 def incrementalmissingrevs(self, common=None):
2302 2114 """Return an object that can be used to incrementally compute the
2303 2115 revision numbers of the ancestors of arbitrary sets that are not
2304 2116 ancestors of common. This is an ancestor.incrementalmissingancestors
2305 2117 object.
2306 2118
2307 2119 'common' is a list of revision numbers. If common is not supplied, uses
2308 2120 nullrev.
2309 2121 """
2310 2122 if common is None:
2311 2123 common = [nullrev]
2312 2124
2313 2125 if rustancestor is not None and self.index.rust_ext_compat:
2314 2126 return rustancestor.MissingAncestors(self.index, common)
2315 2127 return ancestor.incrementalmissingancestors(self.parentrevs, common)
2316 2128
2317 2129 def findmissingrevs(self, common=None, heads=None):
2318 2130 """Return the revision numbers of the ancestors of heads that
2319 2131 are not ancestors of common.
2320 2132
2321 2133 More specifically, return a list of revision numbers corresponding to
2322 2134 nodes N such that every N satisfies the following constraints:
2323 2135
2324 2136 1. N is an ancestor of some node in 'heads'
2325 2137 2. N is not an ancestor of any node in 'common'
2326 2138
2327 2139 The list is sorted by revision number, meaning it is
2328 2140 topologically sorted.
2329 2141
2330 2142 'heads' and 'common' are both lists of revision numbers. If heads is
2331 2143 not supplied, uses all of the revlog's heads. If common is not
2332 2144 supplied, uses nullid."""
2333 2145 if common is None:
2334 2146 common = [nullrev]
2335 2147 if heads is None:
2336 2148 heads = self.headrevs()
2337 2149
2338 2150 inc = self.incrementalmissingrevs(common=common)
2339 2151 return inc.missingancestors(heads)
2340 2152
2341 2153 def findmissing(self, common=None, heads=None):
2342 2154 """Return the ancestors of heads that are not ancestors of common.
2343 2155
2344 2156 More specifically, return a list of nodes N such that every N
2345 2157 satisfies the following constraints:
2346 2158
2347 2159 1. N is an ancestor of some node in 'heads'
2348 2160 2. N is not an ancestor of any node in 'common'
2349 2161
2350 2162 The list is sorted by revision number, meaning it is
2351 2163 topologically sorted.
2352 2164
2353 2165 'heads' and 'common' are both lists of node IDs. If heads is
2354 2166 not supplied, uses all of the revlog's heads. If common is not
2355 2167 supplied, uses nullid."""
2356 2168 if common is None:
2357 2169 common = [self.nullid]
2358 2170 if heads is None:
2359 2171 heads = self.heads()
2360 2172
2361 2173 common = [self.rev(n) for n in common]
2362 2174 heads = [self.rev(n) for n in heads]
2363 2175
2364 2176 inc = self.incrementalmissingrevs(common=common)
2365 2177 return [self.node(r) for r in inc.missingancestors(heads)]
2366 2178
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

        1. N is a descendant of some node in 'roots'
        2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                # no roots means nothing can be reached
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [self.nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return (
                [self.node(r) for r in self],
                [self.nullid],
                list(self.heads()),
            )
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == self.nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != self.nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
        # Now that we have our set of ancestors, we want to remove any
        # roots that are not ancestors.

        # If one of the roots was nullid, everything is included anyway.
        if lowestrev > nullrev:
            # But, since we weren't, let's recompute the lowest rev to not
            # include roots that aren't ancestors.

            # Filter out roots that aren't ancestors of heads
            roots = [root for root in roots if root in ancestors]
            # Recompute the lowest revision
            if roots:
                lowestrev = min([self.rev(root) for root in roots])
            else:
                # No more roots?  Return empty list
                return nonodes
        else:
            # We are descending from nullid, and don't need to care about
            # any other roots.
            lowestrev = nullrev
            roots = [self.nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked is descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants.  (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in heads.items() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
2526 2338
2527 2339 def headrevs(self, revs=None):
2528 2340 if revs is None:
2529 2341 try:
2530 2342 return self.index.headrevs()
2531 2343 except AttributeError:
2532 2344 return self._headrevs()
2533 2345 if rustdagop is not None and self.index.rust_ext_compat:
2534 2346 return rustdagop.headrevs(self.index, revs)
2535 2347 return dagop.headrevs(revs, self._uncheckedparentrevs)
2536 2348
    def computephases(self, roots):
        # Delegate phase computation to the native index helper; the shape of
        # `roots` and of the returned sets/maps is defined by the index
        # implementation — NOTE(review): confirm against callers.
        return self.index.computephasesmapsets(roots)
2539 2351
2540 2352 def _headrevs(self):
2541 2353 count = len(self)
2542 2354 if not count:
2543 2355 return [nullrev]
2544 2356 # we won't iter over filtered rev so nobody is a head at start
2545 2357 ishead = [0] * (count + 1)
2546 2358 index = self.index
2547 2359 for r in self:
2548 2360 ishead[r] = 1 # I may be an head
2549 2361 e = index[r]
2550 2362 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
2551 2363 return [r for r, val in enumerate(ishead) if val]
2552 2364
2553 2365 def heads(self, start=None, stop=None):
2554 2366 """return the list of all nodes that have no children
2555 2367
2556 2368 if start is specified, only heads that are descendants of
2557 2369 start will be returned
2558 2370 if stop is specified, it will consider all the revs from stop
2559 2371 as if they had no children
2560 2372 """
2561 2373 if start is None and stop is None:
2562 2374 if not len(self):
2563 2375 return [self.nullid]
2564 2376 return [self.node(r) for r in self.headrevs()]
2565 2377
2566 2378 if start is None:
2567 2379 start = nullrev
2568 2380 else:
2569 2381 start = self.rev(start)
2570 2382
2571 2383 stoprevs = {self.rev(n) for n in stop or []}
2572 2384
2573 2385 revs = dagop.headrevssubset(
2574 2386 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
2575 2387 )
2576 2388
2577 2389 return [self.node(rev) for rev in revs]
2578 2390
2579 2391 def children(self, node):
2580 2392 """find the children of a given node"""
2581 2393 c = []
2582 2394 p = self.rev(node)
2583 2395 for r in self.revs(start=p + 1):
2584 2396 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
2585 2397 if prevs:
2586 2398 for pr in prevs:
2587 2399 if pr == p:
2588 2400 c.append(self.node(r))
2589 2401 elif p == nullrev:
2590 2402 c.append(self.node(r))
2591 2403 return c
2592 2404
2593 2405 def commonancestorsheads(self, a, b):
2594 2406 """calculate all the heads of the common ancestors of nodes a and b"""
2595 2407 a, b = self.rev(a), self.rev(b)
2596 2408 ancs = self._commonancestorsheads(a, b)
2597 2409 return pycompat.maplist(self.node, ancs)
2598 2410
2599 2411 def _commonancestorsheads(self, *revs):
2600 2412 """calculate all the heads of the common ancestors of revs"""
2601 2413 try:
2602 2414 ancs = self.index.commonancestorsheads(*revs)
2603 2415 except (AttributeError, OverflowError): # C implementation failed
2604 2416 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
2605 2417 return ancs
2606 2418
2607 2419 def isancestor(self, a, b):
2608 2420 """return True if node a is an ancestor of node b
2609 2421
2610 2422 A revision is considered an ancestor of itself."""
2611 2423 a, b = self.rev(a), self.rev(b)
2612 2424 return self.isancestorrev(a, b)
2613 2425
2614 2426 def isancestorrev(self, a, b):
2615 2427 """return True if revision a is an ancestor of revision b
2616 2428
2617 2429 A revision is considered an ancestor of itself.
2618 2430
2619 2431 The implementation of this is trivial but the use of
2620 2432 reachableroots is not."""
2621 2433 if a == nullrev:
2622 2434 return True
2623 2435 elif a == b:
2624 2436 return True
2625 2437 elif a > b:
2626 2438 return False
2627 2439 return bool(self.reachableroots(a, [b], [a], includepath=False))
2628 2440
    def reachableroots(self, minroot, heads, roots, includepath=False):
        """return (heads(::(<roots> and <roots>::<heads>)))

        If includepath is True, return (<roots>::<heads>)."""
        try:
            # fast path provided by the native (C/Rust) index
            return self.index.reachableroots2(
                minroot, heads, roots, includepath
            )
        except AttributeError:
            # pure-Python index: fall back to the dagop implementation;
            # note the different argument order (roots before heads)
            return dagop._reachablerootspure(
                self.parentrevs, minroot, roots, heads, includepath
            )
2641 2453
2642 2454 def ancestor(self, a, b):
2643 2455 """calculate the "best" common ancestor of nodes a and b"""
2644 2456
2645 2457 a, b = self.rev(a), self.rev(b)
2646 2458 try:
2647 2459 ancs = self.index.ancestors(a, b)
2648 2460 except (AttributeError, OverflowError):
2649 2461 ancs = ancestor.ancestors(self.parentrevs, a, b)
2650 2462 if ancs:
2651 2463 # choose a consistent winner when there's a tie
2652 2464 return min(map(self.node, ancs))
2653 2465 return self.nullid
2654 2466
    def _match(self, id):
        """Try to resolve ``id`` to a binary node without prefix matching.

        ``id`` may be a revision number (int), a binary node, a decimal
        revision string, or a full hex nodeid. Returns the binary node,
        or None (implicitly) when nothing matches exactly.
        """
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == self.nodeconstants.nodelen:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node)  # quick search the index
                return node
            except error.LookupError:
                pass  # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if b"%d" % rev != id:
                # reject forms like b'01' or b'+1' that don't round-trip
                raise ValueError
            if rev < 0:
                # negative revs count from the end
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 2 * self.nodeconstants.nodelen:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (binascii.Error, error.LookupError):
                pass
2688 2500
    def _partialmatch(self, id):
        """Resolve a (possibly partial) hex nodeid prefix to a binary node.

        Returns the unique matching node, or None when nothing matches.
        Raises AmbiguousPrefixLookupError when several nodes match and
        WdirUnsupported when the prefix identifies the working directory
        pseudo-node.
        """
        # we don't care wdirfilenodeids as they should be always full hash
        maybewdir = self.nodeconstants.wdirhex.startswith(id)
        ambiguous = False
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    ambiguous = True
                else:
                    return partial
            elif maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            else:
                return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                ambiguous = True
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key is not hex
            pass
        if ambiguous:
            raise error.AmbiguousPrefixLookupError(
                id, self.display_id, _(b'ambiguous identifier')
            )

        if id in self._pcache:
            # previously resolved prefix
            return self._pcache[id]

        if len(id) <= 40:
            # hex(node)[:...]
            l = len(id) // 2 * 2  # grab an even number of digits
            try:
                # we're dropping the last digit, so let's check that it's hex,
                # to avoid the expensive computation below if it's not
                if len(id) % 2 > 0:
                    if not (id[-1] in hexdigits):
                        return None
                prefix = bin(id[:l])
            except binascii.Error:
                pass
            else:
                # slow path: linear scan of the whole index for prefix matches
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [
                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                ]
                if self.nodeconstants.nullhex.startswith(id):
                    nl.append(self.nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.display_id, _(b'ambiguous identifier')
                    )
                if maybewdir:
                    raise error.WdirUnsupported
                return None
2752 2564
2753 2565 def lookup(self, id):
2754 2566 """locate a node based on:
2755 2567 - revision number or str(revision number)
2756 2568 - nodeid or subset of hex nodeid
2757 2569 """
2758 2570 n = self._match(id)
2759 2571 if n is not None:
2760 2572 return n
2761 2573 n = self._partialmatch(id)
2762 2574 if n:
2763 2575 return n
2764 2576
2765 2577 raise error.LookupError(id, self.display_id, _(b'no match found'))
2766 2578
    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""

        def isvalid(prefix):
            # a prefix is valid when it resolves to exactly one node
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.display_id, _(b'no node'))
            return True

        def maybewdir(prefix):
            # all-'f' prefixes could also denote the working directory node
            return all(c == b'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            # unfiltered: the native index can compute the answer directly
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != self.nodeconstants.wdirid:
                    raise error.LookupError(
                        node, self.display_id, _(b'no node')
                    )
            except AttributeError:
                # Fall through to pure code
                pass

        if node == self.nodeconstants.wdirid:
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        # filtered (or pure) path: grow the prefix until it is unambiguous
        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)
2817 2629
2818 2630 def cmp(self, node, text):
2819 2631 """compare text with a given file revision
2820 2632
2821 2633 returns True if text is different than what is stored.
2822 2634 """
2823 2635 p1, p2 = self.parents(node)
2824 2636 return storageutil.hashrevisionsha1(text, p1, p2) != node
2825 2637
2826 2638 def deltaparent(self, rev):
2827 2639 """return deltaparent of the given revision"""
2828 2640 base = self.index[rev][3]
2829 2641 if base == rev:
2830 2642 return nullrev
2831 2643 elif self.delta_config.general_delta:
2832 2644 return base
2833 2645 else:
2834 2646 return rev - 1
2835 2647
2836 2648 def issnapshot(self, rev):
2837 2649 """tells whether rev is a snapshot"""
2838 2650 ret = self._inner.issnapshot(rev)
2839 2651 self.issnapshot = self._inner.issnapshot
2840 2652 return ret
2841 2653
2842 2654 def snapshotdepth(self, rev):
2843 2655 """number of snapshot in the chain before this one"""
2844 2656 if not self.issnapshot(rev):
2845 2657 raise error.ProgrammingError(b'revision %d not a snapshot')
2846 2658 return len(self._inner._deltachain(rev)[0]) - 1
2847 2659
2848 2660 def revdiff(self, rev1, rev2):
2849 2661 """return or calculate a delta between two revisions
2850 2662
2851 2663 The delta calculated is in binary form and is intended to be written to
2852 2664 revlog data directly. So this function needs raw revision data.
2853 2665 """
2854 2666 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
2855 2667 return bytes(self._inner._chunk(rev2))
2856 2668
2857 2669 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
2858 2670
    def revision(self, nodeorrev):
        """return an uncompressed revision of a given node or revision
        number.

        Flag processors are applied; use ``rawdata()`` for the stored bytes.
        """
        return self._revisiondata(nodeorrev)
2864 2676
2865 2677 def sidedata(self, nodeorrev):
2866 2678 """a map of extra data related to the changeset but not part of the hash
2867 2679
2868 2680 This function currently return a dictionary. However, more advanced
2869 2681 mapping object will likely be used in the future for a more
2870 2682 efficient/lazy code.
2871 2683 """
2872 2684 # deal with <nodeorrev> argument type
2873 2685 if isinstance(nodeorrev, int):
2874 2686 rev = nodeorrev
2875 2687 else:
2876 2688 rev = self.rev(nodeorrev)
2877 2689 return self._sidedata(rev)
2878 2690
    def _rawtext(self, node, rev):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)
        """
        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._inner._revisioncache:
            if self._inner._revisioncache[0] == node:
                # cached text counts as validated
                return (rev, self._inner._revisioncache[2], True)

        if rev is None:
            rev = self.rev(node)

        return self._inner.raw_text(node, rev)
2894 2706
    def _revisiondata(self, nodeorrev, raw=False):
        """Return revision text, optionally skipping flag processing.

        ``nodeorrev`` may be a revision number or a binary node. With
        ``raw=True`` the stored bytes are returned without running read-time
        flag processors (hash validation may still occur).
        """
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        # fast path the special `nullid` rev
        if node == self.nullid:
            return b""

        # ``rawtext`` is the text as stored inside the revlog. Might be the
        # revision or might need to be processed to retrieve the revision.
        rev, rawtext, validated = self._rawtext(node, rev)

        if raw and validated:
            # if we don't want to process the raw text and that raw
            # text is cached, we can exit early.
            return rawtext
        if rev is None:
            rev = self.rev(node)
        # the revlog's flag for this revision
        # (usually alter its state or content)
        flags = self.flags(rev)

        if validated and flags == REVIDX_DEFAULT_FLAGS:
            # no extra flags set, no flag processor runs, text = rawtext
            return rawtext

        if raw:
            validatehash = flagutil.processflagsraw(self, rawtext, flags)
            text = rawtext
        else:
            r = flagutil.processflagsread(self, rawtext, flags)
            text, validatehash = r
        if validatehash:
            self.checkhash(text, node, rev=rev)
        if not validated:
            # remember the raw text so future reads skip the I/O
            self._inner._revisioncache = (node, rev, rawtext)

        return text
2938 2750
2939 2751 def _sidedata(self, rev):
2940 2752 """Return the sidedata for a given revision number."""
2941 2753 sidedata_end = None
2942 2754 if self._docket is not None:
2943 2755 sidedata_end = self._docket.sidedata_end
2944 2756 return self._inner.sidedata(rev, sidedata_end)
2945 2757
    def rawdata(self, nodeorrev):
        """return an uncompressed raw data of a given node or revision number.

        Unlike ``revision()``, read-time flag processors are not applied.
        """
        return self._revisiondata(nodeorrev, raw=True)
2949 2761
    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        # delegates to storageutil; see hashrevisionsha1 for the exact scheme
        return storageutil.hashrevisionsha1(text, p1, p2)
2957 2769
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if (
                    self._inner._revisioncache
                    and self._inner._revisioncache[0] == node
                ):
                    self._inner._revisioncache = None

                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(
                    _(b"integrity check failed on %s:%s")
                    % (self.display_id, pycompat.bytestr(revornode))
                )
        except error.RevlogError:
            # a censored revision stores a placeholder text whose hash cannot
            # match; report that case distinctly
            if self.feature_config.censorable and storageutil.iscensoredtext(
                text
            ):
                raise error.CensoredNodeError(self.display_id, node, text)
            raise
2993 2805
2994 2806 @property
2995 2807 def _split_index_file(self):
2996 2808 """the path where to expect the index of an ongoing splitting operation
2997 2809
2998 2810 The file will only exist if a splitting operation is in progress, but
2999 2811 it is always expected at the same location."""
3000 2812 parts = self.radix.split(b'/')
3001 2813 if len(parts) > 1:
3002 2814 # adds a '-s' prefix to the ``data/` or `meta/` base
3003 2815 head = parts[0] + b'-s'
3004 2816 mids = parts[1:-1]
3005 2817 tail = parts[-1] + b'.i'
3006 2818 pieces = [head] + mids + [tail]
3007 2819 return b'/'.join(pieces)
3008 2820 else:
3009 2821 # the revlog is stored at the root of the store (changelog or
3010 2822 # manifest), no risk of collision.
3011 2823 return self.radix + b'.i.s'
3012 2824
    def _enforceinlinesize(self, tr, side_write=True):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        total_size = self.start(tiprev) + self.length(tiprev)
        if not self._inline or total_size < _maxinline:
            # still small enough (or already split): nothing to do
            return

        if self._docket is not None:
            msg = b"inline revlog should not have a docket"
            raise error.ProgrammingError(msg)

        # In the common case, we enforce inline size because the revlog has
        # been appended to. And in such case, it must have an initial offset
        # recorded in the transaction.
        troffset = tr.findoffset(self._inner.canonical_index_file)
        pre_touched = troffset is not None
        # the changelog is exempt from this check — NOTE(review): presumably
        # because it may be split without having been appended to first
        if not pre_touched and self.target[0] != KIND_CHANGELOG:
            raise error.RevlogError(
                _(b"%s not found in the transaction") % self._indexfile
            )

        tr.addbackup(self._inner.canonical_index_file, for_offset=pre_touched)
        tr.add(self._datafile, 0)

        new_index_file_path = None
        if side_write:
            old_index_file_path = self._indexfile
            new_index_file_path = self._split_index_file
            opener = self.opener
            # weakref so the transaction callbacks don't keep us alive
            weak_self = weakref.ref(self)

            # the "split" index replace the real index when the transaction is
            # finalized
            def finalize_callback(tr):
                opener.rename(
                    new_index_file_path,
                    old_index_file_path,
                    checkambig=True,
                )
                maybe_self = weak_self()
                if maybe_self is not None:
                    maybe_self._indexfile = old_index_file_path
                    maybe_self._inner.index_file = maybe_self._indexfile

            def abort_callback(tr):
                # on abort, fall back to the original inline index
                maybe_self = weak_self()
                if maybe_self is not None:
                    maybe_self._indexfile = old_index_file_path
                    maybe_self._inner.inline = True
                    maybe_self._inner.index_file = old_index_file_path

            tr.registertmp(new_index_file_path)
            if self.target[1] is not None:
                callback_id = b'000-revlog-split-%d-%s' % self.target
            else:
                callback_id = b'000-revlog-split-%d' % self.target[0]
            tr.addfinalize(callback_id, finalize_callback)
            tr.addabort(callback_id, abort_callback)

        self._format_flags &= ~FLAG_INLINE_DATA
        self._inner.split_inline(
            tr,
            self._format_flags | self._format_version,
            new_index_file_path=new_index_file_path,
        )

        self._inline = False
        if new_index_file_path is not None:
            self._indexfile = new_index_file_path

        nodemaputil.setup_persistent_nodemap(tr, self)
3085 2901
    def _nodeduplicatecallback(self, transaction, node):
        """called when trying to add a node already stored."""
        # intentional no-op hook; subclasses may override
3088 2904
    @contextlib.contextmanager
    def reading(self):
        """Context manager holding the inner revlog open for reading."""
        with self._inner.reading():
            yield
3093 2909
    @contextlib.contextmanager
    def _writing(self, transaction):
        """Context manager opening the revlog for writing within a transaction.

        Re-entrant: if the inner revlog is already writing, simply yield.
        Otherwise open a writing context (passing the docket's current end
        offsets, when a docket exists) and write the docket afterwards.
        """
        if self._trypending:
            msg = b'try to write in a `trypending` revlog: %s'
            msg %= self.display_id
            raise error.ProgrammingError(msg)
        if self._inner.is_writing:
            # already inside a writing context: nothing to set up
            yield
        else:
            data_end = None
            sidedata_end = None
            if self._docket is not None:
                data_end = self._docket.data_end
                sidedata_end = self._docket.sidedata_end
            with self._inner.writing(
                transaction,
                data_end=data_end,
                sidedata_end=sidedata_end,
            ):
                yield
                # persist the updated docket before leaving the write context
                if self._docket is not None:
                    self._write_docket(transaction)
3116 2932
3117 2933 @property
3118 2934 def is_delaying(self):
3119 2935 return self._inner.is_delaying
3120 2936
3121 2937 def _write_docket(self, transaction):
3122 2938 """write the current docket on disk
3123 2939
3124 2940 Exist as a method to help changelog to implement transaction logic
3125 2941
3126 2942 We could also imagine using the same transaction logic for all revlog
3127 2943 since docket are cheap."""
3128 2944 self._docket.write(transaction)
3129 2945
    def addrevision(
        self,
        text,
        transaction,
        link,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=REVIDX_DEFAULT_FLAGS,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2), however subclasses might
            use different hashing method (and override checkhash() in such case)
        flags - the known flags to set on the revision
        deltacomputer - an optional deltacomputer instance shared between
            multiple calls
        sidedata - optional sidedata mapping to store with the revision; only
            allowed when this revlog has sidedata support enabled

        Returns the revision number of the added (or pre-existing) revision.
        Raises RevlogError for linkrev -1 or oversized revisions, and
        ProgrammingError for unsupported sidedata.
        """
        if link == nullrev:
            raise error.RevlogError(
                _(b"attempted to add linkrev -1 to %s") % self.display_id
            )

        if sidedata is None:
            sidedata = {}
        elif sidedata and not self.feature_config.has_side_data:
            raise error.ProgrammingError(
                _(b"trying to add sidedata to a revlog who don't support them")
            )

        if flags:
            # flag processors may need the node, so compute it up front
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = flagutil.processflagswrite(self, text, flags)

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise error.RevlogError(
                _(
                    b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
                )
                % (self.display_id, len(rawtext))
            )

        node = node or self.hash(rawtext, p1, p2)
        rev = self.index.get_rev(node)
        if rev is not None:
            # node already stored: nothing to add, return the existing rev
            return rev

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(
            rawtext,
            transaction,
            link,
            p1,
            p2,
            node,
            flags,
            cachedelta=cachedelta,
            deltacomputer=deltacomputer,
            sidedata=sidedata,
        )
3207 3023
3208 3024 def addrawrevision(
3209 3025 self,
3210 3026 rawtext,
3211 3027 transaction,
3212 3028 link,
3213 3029 p1,
3214 3030 p2,
3215 3031 node,
3216 3032 flags,
3217 3033 cachedelta=None,
3218 3034 deltacomputer=None,
3219 3035 sidedata=None,
3220 3036 ):
3221 3037 """add a raw revision with known flags, node and parents
3222 3038 useful when reusing a revision not stored in this revlog (ex: received
3223 3039 over wire, or read from an external bundle).
3224 3040 """
3225 3041 with self._writing(transaction):
3226 3042 return self._addrevision(
3227 3043 node,
3228 3044 rawtext,
3229 3045 transaction,
3230 3046 link,
3231 3047 p1,
3232 3048 p2,
3233 3049 flags,
3234 3050 cachedelta,
3235 3051 deltacomputer=deltacomputer,
3236 3052 sidedata=sidedata,
3237 3053 )
3238 3054
3239 3055 def compress(self, data):
3240 3056 return self._inner.compress(data)
3241 3057
3242 3058 def decompress(self, data):
3243 3059 return self._inner.decompress(data)
3244 3060
    def _addrevision(
        self,
        node,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        flags,
        cachedelta,
        alwayscache=False,
        deltacomputer=None,
        sidedata=None,
    ):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.

        Returns the revision number of the newly appended revision.
        Must be called inside a `_writing` context (enforced below).
        """
        if node == self.nullid:
            raise error.RevlogError(
                _(b"%s: attempt to add null revision") % self.display_id
            )
        if (
            node == self.nodeconstants.wdirid
            or node in self.nodeconstants.wdirfilenodeids
        ):
            raise error.RevlogError(
                _(b"%s: attempt to add wdir revision") % self.display_id
            )
        if self._inner._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)

        # single-element list so the deltacomputer can lazily fill in the text
        btext = [rawtext]

        curr = len(self)
        prev = curr - 1

        offset = self._get_data_offset(prev)

        if self._concurrencychecker:
            ifh, dfh, sdfh = self._inner._writinghandles
            # XXX no checking for the sidedata file
            if self._inline:
                # offset is "as if" it were in the .d file, so we need to add on
                # the size of the entry metadata.
                self._concurrencychecker(
                    ifh, self._indexfile, offset + curr * self.index.entry_size
                )
            else:
                # Entries in the .i are a consistent size.
                self._concurrencychecker(
                    ifh, self._indexfile, curr * self.index.entry_size
                )
                self._concurrencychecker(dfh, self._datafile, offset)

        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(
                revlog.size(self, cachedelta[0]), cachedelta[1]
            )
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            write_debug = None
            if self.delta_config.debug_delta:
                write_debug = transaction._report
            deltacomputer = deltautil.deltacomputer(
                self, write_debug=write_debug
            )

        if cachedelta is not None and len(cachedelta) == 2:
            # If the cached delta has no information about how it should be
            # reused, add the default reuse instruction according to the
            # revlog's configuration.
            if (
                self.delta_config.general_delta
                and self.delta_config.lazy_delta_base
            ):
                delta_base_reuse = DELTA_BASE_REUSE_TRY
            else:
                delta_base_reuse = DELTA_BASE_REUSE_NO
            cachedelta = (cachedelta[0], cachedelta[1], delta_base_reuse)

        revinfo = revlogutils.revisioninfo(
            node,
            p1,
            p2,
            btext,
            textlen,
            cachedelta,
            flags,
        )

        deltainfo = deltacomputer.finddeltainfo(revinfo)

        compression_mode = COMP_MODE_INLINE
        if self._docket is not None:
            default_comp = self._docket.default_compression_header
            r = deltautil.delta_compression(default_comp, deltainfo)
            compression_mode, deltainfo = r

        sidedata_compression_mode = COMP_MODE_INLINE
        if sidedata and self.feature_config.has_side_data:
            sidedata_compression_mode = COMP_MODE_PLAIN
            serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
            sidedata_offset = self._docket.sidedata_end
            h, comp_sidedata = self._inner.compress(serialized_sidedata)
            # only keep the compressed form when it actually saves space and
            # cannot be confused with uncompressed data (leading NUL / b'u')
            if (
                h != b'u'
                and comp_sidedata[0:1] != b'\0'
                and len(comp_sidedata) < len(serialized_sidedata)
            ):
                assert not h
                if (
                    comp_sidedata[0:1]
                    == self._docket.default_compression_header
                ):
                    sidedata_compression_mode = COMP_MODE_DEFAULT
                    serialized_sidedata = comp_sidedata
                else:
                    sidedata_compression_mode = COMP_MODE_INLINE
                    serialized_sidedata = comp_sidedata
        else:
            serialized_sidedata = b""
            # Don't store the offset if the sidedata is empty, that way
            # we can easily detect empty sidedata and they will be no different
            # than ones we manually add.
            sidedata_offset = 0

        rank = RANK_UNKNOWN
        if self.feature_config.compute_rank:
            if (p1r, p2r) == (nullrev, nullrev):
                rank = 1
            elif p1r != nullrev and p2r == nullrev:
                rank = 1 + self.fast_rank(p1r)
            elif p1r == nullrev and p2r != nullrev:
                rank = 1 + self.fast_rank(p2r)
            else:  # merge node
                if rustdagop is not None and self.index.rust_ext_compat:
                    rank = rustdagop.rank(self.index, p1r, p2r)
                else:
                    pmin, pmax = sorted((p1r, p2r))
                    rank = 1 + self.fast_rank(pmax)
                    rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))

        # build the binary index entry for the new revision
        e = revlogutils.entry(
            flags=flags,
            data_offset=offset,
            data_compressed_length=deltainfo.deltalen,
            data_uncompressed_length=textlen,
            data_compression_mode=compression_mode,
            data_delta_base=deltainfo.base,
            link_rev=link,
            parent_rev_1=p1r,
            parent_rev_2=p2r,
            node_id=node,
            sidedata_offset=sidedata_offset,
            sidedata_compressed_length=len(serialized_sidedata),
            sidedata_compression_mode=sidedata_compression_mode,
            rank=rank,
        )

        self.index.append(e)
        entry = self.index.entry_binary(curr)
        if curr == 0 and self._docket is None:
            # docket-less revlogs embed the format header in the first entry
            header = self._format_flags | self._format_version
            header = self.index.pack_header(header)
            entry = header + entry
        self._writeentry(
            transaction,
            entry,
            deltainfo.data,
            link,
            offset,
            serialized_sidedata,
            sidedata_offset,
        )

        rawtext = btext[0]

        if alwayscache and rawtext is None:
            rawtext = deltacomputer.buildtext(revinfo)

        if type(rawtext) == bytes:  # only accept immutable objects
            self._inner._revisioncache = (node, curr, rawtext)
        self._chainbasecache[curr] = deltainfo.chainbase
        return curr
3449 3265
3450 3266 def _get_data_offset(self, prev):
3451 3267 """Returns the current offset in the (in-transaction) data file.
3452 3268 Versions < 2 of the revlog can get this 0(1), revlog v2 needs a docket
3453 3269 file to store that information: since sidedata can be rewritten to the
3454 3270 end of the data file within a transaction, you can have cases where, for
3455 3271 example, rev `n` does not have sidedata while rev `n - 1` does, leading
3456 3272 to `n - 1`'s sidedata being written after `n`'s data.
3457 3273
3458 3274 TODO cache this in a docket file before getting out of experimental."""
3459 3275 if self._docket is None:
3460 3276 return self.end(prev)
3461 3277 else:
3462 3278 return self._docket.data_end
3463 3279
    def _writeentry(
        self,
        transaction,
        entry,
        data,
        link,
        offset,
        sidedata,
        sidedata_offset,
    ):
        """Write one index entry and its data to the underlying files.

        Delegates the actual writes to the inner revlog, migrates the revlog
        out of inline mode when it grows too large, and keeps the docket's
        end offsets in sync with what was written.
        """
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        index_end = data_end = sidedata_end = None
        if self._docket is not None:
            index_end = self._docket.index_end
            data_end = self._docket.data_end
            sidedata_end = self._docket.sidedata_end

        files_end = self._inner.write_entry(
            transaction,
            entry,
            data,
            link,
            offset,
            sidedata,
            sidedata_offset,
            index_end,
            data_end,
            sidedata_end,
        )
        self._enforceinlinesize(transaction)
        if self._docket is not None:
            # record the new end of each file in the docket
            self._docket.index_end = files_end[0]
            self._docket.data_end = files_end[1]
            self._docket.sidedata_end = files_end[2]

        nodemaputil.setup_persistent_nodemap(transaction, self)
3511 3327
    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        alwayscache=False,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        debug_info=None,
        delta_base_reuse_policy=None,
    ):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with this revlog
        and the revision number that was added.  ``duplicaterevisioncb`` is
        called likewise for deltas whose node is already stored.

        ``delta_base_reuse_policy``, when not None, overrides the revlog's
        configured DELTA_BASE_REUSE_* behavior for this group.

        Returns True if at least one revision from the group was processed
        (added or recognized as a duplicate), False for an empty group.
        """

        if self._adding_group:
            raise error.ProgrammingError(b'cannot nest addgroup() calls')

        # read the default delta-base reuse policy from revlog config if the
        # group did not specify one.
        if delta_base_reuse_policy is None:
            if (
                self.delta_config.general_delta
                and self.delta_config.lazy_delta_base
            ):
                delta_base_reuse_policy = DELTA_BASE_REUSE_TRY
            else:
                delta_base_reuse_policy = DELTA_BASE_REUSE_NO

        self._adding_group = True
        empty = True
        try:
            with self._writing(transaction):
                write_debug = None
                if self.delta_config.debug_delta:
                    write_debug = transaction._report
                deltacomputer = deltautil.deltacomputer(
                    self,
                    write_debug=write_debug,
                    debug_info=debug_info,
                )
                # loop through our set of deltas
                for data in deltas:
                    (
                        node,
                        p1,
                        p2,
                        linknode,
                        deltabase,
                        delta,
                        flags,
                        sidedata,
                    ) = data
                    link = linkmapper(linknode)
                    flags = flags or REVIDX_DEFAULT_FLAGS

                    rev = self.index.get_rev(node)
                    if rev is not None:
                        # this can happen if two branches make the same change
                        self._nodeduplicatecallback(transaction, rev)
                        if duplicaterevisioncb:
                            duplicaterevisioncb(self, rev)
                        empty = False
                        continue

                    for p in (p1, p2):
                        if not self.index.has_node(p):
                            raise error.LookupError(
                                p, self.radix, _(b'unknown parent')
                            )

                    if not self.index.has_node(deltabase):
                        raise error.LookupError(
                            deltabase, self.display_id, _(b'unknown delta base')
                        )

                    baserev = self.rev(deltabase)

                    if baserev != nullrev and self.iscensored(baserev):
                        # if base is censored, delta must be full replacement in a
                        # single patch operation
                        hlen = struct.calcsize(b">lll")
                        oldlen = self.rawsize(baserev)
                        newlen = len(delta) - hlen
                        if delta[:hlen] != mdiff.replacediffheader(
                            oldlen, newlen
                        ):
                            raise error.CensoredBaseError(
                                self.display_id, self.node(baserev)
                            )

                    if not flags and self._peek_iscensored(baserev, delta):
                        flags |= REVIDX_ISCENSORED

                    # We assume consumers of addrevisioncb will want to retrieve
                    # the added revision, which will require a call to
                    # revision(). revision() will fast path if there is a cache
                    # hit. So, we tell _addrevision() to always cache in this case.
                    # We're only using addgroup() in the context of changegroup
                    # generation so the revision data can always be handled as raw
                    # by the flagprocessor.
                    rev = self._addrevision(
                        node,
                        None,
                        transaction,
                        link,
                        p1,
                        p2,
                        flags,
                        (baserev, delta, delta_base_reuse_policy),
                        alwayscache=alwayscache,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

                    if addrevisioncb:
                        addrevisioncb(self, rev)
                    empty = False
        finally:
            self._adding_group = False
        return not empty
3640 3456
3641 3457 def iscensored(self, rev):
3642 3458 """Check if a file revision is censored."""
3643 3459 if not self.feature_config.censorable:
3644 3460 return False
3645 3461
3646 3462 return self.flags(rev) & REVIDX_ISCENSORED
3647 3463
3648 3464 def _peek_iscensored(self, baserev, delta):
3649 3465 """Quickly check if a delta produces a censored revision."""
3650 3466 if not self.feature_config.censorable:
3651 3467 return False
3652 3468
3653 3469 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
3654 3470
3655 3471 def getstrippoint(self, minlink):
3656 3472 """find the minimum rev that must be stripped to strip the linkrev
3657 3473
3658 3474 Returns a tuple containing the minimum rev and a set of all revs that
3659 3475 have linkrevs that will be broken by this strip.
3660 3476 """
3661 3477 return storageutil.resolvestripinfo(
3662 3478 minlink,
3663 3479 len(self) - 1,
3664 3480 self.headrevs(),
3665 3481 self.linkrev,
3666 3482 self.parentrevs,
3667 3483 )
3668 3484
3669 3485 def strip(self, minlink, transaction):
3670 3486 """truncate the revlog on the first revision with a linkrev >= minlink
3671 3487
3672 3488 This function is called when we're stripping revision minlink and
3673 3489 its descendants from the repository.
3674 3490
3675 3491 We have to remove all revisions with linkrev >= minlink, because
3676 3492 the equivalent changelog revisions will be renumbered after the
3677 3493 strip.
3678 3494
3679 3495 So we truncate the revlog on the first of these revisions, and
3680 3496 trust that the caller has saved the revisions that shouldn't be
3681 3497 removed and that it'll re-add them after this truncation.
3682 3498 """
3683 3499 if len(self) == 0:
3684 3500 return
3685 3501
3686 3502 rev, _ = self.getstrippoint(minlink)
3687 3503 if rev == len(self):
3688 3504 return
3689 3505
3690 3506 # first truncate the files on disk
3691 3507 data_end = self.start(rev)
3692 3508 if not self._inline:
3693 3509 transaction.add(self._datafile, data_end)
3694 3510 end = rev * self.index.entry_size
3695 3511 else:
3696 3512 end = data_end + (rev * self.index.entry_size)
3697 3513
3698 3514 if self._sidedatafile:
3699 3515 sidedata_end = self.sidedata_cut_off(rev)
3700 3516 transaction.add(self._sidedatafile, sidedata_end)
3701 3517
3702 3518 transaction.add(self._indexfile, end)
3703 3519 if self._docket is not None:
3704 3520 # XXX we could, leverage the docket while stripping. However it is
3705 3521 # not powerfull enough at the time of this comment
3706 3522 self._docket.index_end = end
3707 3523 self._docket.data_end = data_end
3708 3524 self._docket.sidedata_end = sidedata_end
3709 3525 self._docket.write(transaction, stripping=True)
3710 3526
3711 3527 # then reset internal state in memory to forget those revisions
3712 3528 self._chaininfocache = util.lrucachedict(500)
3713 3529 self._inner.clear_cache()
3714 3530
3715 3531 del self.index[rev:-1]
3716 3532
3717 3533 def checksize(self):
3718 3534 """Check size of index and data files
3719 3535
3720 3536 return a (dd, di) tuple.
3721 3537 - dd: extra bytes for the "data" file
3722 3538 - di: extra bytes for the "index" file
3723 3539
3724 3540 A healthy revlog will return (0, 0).
3725 3541 """
3726 3542 expected = 0
3727 3543 if len(self):
3728 3544 expected = max(0, self.end(len(self) - 1))
3729 3545
3730 3546 try:
3731 3547 with self._datafp() as f:
3732 3548 f.seek(0, io.SEEK_END)
3733 3549 actual = f.tell()
3734 3550 dd = actual - expected
3735 3551 except FileNotFoundError:
3736 3552 dd = 0
3737 3553
3738 3554 try:
3739 3555 f = self.opener(self._indexfile)
3740 3556 f.seek(0, io.SEEK_END)
3741 3557 actual = f.tell()
3742 3558 f.close()
3743 3559 s = self.index.entry_size
3744 3560 i = max(0, actual // s)
3745 3561 di = actual - (i * s)
3746 3562 if self._inline:
3747 3563 databytes = 0
3748 3564 for r in self:
3749 3565 databytes += max(0, self.length(r))
3750 3566 dd = 0
3751 3567 di = actual - len(self) * s - databytes
3752 3568 except FileNotFoundError:
3753 3569 di = 0
3754 3570
3755 3571 return (dd, di)
3756 3572
3757 3573 def files(self):
3758 3574 """return list of files that compose this revlog"""
3759 3575 res = [self._indexfile]
3760 3576 if self._docket_file is None:
3761 3577 if not self._inline:
3762 3578 res.append(self._datafile)
3763 3579 else:
3764 3580 res.append(self._docket_file)
3765 3581 res.extend(self._docket.old_index_filepaths(include_empty=False))
3766 3582 if self._docket.data_end:
3767 3583 res.append(self._datafile)
3768 3584 res.extend(self._docket.old_data_filepaths(include_empty=False))
3769 3585 if self._docket.sidedata_end:
3770 3586 res.append(self._sidedatafile)
3771 3587 res.extend(self._docket.old_sidedata_filepaths(include_empty=False))
3772 3588 return res
3773 3589
    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
        sidedata_helpers=None,
        debug_info=None,
    ):
        """Emit revision data for ``nodes``.

        Validates ``nodesorder`` and downgrades the requested ordering and
        delta mode to what this revlog can actually honor, then delegates
        the heavy lifting to ``storageutil.emitrevisions``.
        """
        if nodesorder not in (b'nodes', b'storage', b'linear', None):
            raise error.ProgrammingError(
                b'unhandled value for nodesorder: %s' % nodesorder
            )

        if nodesorder is None and not self.delta_config.general_delta:
            # without general delta, storage order is the only efficient order
            nodesorder = b'storage'

        if (
            not self._storedeltachains
            and deltamode != repository.CG_DELTAMODE_PREV
        ):
            deltamode = repository.CG_DELTAMODE_FULL

        return storageutil.emitrevisions(
            self,
            nodes,
            nodesorder,
            revlogrevisiondelta,
            deltaparentfn=self.deltaparent,
            candeltafn=self._candelta,
            rawsizefn=self.rawsize,
            revdifffn=self.revdiff,
            flagsfn=self.flags,
            deltamode=deltamode,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            sidedata_helpers=sidedata_helpers,
            debug_info=debug_info,
        )
3814 3630
    # Delta reuse policies accepted by `clone()`; see the `clone()` docstring
    # for the exact semantics of each value.
    DELTAREUSEALWAYS = b'always'
    DELTAREUSESAMEREVS = b'samerevs'
    DELTAREUSENEVER = b'never'

    DELTAREUSEFULLADD = b'fulladd'

    # the set of every valid delta-reuse policy value
    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}
3822 3638
    def clone(
        self,
        tr,
        destrevlog,
        addrevisioncb=None,
        deltareuse=DELTAREUSESAMEREVS,
        forcedeltabothparents=None,
        sidedata_helpers=None,
    ):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta
        encoding differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
           Deltas will always be reused (if possible), even if the destination
           revlog would not select the same revisions for the delta. This is
           the fastest mode of operation.
        DELTAREUSESAMEREVS
           Deltas will be reused if the destination revlog would pick the same
           revisions for the delta. This mode strikes a balance between speed
           and optimization.
        DELTAREUSENEVER
           Deltas will never be reused. This is the slowest mode of execution.
           This mode can be used to recompute deltas (e.g. if the diff/delta
           algorithm changes).
        DELTAREUSEFULLADD
           Revisions will be re-added as if they were new content. This is
           slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
           e.g. large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force compute deltas against both parents
        for merges. By default, the current default is used.

        See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
        `sidedata_helpers`.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase controls whether to reuse a cached delta,
        # if possible.
        old_delta_config = destrevlog.delta_config
        destrevlog.delta_config = destrevlog.delta_config.copy()

        try:
            # translate the reuse policy into the destination's delta config;
            # DELTAREUSEFULLADD intentionally leaves the config untouched
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog.delta_config.lazy_delta_base = True
                destrevlog.delta_config.lazy_delta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog.delta_config.lazy_delta_base = False
                destrevlog.delta_config.lazy_delta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog.delta_config.lazy_delta_base = False
                destrevlog.delta_config.lazy_delta = False

            delta_both_parents = (
                forcedeltabothparents or old_delta_config.delta_both_parents
            )
            destrevlog.delta_config.delta_both_parents = delta_both_parents

            with self.reading(), destrevlog._writing(tr):
                self._clone(
                    tr,
                    destrevlog,
                    addrevisioncb,
                    deltareuse,
                    forcedeltabothparents,
                    sidedata_helpers,
                )

        finally:
            # always restore the destination's original delta configuration
            destrevlog.delta_config = old_delta_config
3922 3738
    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedata_helpers,
    ):
        """perform the core duty of `revlog.clone` after parameter processing

        Iterates every revision in storage order and re-adds it to
        ``destrevlog``, either through the full `addrevision` path
        (DELTAREUSEFULLADD) or through `_addrevision` with an optionally
        reused raw delta.
        """
        write_debug = None
        if self.delta_config.debug_delta:
            write_debug = tr._report
        deltacomputer = deltautil.deltacomputer(
            destrevlog,
            write_debug=write_debug,
        )
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xFFFF
            linkrev = entry[4]
            p1 = index[entry[5]][7]
            p2 = index[entry[6]][7]
            node = entry[7]

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if deltareuse == self.DELTAREUSEFULLADD:
                text = self._revisiondata(rev)
                sidedata = self.sidedata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                destrevlog.addrevision(
                    text,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    cachedelta=cachedelta,
                    node=node,
                    flags=flags,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )
            else:
                if destrevlog.delta_config.lazy_delta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._inner._chunk(rev)))

                sidedata = None
                if not cachedelta:
                    # no reusable delta: fall back to the full raw text
                    try:
                        rawtext = self._revisiondata(rev)
                    except error.CensoredNodeError as censored:
                        assert flags & REVIDX_ISCENSORED
                        rawtext = censored.tombstone
                    sidedata = self.sidedata(rev)
                if sidedata is None:
                    # cachedelta path: sidedata was not fetched above
                    sidedata = self.sidedata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                destrevlog._addrevision(
                    node,
                    rawtext,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    flags,
                    cachedelta,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )

            if addrevisioncb:
                addrevisioncb(self, rev, node)
4016 3832
4017 3833 def censorrevision(self, tr, censornode, tombstone=b''):
4018 3834 if self._format_version == REVLOGV0:
4019 3835 raise error.RevlogError(
4020 3836 _(b'cannot censor with version %d revlogs')
4021 3837 % self._format_version
4022 3838 )
4023 3839 elif self._format_version == REVLOGV1:
4024 3840 rewrite.v1_censor(self, tr, censornode, tombstone)
4025 3841 else:
4026 3842 rewrite.v2_censor(self, tr, censornode, tombstone)
4027 3843
    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        ``state`` is a mutable dict shared with the caller; this method reads
        ``b'expectedversion'``, ``b'skipflags'`` and ``b'erroroncensored'``
        from it and records ``b'skipread'`` / ``b'safe_renamed'`` sets into it.

        Yields ``revlogproblem`` instances describing problems that are
        found.
        """
        # Cheap structural checks first: data/index length bookkeeping.
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_(b'index contains %d extra bytes') % di)

        version = self._format_version

        # The verifier tells us what version revlog we should be.
        if version != state[b'expectedversion']:
            yield revlogproblem(
                warning=_(b"warning: '%s' uses revlog format %d; expected %d")
                % (self.display_id, version, state[b'expectedversion'])
            )

        state[b'skipread'] = set()
        state[b'safe_renamed'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta: file content starts with b'\1\n', the metadata
            #         header defined in filelog.py, but without a rename
            #   ext: content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            #  -------------------------------------------------------
            #  flags()              | 0      | 0      | 0     | not 0
            #  renamed()            | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text"
            # mentioned below is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #              | common  | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()    | L1      | L1     | L1    | L1
            # size()       | L1      | L2-LM  | L1(*) | L1 (?)
            # len(rawtext) | L2      | L2     | L2    | L2
            # len(text)    | L2      | L2     | L2    | L3
            # len(read())  | L2      | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            # 1. length check: L1 == L2, in all cases.
            # 2. hash check: depending on flag processor, we may need to
            #    use either "text" (external), or "rawtext" (in revlog).

            try:
                # Mask the requested skip flags by what this revision
                # actually carries, so we never skip checks spuriously.
                skipflags = state.get(b'skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                _verify_revision(self, skipflags, state, node)

                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_(b'unpacked size is %d, %d expected') % (l2, l1),
                        node=node,
                    )

            except error.CensoredNodeError:
                # Censored data is only an error when the caller asked for it.
                if state[b'erroroncensored']:
                    yield revlogproblem(
                        error=_(b'censored file data'), node=node
                    )
                state[b'skipread'].add(node)
            except Exception as e:
                # Any other failure to unpack is reported but does not abort
                # the walk; downstream readers are told to skip this node.
                yield revlogproblem(
                    error=_(b'unpacking %s: %s')
                    % (short(node), stringutil.forcebytestr(e)),
                    node=node,
                )
                state[b'skipread'].add(node)
4132 3948
4133 3949 def storageinfo(
4134 3950 self,
4135 3951 exclusivefiles=False,
4136 3952 sharedfiles=False,
4137 3953 revisionscount=False,
4138 3954 trackedsize=False,
4139 3955 storedsize=False,
4140 3956 ):
4141 3957 d = {}
4142 3958
4143 3959 if exclusivefiles:
4144 3960 d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
4145 3961 if not self._inline:
4146 3962 d[b'exclusivefiles'].append((self.opener, self._datafile))
4147 3963
4148 3964 if sharedfiles:
4149 3965 d[b'sharedfiles'] = []
4150 3966
4151 3967 if revisionscount:
4152 3968 d[b'revisionscount'] = len(self)
4153 3969
4154 3970 if trackedsize:
4155 3971 d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))
4156 3972
4157 3973 if storedsize:
4158 3974 d[b'storedsize'] = sum(
4159 3975 self.opener.stat(path).st_size for path in self.files()
4160 3976 )
4161 3977
4162 3978 return d
4163 3979
    def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
        """Regenerate sidedata for revisions ``startrev`` through ``endrev``.

        ``helpers`` is the sidedata-helpers triple; entries 1 and 2 appear to
        be the generators and removers respectively (the early return fires
        when both are empty) — TODO confirm against run_sidedata_helpers.
        New sidedata is appended to the sidedata file and the affected index
        entries are rewritten in place.  Revisions that already carry
        sidedata cannot be rewritten and raise ``error.Abort``.
        """
        if not self.feature_config.has_side_data:
            return
        # revlog formats with sidedata support do not support inline storage
        assert not self._inline
        if not helpers[1] and not helpers[2]:
            # Nothing to generate or remove
            return

        new_entries = []
        # append the new sidedata
        with self._writing(transaction):
            ifh, dfh, sdfh = self._inner._writinghandles
            dfh.seek(self._docket.sidedata_end, os.SEEK_SET)

            current_offset = sdfh.tell()
            for rev in range(startrev, endrev + 1):
                entry = self.index[rev]
                new_sidedata, flags = sidedatautil.run_sidedata_helpers(
                    store=self,
                    sidedata_helpers=helpers,
                    sidedata={},
                    rev=rev,
                )

                serialized_sidedata = sidedatautil.serialize_sidedata(
                    new_sidedata
                )

                sidedata_compression_mode = COMP_MODE_INLINE
                if serialized_sidedata and self.feature_config.has_side_data:
                    sidedata_compression_mode = COMP_MODE_PLAIN
                    h, comp_sidedata = self._inner.compress(serialized_sidedata)
                    # Only keep the compressed form when it is unambiguous
                    # (no b'u' marker, no NUL-prefixed payload) and smaller.
                    if (
                        h != b'u'
                        and comp_sidedata[0] != b'\0'
                        and len(comp_sidedata) < len(serialized_sidedata)
                    ):
                        assert not h
                        if (
                            comp_sidedata[0]
                            == self._docket.default_compression_header
                        ):
                            sidedata_compression_mode = COMP_MODE_DEFAULT
                            serialized_sidedata = comp_sidedata
                        else:
                            sidedata_compression_mode = COMP_MODE_INLINE
                            serialized_sidedata = comp_sidedata
                if entry[8] != 0 or entry[9] != 0:
                    # rewriting entries that already have sidedata is not
                    # supported yet, because it introduces garbage data in the
                    # revlog.
                    msg = b"rewriting existing sidedata is not supported yet"
                    raise error.Abort(msg)

                # Apply (potential) flags to add and to remove after running
                # the sidedata helpers
                new_offset_flags = entry[0] | flags[0] & ~flags[1]
                entry_update = (
                    current_offset,
                    len(serialized_sidedata),
                    new_offset_flags,
                    sidedata_compression_mode,
                )

                # the sidedata computation might have moved the file cursors
                # around, so re-seek before writing at our recorded offset
                sdfh.seek(current_offset, os.SEEK_SET)
                sdfh.write(serialized_sidedata)
                new_entries.append(entry_update)
                current_offset += len(serialized_sidedata)
            self._docket.sidedata_end = sdfh.tell()

            # rewrite the new index entries
            ifh.seek(startrev * self.index.entry_size)
            for i, e in enumerate(new_entries):
                rev = startrev + i
                self.index.replace_sidedata_info(rev, *e)
                packed = self.index.entry_binary(rev)
                if rev == 0 and self._docket is None:
                    # docket-less formats embed the header in the first entry
                    header = self._format_flags | self._format_version
                    header = self.index.pack_header(header)
                    packed = header + packed
                ifh.write(packed)
@@ -1,1107 +1,1118 b''
1 1 Setting up test
2 2
3 3 $ hg init test
4 4 $ cd test
5 5 $ echo 0 > afile
6 6 $ hg add afile
7 7 $ hg commit -m "0.0"
8 8 $ echo 1 >> afile
9 9 $ hg commit -m "0.1"
10 10 $ echo 2 >> afile
11 11 $ hg commit -m "0.2"
12 12 $ echo 3 >> afile
13 13 $ hg commit -m "0.3"
14 14 $ hg update -C 0
15 15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
16 16 $ echo 1 >> afile
17 17 $ hg commit -m "1.1"
18 18 created new head
19 19 $ echo 2 >> afile
20 20 $ hg commit -m "1.2"
21 21 $ echo "a line" > fred
22 22 $ echo 3 >> afile
23 23 $ hg add fred
24 24 $ hg commit -m "1.3"
25 25 $ hg mv afile adifferentfile
26 26 $ hg commit -m "1.3m"
27 27 $ hg update -C 3
28 28 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
29 29 $ hg mv afile anotherfile
30 30 $ hg commit -m "0.3m"
31 31 $ hg verify -q
32 32 $ cd ..
33 33 $ hg init empty
34 34
35 35 Bundle and phase
36 36
37 37 $ hg -R test phase --force --secret 0
38 38 $ hg -R test bundle phase.hg empty
39 39 searching for changes
40 40 no changes found (ignored 9 secret changesets)
41 41 [1]
42 42 $ hg -R test phase --draft -r 'head()'
43 43
44 44 Bundle --all
45 45
46 46 $ hg -R test bundle --all all.hg
47 47 9 changesets found
48 48
49 49 Bundle test to full.hg
50 50
51 51 $ hg -R test bundle full.hg empty
52 52 searching for changes
53 53 9 changesets found
54 54
55 55 Unbundle full.hg in test
56 56
57 57 $ hg -R test unbundle full.hg
58 58 adding changesets
59 59 adding manifests
60 60 adding file changes
61 61 added 0 changesets with 0 changes to 4 files
62 62 (run 'hg update' to get a working copy)
63 63
64 64 Verify empty
65 65
66 66 $ hg -R empty heads
67 67 [1]
68 68 $ hg -R empty verify -q
69 69
70 70 #if repobundlerepo
71 71
72 72 Pull full.hg into test (using --cwd)
73 73
74 74 $ hg --cwd test pull ../full.hg
75 75 pulling from ../full.hg
76 76 searching for changes
77 77 no changes found
78 78
79 79 Verify that there are no leaked temporary files after pull (issue2797)
80 80
81 81 $ ls test/.hg | grep .hg10un
82 82 [1]
83 83
84 84 Pull full.hg into empty (using --cwd)
85 85
86 86 $ hg --cwd empty pull ../full.hg
87 87 pulling from ../full.hg
88 88 requesting all changes
89 89 adding changesets
90 90 adding manifests
91 91 adding file changes
92 92 added 9 changesets with 7 changes to 4 files (+1 heads)
93 93 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
94 94 (run 'hg heads' to see heads, 'hg merge' to merge)
95 95
96 96 Rollback empty
97 97
98 98 $ hg -R empty rollback
99 99 repository tip rolled back to revision -1 (undo pull)
100 100
101 101 Pull full.hg into empty again (using --cwd)
102 102
103 103 $ hg --cwd empty pull ../full.hg
104 104 pulling from ../full.hg
105 105 requesting all changes
106 106 adding changesets
107 107 adding manifests
108 108 adding file changes
109 109 added 9 changesets with 7 changes to 4 files (+1 heads)
110 110 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
111 111 (run 'hg heads' to see heads, 'hg merge' to merge)
112 112
113 113 Pull full.hg into test (using -R)
114 114
115 115 $ hg -R test pull full.hg
116 116 pulling from full.hg
117 117 searching for changes
118 118 no changes found
119 119
120 120 Pull full.hg into empty (using -R)
121 121
122 122 $ hg -R empty pull full.hg
123 123 pulling from full.hg
124 124 searching for changes
125 125 no changes found
126 126
127 127 Rollback empty
128 128
129 129 $ hg -R empty rollback
130 130 repository tip rolled back to revision -1 (undo pull)
131 131
132 132 Pull full.hg into empty again (using -R)
133 133
134 134 $ hg -R empty pull full.hg
135 135 pulling from full.hg
136 136 requesting all changes
137 137 adding changesets
138 138 adding manifests
139 139 adding file changes
140 140 added 9 changesets with 7 changes to 4 files (+1 heads)
141 141 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
142 142 (run 'hg heads' to see heads, 'hg merge' to merge)
143 143
144 144 Log -R full.hg in fresh empty
145 145
146 146 $ rm -r empty
147 147 $ hg init empty
148 148 $ cd empty
149 149 $ hg -R bundle://../full.hg log
150 150 changeset: 8:aa35859c02ea
151 151 tag: tip
152 152 parent: 3:eebf5a27f8ca
153 153 user: test
154 154 date: Thu Jan 01 00:00:00 1970 +0000
155 155 summary: 0.3m
156 156
157 157 changeset: 7:a6a34bfa0076
158 158 user: test
159 159 date: Thu Jan 01 00:00:00 1970 +0000
160 160 summary: 1.3m
161 161
162 162 changeset: 6:7373c1169842
163 163 user: test
164 164 date: Thu Jan 01 00:00:00 1970 +0000
165 165 summary: 1.3
166 166
167 167 changeset: 5:1bb50a9436a7
168 168 user: test
169 169 date: Thu Jan 01 00:00:00 1970 +0000
170 170 summary: 1.2
171 171
172 172 changeset: 4:095197eb4973
173 173 parent: 0:f9ee2f85a263
174 174 user: test
175 175 date: Thu Jan 01 00:00:00 1970 +0000
176 176 summary: 1.1
177 177
178 178 changeset: 3:eebf5a27f8ca
179 179 user: test
180 180 date: Thu Jan 01 00:00:00 1970 +0000
181 181 summary: 0.3
182 182
183 183 changeset: 2:e38ba6f5b7e0
184 184 user: test
185 185 date: Thu Jan 01 00:00:00 1970 +0000
186 186 summary: 0.2
187 187
188 188 changeset: 1:34c2bf6b0626
189 189 user: test
190 190 date: Thu Jan 01 00:00:00 1970 +0000
191 191 summary: 0.1
192 192
193 193 changeset: 0:f9ee2f85a263
194 194 user: test
195 195 date: Thu Jan 01 00:00:00 1970 +0000
196 196 summary: 0.0
197 197
198 198 Make sure bundlerepo doesn't leak tempfiles (issue2491)
199 199
200 200 $ ls .hg
201 201 00changelog.i
202 202 cache
203 203 requires
204 204 store
205 205 wcache
206 206
207 207 Pull ../full.hg into empty (with hook)
208 208
209 209 $ cat >> .hg/hgrc <<EOF
210 210 > [hooks]
211 211 > changegroup = sh -c "printenv.py --line changegroup"
212 212 > EOF
213 213
214 214 doesn't work (yet ?)
215 215 NOTE: msys is mangling the URL below
216 216
217 217 hg -R bundle://../full.hg verify
218 218
219 219 $ hg pull bundle://../full.hg
220 220 pulling from bundle:../full.hg
221 221 requesting all changes
222 222 adding changesets
223 223 adding manifests
224 224 adding file changes
225 225 added 9 changesets with 7 changes to 4 files (+1 heads)
226 226 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
227 227 changegroup hook: HG_HOOKNAME=changegroup
228 228 HG_HOOKTYPE=changegroup
229 229 HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735
230 230 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf
231 231 HG_SOURCE=pull
232 232 HG_TXNID=TXN:$ID$
233 233 HG_TXNNAME=pull
234 234 bundle:../full.hg (no-msys !)
235 235 bundle;../full.hg (msys !)
236 236 HG_URL=bundle:../full.hg (no-msys !)
237 237 HG_URL=bundle;../full.hg (msys !)
238 238
239 239 (run 'hg heads' to see heads, 'hg merge' to merge)
240 240
241 241 Rollback empty
242 242
243 243 $ hg rollback
244 244 repository tip rolled back to revision -1 (undo pull)
245 245 $ cd ..
246 246
247 247 Log -R bundle:empty+full.hg
248 248
249 249 $ hg -R bundle:empty+full.hg log --template="{rev} "; echo ""
250 250 8 7 6 5 4 3 2 1 0
251 251
252 252 Pull full.hg into empty again (using -R; with hook)
253 253
254 254 $ hg -R empty pull full.hg
255 255 pulling from full.hg
256 256 requesting all changes
257 257 adding changesets
258 258 adding manifests
259 259 adding file changes
260 260 added 9 changesets with 7 changes to 4 files (+1 heads)
261 261 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
262 262 changegroup hook: HG_HOOKNAME=changegroup
263 263 HG_HOOKTYPE=changegroup
264 264 HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735
265 265 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf
266 266 HG_SOURCE=pull
267 267 HG_TXNID=TXN:$ID$
268 268 HG_TXNNAME=pull
269 269 bundle:empty+full.hg
270 270 HG_URL=bundle:empty+full.hg
271 271
272 272 (run 'hg heads' to see heads, 'hg merge' to merge)
273 273
274 274 #endif
275 275
276 276 Cannot produce streaming clone bundles with "hg bundle"
277 277
278 278 $ hg -R test bundle -t packed1 packed.hg
279 279 abort: packed bundles cannot be produced by "hg bundle"
280 280 (use 'hg debugcreatestreamclonebundle')
281 281 [10]
282 282
283 283 packed1 is produced properly
284 284
285 285
286 286 #if reporevlogstore rust
287 287
288 288 $ hg -R test debugcreatestreamclonebundle packed.hg
289 writing 2665 bytes for 6 files
289 writing 2665 bytes for 6 files (no-rust !)
290 writing 2919 bytes for 9 files (rust !)
290 291 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
291 292
292 293 $ f -B 64 --size --sha1 --hexdump packed.hg
293 packed.hg: size=2865, sha1=353d10311f4befa195d9a1ca4b8e26518115c702
294 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
295 0010: 00 00 00 00 0a 69 00 3b 67 65 6e 65 72 61 6c 64 |.....i.;generald|
294 packed.hg: size=2865, sha1=353d10311f4befa195d9a1ca4b8e26518115c702 (no-rust !)
295 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........| (no-rust !)
296 0010: 00 00 00 00 0a 69 00 3b 67 65 6e 65 72 61 6c 64 |.....i.;generald| (no-rust !)
297 packed.hg: size=3181, sha1=b202787710a1c109246554be589506cd2916acb7 (rust !)
298 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 09 00 00 |HGS1UN..........| (rust !)
299 0010: 00 00 00 00 0b 67 00 3b 67 65 6e 65 72 61 6c 64 |.....g.;generald| (rust !)
296 300 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 2d 63 6f 6d 70 |elta,revlog-comp|
297 301 0030: 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c 72 65 76 |ression-zstd,rev|
298 302 $ hg debugbundle --spec packed.hg
299 303 none-packed1;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog
300 304 #endif
301 305
302 306 #if reporevlogstore no-rust zstd
303 307
304 308 $ hg -R test debugcreatestreamclonebundle packed.hg
305 writing 2665 bytes for 6 files
309 writing 2665 bytes for 7 files
306 310 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
307 311
308 312 $ f -B 64 --size --sha1 --hexdump packed.hg
309 packed.hg: size=2865, sha1=353d10311f4befa195d9a1ca4b8e26518115c702
310 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
313 packed.hg: size=2882, sha1=6525b07e6bfced4b6c2319cb58c6ff76ca72fa13
314 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 07 00 00 |HGS1UN..........|
311 315 0010: 00 00 00 00 0a 69 00 3b 67 65 6e 65 72 61 6c 64 |.....i.;generald|
312 316 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 2d 63 6f 6d 70 |elta,revlog-comp|
313 317 0030: 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c 72 65 76 |ression-zstd,rev|
314 318 $ hg debugbundle --spec packed.hg
315 319 none-packed1;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog
316 320 #endif
317 321
318 322 #if reporevlogstore no-rust no-zstd
319 323
320 324 $ hg -R test debugcreatestreamclonebundle packed.hg
321 writing 2664 bytes for 6 files
325 writing 2664 bytes for 7 files
322 326 bundle requirements: generaldelta, revlogv1, sparserevlog
323 327
324 328 $ f -B 64 --size --sha1 --hexdump packed.hg
325 packed.hg: size=2840, sha1=12bf3eee3eb8a04c503ce2d29b48f0135c7edff5
326 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
329 packed.hg: size=2857, sha1=3a7353323915b095baa6f2ee0a5aed588f11f5f0
330 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 07 00 00 |HGS1UN..........|
327 331 0010: 00 00 00 00 0a 68 00 23 67 65 6e 65 72 61 6c 64 |.....h.#generald|
328 332 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 2c 73 70 |elta,revlogv1,sp|
329 333 0030: 61 72 73 65 72 65 76 6c 6f 67 00 64 61 74 61 2f |arserevlog.data/|
330 334 $ hg debugbundle --spec packed.hg
331 335 none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog
332 336 #endif
333 337
334 338 #if reporevlogstore
335 339
336 340 generaldelta requirement is not listed in stream clone bundles unless used
337 341
338 342 $ hg --config format.usegeneraldelta=false init testnongd
339 343 $ cd testnongd
340 344 $ touch foo
341 345 $ hg -q commit -A -m initial
342 346 $ cd ..
343 347
344 348 #endif
345 349
346 350 #if reporevlogstore rust
347 351
348 352 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
349 writing 301 bytes for 3 files
353 writing 301 bytes for 3 files (no-rust !)
354 writing 427 bytes for 6 files (rust !)
350 355 bundle requirements: revlog-compression-zstd, revlogv1
351 356
352 357 $ f -B 64 --size --sha1 --hexdump packednongd.hg
353 packednongd.hg: size=407, sha1=0b8714422b785ba8eb98c916b41ffd5fb994c9b5
354 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
355 0010: 00 00 00 00 01 2d 00 21 72 65 76 6c 6f 67 2d 63 |.....-.!revlog-c|
358 packednongd.hg: size=407, sha1=0b8714422b785ba8eb98c916b41ffd5fb994c9b5 (no-rust !)
359 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........| (no-rust !)
360 0010: 00 00 00 00 01 2d 00 21 72 65 76 6c 6f 67 2d 63 |.....-.!revlog-c| (no-rust !)
361 packednongd.hg: size=593, sha1=1ad0cbea11b5dd7b0437e54ae20fc5f8df118521 (rust !)
362 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........| (rust !)
363 0010: 00 00 00 00 01 ab 00 21 72 65 76 6c 6f 67 2d 63 |.......!revlog-c| (rust !)
356 364 0020: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c |ompression-zstd,|
357 365 0030: 72 65 76 6c 6f 67 76 31 00 64 61 74 61 2f 66 6f |revlogv1.data/fo|
358 366
359 367 $ hg debugbundle --spec packednongd.hg
360 368 none-packed1;requirements%3Drevlog-compression-zstd%2Crevlogv1
361 369
362 370 #endif
363 371
364 372 #if reporevlogstore no-rust zstd
365 373
366 374 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
367 writing 301 bytes for 3 files
375 writing 301 bytes for 4 files
368 376 bundle requirements: revlog-compression-zstd, revlogv1
369 377
370 378 $ f -B 64 --size --sha1 --hexdump packednongd.hg
371 packednongd.hg: size=407, sha1=0b8714422b785ba8eb98c916b41ffd5fb994c9b5
372 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
379 packednongd.hg: size=423, sha1=4269c89cf64b6a4377be75a3983771c4153362bf
380 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 04 00 00 |HGS1UN..........|
373 381 0010: 00 00 00 00 01 2d 00 21 72 65 76 6c 6f 67 2d 63 |.....-.!revlog-c|
374 382 0020: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 2c |ompression-zstd,|
375 383 0030: 72 65 76 6c 6f 67 76 31 00 64 61 74 61 2f 66 6f |revlogv1.data/fo|
376 384
377 385 $ hg debugbundle --spec packednongd.hg
378 386 none-packed1;requirements%3Drevlog-compression-zstd%2Crevlogv1
379 387
380 388
381 389 #endif
382 390
383 391 #if reporevlogstore no-rust no-zstd
384 392
385 393 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
386 writing 301 bytes for 3 files
394 writing 301 bytes for 4 files
387 395 bundle requirements: revlogv1
388 396
389 397 $ f -B 64 --size --sha1 --hexdump packednongd.hg
390 packednongd.hg: size=383, sha1=1d9c230238edd5d38907100b729ba72b1831fe6f
391 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
398 packednongd.hg: size=399, sha1=99bb89decfc6674a3cf2cc87accc8c5332ede7fd
399 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 04 00 00 |HGS1UN..........|
392 400 0010: 00 00 00 00 01 2d 00 09 72 65 76 6c 6f 67 76 31 |.....-..revlogv1|
393 401 0020: 00 64 61 74 61 2f 66 6f 6f 2e 69 00 36 34 0a 00 |.data/foo.i.64..|
394 402 0030: 01 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
395 403
396 404 $ hg debugbundle --spec packednongd.hg
397 405 none-packed1;requirements%3Drevlogv1
398 406
399 407
400 408 #endif
401 409
402 410 #if reporevlogstore
403 411
404 412 Warning emitted when packed bundles contain secret changesets
405 413
406 414 $ hg init testsecret
407 415 $ cd testsecret
408 416 $ touch foo
409 417 $ hg -q commit -A -m initial
410 418 $ hg phase --force --secret -r .
411 419 $ cd ..
412 420
413 421 #endif
414 422
415 423 #if reporevlogstore rust
416 424
417 425 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
418 426 (warning: stream clone bundle will contain secret revisions)
419 writing 301 bytes for 3 files
427 writing 301 bytes for 3 files (no-rust !)
428 writing 427 bytes for 6 files (rust !)
420 429 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
421 430
422 431 #endif
423 432
424 433 #if reporevlogstore no-rust zstd
425 434
426 435 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
427 436 (warning: stream clone bundle will contain secret revisions)
428 writing 301 bytes for 3 files
437 writing 301 bytes for 4 files
429 438 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog
430 439
431 440 #endif
432 441
433 442 #if reporevlogstore no-rust no-zstd
434 443
435 444 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
436 445 (warning: stream clone bundle will contain secret revisions)
437 writing 301 bytes for 3 files
446 writing 301 bytes for 4 files
438 447 bundle requirements: generaldelta, revlogv1, sparserevlog
439 448
440 449 #endif
441 450
442 451 #if reporevlogstore
443 452
444 453 Unpacking packed1 bundles with "hg unbundle" isn't allowed
445 454
446 455 $ hg init packed
447 456 $ hg -R packed unbundle packed.hg
448 457 abort: packed bundles cannot be applied with "hg unbundle"
449 458 (use "hg debugapplystreamclonebundle")
450 459 [10]
451 460
452 461 packed1 can be consumed from debug command
453 462
454 463 (this also confirms that streamclone-ed changes are visible via
455 464 @filecache properties to in-process procedures before closing
456 465 transaction)
457 466
458 467 $ cat > $TESTTMP/showtip.py <<EOF
459 468 >
460 469 > def showtip(ui, repo, hooktype, **kwargs):
461 470 > ui.warn(b'%s: %s\n' % (hooktype, repo[b'tip'].hex()[:12]))
462 471 >
463 472 > def reposetup(ui, repo):
464 473 > # this confirms (and ensures) that (empty) 00changelog.i
465 474 > # before streamclone is already cached as repo.changelog
466 475 > ui.setconfig(b'hooks', b'pretxnopen.showtip', showtip)
467 476 >
468 477 > # this confirms that streamclone-ed changes are visible to
469 478 > # in-process procedures before closing transaction
470 479 > ui.setconfig(b'hooks', b'pretxnclose.showtip', showtip)
471 480 >
472 481 > # this confirms that streamclone-ed changes are still visible
473 482 > # after closing transaction
474 483 > ui.setconfig(b'hooks', b'txnclose.showtip', showtip)
475 484 > EOF
476 485 $ cat >> $HGRCPATH <<EOF
477 486 > [extensions]
478 487 > showtip = $TESTTMP/showtip.py
479 488 > EOF
480 489
481 490 $ hg -R packed debugapplystreamclonebundle packed.hg
482 6 files to transfer, 2.60 KB of data
491 7 files to transfer, 2.60 KB of data (no-rust !)
492 9 files to transfer, 2.85 KB of data (rust !)
483 493 pretxnopen: 000000000000
484 494 pretxnclose: aa35859c02ea
485 transferred 2.60 KB in * seconds (* */sec) (glob)
495 transferred 2.60 KB in * seconds (* */sec) (glob) (no-rust !)
496 transferred 2.85 KB in * seconds (* */sec) (glob) (rust !)
486 497 txnclose: aa35859c02ea
487 498
488 499 (for safety, confirm visibility of streamclone-ed changes by another
489 500 process, too)
490 501
491 502 $ hg -R packed tip -T "{node|short}\n"
492 503 aa35859c02ea
493 504
494 505 $ cat >> $HGRCPATH <<EOF
495 506 > [extensions]
496 507 > showtip = !
497 508 > EOF
498 509
499 510 Does not work on non-empty repo
500 511
501 512 $ hg -R packed debugapplystreamclonebundle packed.hg
502 513 abort: cannot apply stream clone bundle on non-empty repo
503 514 [255]
504 515
505 516 #endif
506 517
507 518 Create partial clones
508 519
509 520 $ rm -r empty
510 521 $ hg init empty
511 522 $ hg clone -r 3 test partial
512 523 adding changesets
513 524 adding manifests
514 525 adding file changes
515 526 added 4 changesets with 4 changes to 1 files
516 527 new changesets f9ee2f85a263:eebf5a27f8ca
517 528 updating to branch default
518 529 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
519 530 $ hg clone partial partial2
520 531 updating to branch default
521 532 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
522 533 $ cd partial
523 534
524 535 #if repobundlerepo
525 536
526 537 Log -R full.hg in partial
527 538
528 539 $ hg -R bundle://../full.hg log -T phases
529 540 changeset: 8:aa35859c02ea
530 541 tag: tip
531 542 phase: draft
532 543 parent: 3:eebf5a27f8ca
533 544 user: test
534 545 date: Thu Jan 01 00:00:00 1970 +0000
535 546 summary: 0.3m
536 547
537 548 changeset: 7:a6a34bfa0076
538 549 phase: draft
539 550 user: test
540 551 date: Thu Jan 01 00:00:00 1970 +0000
541 552 summary: 1.3m
542 553
543 554 changeset: 6:7373c1169842
544 555 phase: draft
545 556 user: test
546 557 date: Thu Jan 01 00:00:00 1970 +0000
547 558 summary: 1.3
548 559
549 560 changeset: 5:1bb50a9436a7
550 561 phase: draft
551 562 user: test
552 563 date: Thu Jan 01 00:00:00 1970 +0000
553 564 summary: 1.2
554 565
555 566 changeset: 4:095197eb4973
556 567 phase: draft
557 568 parent: 0:f9ee2f85a263
558 569 user: test
559 570 date: Thu Jan 01 00:00:00 1970 +0000
560 571 summary: 1.1
561 572
562 573 changeset: 3:eebf5a27f8ca
563 574 phase: public
564 575 user: test
565 576 date: Thu Jan 01 00:00:00 1970 +0000
566 577 summary: 0.3
567 578
568 579 changeset: 2:e38ba6f5b7e0
569 580 phase: public
570 581 user: test
571 582 date: Thu Jan 01 00:00:00 1970 +0000
572 583 summary: 0.2
573 584
574 585 changeset: 1:34c2bf6b0626
575 586 phase: public
576 587 user: test
577 588 date: Thu Jan 01 00:00:00 1970 +0000
578 589 summary: 0.1
579 590
580 591 changeset: 0:f9ee2f85a263
581 592 phase: public
582 593 user: test
583 594 date: Thu Jan 01 00:00:00 1970 +0000
584 595 summary: 0.0
585 596
586 597
587 598 Incoming full.hg in partial
588 599
589 600 $ hg incoming bundle://../full.hg
590 601 comparing with bundle:../full.hg
591 602 searching for changes
592 603 changeset: 4:095197eb4973
593 604 parent: 0:f9ee2f85a263
594 605 user: test
595 606 date: Thu Jan 01 00:00:00 1970 +0000
596 607 summary: 1.1
597 608
598 609 changeset: 5:1bb50a9436a7
599 610 user: test
600 611 date: Thu Jan 01 00:00:00 1970 +0000
601 612 summary: 1.2
602 613
603 614 changeset: 6:7373c1169842
604 615 user: test
605 616 date: Thu Jan 01 00:00:00 1970 +0000
606 617 summary: 1.3
607 618
608 619 changeset: 7:a6a34bfa0076
609 620 user: test
610 621 date: Thu Jan 01 00:00:00 1970 +0000
611 622 summary: 1.3m
612 623
613 624 changeset: 8:aa35859c02ea
614 625 tag: tip
615 626 parent: 3:eebf5a27f8ca
616 627 user: test
617 628 date: Thu Jan 01 00:00:00 1970 +0000
618 629 summary: 0.3m
619 630
620 631
621 632 Outgoing -R full.hg vs partial2 in partial
622 633
623 634 $ hg -R bundle://../full.hg outgoing ../partial2
624 635 comparing with ../partial2
625 636 searching for changes
626 637 changeset: 4:095197eb4973
627 638 parent: 0:f9ee2f85a263
628 639 user: test
629 640 date: Thu Jan 01 00:00:00 1970 +0000
630 641 summary: 1.1
631 642
632 643 changeset: 5:1bb50a9436a7
633 644 user: test
634 645 date: Thu Jan 01 00:00:00 1970 +0000
635 646 summary: 1.2
636 647
637 648 changeset: 6:7373c1169842
638 649 user: test
639 650 date: Thu Jan 01 00:00:00 1970 +0000
640 651 summary: 1.3
641 652
642 653 changeset: 7:a6a34bfa0076
643 654 user: test
644 655 date: Thu Jan 01 00:00:00 1970 +0000
645 656 summary: 1.3m
646 657
647 658 changeset: 8:aa35859c02ea
648 659 tag: tip
649 660 parent: 3:eebf5a27f8ca
650 661 user: test
651 662 date: Thu Jan 01 00:00:00 1970 +0000
652 663 summary: 0.3m
653 664
654 665
655 666 Outgoing -R does-not-exist.hg vs partial2 in partial
656 667
657 668 $ hg -R bundle://../does-not-exist.hg outgoing ../partial2
658 669 abort: *../does-not-exist.hg* (glob)
659 670 [255]
660 671
661 672 #endif
662 673
663 674 $ cd ..
664 675
665 676 hide outer repo
666 677 $ hg init
667 678
668 679 Direct clone from bundle (all-history)
669 680
670 681 #if repobundlerepo
671 682
672 683 $ hg clone full.hg full-clone
673 684 requesting all changes
674 685 adding changesets
675 686 adding manifests
676 687 adding file changes
677 688 added 9 changesets with 7 changes to 4 files (+1 heads)
678 689 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
679 690 updating to branch default
680 691 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
681 692 $ hg -R full-clone heads
682 693 changeset: 8:aa35859c02ea
683 694 tag: tip
684 695 parent: 3:eebf5a27f8ca
685 696 user: test
686 697 date: Thu Jan 01 00:00:00 1970 +0000
687 698 summary: 0.3m
688 699
689 700 changeset: 7:a6a34bfa0076
690 701 user: test
691 702 date: Thu Jan 01 00:00:00 1970 +0000
692 703 summary: 1.3m
693 704
694 705 $ rm -r full-clone
695 706
696 707 When cloning from a non-copiable repository into '', do not
697 708 recurse infinitely (issue2528)
698 709
699 710 $ hg clone full.hg ''
700 711 abort: empty destination path is not valid
701 712 [10]
702 713
703 714 test for https://bz.mercurial-scm.org/216
704 715
705 716 Unbundle incremental bundles into fresh empty in one go
706 717
707 718 $ rm -r empty
708 719 $ hg init empty
709 720 $ hg -R test bundle --base null -r 0 ../0.hg
710 721 1 changesets found
711 722 $ hg -R test bundle --exact -r 1 ../1.hg
712 723 1 changesets found
713 724 $ hg -R empty unbundle -u ../0.hg ../1.hg
714 725 adding changesets
715 726 adding manifests
716 727 adding file changes
717 728 added 1 changesets with 1 changes to 1 files
718 729 new changesets f9ee2f85a263 (1 drafts)
719 730 adding changesets
720 731 adding manifests
721 732 adding file changes
722 733 added 1 changesets with 1 changes to 1 files
723 734 new changesets 34c2bf6b0626 (1 drafts)
724 735 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
725 736
726 737 View full contents of the bundle
727 738 $ hg -R test bundle --base null -r 3 ../partial.hg
728 739 4 changesets found
729 740 $ cd test
730 741 $ hg -R ../../partial.hg log -r "bundle()"
731 742 changeset: 0:f9ee2f85a263
732 743 user: test
733 744 date: Thu Jan 01 00:00:00 1970 +0000
734 745 summary: 0.0
735 746
736 747 changeset: 1:34c2bf6b0626
737 748 user: test
738 749 date: Thu Jan 01 00:00:00 1970 +0000
739 750 summary: 0.1
740 751
741 752 changeset: 2:e38ba6f5b7e0
742 753 user: test
743 754 date: Thu Jan 01 00:00:00 1970 +0000
744 755 summary: 0.2
745 756
746 757 changeset: 3:eebf5a27f8ca
747 758 user: test
748 759 date: Thu Jan 01 00:00:00 1970 +0000
749 760 summary: 0.3
750 761
751 762 $ cd ..
752 763
753 764 #endif
754 765
755 766 test for 540d1059c802
756 767
757 768 $ hg init orig
758 769 $ cd orig
759 770 $ echo foo > foo
760 771 $ hg add foo
761 772 $ hg ci -m 'add foo'
762 773
763 774 $ hg clone . ../copy
764 775 updating to branch default
765 776 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
766 777 $ hg tag foo
767 778
768 779 $ cd ../copy
769 780 $ echo >> foo
770 781 $ hg ci -m 'change foo'
771 782 $ hg bundle ../bundle.hg ../orig
772 783 searching for changes
773 784 1 changesets found
774 785
775 786 $ cd ..
776 787
777 788 #if repobundlerepo
778 789 $ cd orig
779 790 $ hg incoming ../bundle.hg
780 791 comparing with ../bundle.hg
781 792 searching for changes
782 793 changeset: 2:ed1b79f46b9a
783 794 tag: tip
784 795 parent: 0:bbd179dfa0a7
785 796 user: test
786 797 date: Thu Jan 01 00:00:00 1970 +0000
787 798 summary: change foo
788 799
789 800 $ cd ..
790 801
791 802 test bundle with # in the filename (issue2154):
792 803
793 804 $ cp bundle.hg 'test#bundle.hg'
794 805 $ cd orig
795 806 $ hg incoming '../test#bundle.hg'
796 807 comparing with ../test
797 808 abort: unknown revision 'bundle.hg'
798 809 [10]
799 810
800 811 note that percent encoding is not handled:
801 812
802 813 $ hg incoming ../test%23bundle.hg
803 814 abort: repository ../test%23bundle.hg not found
804 815 [255]
805 816 $ cd ..
806 817
807 818 #endif
808 819
809 820 test to bundle revisions on the newly created branch (issue3828):
810 821
811 822 $ hg -q clone -U test test-clone
812 823 $ cd test
813 824
814 825 $ hg -q branch foo
815 826 $ hg commit -m "create foo branch"
816 827 $ hg -q outgoing ../test-clone
817 828 9:b4f5acb1ee27
818 829 $ hg -q bundle --branch foo foo.hg ../test-clone
819 830 #if repobundlerepo
820 831 $ hg -R foo.hg -q log -r "bundle()"
821 832 9:b4f5acb1ee27
822 833 #endif
823 834
824 835 $ cd ..
825 836
826 837 test for https://bz.mercurial-scm.org/1144
827 838
828 839 test that verify bundle does not traceback
829 840
830 841 partial history bundle, fails w/ unknown parent
831 842
832 843 $ hg -R bundle.hg verify
833 844 abort: 00changelog@bbd179dfa0a71671c253b3ae0aa1513b60d199fa: unknown parent
834 845 [50]
835 846
836 847 full history bundle, refuses to verify non-local repo
837 848
838 849 #if repobundlerepo
839 850 $ hg -R all.hg verify
840 851 abort: cannot verify bundle or remote repos
841 852 [255]
842 853 #endif
843 854
844 855 but, regular verify must continue to work
845 856
846 857 $ hg -R orig verify -q
847 858
848 859 #if repobundlerepo
849 860 diff against bundle
850 861
851 862 $ hg init b
852 863 $ cd b
853 864 $ hg -R ../all.hg diff -r tip
854 865 diff -r aa35859c02ea anotherfile
855 866 --- a/anotherfile Thu Jan 01 00:00:00 1970 +0000
856 867 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
857 868 @@ -1,4 +0,0 @@
858 869 -0
859 870 -1
860 871 -2
861 872 -3
862 873 $ cd ..
863 874 #endif
864 875
865 876 bundle single branch
866 877
867 878 $ hg init branchy
868 879 $ cd branchy
869 880 $ echo a >a
870 881 $ echo x >x
871 882 $ hg ci -Ama
872 883 adding a
873 884 adding x
874 885 $ echo c >c
875 886 $ echo xx >x
876 887 $ hg ci -Amc
877 888 adding c
878 889 $ echo c1 >c1
879 890 $ hg ci -Amc1
880 891 adding c1
881 892 $ hg up 0
882 893 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
883 894 $ echo b >b
884 895 $ hg ci -Amb
885 896 adding b
886 897 created new head
887 898 $ echo b1 >b1
888 899 $ echo xx >x
889 900 $ hg ci -Amb1
890 901 adding b1
891 902 $ hg clone -q -r2 . part
892 903
893 904 == bundling via incoming
894 905
895 906 $ hg in -R part --bundle incoming.hg --template "{node}\n" .
896 907 comparing with .
897 908 searching for changes
898 909 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
899 910 057f4db07f61970e1c11e83be79e9d08adc4dc31
900 911
901 912 == bundling
902 913
903 914 $ hg bundle bundle.hg part --debug --config progress.debug=true
904 915 query 1; heads
905 916 searching for changes
906 917 all remote heads known locally
907 918 2 changesets found
908 919 list of changesets:
909 920 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
910 921 057f4db07f61970e1c11e83be79e9d08adc4dc31
911 922 bundle2-output-bundle: "HG20", (1 params) 2 parts total
912 923 bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
913 924 changesets: 1/2 chunks (50.00%)
914 925 changesets: 2/2 chunks (100.00%)
915 926 manifests: 1/2 chunks (50.00%)
916 927 manifests: 2/2 chunks (100.00%)
917 928 files: b 1/3 files (33.33%)
918 929 files: b1 2/3 files (66.67%)
919 930 files: x 3/3 files (100.00%)
920 931 bundle2-output-part: "cache:rev-branch-cache" (advisory) streamed payload
921 932
922 933 #if repobundlerepo
923 934 == Test for issue3441
924 935
925 936 $ hg clone -q -r0 . part2
926 937 $ hg -q -R part2 pull bundle.hg
927 938 $ hg -R part2 verify -q
928 939 #endif
929 940
930 941 == Test bundling no commits
931 942
932 943 $ hg bundle -r 'public()' no-output.hg
933 944 abort: no commits to bundle
934 945 [10]
935 946
936 947 $ cd ..
937 948
938 949 When user merges to the revision existing only in the bundle,
939 950 it should show warning that second parent of the working
940 951 directory does not exist
941 952
942 953 $ hg init update2bundled
943 954 $ cd update2bundled
944 955 $ cat <<EOF >> .hg/hgrc
945 956 > [extensions]
946 957 > strip =
947 958 > EOF
948 959 $ echo "aaa" >> a
949 960 $ hg commit -A -m 0
950 961 adding a
951 962 $ echo "bbb" >> b
952 963 $ hg commit -A -m 1
953 964 adding b
954 965 $ echo "ccc" >> c
955 966 $ hg commit -A -m 2
956 967 adding c
957 968 $ hg update -r 1
958 969 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
959 970 $ echo "ddd" >> d
960 971 $ hg commit -A -m 3
961 972 adding d
962 973 created new head
963 974 $ hg update -r 2
964 975 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
965 976 $ hg log -G
966 977 o changeset: 3:8bd3e1f196af
967 978 | tag: tip
968 979 | parent: 1:a01eca7af26d
969 980 | user: test
970 981 | date: Thu Jan 01 00:00:00 1970 +0000
971 982 | summary: 3
972 983 |
973 984 | @ changeset: 2:4652c276ac4f
974 985 |/ user: test
975 986 | date: Thu Jan 01 00:00:00 1970 +0000
976 987 | summary: 2
977 988 |
978 989 o changeset: 1:a01eca7af26d
979 990 | user: test
980 991 | date: Thu Jan 01 00:00:00 1970 +0000
981 992 | summary: 1
982 993 |
983 994 o changeset: 0:4fe08cd4693e
984 995 user: test
985 996 date: Thu Jan 01 00:00:00 1970 +0000
986 997 summary: 0
987 998
988 999
989 1000 #if repobundlerepo
990 1001 $ hg bundle --base 1 -r 3 ../update2bundled.hg
991 1002 1 changesets found
992 1003 $ hg strip -r 3
993 1004 saved backup bundle to $TESTTMP/update2bundled/.hg/strip-backup/8bd3e1f196af-017e56d8-backup.hg
994 1005 $ hg merge -R ../update2bundled.hg -r 3
995 1006 setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle
996 1007 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
997 1008 (branch merge, don't forget to commit)
998 1009
999 1010 When user updates to the revision existing only in the bundle,
1000 1011 it should show warning
1001 1012
1002 1013 $ hg update -R ../update2bundled.hg --clean -r 3
1003 1014 setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle
1004 1015 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
1005 1016
1006 1017 When user updates to the revision existing in the local repository
1007 1018 the warning shouldn't be emitted
1008 1019
1009 1020 $ hg update -R ../update2bundled.hg -r 0
1010 1021 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1011 1022 #endif
1012 1023
1013 1024 Test the option that create slim bundle
1014 1025
1015 1026 $ hg bundle -a --config devel.bundle.delta=p1 ./slim.hg
1016 1027 3 changesets found
1017 1028
1018 1029 Test the option that create and no-delta's bundle
1019 1030 $ hg bundle -a --config devel.bundle.delta=full ./full.hg
1020 1031 3 changesets found
1021 1032
1022 1033
1023 1034 Test the debug statistic when building a bundle
1024 1035 -----------------------------------------------
1025 1036
1026 1037 $ hg bundle -a ./default.hg --config debug.bundling-stats=yes
1027 1038 3 changesets found
1028 1039 DEBUG-BUNDLING: revisions: 9
1029 1040 DEBUG-BUNDLING: changelog: 3
1030 1041 DEBUG-BUNDLING: manifest: 3
1031 1042 DEBUG-BUNDLING: files: 3 (for 3 revlogs)
1032 1043 DEBUG-BUNDLING: deltas:
1033 1044 DEBUG-BUNDLING: from-storage: 2 (100% of available 2)
1034 1045 DEBUG-BUNDLING: computed: 7
1035 1046 DEBUG-BUNDLING: full: 7 (100% of native 7)
1036 1047 DEBUG-BUNDLING: changelog: 3 (100% of native 3)
1037 1048 DEBUG-BUNDLING: manifests: 1 (100% of native 1)
1038 1049 DEBUG-BUNDLING: files: 3 (100% of native 3)
1039 1050
1040 1051 Test the debug output when applying delta
1041 1052 -----------------------------------------
1042 1053
1043 1054 $ hg init foo
1044 1055 $ hg -R foo unbundle ./slim.hg \
1045 1056 > --config debug.revlog.debug-delta=yes \
1046 1057 > --config storage.revlog.reuse-external-delta=no \
1047 1058 > --config storage.revlog.reuse-external-delta-parent=no
1048 1059 adding changesets
1049 1060 DBG-DELTAS: CHANGELOG: rev=0: delta-base=0 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1050 1061 DBG-DELTAS: CHANGELOG: rev=1: delta-base=1 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1051 1062 DBG-DELTAS: CHANGELOG: rev=2: delta-base=2 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1052 1063 adding manifests
1053 1064 DBG-DELTAS: MANIFESTLOG: rev=0: delta-base=0 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1054 1065 DBG-DELTAS: MANIFESTLOG: rev=1: delta-base=0 is-cached=1 - search-rounds=1 try-count=1 - delta-type=delta snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
1055 1066 DBG-DELTAS: MANIFESTLOG: rev=2: delta-base=1 is-cached=1 - search-rounds=1 try-count=1 - delta-type=delta snap-depth=0 - p1-chain-length=1 p2-chain-length=-1 - duration=* (glob)
1056 1067 adding file changes
1057 1068 DBG-DELTAS: FILELOG:a: rev=0: delta-base=0 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1058 1069 DBG-DELTAS: FILELOG:b: rev=0: delta-base=0 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1059 1070 DBG-DELTAS: FILELOG:c: rev=0: delta-base=0 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
1060 1071 added 3 changesets with 3 changes to 3 files
1061 1072 new changesets 4fe08cd4693e:4652c276ac4f (3 drafts)
1062 1073 (run 'hg update' to get a working copy)
1063 1074
1064 1075
1065 1076 Test the debug statistic when applying a bundle
1066 1077 -----------------------------------------------
1067 1078
1068 1079 $ hg init bar
1069 1080 $ hg -R bar unbundle ./default.hg --config debug.unbundling-stats=yes
1070 1081 adding changesets
1071 1082 adding manifests
1072 1083 adding file changes
1073 1084 DEBUG-UNBUNDLING: revisions: 9
1074 1085 DEBUG-UNBUNDLING: changelog: 3 ( 33%)
1075 1086 DEBUG-UNBUNDLING: manifests: 3 ( 33%)
1076 1087 DEBUG-UNBUNDLING: files: 3 ( 33%)
1077 1088 DEBUG-UNBUNDLING: total-time: ?????????????? seconds (glob)
1078 1089 DEBUG-UNBUNDLING: changelog: ?????????????? seconds (???%) (glob)
1079 1090 DEBUG-UNBUNDLING: manifests: ?????????????? seconds (???%) (glob)
1080 1091 DEBUG-UNBUNDLING: files: ?????????????? seconds (???%) (glob)
1081 1092 DEBUG-UNBUNDLING: type-count:
1082 1093 DEBUG-UNBUNDLING: changelog:
1083 1094 DEBUG-UNBUNDLING: full: 3
1084 1095 DEBUG-UNBUNDLING: cached: 3 (100%)
1085 1096 DEBUG-UNBUNDLING: manifests:
1086 1097 DEBUG-UNBUNDLING: full: 1
1087 1098 DEBUG-UNBUNDLING: cached: 1 (100%)
1088 1099 DEBUG-UNBUNDLING: delta: 2
1089 1100 DEBUG-UNBUNDLING: cached: 2 (100%)
1090 1101 DEBUG-UNBUNDLING: files:
1091 1102 DEBUG-UNBUNDLING: full: 3
1092 1103 DEBUG-UNBUNDLING: cached: 3 (100%)
1093 1104 DEBUG-UNBUNDLING: type-time:
1094 1105 DEBUG-UNBUNDLING: changelog:
1095 1106 DEBUG-UNBUNDLING: full: ?????????????? seconds (???% of total) (glob)
1096 1107 DEBUG-UNBUNDLING: cached: ?????????????? seconds (???% of total) (glob)
1097 1108 DEBUG-UNBUNDLING: manifests:
1098 1109 DEBUG-UNBUNDLING: full: ?????????????? seconds (???% of total) (glob)
1099 1110 DEBUG-UNBUNDLING: cached: ?????????????? seconds (???% of total) (glob)
1100 1111 DEBUG-UNBUNDLING: delta: ?????????????? seconds (???% of total) (glob)
1101 1112 DEBUG-UNBUNDLING: cached: ?????????????? seconds (???% of total) (glob)
1102 1113 DEBUG-UNBUNDLING: files:
1103 1114 DEBUG-UNBUNDLING: full: ?????????????? seconds (???% of total) (glob)
1104 1115 DEBUG-UNBUNDLING: cached: ?????????????? seconds (???% of total) (glob)
1105 1116 added 3 changesets with 3 changes to 3 files
1106 1117 new changesets 4fe08cd4693e:4652c276ac4f (3 drafts)
1107 1118 (run 'hg update' to get a working copy)
@@ -1,1134 +1,1136 b''
1 1 Test exchange of common information using bundle2
2 2
3 3
4 4 $ getmainid() {
5 5 > hg -R main log --template '{node}\n' --rev "$1"
6 6 > }
7 7
8 8 enable obsolescence
9 9
10 10 $ cp $HGRCPATH $TESTTMP/hgrc.orig
11 11 $ cat > $TESTTMP/bundle2-pushkey-hook.sh << EOF
12 12 > echo pushkey: lock state after \"\$HG_NAMESPACE\"
13 13 > hg debuglock
14 14 > EOF
15 15
16 16 $ cat >> $HGRCPATH << EOF
17 17 > [experimental]
18 18 > evolution.createmarkers=True
19 19 > evolution.exchange=True
20 20 > bundle2-output-capture=True
21 21 > [command-templates]
22 22 > log={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
23 23 > [web]
24 24 > push_ssl = false
25 25 > allow_push = *
26 26 > [phases]
27 27 > publish=False
28 28 > [hooks]
29 29 > pretxnclose.tip = hg log -r tip -T "pre-close-tip:{node|short} {phase} {bookmarks}\n"
30 30 > txnclose.tip = hg log -r tip -T "postclose-tip:{node|short} {phase} {bookmarks}\n"
31 31 > txnclose.env = sh -c "HG_LOCAL= printenv.py txnclose"
32 32 > pushkey= sh "$TESTTMP/bundle2-pushkey-hook.sh"
33 33 > EOF
34 34
35 35 The extension requires a repo (currently unused)
36 36
37 37 $ hg init main
38 38 $ cd main
39 39 $ touch a
40 40 $ hg add a
41 41 $ hg commit -m 'a'
42 42 pre-close-tip:3903775176ed draft
43 43 postclose-tip:3903775176ed draft
44 44 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
45 45
46 46 $ hg unbundle $TESTDIR/bundles/rebase.hg
47 47 adding changesets
48 48 adding manifests
49 49 adding file changes
50 50 pre-close-tip:02de42196ebe draft
51 51 added 8 changesets with 7 changes to 7 files (+3 heads)
52 52 new changesets cd010b8cd998:02de42196ebe (8 drafts)
53 53 postclose-tip:02de42196ebe draft
54 54 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=unbundle HG_TXNID=TXN:$ID$ HG_TXNNAME=unbundle
55 55 bundle:*/tests/bundles/rebase.hg HG_URL=bundle:*/tests/bundles/rebase.hg (glob)
56 56 (run 'hg heads' to see heads, 'hg merge' to merge)
57 57
58 58 $ cd ..
59 59
60 60 Real world exchange
61 61 =====================
62 62
63 63 Add more obsolescence information
64 64
65 65 $ hg -R main debugobsolete -d '0 0' 1111111111111111111111111111111111111111 `getmainid 9520eea781bc`
66 66 pre-close-tip:02de42196ebe draft
67 67 1 new obsolescence markers
68 68 postclose-tip:02de42196ebe draft
69 69 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
70 70 $ hg -R main debugobsolete -d '0 0' 2222222222222222222222222222222222222222 `getmainid 24b6387c8c8c`
71 71 pre-close-tip:02de42196ebe draft
72 72 1 new obsolescence markers
73 73 postclose-tip:02de42196ebe draft
74 74 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
75 75
76 76 clone --pull
77 77
78 78 $ hg -R main phase --public cd010b8cd998
79 79 pre-close-tip:02de42196ebe draft
80 80 postclose-tip:02de42196ebe draft
81 81 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
82 82 $ hg clone main other --pull --rev 9520eea781bc
83 83 adding changesets
84 84 adding manifests
85 85 adding file changes
86 86 pre-close-tip:9520eea781bc draft
87 87 added 2 changesets with 2 changes to 2 files
88 88 1 new obsolescence markers
89 89 new changesets cd010b8cd998:9520eea781bc (1 drafts)
90 90 postclose-tip:9520eea781bc draft
91 91 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_NODE_LAST=9520eea781bcca16c1e15acc0ba14335a0e8e5ba HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
92 92 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
93 93 updating to branch default
94 94 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
95 95 $ hg -R other log -G
96 96 @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
97 97 |
98 98 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
99 99
100 100 $ hg -R other debugobsolete
101 101 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
102 102
103 103 pull
104 104
105 105 $ hg -R main phase --public 9520eea781bc
106 106 pre-close-tip:02de42196ebe draft
107 107 postclose-tip:02de42196ebe draft
108 108 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
109 109 $ hg -R other pull -r 24b6387c8c8c
110 110 pulling from $TESTTMP/main
111 111 searching for changes
112 112 adding changesets
113 113 adding manifests
114 114 adding file changes
115 115 pre-close-tip:24b6387c8c8c draft
116 116 added 1 changesets with 1 changes to 1 files (+1 heads)
117 117 1 new obsolescence markers
118 118 new changesets 24b6387c8c8c (1 drafts)
119 119 postclose-tip:24b6387c8c8c draft
120 120 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_NODE_LAST=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
121 121 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
122 122 (run 'hg heads' to see heads, 'hg merge' to merge)
123 123 $ hg -R other log -G
124 124 o 2:24b6387c8c8c draft Nicolas Dumazet <nicdumz.commits@gmail.com> F
125 125 |
126 126 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
127 127 |/
128 128 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
129 129
130 130 $ hg -R other debugobsolete
131 131 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
132 132 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
133 133
134 134 pull empty (with phase movement)
135 135
136 136 $ hg -R main phase --public 24b6387c8c8c
137 137 pre-close-tip:02de42196ebe draft
138 138 postclose-tip:02de42196ebe draft
139 139 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
140 140 $ hg -R other pull -r 24b6387c8c8c
141 141 pulling from $TESTTMP/main
142 142 no changes found
143 143 pre-close-tip:24b6387c8c8c public
144 144 1 local changesets published
145 145 postclose-tip:24b6387c8c8c public
146 146 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=0 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
147 147 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
148 148 $ hg -R other log -G
149 149 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
150 150 |
151 151 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
152 152 |/
153 153 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
154 154
155 155 $ hg -R other debugobsolete
156 156 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
157 157 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
158 158
159 159 pull empty
160 160
161 161 $ hg -R other pull -r 24b6387c8c8c
162 162 pulling from $TESTTMP/main
163 163 no changes found
164 164 pre-close-tip:24b6387c8c8c public
165 165 postclose-tip:24b6387c8c8c public
166 166 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=0 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
167 167 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
168 168 $ hg -R other log -G
169 169 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
170 170 |
171 171 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
172 172 |/
173 173 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
174 174
175 175 $ hg -R other debugobsolete
176 176 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
177 177 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
178 178
179 179 add extra data to test their exchange during push
180 180
181 181 $ hg -R main bookmark --rev eea13746799a book_eea1
182 182 pre-close-tip:02de42196ebe draft
183 183 postclose-tip:02de42196ebe draft
184 184 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
185 185 $ hg -R main debugobsolete -d '0 0' 3333333333333333333333333333333333333333 `getmainid eea13746799a`
186 186 pre-close-tip:02de42196ebe draft
187 187 1 new obsolescence markers
188 188 postclose-tip:02de42196ebe draft
189 189 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
190 190 $ hg -R main bookmark --rev 02de42196ebe book_02de
191 191 pre-close-tip:02de42196ebe draft book_02de
192 192 postclose-tip:02de42196ebe draft book_02de
193 193 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
194 194 $ hg -R main debugobsolete -d '0 0' 4444444444444444444444444444444444444444 `getmainid 02de42196ebe`
195 195 pre-close-tip:02de42196ebe draft book_02de
196 196 1 new obsolescence markers
197 197 postclose-tip:02de42196ebe draft book_02de
198 198 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
199 199 $ hg -R main bookmark --rev 42ccdea3bb16 book_42cc
200 200 pre-close-tip:02de42196ebe draft book_02de
201 201 postclose-tip:02de42196ebe draft book_02de
202 202 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
203 203 $ hg -R main debugobsolete -d '0 0' 5555555555555555555555555555555555555555 `getmainid 42ccdea3bb16`
204 204 pre-close-tip:02de42196ebe draft book_02de
205 205 1 new obsolescence markers
206 206 postclose-tip:02de42196ebe draft book_02de
207 207 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
208 208 $ hg -R main bookmark --rev 5fddd98957c8 book_5fdd
209 209 pre-close-tip:02de42196ebe draft book_02de
210 210 postclose-tip:02de42196ebe draft book_02de
211 211 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
212 212 $ hg -R main debugobsolete -d '0 0' 6666666666666666666666666666666666666666 `getmainid 5fddd98957c8`
213 213 pre-close-tip:02de42196ebe draft book_02de
214 214 1 new obsolescence markers
215 215 postclose-tip:02de42196ebe draft book_02de
216 216 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
217 217 $ hg -R main bookmark --rev 32af7686d403 book_32af
218 218 pre-close-tip:02de42196ebe draft book_02de
219 219 postclose-tip:02de42196ebe draft book_02de
220 220 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
221 221 $ hg -R main debugobsolete -d '0 0' 7777777777777777777777777777777777777777 `getmainid 32af7686d403`
222 222 pre-close-tip:02de42196ebe draft book_02de
223 223 1 new obsolescence markers
224 224 postclose-tip:02de42196ebe draft book_02de
225 225 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=debugobsolete
226 226
227 227 $ hg -R other bookmark --rev cd010b8cd998 book_eea1
228 228 pre-close-tip:24b6387c8c8c public
229 229 postclose-tip:24b6387c8c8c public
230 230 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
231 231 $ hg -R other bookmark --rev cd010b8cd998 book_02de
232 232 pre-close-tip:24b6387c8c8c public
233 233 postclose-tip:24b6387c8c8c public
234 234 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
235 235 $ hg -R other bookmark --rev cd010b8cd998 book_42cc
236 236 pre-close-tip:24b6387c8c8c public
237 237 postclose-tip:24b6387c8c8c public
238 238 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
239 239 $ hg -R other bookmark --rev cd010b8cd998 book_5fdd
240 240 pre-close-tip:24b6387c8c8c public
241 241 postclose-tip:24b6387c8c8c public
242 242 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
243 243 $ hg -R other bookmark --rev cd010b8cd998 book_32af
244 244 pre-close-tip:24b6387c8c8c public
245 245 postclose-tip:24b6387c8c8c public
246 246 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=bookmark
247 247
248 248 $ hg -R main phase --public eea13746799a
249 249 pre-close-tip:02de42196ebe draft book_02de
250 250 postclose-tip:02de42196ebe draft book_02de
251 251 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
252 252
253 253 push
254 254 $ hg -R main push other --rev eea13746799a --bookmark book_eea1
255 255 pushing to other
256 256 searching for changes
257 257 remote: adding changesets
258 258 remote: adding manifests
259 259 remote: adding file changes
260 260 remote: pre-close-tip:eea13746799a public book_eea1
261 261 remote: added 1 changesets with 0 changes to 0 files (-1 heads)
262 262 remote: 1 new obsolescence markers
263 263 remote: pushkey: lock state after "bookmarks"
264 264 remote: lock: free
265 265 remote: wlock: free
266 266 remote: postclose-tip:eea13746799a public book_eea1
267 267 remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_NODE_LAST=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_PHASES_MOVED=1 HG_SOURCE=push HG_TXNID=TXN:$ID$ HG_TXNNAME=push HG_URL=file:$TESTTMP/other
268 268 updating bookmark book_eea1
269 269 pre-close-tip:02de42196ebe draft book_02de
270 270 postclose-tip:02de42196ebe draft book_02de
271 271 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_SOURCE=push-response HG_TXNID=TXN:$ID$ HG_TXNNAME=push-response
272 272 file:/*/$TESTTMP/other HG_URL=file:$TESTTMP/other (glob)
273 273 $ hg -R other log -G
274 274 o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
275 275 |\
276 276 | o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
277 277 | |
278 278 @ | 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
279 279 |/
280 280 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de book_32af book_42cc book_5fdd A
281 281
282 282 $ hg -R other debugobsolete
283 283 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
284 284 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
285 285 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
286 286
287 287 pull over ssh
288 288
289 289 $ hg -R other pull ssh://user@dummy/main -r 02de42196ebe --bookmark book_02de
290 290 pulling from ssh://user@dummy/main
291 291 searching for changes
292 292 adding changesets
293 293 adding manifests
294 294 adding file changes
295 295 updating bookmark book_02de
296 296 pre-close-tip:02de42196ebe draft book_02de
297 297 added 1 changesets with 1 changes to 1 files (+1 heads)
298 298 1 new obsolescence markers
299 299 new changesets 02de42196ebe (1 drafts)
300 300 postclose-tip:02de42196ebe draft book_02de
301 301 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_NODE_LAST=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
302 302 ssh://user@dummy/main HG_URL=ssh://user@dummy/main
303 303 (run 'hg heads' to see heads, 'hg merge' to merge)
304 304 $ hg -R other debugobsolete
305 305 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
306 306 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
307 307 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
308 308 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
309 309
310 310 pull over http
311 311
312 312 $ hg serve -R main -p $HGPORT -d --pid-file=main.pid -E main-error.log
313 313 $ cat main.pid >> $DAEMON_PIDS
314 314
315 315 $ hg -R other pull http://localhost:$HGPORT/ -r 42ccdea3bb16 --bookmark book_42cc
316 316 pulling from http://localhost:$HGPORT/
317 317 searching for changes
318 318 adding changesets
319 319 adding manifests
320 320 adding file changes
321 321 updating bookmark book_42cc
322 322 pre-close-tip:42ccdea3bb16 draft book_42cc
323 323 added 1 changesets with 1 changes to 1 files (+1 heads)
324 324 1 new obsolescence markers
325 325 new changesets 42ccdea3bb16 (1 drafts)
326 326 postclose-tip:42ccdea3bb16 draft book_42cc
327 327 txnclose hook: HG_BOOKMARK_MOVED=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_NODE_LAST=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_TXNNAME=pull
328 328 http://localhost:$HGPORT/ HG_URL=http://localhost:$HGPORT/
329 329 (run 'hg heads .' to see heads, 'hg merge' to merge)
330 330 $ cat main-error.log
331 331 $ hg -R other debugobsolete
332 332 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
333 333 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
334 334 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
335 335 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
336 336 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
337 337
338 338 push over ssh
339 339
340 340 $ hg -R main push ssh://user@dummy/other -r 5fddd98957c8 --bookmark book_5fdd
341 341 pushing to ssh://user@dummy/other
342 342 searching for changes
343 343 remote: adding changesets
344 344 remote: adding manifests
345 345 remote: adding file changes
346 346 remote: pre-close-tip:5fddd98957c8 draft book_5fdd
347 347 remote: added 1 changesets with 1 changes to 1 files
348 348 remote: 1 new obsolescence markers
349 349 remote: pushkey: lock state after "bookmarks"
350 350 remote: lock: free
351 351 remote: wlock: free
352 352 remote: postclose-tip:5fddd98957c8 draft book_5fdd
353 353 remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_NODE_LAST=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_TXNNAME=serve HG_URL=remote:ssh:$LOCALIP
354 354 updating bookmark book_5fdd
355 355 pre-close-tip:02de42196ebe draft book_02de
356 356 postclose-tip:02de42196ebe draft book_02de
357 357 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_SOURCE=push-response HG_TXNID=TXN:$ID$ HG_TXNNAME=push-response
358 358 ssh://user@dummy/other HG_URL=ssh://user@dummy/other
359 359 $ hg -R other log -G
360 360 o 6:5fddd98957c8 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
361 361 |
362 362 o 5:42ccdea3bb16 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
363 363 |
364 364 | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
365 365 | |
366 366 | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
367 367 | |/|
368 368 | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
369 369 |/ /
370 370 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
371 371 |/
372 372 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af A
373 373
374 374 $ hg -R other debugobsolete
375 375 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
376 376 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
377 377 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
378 378 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
379 379 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
380 380 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
381 381
382 382 push over http
383 383
384 384 $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
385 385 $ cat other.pid >> $DAEMON_PIDS
386 386
387 387 $ hg -R main phase --public 32af7686d403
388 388 pre-close-tip:02de42196ebe draft book_02de
389 389 postclose-tip:02de42196ebe draft book_02de
390 390 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_PHASES_MOVED=1 HG_TXNID=TXN:$ID$ HG_TXNNAME=phase
391 391 $ hg -R main push http://localhost:$HGPORT2/ -r 32af7686d403 --bookmark book_32af
392 392 pushing to http://localhost:$HGPORT2/
393 393 searching for changes
394 394 remote: adding changesets
395 395 remote: adding manifests
396 396 remote: adding file changes
397 397 remote: pre-close-tip:32af7686d403 public book_32af
398 398 remote: added 1 changesets with 1 changes to 1 files
399 399 remote: 1 new obsolescence markers
400 400 remote: pushkey: lock state after "bookmarks"
401 401 remote: lock: free
402 402 remote: wlock: free
403 403 remote: postclose-tip:32af7686d403 public book_32af
404 404 remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_NEW_OBSMARKERS=1 HG_NODE=32af7686d403cf45b5d95f2d70cebea587ac806a HG_NODE_LAST=32af7686d403cf45b5d95f2d70cebea587ac806a HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_TXNNAME=serve HG_URL=remote:http:$LOCALIP: (glob)
405 405 updating bookmark book_32af
406 406 pre-close-tip:02de42196ebe draft book_02de
407 407 postclose-tip:02de42196ebe draft book_02de
408 408 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_SOURCE=push-response HG_TXNID=TXN:$ID$ HG_TXNNAME=push-response
409 409 http://localhost:$HGPORT2/ HG_URL=http://localhost:$HGPORT2/
410 410 $ cat other-error.log
411 411
412 412 Check final content.
413 413
414 414 $ hg -R other log -G
415 415 o 7:32af7686d403 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af D
416 416 |
417 417 o 6:5fddd98957c8 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
418 418 |
419 419 o 5:42ccdea3bb16 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
420 420 |
421 421 | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
422 422 | |
423 423 | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
424 424 | |/|
425 425 | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
426 426 |/ /
427 427 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
428 428 |/
429 429 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
430 430
431 431 $ hg -R other debugobsolete
432 432 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
433 433 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
434 434 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
435 435 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
436 436 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
437 437 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
438 438 7777777777777777777777777777777777777777 32af7686d403cf45b5d95f2d70cebea587ac806a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
439 439
440 440 (check that no 'pending' files remain)
441 441
442 442 $ ls -1 other/.hg/bookmarks*
443 443 other/.hg/bookmarks
444 444 $ ls -1 other/.hg/store/phaseroots*
445 445 other/.hg/store/phaseroots
446 446 $ ls -1 other/.hg/store/00changelog.i*
447 447 other/.hg/store/00changelog.i
448 448
449 449 Error Handling
450 450 ==============
451 451
452 452 Check that errors are properly returned to the client during push.
453 453
454 454 Setting up
455 455
456 456 $ cat > failpush.py << EOF
457 457 > """A small extension that makes push fails when using bundle2
458 458 >
459 459 > used to test error handling in bundle2
460 460 > """
461 461 >
462 462 > from mercurial import error
463 463 > from mercurial import bundle2
464 464 > from mercurial import exchange
465 465 > from mercurial import extensions
466 466 > from mercurial import registrar
467 467 > cmdtable = {}
468 468 > command = registrar.command(cmdtable)
469 469 >
470 470 > configtable = {}
471 471 > configitem = registrar.configitem(configtable)
472 472 > configitem(b'failpush', b'reason',
473 473 > default=None,
474 474 > )
475 475 >
476 476 > def _pushbundle2failpart(pushop, bundler):
477 477 > reason = pushop.ui.config(b'failpush', b'reason')
478 478 > part = None
479 479 > if reason == b'abort':
480 480 > bundler.newpart(b'test:abort')
481 481 > if reason == b'unknown':
482 482 > bundler.newpart(b'test:unknown')
483 483 > if reason == b'race':
484 484 > # 20 Bytes of crap
485 485 > bundler.newpart(b'check:heads', data=b'01234567890123456789')
486 486 >
487 487 > @bundle2.parthandler(b"test:abort")
488 488 > def handleabort(op, part):
489 489 > raise error.Abort(b'Abandon ship!', hint=b"don't panic")
490 490 >
491 491 > def uisetup(ui):
492 492 > exchange.b2partsgenmapping[b'failpart'] = _pushbundle2failpart
493 493 > exchange.b2partsgenorder.insert(0, b'failpart')
494 494 >
495 495 > EOF
496 496
497 497 $ cd main
498 498 $ hg up tip
499 499 3 files updated, 0 files merged, 1 files removed, 0 files unresolved
500 500 $ echo 'I' > I
501 501 $ hg add I
502 502 $ hg ci -m 'I'
503 503 pre-close-tip:e7ec4e813ba6 draft
504 504 postclose-tip:e7ec4e813ba6 draft
505 505 txnclose hook: HG_HOOKNAME=txnclose.env HG_HOOKTYPE=txnclose HG_TXNID=TXN:$ID$ HG_TXNNAME=commit
506 506 $ hg id
507 507 e7ec4e813ba6 tip
508 508 $ cd ..
509 509
510 510 $ cat << EOF >> $HGRCPATH
511 511 > [extensions]
512 512 > failpush=$TESTTMP/failpush.py
513 513 > EOF
514 514
515 515 $ killdaemons.py
516 516 $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
517 517 $ cat other.pid >> $DAEMON_PIDS
518 518
519 519 Doing the actual push: Abort error
520 520
521 521 $ cat << EOF >> $HGRCPATH
522 522 > [failpush]
523 523 > reason = abort
524 524 > EOF
525 525
526 526 $ hg -R main push other -r e7ec4e813ba6
527 527 pushing to other
528 528 searching for changes
529 529 abort: Abandon ship!
530 530 (don't panic)
531 531 [255]
532 532
533 533 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
534 534 pushing to ssh://user@dummy/other
535 535 searching for changes
536 536 remote: Abandon ship!
537 537 remote: (don't panic)
538 538 abort: push failed on remote
539 539 [100]
540 540
541 541 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
542 542 pushing to http://localhost:$HGPORT2/
543 543 searching for changes
544 544 remote: Abandon ship!
545 545 remote: (don't panic)
546 546 abort: push failed on remote
547 547 [100]
548 548
549 549
550 550 Doing the actual push: unknown mandatory parts
551 551
552 552 $ cat << EOF >> $HGRCPATH
553 553 > [failpush]
554 554 > reason = unknown
555 555 > EOF
556 556
557 557 $ hg -R main push other -r e7ec4e813ba6
558 558 pushing to other
559 559 searching for changes
560 560 abort: missing support for test:unknown
561 561 [100]
562 562
563 563 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
564 564 pushing to ssh://user@dummy/other
565 565 searching for changes
566 566 abort: missing support for test:unknown
567 567 [100]
568 568
569 569 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
570 570 pushing to http://localhost:$HGPORT2/
571 571 searching for changes
572 572 abort: missing support for test:unknown
573 573 [100]
574 574
575 575 Doing the actual push: race
576 576
577 577 $ cat << EOF >> $HGRCPATH
578 578 > [failpush]
579 579 > reason = race
580 580 > EOF
581 581
582 582 $ hg -R main push other -r e7ec4e813ba6
583 583 pushing to other
584 584 searching for changes
585 585 abort: push failed:
586 586 'remote repository changed while pushing - please try again'
587 587 [255]
588 588
589 589 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
590 590 pushing to ssh://user@dummy/other
591 591 searching for changes
592 592 abort: push failed:
593 593 'remote repository changed while pushing - please try again'
594 594 [255]
595 595
596 596 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
597 597 pushing to http://localhost:$HGPORT2/
598 598 searching for changes
599 599 abort: push failed:
600 600 'remote repository changed while pushing - please try again'
601 601 [255]
602 602
603 603 Doing the actual push: hook abort
604 604
605 605 $ cat << EOF >> $HGRCPATH
606 606 > [failpush]
607 607 > reason =
608 608 > [hooks]
609 609 > pretxnclose.failpush = sh -c "echo 'You shall not pass!'; false"
610 610 > txnabort.failpush = sh -c "echo 'Cleaning up the mess...'"
611 611 > EOF
612 612
613 613 $ killdaemons.py
614 614 $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
615 615 $ cat other.pid >> $DAEMON_PIDS
616 616
617 617 $ hg -R main push other -r e7ec4e813ba6
618 618 pushing to other
619 619 searching for changes
620 620 remote: adding changesets
621 621 remote: adding manifests
622 622 remote: adding file changes
623 623 remote: pre-close-tip:e7ec4e813ba6 draft
624 624 remote: You shall not pass!
625 625 remote: transaction abort!
626 626 remote: Cleaning up the mess...
627 627 remote: rollback completed
628 628 abort: pretxnclose.failpush hook exited with status 1
629 629 [40]
630 630
631 631 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
632 632 pushing to ssh://user@dummy/other
633 633 searching for changes
634 634 remote: adding changesets
635 635 remote: adding manifests
636 636 remote: adding file changes
637 637 remote: pre-close-tip:e7ec4e813ba6 draft
638 638 remote: You shall not pass!
639 639 remote: transaction abort!
640 640 remote: Cleaning up the mess...
641 641 remote: rollback completed
642 642 remote: pretxnclose.failpush hook exited with status 1
643 643 abort: push failed on remote
644 644 [100]
645 645
646 646 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
647 647 pushing to http://localhost:$HGPORT2/
648 648 searching for changes
649 649 remote: adding changesets
650 650 remote: adding manifests
651 651 remote: adding file changes
652 652 remote: pre-close-tip:e7ec4e813ba6 draft
653 653 remote: You shall not pass!
654 654 remote: transaction abort!
655 655 remote: Cleaning up the mess...
656 656 remote: rollback completed
657 657 remote: pretxnclose.failpush hook exited with status 1
658 658 abort: push failed on remote
659 659 [100]
660 660
661 661 (check that no 'pending' files remain)
662 662
663 663 $ ls -1 other/.hg/bookmarks*
664 664 other/.hg/bookmarks
665 665 $ ls -1 other/.hg/store/phaseroots*
666 666 other/.hg/store/phaseroots
667 667 $ ls -1 other/.hg/store/00changelog.i*
668 668 other/.hg/store/00changelog.i
669 669
670 670 Check error from hook during the unbundling process itself
671 671
672 672 $ cat << EOF >> $HGRCPATH
673 673 > pretxnchangegroup = sh -c "echo 'Fail early!'; false"
674 674 > EOF
675 675 $ killdaemons.py # reload http config
676 676 $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
677 677 $ cat other.pid >> $DAEMON_PIDS
678 678
679 679 $ hg -R main push other -r e7ec4e813ba6
680 680 pushing to other
681 681 searching for changes
682 682 remote: adding changesets
683 683 remote: adding manifests
684 684 remote: adding file changes
685 685 remote: Fail early!
686 686 remote: transaction abort!
687 687 remote: Cleaning up the mess...
688 688 remote: rollback completed
689 689 abort: pretxnchangegroup hook exited with status 1
690 690 [40]
691 691 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
692 692 pushing to ssh://user@dummy/other
693 693 searching for changes
694 694 remote: adding changesets
695 695 remote: adding manifests
696 696 remote: adding file changes
697 697 remote: Fail early!
698 698 remote: transaction abort!
699 699 remote: Cleaning up the mess...
700 700 remote: rollback completed
701 701 remote: pretxnchangegroup hook exited with status 1
702 702 abort: push failed on remote
703 703 [100]
704 704 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
705 705 pushing to http://localhost:$HGPORT2/
706 706 searching for changes
707 707 remote: adding changesets
708 708 remote: adding manifests
709 709 remote: adding file changes
710 710 remote: Fail early!
711 711 remote: transaction abort!
712 712 remote: Cleaning up the mess...
713 713 remote: rollback completed
714 714 remote: pretxnchangegroup hook exited with status 1
715 715 abort: push failed on remote
716 716 [100]
717 717
718 718 Check output capture control.
719 719
720 720 (should be still forced for http, disabled for local and ssh)
721 721
722 722 $ cat >> $HGRCPATH << EOF
723 723 > [experimental]
724 724 > bundle2-output-capture=False
725 725 > EOF
726 726
727 727 $ hg -R main push other -r e7ec4e813ba6
728 728 pushing to other
729 729 searching for changes
730 730 adding changesets
731 731 adding manifests
732 732 adding file changes
733 733 Fail early!
734 734 transaction abort!
735 735 Cleaning up the mess...
736 736 rollback completed
737 737 abort: pretxnchangegroup hook exited with status 1
738 738 [40]
739 739 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
740 740 pushing to ssh://user@dummy/other
741 741 searching for changes
742 742 remote: adding changesets
743 743 remote: adding manifests
744 744 remote: adding file changes
745 745 remote: Fail early!
746 746 remote: transaction abort!
747 747 remote: Cleaning up the mess...
748 748 remote: rollback completed
749 749 remote: pretxnchangegroup hook exited with status 1
750 750 abort: push failed on remote
751 751 [100]
752 752 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
753 753 pushing to http://localhost:$HGPORT2/
754 754 searching for changes
755 755 remote: adding changesets
756 756 remote: adding manifests
757 757 remote: adding file changes
758 758 remote: Fail early!
759 759 remote: transaction abort!
760 760 remote: Cleaning up the mess...
761 761 remote: rollback completed
762 762 remote: pretxnchangegroup hook exited with status 1
763 763 abort: push failed on remote
764 764 [100]
765 765
766 766 Check abort from mandatory pushkey
767 767
768 768 $ cat > mandatorypart.py << EOF
769 769 > from mercurial import exchange
770 770 > from mercurial import pushkey
771 771 > from mercurial import node
772 772 > from mercurial import error
773 773 > @exchange.b2partsgenerator(b'failingpuskey')
774 774 > def addfailingpushey(pushop, bundler):
775 775 > enc = pushkey.encode
776 776 > part = bundler.newpart(b'pushkey')
777 777 > part.addparam(b'namespace', enc(b'phases'))
778 778 > part.addparam(b'key', enc(b'cd010b8cd998f3981a5a8115f94f8da4ab506089'))
779 779 > part.addparam(b'old', enc(b'0')) # successful update
780 780 > part.addparam(b'new', enc(b'0'))
781 781 > def fail(pushop, exc):
782 782 > raise error.Abort(b'Correct phase push failed (because hooks)')
783 783 > pushop.pkfailcb[part.id] = fail
784 784 > EOF
785 785 $ cat >> $HGRCPATH << EOF
786 786 > [hooks]
787 787 > pretxnchangegroup=
788 788 > pretxnclose.failpush=
789 789 > prepushkey.failpush = sh -c "echo 'do not push the key !'; false"
790 790 > [extensions]
791 791 > mandatorypart=$TESTTMP/mandatorypart.py
792 792 > EOF
793 793 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS # reload http config
794 794 $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
795 795 $ cat other.pid >> $DAEMON_PIDS
796 796
797 797 (Failure from a hook)
798 798
799 799 $ hg -R main push other -r e7ec4e813ba6
800 800 pushing to other
801 801 searching for changes
802 802 adding changesets
803 803 adding manifests
804 804 adding file changes
805 805 do not push the key !
806 806 pushkey-abort: prepushkey.failpush hook exited with status 1
807 807 transaction abort!
808 808 Cleaning up the mess...
809 809 rollback completed
810 810 abort: Correct phase push failed (because hooks)
811 811 [255]
812 812 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
813 813 pushing to ssh://user@dummy/other
814 814 searching for changes
815 815 remote: adding changesets
816 816 remote: adding manifests
817 817 remote: adding file changes
818 818 remote: do not push the key !
819 819 remote: pushkey-abort: prepushkey.failpush hook exited with status 1
820 820 remote: transaction abort!
821 821 remote: Cleaning up the mess...
822 822 remote: rollback completed
823 823 abort: Correct phase push failed (because hooks)
824 824 [255]
825 825 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
826 826 pushing to http://localhost:$HGPORT2/
827 827 searching for changes
828 828 remote: adding changesets
829 829 remote: adding manifests
830 830 remote: adding file changes
831 831 remote: do not push the key !
832 832 remote: pushkey-abort: prepushkey.failpush hook exited with status 1
833 833 remote: transaction abort!
834 834 remote: Cleaning up the mess...
835 835 remote: rollback completed
836 836 abort: Correct phase push failed (because hooks)
837 837 [255]
838 838
839 839 (Failure from the pushkey)
840 840
841 841 $ cat > mandatorypart.py << EOF
842 842 > from mercurial import exchange
843 843 > from mercurial import pushkey
844 844 > from mercurial import node
845 845 > from mercurial import error
846 846 > @exchange.b2partsgenerator(b'failingpuskey')
847 847 > def addfailingpushey(pushop, bundler):
848 848 > enc = pushkey.encode
849 849 > part = bundler.newpart(b'pushkey')
850 850 > part.addparam(b'namespace', enc(b'phases'))
851 851 > part.addparam(b'key', enc(b'cd010b8cd998f3981a5a8115f94f8da4ab506089'))
852 852 > part.addparam(b'old', enc(b'4')) # will fail
853 853 > part.addparam(b'new', enc(b'3'))
854 854 > def fail(pushop, exc):
855 855 > raise error.Abort(b'Clown phase push failed')
856 856 > pushop.pkfailcb[part.id] = fail
857 857 > EOF
858 858 $ cat >> $HGRCPATH << EOF
859 859 > [hooks]
860 860 > prepushkey.failpush =
861 861 > EOF
862 862 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS # reload http config
863 863 $ hg serve -R other -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
864 864 $ cat other.pid >> $DAEMON_PIDS
865 865
866 866 $ hg -R main push other -r e7ec4e813ba6
867 867 pushing to other
868 868 searching for changes
869 869 adding changesets
870 870 adding manifests
871 871 adding file changes
872 872 transaction abort!
873 873 Cleaning up the mess...
874 874 rollback completed
875 875 pushkey: lock state after "phases"
876 876 lock: free
877 877 wlock: free
878 878 abort: Clown phase push failed
879 879 [255]
880 880 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
881 881 pushing to ssh://user@dummy/other
882 882 searching for changes
883 883 remote: adding changesets
884 884 remote: adding manifests
885 885 remote: adding file changes
886 886 remote: transaction abort!
887 887 remote: Cleaning up the mess...
888 888 remote: rollback completed
889 889 remote: pushkey: lock state after "phases"
890 890 remote: lock: free
891 891 remote: wlock: free
892 892 abort: Clown phase push failed
893 893 [255]
894 894 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
895 895 pushing to http://localhost:$HGPORT2/
896 896 searching for changes
897 897 remote: adding changesets
898 898 remote: adding manifests
899 899 remote: adding file changes
900 900 remote: transaction abort!
901 901 remote: Cleaning up the mess...
902 902 remote: rollback completed
903 903 remote: pushkey: lock state after "phases"
904 904 remote: lock: free
905 905 remote: wlock: free
906 906 abort: Clown phase push failed
907 907 [255]
908 908
909 909 Test lazily acquiring the lock during unbundle
910 910 $ cp $TESTTMP/hgrc.orig $HGRCPATH
911 911
912 912 $ cat >> $TESTTMP/locktester.py <<EOF
913 913 > import os
914 914 > from mercurial import bundle2, error, extensions
915 915 > def checklock(orig, repo, *args, **kwargs):
916 916 > if repo.svfs.lexists(b"lock"):
917 917 > raise error.Abort(b"Lock should not be taken")
918 918 > return orig(repo, *args, **kwargs)
919 919 > def extsetup(ui):
920 920 > extensions.wrapfunction(bundle2, 'processbundle', checklock)
921 921 > EOF
922 922
923 923 $ hg init lazylock
924 924 $ cat >> lazylock/.hg/hgrc <<EOF
925 925 > [extensions]
926 926 > locktester=$TESTTMP/locktester.py
927 927 > EOF
928 928
929 929 $ hg clone -q ssh://user@dummy/lazylock lazylockclient
930 930 $ cd lazylockclient
931 931 $ touch a && hg ci -Aqm a
932 932 $ hg push
933 933 pushing to ssh://user@dummy/lazylock
934 934 searching for changes
935 935 remote: Lock should not be taken
936 936 abort: push failed on remote
937 937 [100]
938 938
939 939 $ cat >> ../lazylock/.hg/hgrc <<EOF
940 940 > [experimental]
941 941 > bundle2lazylocking=True
942 942 > EOF
943 943 $ hg push
944 944 pushing to ssh://user@dummy/lazylock
945 945 searching for changes
946 946 remote: adding changesets
947 947 remote: adding manifests
948 948 remote: adding file changes
949 949 remote: added 1 changesets with 1 changes to 1 files
950 950
951 951 $ cd ..
952 952
953 953 Servers can disable bundle1 for clone/pull operations
954 954
955 955 $ killdaemons.py
956 956 $ hg init bundle2onlyserver
957 957 $ cd bundle2onlyserver
958 958 $ cat > .hg/hgrc << EOF
959 959 > [server]
960 960 > bundle1.pull = false
961 961 > EOF
962 962
963 963 $ touch foo
964 964 $ hg -q commit -A -m initial
965 965
966 966 $ hg serve -p $HGPORT -d --pid-file=hg.pid
967 967 $ cat hg.pid >> $DAEMON_PIDS
968 968
969 969 $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2
970 970 requesting all changes
971 971 abort: remote error:
972 972 incompatible Mercurial client; bundle2 required
973 973 (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
974 974 [100]
975 975 $ killdaemons.py
976 976 $ cd ..
977 977
978 978 bundle1 can still pull non-generaldelta repos when generaldelta bundle1 disabled
979 979
980 980 $ hg --config format.usegeneraldelta=false init notgdserver
981 981 $ cd notgdserver
982 982 $ cat > .hg/hgrc << EOF
983 983 > [server]
984 984 > bundle1gd.pull = false
985 985 > EOF
986 986
987 987 $ touch foo
988 988 $ hg -q commit -A -m initial
989 989 $ hg serve -p $HGPORT -d --pid-file=hg.pid
990 990 $ cat hg.pid >> $DAEMON_PIDS
991 991
992 992 $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2-1
993 993 requesting all changes
994 994 adding changesets
995 995 adding manifests
996 996 adding file changes
997 997 added 1 changesets with 1 changes to 1 files
998 998 new changesets 96ee1d7354c4
999 999 updating to branch default
1000 1000 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1001 1001
1002 1002 $ killdaemons.py
1003 1003 $ cd ../bundle2onlyserver
1004 1004
1005 1005 bundle1 pull can be disabled for generaldelta repos only
1006 1006
1007 1007 $ cat > .hg/hgrc << EOF
1008 1008 > [server]
1009 1009 > bundle1gd.pull = false
1010 1010 > EOF
1011 1011
1012 1012 $ hg serve -p $HGPORT -d --pid-file=hg.pid
1013 1013 $ cat hg.pid >> $DAEMON_PIDS
1014 1014 $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2
1015 1015 requesting all changes
1016 1016 abort: remote error:
1017 1017 incompatible Mercurial client; bundle2 required
1018 1018 (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
1019 1019 [100]
1020 1020
1021 1021 $ killdaemons.py
1022 1022
1023 1023 Verify the global server.bundle1 option works
1024 1024
1025 1025 $ cd ..
1026 1026 $ cat > bundle2onlyserver/.hg/hgrc << EOF
1027 1027 > [server]
1028 1028 > bundle1 = false
1029 1029 > EOF
1030 1030 $ hg serve -R bundle2onlyserver -p $HGPORT -d --pid-file=hg.pid
1031 1031 $ cat hg.pid >> $DAEMON_PIDS
1032 1032 $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT not-bundle2
1033 1033 requesting all changes
1034 1034 abort: remote error:
1035 1035 incompatible Mercurial client; bundle2 required
1036 1036 (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
1037 1037 [100]
1038 1038 $ killdaemons.py
1039 1039
1040 1040 $ hg --config devel.legacy.exchange=bundle1 clone ssh://user@dummy/bundle2onlyserver not-bundle2-ssh
1041 1041 requesting all changes
1042 1042 adding changesets
1043 1043 remote: abort: incompatible Mercurial client; bundle2 required
1044 1044 remote: (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
1045 transaction abort!
1046 rollback completed
1045 1047 abort: stream ended unexpectedly (got 0 bytes, expected 4)
1046 1048 [255]
1047 1049
1048 1050 $ cat > bundle2onlyserver/.hg/hgrc << EOF
1049 1051 > [server]
1050 1052 > bundle1gd = false
1051 1053 > EOF
1052 1054 $ hg serve -R bundle2onlyserver -p $HGPORT -d --pid-file=hg.pid
1053 1055 $ cat hg.pid >> $DAEMON_PIDS
1054 1056
1055 1057 $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2
1056 1058 requesting all changes
1057 1059 abort: remote error:
1058 1060 incompatible Mercurial client; bundle2 required
1059 1061 (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
1060 1062 [100]
1061 1063
1062 1064 $ killdaemons.py
1063 1065
1064 1066 $ cd notgdserver
1065 1067 $ cat > .hg/hgrc << EOF
1066 1068 > [server]
1067 1069 > bundle1gd = false
1068 1070 > EOF
1069 1071 $ hg serve -p $HGPORT -d --pid-file=hg.pid
1070 1072 $ cat hg.pid >> $DAEMON_PIDS
1071 1073
1072 1074 $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2-2
1073 1075 requesting all changes
1074 1076 adding changesets
1075 1077 adding manifests
1076 1078 adding file changes
1077 1079 added 1 changesets with 1 changes to 1 files
1078 1080 new changesets 96ee1d7354c4
1079 1081 updating to branch default
1080 1082 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1081 1083
1082 1084 $ killdaemons.py
1083 1085 $ cd ../bundle2onlyserver
1084 1086
1085 1087 Verify bundle1 pushes can be disabled
1086 1088
1087 1089 $ cat > .hg/hgrc << EOF
1088 1090 > [server]
1089 1091 > bundle1.push = false
1090 1092 > [web]
1091 1093 > allow_push = *
1092 1094 > push_ssl = false
1093 1095 > EOF
1094 1096
1095 1097 $ hg serve -p $HGPORT -d --pid-file=hg.pid -E error.log
1096 1098 $ cat hg.pid >> $DAEMON_PIDS
1097 1099 $ cd ..
1098 1100
1099 1101 $ hg clone http://localhost:$HGPORT bundle2-only
1100 1102 requesting all changes
1101 1103 adding changesets
1102 1104 adding manifests
1103 1105 adding file changes
1104 1106 added 1 changesets with 1 changes to 1 files
1105 1107 new changesets 96ee1d7354c4
1106 1108 updating to branch default
1107 1109 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1108 1110 $ cd bundle2-only
1109 1111 $ echo commit > foo
1110 1112 $ hg commit -m commit
1111 1113 $ hg --config devel.legacy.exchange=bundle1 push
1112 1114 pushing to http://localhost:$HGPORT/
1113 1115 searching for changes
1114 1116 abort: remote error:
1115 1117 incompatible Mercurial client; bundle2 required
1116 1118 (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
1117 1119 [100]
1118 1120
1119 1121 (also check with ssh)
1120 1122
1121 1123 $ hg --config devel.legacy.exchange=bundle1 push ssh://user@dummy/bundle2onlyserver
1122 1124 pushing to ssh://user@dummy/bundle2onlyserver
1123 1125 searching for changes
1124 1126 remote: abort: incompatible Mercurial client; bundle2 required
1125 1127 remote: (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
1126 1128 [1]
1127 1129
1128 1130 $ hg push
1129 1131 pushing to http://localhost:$HGPORT/
1130 1132 searching for changes
1131 1133 remote: adding changesets
1132 1134 remote: adding manifests
1133 1135 remote: adding file changes
1134 1136 remote: added 1 changesets with 1 changes to 1 files
@@ -1,179 +1,186 b''
1 1 Test stream cloning while a revlog split happens
2 2 ------------------------------------------------
3 3
4 4 #testcases stream-bundle2-v2 stream-bundle2-v3
5 5
6 6 #if stream-bundle2-v3
7 7 $ cat << EOF >> $HGRCPATH
8 8 > [experimental]
9 9 > stream-v3 = yes
10 10 > EOF
11 11 #endif
12 12
13 13 setup a repository for tests
14 14 ----------------------------
15 15
16 16 $ cat >> $HGRCPATH << EOF
17 17 > [format]
18 18 > # skip compression to make it easy to trigger a split
19 19 > revlog-compression=none
20 20 > [phases]
21 21 > publish=no
22 22 > EOF
23 23
24 24 $ hg init server
25 25 $ cd server
26 26 $ file="some-file"
27 27 $ printf '%20d' '1' > $file
28 28 $ hg commit -Aqma
29 29 $ printf '%1024d' '1' > $file
30 30 $ hg commit -Aqmb
31 31 $ printf '%20d' '1' > $file
32 32 $ hg commit -Aqmc
33 33
34 34 check the revlog is inline
35 35
36 36 $ f -s .hg/store/data/some-file*
37 37 .hg/store/data/some-file.i: size=1259
38 38 $ hg debug-revlog-index some-file
39 39 rev linkrev nodeid p1-nodeid p2-nodeid
40 40 0 0 ed70cecbc103 000000000000 000000000000
41 41 1 1 7241018db64c ed70cecbc103 000000000000
42 42 2 2 fa1120531cc1 7241018db64c 000000000000
43 43 $ cd ..
44 44
45 45 setup synchronisation file
46 46
47 47 $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
48 48 $ export HG_TEST_STREAM_WALKED_FILE_1
49 49 $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
50 50 $ export HG_TEST_STREAM_WALKED_FILE_2
51 51 $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
52 52 $ export HG_TEST_STREAM_WALKED_FILE_3
53 53
54 54
55 55 Test stream-clone raced by a revlog-split
56 56 =========================================
57 57
58 58 Test stream-clone where the file is split right after the lock section is done
59 59
60 60 Start the server
61 61
62 62 $ hg serve -R server \
63 63 > -p $HGPORT1 -d --error errors.log --pid-file=hg.pid \
64 64 > --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py"
65 65 $ cat hg.pid >> $DAEMON_PIDS
66 66
67 67 Start a client doing a streaming clone
68 68
69 69 $ ( \
70 70 > hg clone --debug --stream -U http://localhost:$HGPORT1 \
71 71 > clone-while-split > client.log 2>&1; \
72 72 > touch "$HG_TEST_STREAM_WALKED_FILE_3" \
73 73 > ) &
74 74
75 75 Wait for the server to be done collecting data
76 76
77 77 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
78 78
79 79 trigger a split
80 80
81 81 $ dd if=/dev/zero of=server/$file bs=1k count=128 > /dev/null 2>&1
82 82 $ hg -R server ci -m "triggering a split" --config ui.timeout.warn=-1
83 83
84 84 unlock the stream generation
85 85
86 86 $ touch $HG_TEST_STREAM_WALKED_FILE_2
87 87
88 88 wait for the client to be done cloning.
89 89
90 90 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
91 91
92 92 Check everything is fine
93 93
94 94 $ cat client.log
95 95 using http://localhost:$HGPORT1/
96 96 sending capabilities command
97 97 query 1; heads
98 98 sending batch command
99 99 streaming all changes
100 100 sending getbundle command
101 101 bundle2-input-bundle: with-transaction
102 102 bundle2-input-part: "stream2" (params: 3 mandatory) supported (stream-bundle2-v2 !)
103 103 bundle2-input-part: "stream3-exp" (params: 1 mandatory) supported (stream-bundle2-v3 !)
104 104 applying stream bundle
105 7 files to transfer, 2.11 KB of data (stream-bundle2-v2 !)
105 8 files to transfer, 2.11 KB of data (stream-bundle2-v2 no-rust !)
106 10 files to transfer, 2.29 KB of data (stream-bundle2-v2 rust !)
106 107 adding [s] data/some-file.i (1.23 KB) (stream-bundle2-v2 !)
107 108 7 entries to transfer (stream-bundle2-v3 !)
108 109 adding [s] data/some-file.d (1.04 KB) (stream-bundle2-v3 !)
109 110 adding [s] data/some-file.i (192 bytes) (stream-bundle2-v3 !)
110 111 adding [s] phaseroots (43 bytes)
111 112 adding [s] 00manifest.i (348 bytes)
112 adding [s] 00changelog.i (381 bytes)
113 adding [s] 00changelog.n (62 bytes) (rust !)
114 adding [s] 00changelog-88698448.nd (128 bytes) (rust !)
115 adding [s] 00changelog.d (189 bytes)
116 adding [s] 00changelog.i (192 bytes)
113 117 adding [c] branch2-served (94 bytes)
114 118 adding [c] rbc-names-v1 (7 bytes)
115 119 adding [c] rbc-revs-v1 (24 bytes)
116 120 updating the branch cache
117 transferred 2.11 KB in * seconds (* */sec) (glob)
118 bundle2-input-part: total payload size 2268 (stream-bundle2-v2 !)
119 bundle2-input-part: total payload size 2296 (stream-bundle2-v3 !)
121 transferred 2.11 KB in * seconds (* */sec) (glob) (no-rust !)
122 transferred 2.29 KB in * seconds (* */sec) (glob) (rust !)
123 bundle2-input-part: total payload size 2285 (stream-bundle2-v2 no-rust !)
124 bundle2-input-part: total payload size 2518 (stream-bundle2-v2 rust !)
125 bundle2-input-part: total payload size 2313 (stream-bundle2-v3 no-rust !)
126 bundle2-input-part: total payload size 2546 (stream-bundle2-v3 rust !)
120 127 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
121 128 bundle2-input-bundle: 2 parts total
122 129 checking for updated bookmarks
123 130 updating the branch cache
124 131 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
125 132 $ tail -2 errors.log
126 133 $ hg -R clone-while-split verify
127 134 checking changesets
128 135 checking manifests
129 136 crosschecking files in changesets and manifests
130 137 checking files
131 138 checking dirstate
132 139 checked 3 changesets with 3 changes to 1 files
133 140 $ hg -R clone-while-split tip
134 141 changeset: 2:dbd9854c38a6
135 142 tag: tip
136 143 user: test
137 144 date: Thu Jan 01 00:00:00 1970 +0000
138 145 summary: c
139 146
140 147 $ hg -R clone-while-split debug-revlog-index some-file
141 148 rev linkrev nodeid p1-nodeid p2-nodeid
142 149 0 0 ed70cecbc103 000000000000 000000000000
143 150 1 1 7241018db64c ed70cecbc103 000000000000
144 151 2 2 fa1120531cc1 7241018db64c 000000000000
145 152 $ hg -R server phase --rev 'all()'
146 153 0: draft
147 154 1: draft
148 155 2: draft
149 156 3: draft
150 157 $ hg -R clone-while-split phase --rev 'all()'
151 158 0: draft
152 159 1: draft
153 160 2: draft
154 161
155 162 subsequent pull work
156 163
157 164 $ hg -R clone-while-split pull
158 165 pulling from http://localhost:$HGPORT1/
159 166 searching for changes
160 167 adding changesets
161 168 adding manifests
162 169 adding file changes
163 170 added 1 changesets with 1 changes to 1 files
164 171 new changesets df05c6cb1406 (1 drafts)
165 172 (run 'hg update' to get a working copy)
166 173
167 174 $ hg -R clone-while-split debug-revlog-index some-file
168 175 rev linkrev nodeid p1-nodeid p2-nodeid
169 176 0 0 ed70cecbc103 000000000000 000000000000
170 177 1 1 7241018db64c ed70cecbc103 000000000000
171 178 2 2 fa1120531cc1 7241018db64c 000000000000
172 179 3 3 a631378adaa3 fa1120531cc1 000000000000
173 180 $ hg -R clone-while-split verify
174 181 checking changesets
175 182 checking manifests
176 183 crosschecking files in changesets and manifests
177 184 checking files
178 185 checking dirstate
179 186 checked 4 changesets with 4 changes to 1 files
@@ -1,994 +1,1036 b''
1 1 #require serve no-reposimplestore no-chg
2 2
3 3 #testcases stream-legacy stream-bundle2-v2 stream-bundle2-v3
4 4
5 5 #if stream-legacy
6 6 $ cat << EOF >> $HGRCPATH
7 7 > [server]
8 8 > bundle2.stream = no
9 > [format]
10 > # persistent nodemap is too broken with legacy format,
11 > # however client with nodemap support will have better stream support.
12 > use-persistent-nodemap=no
9 13 > EOF
10 14 #endif
11 15 #if stream-bundle2-v3
12 16 $ cat << EOF >> $HGRCPATH
13 17 > [experimental]
14 18 > stream-v3 = yes
15 19 > EOF
16 20 #endif
17 21
18 22 Initialize repository
19 23
20 24 $ hg init server
21 25 $ cd server
22 26 $ sh $TESTDIR/testlib/stream_clone_setup.sh
23 27 adding 00changelog-ab349180a0405010.nd
24 28 adding 00changelog.d
25 29 adding 00changelog.i
26 30 adding 00changelog.n
27 31 adding 00manifest.d
28 32 adding 00manifest.i
29 33 adding container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch
30 34 adding data/foo.d
31 35 adding data/foo.i
32 36 adding data/foo.n
33 37 adding data/undo.babar
34 38 adding data/undo.d
35 39 adding data/undo.foo.d
36 40 adding data/undo.foo.i
37 41 adding data/undo.foo.n
38 42 adding data/undo.i
39 43 adding data/undo.n
40 44 adding data/undo.py
41 45 adding foo.d
42 46 adding foo.i
43 47 adding foo.n
44 48 adding meta/foo.d
45 49 adding meta/foo.i
46 50 adding meta/foo.n
47 51 adding meta/undo.babar
48 52 adding meta/undo.d
49 53 adding meta/undo.foo.d
50 54 adding meta/undo.foo.i
51 55 adding meta/undo.foo.n
52 56 adding meta/undo.i
53 57 adding meta/undo.n
54 58 adding meta/undo.py
55 59 adding savanah/foo.d
56 60 adding savanah/foo.i
57 61 adding savanah/foo.n
58 62 adding savanah/undo.babar
59 63 adding savanah/undo.d
60 64 adding savanah/undo.foo.d
61 65 adding savanah/undo.foo.i
62 66 adding savanah/undo.foo.n
63 67 adding savanah/undo.i
64 68 adding savanah/undo.n
65 69 adding savanah/undo.py
66 70 adding store/C\xc3\xa9lesteVille_is_a_Capital_City (esc)
67 71 adding store/foo.d
68 72 adding store/foo.i
69 73 adding store/foo.n
70 74 adding store/undo.babar
71 75 adding store/undo.d
72 76 adding store/undo.foo.d
73 77 adding store/undo.foo.i
74 78 adding store/undo.foo.n
75 79 adding store/undo.i
76 80 adding store/undo.n
77 81 adding store/undo.py
78 82 adding undo.babar
79 83 adding undo.d
80 84 adding undo.foo.d
81 85 adding undo.foo.i
82 86 adding undo.foo.n
83 87 adding undo.i
84 88 adding undo.n
85 89 adding undo.py
86 90
87 91 $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
88 92 $ cat hg.pid > $DAEMON_PIDS
89 93 $ cd ..
90 94
91 95 Check local clone
92 96 ==================
93 97
94 98 The logic is close enough of uncompressed.
95 99 This is present here to reuse the testing around file with "special" names.
96 100
97 101 $ hg clone server local-clone
98 102 updating to branch default
99 103 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
100 104
101 105 Check that the clone went well
102 106
103 107 $ hg verify -R local-clone -q
104 108
105 109 Check uncompressed
106 110 ==================
107 111
108 112 Cannot stream clone when server.uncompressed is set
109 113
110 114 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
111 115 200 Script output follows
112 116
113 117 1
114 118
115 119 #if stream-legacy
116 120 $ hg debugcapabilities http://localhost:$HGPORT
117 121 Main capabilities:
118 122 batch
119 123 branchmap
120 124 $USUAL_BUNDLE2_CAPS_SERVER$
121 125 changegroupsubset
122 126 compression=$BUNDLE2_COMPRESSIONS$
123 127 getbundle
124 128 httpheader=1024
125 129 httpmediatype=0.1rx,0.1tx,0.2tx
126 130 known
127 131 lookup
128 132 pushkey
129 133 unbundle=HG10GZ,HG10BZ,HG10UN
130 134 unbundlehash
131 135 Bundle2 capabilities:
132 136 HG20
133 137 bookmarks
134 138 changegroup
135 139 01
136 140 02
137 141 03
138 142 checkheads
139 143 related
140 144 digests
141 145 md5
142 146 sha1
143 147 sha512
144 148 error
145 149 abort
146 150 unsupportedcontent
147 151 pushraced
148 152 pushkey
149 153 hgtagsfnodes
150 154 listkeys
151 155 phases
152 156 heads
153 157 pushkey
154 158 remote-changegroup
155 159 http
156 160 https
157 161
158 162 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
159 163 warning: stream clone requested but server has them disabled
160 164 requesting all changes
161 165 adding changesets
162 166 adding manifests
163 167 adding file changes
164 168 added 3 changesets with 1088 changes to 1088 files
165 169 new changesets 96ee1d7354c4:5223b5e3265f
166 170
167 171 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
168 172 200 Script output follows
169 173 content-type: application/mercurial-0.2
170 174
171 175
172 176 $ f --size body --hexdump --bytes 100
173 177 body: size=140
174 178 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
175 179 0010: 73 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |s.ERROR:ABORT...|
176 180 0020: 00 01 01 07 3c 04 16 6d 65 73 73 61 67 65 73 74 |....<..messagest|
177 181 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
178 182 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
179 183 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
180 184 0060: 69 73 20 66 |is f|
181 185
182 186 #endif
183 187 #if stream-bundle2-v2
184 188 $ hg debugcapabilities http://localhost:$HGPORT
185 189 Main capabilities:
186 190 batch
187 191 branchmap
188 192 $USUAL_BUNDLE2_CAPS_SERVER$
189 193 changegroupsubset
190 194 compression=$BUNDLE2_COMPRESSIONS$
191 195 getbundle
192 196 httpheader=1024
193 197 httpmediatype=0.1rx,0.1tx,0.2tx
194 198 known
195 199 lookup
196 200 pushkey
197 201 unbundle=HG10GZ,HG10BZ,HG10UN
198 202 unbundlehash
199 203 Bundle2 capabilities:
200 204 HG20
201 205 bookmarks
202 206 changegroup
203 207 01
204 208 02
205 209 03
206 210 checkheads
207 211 related
208 212 digests
209 213 md5
210 214 sha1
211 215 sha512
212 216 error
213 217 abort
214 218 unsupportedcontent
215 219 pushraced
216 220 pushkey
217 221 hgtagsfnodes
218 222 listkeys
219 223 phases
220 224 heads
221 225 pushkey
222 226 remote-changegroup
223 227 http
224 228 https
225 229
226 230 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
227 231 warning: stream clone requested but server has them disabled
228 232 requesting all changes
229 233 adding changesets
230 234 adding manifests
231 235 adding file changes
232 236 added 3 changesets with 1088 changes to 1088 files
233 237 new changesets 96ee1d7354c4:5223b5e3265f
234 238
235 239 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
236 240 200 Script output follows
237 241 content-type: application/mercurial-0.2
238 242
239 243
240 244 $ f --size body --hexdump --bytes 100
241 245 body: size=140
242 246 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
243 247 0010: 73 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |s.ERROR:ABORT...|
244 248 0020: 00 01 01 07 3c 04 16 6d 65 73 73 61 67 65 73 74 |....<..messagest|
245 249 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
246 250 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
247 251 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
248 252 0060: 69 73 20 66 |is f|
249 253
250 254 #endif
251 255 #if stream-bundle2-v3
252 256 $ hg debugcapabilities http://localhost:$HGPORT
253 257 Main capabilities:
254 258 batch
255 259 branchmap
256 260 $USUAL_BUNDLE2_CAPS_SERVER$
257 261 changegroupsubset
258 262 compression=$BUNDLE2_COMPRESSIONS$
259 263 getbundle
260 264 httpheader=1024
261 265 httpmediatype=0.1rx,0.1tx,0.2tx
262 266 known
263 267 lookup
264 268 pushkey
265 269 unbundle=HG10GZ,HG10BZ,HG10UN
266 270 unbundlehash
267 271 Bundle2 capabilities:
268 272 HG20
269 273 bookmarks
270 274 changegroup
271 275 01
272 276 02
273 277 03
274 278 checkheads
275 279 related
276 280 digests
277 281 md5
278 282 sha1
279 283 sha512
280 284 error
281 285 abort
282 286 unsupportedcontent
283 287 pushraced
284 288 pushkey
285 289 hgtagsfnodes
286 290 listkeys
287 291 phases
288 292 heads
289 293 pushkey
290 294 remote-changegroup
291 295 http
292 296 https
293 297
294 298 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
295 299 warning: stream clone requested but server has them disabled
296 300 requesting all changes
297 301 adding changesets
298 302 adding manifests
299 303 adding file changes
300 304 added 3 changesets with 1088 changes to 1088 files
301 305 new changesets 96ee1d7354c4:5223b5e3265f
302 306
303 307 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
304 308 200 Script output follows
305 309 content-type: application/mercurial-0.2
306 310
307 311
308 312 $ f --size body --hexdump --bytes 100
309 313 body: size=140
310 314 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
311 315 0010: 73 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |s.ERROR:ABORT...|
312 316 0020: 00 01 01 07 3c 04 16 6d 65 73 73 61 67 65 73 74 |....<..messagest|
313 317 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
314 318 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
315 319 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
316 320 0060: 69 73 20 66 |is f|
317 321
318 322 #endif
319 323
320 324 $ killdaemons.py
321 325 $ cd server
322 326 $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
323 327 $ cat hg.pid > $DAEMON_PIDS
324 328 $ cd ..
325 329
326 330 Basic clone
327 331
328 332 #if stream-legacy
329 333 $ hg clone --stream -U http://localhost:$HGPORT clone1
330 334 streaming all changes
331 1090 files to transfer, 102 KB of data (no-zstd !)
335 1091 files to transfer, 102 KB of data (no-zstd !)
332 336 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
333 1090 files to transfer, 98.8 KB of data (zstd !)
337 1091 files to transfer, 98.8 KB of data (zstd !)
334 338 transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
335 339 searching for changes
336 340 no changes found
337 341 $ cat server/errors.txt
338 342 #endif
339 343 #if stream-bundle2-v2
340 344 $ hg clone --stream -U http://localhost:$HGPORT clone1
341 345 streaming all changes
342 1093 files to transfer, 102 KB of data (no-zstd !)
346 1094 files to transfer, 102 KB of data (no-zstd !)
343 347 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
344 1093 files to transfer, 98.9 KB of data (zstd !)
345 transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
348 1094 files to transfer, 98.9 KB of data (zstd no-rust !)
349 transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
350 1096 files to transfer, 99.0 KB of data (zstd rust !)
351 transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
346 352
347 353 $ ls -1 clone1/.hg/cache
348 354 branch2-base
349 355 branch2-immutable
350 356 branch2-served
351 357 branch2-served.hidden
352 358 branch2-visible
353 359 branch2-visible-hidden
354 360 rbc-names-v1
355 361 rbc-revs-v1
356 362 tags2
357 363 tags2-served
358 364 $ cat server/errors.txt
359 365 #endif
360 366 #if stream-bundle2-v3
361 367 $ hg clone --stream -U http://localhost:$HGPORT clone1
362 368 streaming all changes
363 369 1093 entries to transfer
364 370 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
365 transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
371 transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
372 transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
366 373
367 374 $ ls -1 clone1/.hg/cache
368 375 branch2-base
369 376 branch2-immutable
370 377 branch2-served
371 378 branch2-served.hidden
372 379 branch2-visible
373 380 branch2-visible-hidden
374 381 rbc-names-v1
375 382 rbc-revs-v1
376 383 tags2
377 384 tags2-served
378 385 $ cat server/errors.txt
379 386 #endif
380 387
381 388 getbundle requests with stream=1 are uncompressed
382 389
383 390 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
384 391 200 Script output follows
385 392 content-type: application/mercurial-0.2
386 393
387 394
388 395 #if no-zstd no-rust
389 396 $ f --size --hex --bytes 256 body
390 body: size=119123
397 body: size=119140
391 398 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
392 399 0010: 62 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |b.STREAM2.......|
393 400 0020: 06 09 04 0c 26 62 79 74 65 63 6f 75 6e 74 31 30 |....&bytecount10|
394 401 0030: 34 31 31 35 66 69 6c 65 63 6f 75 6e 74 31 30 39 |4115filecount109|
395 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |3requirementsgen|
402 0040: 34 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |4requirementsgen|
396 403 0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
397 404 0060: 6f 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 |ogv1%2Csparserev|
398 405 0070: 6c 6f 67 00 00 80 00 73 08 42 64 61 74 61 2f 30 |log....s.Bdata/0|
399 406 0080: 2e 69 00 03 00 01 00 00 00 00 00 00 00 02 00 00 |.i..............|
400 407 0090: 00 01 00 00 00 00 00 00 00 01 ff ff ff ff ff ff |................|
401 408 00a0: ff ff 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 |...)c.I.#....Vg.|
402 409 00b0: 67 2c 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 |g,i..9..........|
403 410 00c0: 00 00 75 30 73 26 45 64 61 74 61 2f 30 30 63 68 |..u0s&Edata/00ch|
404 411 00d0: 61 6e 67 65 6c 6f 67 2d 61 62 33 34 39 31 38 30 |angelog-ab349180|
405 412 00e0: 61 30 34 30 35 30 31 30 2e 6e 64 2e 69 00 03 00 |a0405010.nd.i...|
406 413 00f0: 01 00 00 00 00 00 00 00 05 00 00 00 04 00 00 00 |................|
407 414 #endif
408 415 #if zstd no-rust
409 416 $ f --size --hex --bytes 256 body
410 body: size=116310 (no-bigendian !)
411 body: size=116305 (bigendian !)
417 body: size=116327 (no-bigendian !)
418 body: size=116322 (bigendian !)
412 419 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
413 420 0010: 7c 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 ||.STREAM2.......|
414 421 0020: 06 09 04 0c 40 62 79 74 65 63 6f 75 6e 74 31 30 |....@bytecount10|
415 422 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| (no-bigendian !)
416 423 0030: 31 32 37 31 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1271filecount109| (bigendian !)
417 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |3requirementsgen|
424 0040: 34 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |4requirementsgen|
418 425 0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
419 426 0060: 6f 67 2d 63 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a |og-compression-z|
420 427 0070: 73 74 64 25 32 43 72 65 76 6c 6f 67 76 31 25 32 |std%2Crevlogv1%2|
421 428 0080: 43 73 70 61 72 73 65 72 65 76 6c 6f 67 00 00 80 |Csparserevlog...|
422 429 0090: 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 |.s.Bdata/0.i....|
423 430 00a0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................|
424 431 00b0: 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 |.............)c.|
425 432 00c0: 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 |I.#....Vg.g,i..9|
426 433 00d0: 00 00 00 00 00 00 00 00 00 00 00 00 75 30 73 26 |............u0s&|
427 434 00e0: 45 64 61 74 61 2f 30 30 63 68 61 6e 67 65 6c 6f |Edata/00changelo|
428 435 00f0: 67 2d 61 62 33 34 39 31 38 30 61 30 34 30 35 30 |g-ab349180a04050|
429 436 #endif
430 437 #if zstd rust no-dirstate-v2
431 438 $ f --size --hex --bytes 256 body
432 body: size=116310
439 body: size=116310 (no-rust !)
440 body: size=116495 (rust no-stream-legacy no-bigendian !)
441 body: size=116490 (rust no-stream-legacy bigendian !)
442 body: size=116327 (rust stream-legacy no-bigendian !)
443 body: size=116322 (rust stream-legacy bigendian !)
433 444 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
434 445 0010: 7c 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 ||.STREAM2.......|
435 446 0020: 06 09 04 0c 40 62 79 74 65 63 6f 75 6e 74 31 30 |....@bytecount10|
436 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109|
437 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |3requirementsgen|
447 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| (no-rust !)
448 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |3requirementsgen| (no-rust !)
449 0030: 31 34 30 32 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1402filecount109| (rust no-stream-legacy no-bigendian !)
450 0030: 31 33 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1397filecount109| (rust no-stream-legacy bigendian !)
451 0040: 36 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |6requirementsgen| (rust no-stream-legacy !)
452 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| (rust stream-legacy no-bigendian !)
453 0030: 31 32 37 31 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1271filecount109| (rust stream-legacy bigendian !)
454 0040: 34 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |4requirementsgen| (rust stream-legacy !)
438 455 0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
439 456 0060: 6f 67 2d 63 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a |og-compression-z|
440 457 0070: 73 74 64 25 32 43 72 65 76 6c 6f 67 76 31 25 32 |std%2Crevlogv1%2|
441 458 0080: 43 73 70 61 72 73 65 72 65 76 6c 6f 67 00 00 80 |Csparserevlog...|
442 459 0090: 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 |.s.Bdata/0.i....|
443 460 00a0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................|
444 461 00b0: 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 |.............)c.|
445 462 00c0: 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 |I.#....Vg.g,i..9|
446 463 00d0: 00 00 00 00 00 00 00 00 00 00 00 00 75 30 73 26 |............u0s&|
447 464 00e0: 45 64 61 74 61 2f 30 30 63 68 61 6e 67 65 6c 6f |Edata/00changelo|
448 465 00f0: 67 2d 61 62 33 34 39 31 38 30 61 30 34 30 35 30 |g-ab349180a04050|
449 466 #endif
450 467 #if zstd dirstate-v2
451 468 $ f --size --hex --bytes 256 body
452 469 body: size=109549
453 470 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
454 471 0010: c0 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
455 472 0020: 05 09 04 0c 85 62 79 74 65 63 6f 75 6e 74 39 35 |.....bytecount95|
456 473 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030|
457 474 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
458 475 0050: 6e 63 6f 64 65 25 32 43 65 78 70 2d 64 69 72 73 |ncode%2Cexp-dirs|
459 476 0060: 74 61 74 65 2d 76 32 25 32 43 66 6e 63 61 63 68 |tate-v2%2Cfncach|
460 477 0070: 65 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 |e%2Cgeneraldelta|
461 478 0080: 25 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f |%2Cpersistent-no|
462 479 0090: 64 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 |demap%2Crevlog-c|
463 480 00a0: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 |ompression-zstd%|
464 481 00b0: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa|
465 482 00c0: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor|
466 483 00d0: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i|
467 484 00e0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................|
468 485 00f0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................|
469 486 #endif
470 487
471 488 --uncompressed is an alias to --stream
472 489
473 490 #if stream-legacy
474 491 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
475 492 streaming all changes
476 1090 files to transfer, 102 KB of data (no-zstd !)
493 1091 files to transfer, 102 KB of data (no-zstd !)
477 494 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
478 1090 files to transfer, 98.8 KB of data (zstd !)
495 1091 files to transfer, 98.8 KB of data (zstd !)
479 496 transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
480 497 searching for changes
481 498 no changes found
482 499 #endif
483 500 #if stream-bundle2-v2
484 501 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
485 502 streaming all changes
486 1093 files to transfer, 102 KB of data (no-zstd !)
503 1094 files to transfer, 102 KB of data (no-zstd !)
487 504 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
488 1093 files to transfer, 98.9 KB of data (zstd !)
489 transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
505 1094 files to transfer, 98.9 KB of data (zstd no-rust !)
506 transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
507 1096 files to transfer, 99.0 KB of data (zstd rust !)
508 transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
490 509 #endif
491 510 #if stream-bundle2-v3
492 511 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
493 512 streaming all changes
494 513 1093 entries to transfer
495 514 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
496 transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
515 transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
516 transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
497 517 #endif
498 518
499 519 Clone with background file closing enabled
500 520
501 521 #if stream-legacy
502 522 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
503 523 using http://localhost:$HGPORT/
504 524 sending capabilities command
505 525 sending branchmap command
506 526 streaming all changes
507 527 sending stream_out command
508 1090 files to transfer, 102 KB of data (no-zstd !)
509 1090 files to transfer, 98.8 KB of data (zstd !)
528 1091 files to transfer, 102 KB of data (no-zstd !)
529 1091 files to transfer, 98.8 KB of data (zstd !)
510 530 starting 4 threads for background file closing
511 531 updating the branch cache
512 532 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
513 533 transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
514 534 query 1; heads
515 535 sending batch command
516 536 searching for changes
517 537 all remote heads known locally
518 538 no changes found
519 539 sending getbundle command
520 540 bundle2-input-bundle: with-transaction
521 541 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
522 542 bundle2-input-part: "phase-heads" supported
523 543 bundle2-input-part: total payload size 24
524 544 bundle2-input-bundle: 2 parts total
525 545 checking for updated bookmarks
526 546 updating the branch cache
527 547 (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
528 548 #endif
529 549 #if stream-bundle2-v2
530 550 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
531 551 using http://localhost:$HGPORT/
532 552 sending capabilities command
533 553 query 1; heads
534 554 sending batch command
535 555 streaming all changes
536 556 sending getbundle command
537 557 bundle2-input-bundle: with-transaction
538 558 bundle2-input-part: "stream2" (params: 3 mandatory) supported
539 559 applying stream bundle
540 1093 files to transfer, 102 KB of data (no-zstd !)
541 1093 files to transfer, 98.9 KB of data (zstd !)
560 1094 files to transfer, 102 KB of data (no-zstd !)
561 1094 files to transfer, 98.9 KB of data (zstd no-rust !)
562 1096 files to transfer, 99.0 KB of data (zstd rust !)
542 563 starting 4 threads for background file closing
543 564 starting 4 threads for background file closing
544 565 updating the branch cache
545 566 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
546 bundle2-input-part: total payload size 118984 (no-zstd !)
547 transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
548 bundle2-input-part: total payload size 116145 (zstd no-bigendian !)
549 bundle2-input-part: total payload size 116140 (zstd bigendian !)
567 bundle2-input-part: total payload size 119001 (no-zstd !)
568 transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
569 transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
570 bundle2-input-part: total payload size 116162 (zstd no-bigendian no-rust !)
571 bundle2-input-part: total payload size 116330 (zstd no-bigendian rust !)
572 bundle2-input-part: total payload size 116157 (zstd bigendian no-rust !)
573 bundle2-input-part: total payload size 116325 (zstd bigendian rust !)
550 574 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
551 575 bundle2-input-bundle: 2 parts total
552 576 checking for updated bookmarks
553 577 updating the branch cache
554 578 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
555 579 #endif
556 580 #if stream-bundle2-v3
557 581 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
558 582 using http://localhost:$HGPORT/
559 583 sending capabilities command
560 584 query 1; heads
561 585 sending batch command
562 586 streaming all changes
563 587 sending getbundle command
564 588 bundle2-input-bundle: with-transaction
565 589 bundle2-input-part: "stream3-exp" (params: 1 mandatory) supported
566 590 applying stream bundle
567 591 1093 entries to transfer
568 592 starting 4 threads for background file closing
569 593 starting 4 threads for background file closing
570 594 updating the branch cache
571 595 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
572 bundle2-input-part: total payload size 120079 (no-zstd !)
573 transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
574 bundle2-input-part: total payload size 117240 (zstd no-bigendian !)
575 bundle2-input-part: total payload size 116138 (zstd bigendian !)
596 bundle2-input-part: total payload size 120096 (no-zstd !)
597 transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
598 transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
599 bundle2-input-part: total payload size 117257 (zstd no-rust no-bigendian !)
600 bundle2-input-part: total payload size 117425 (zstd rust no-bigendian !)
601 bundle2-input-part: total payload size 117252 (zstd bigendian no-rust !)
602 bundle2-input-part: total payload size 117420 (zstd bigendian rust !)
576 603 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
577 604 bundle2-input-bundle: 2 parts total
578 605 checking for updated bookmarks
579 606 updating the branch cache
580 607 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
581 608 #endif
582 609
583 610 Cannot stream clone when there are secret changesets
584 611
585 612 $ hg -R server phase --force --secret -r tip
586 613 $ hg clone --stream -U http://localhost:$HGPORT secret-denied
587 614 warning: stream clone requested but server has them disabled
588 615 requesting all changes
589 616 adding changesets
590 617 adding manifests
591 618 adding file changes
592 619 added 2 changesets with 1025 changes to 1025 files
593 620 new changesets 96ee1d7354c4:c17445101a72
594 621
595 622 $ killdaemons.py
596 623
597 624 Streaming of secrets can be overridden by server config
598 625
599 626 $ cd server
600 627 $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
601 628 $ cat hg.pid > $DAEMON_PIDS
602 629 $ cd ..
603 630
604 631 #if stream-legacy
605 632 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
606 633 streaming all changes
607 1090 files to transfer, 102 KB of data (no-zstd !)
634 1091 files to transfer, 102 KB of data (no-zstd !)
608 635 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
609 1090 files to transfer, 98.8 KB of data (zstd !)
636 1091 files to transfer, 98.8 KB of data (zstd !)
610 637 transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
611 638 searching for changes
612 639 no changes found
613 640 #endif
614 641 #if stream-bundle2-v2
615 642 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
616 643 streaming all changes
617 1093 files to transfer, 102 KB of data (no-zstd !)
644 1094 files to transfer, 102 KB of data (no-zstd !)
618 645 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
619 1093 files to transfer, 98.9 KB of data (zstd !)
620 transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
646 1094 files to transfer, 98.9 KB of data (zstd no-rust !)
647 transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
648 1096 files to transfer, 99.0 KB of data (zstd rust !)
649 transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
621 650 #endif
622 651 #if stream-bundle2-v3
623 652 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
624 653 streaming all changes
625 654 1093 entries to transfer
626 655 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
627 transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
656 transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
657 transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
628 658 #endif
629 659
630 660 $ killdaemons.py
631 661
632 662 Verify interaction between preferuncompressed and secret presence
633 663
634 664 $ cd server
635 665 $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
636 666 $ cat hg.pid > $DAEMON_PIDS
637 667 $ cd ..
638 668
639 669 $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
640 670 requesting all changes
641 671 adding changesets
642 672 adding manifests
643 673 adding file changes
644 674 added 2 changesets with 1025 changes to 1025 files
645 675 new changesets 96ee1d7354c4:c17445101a72
646 676
647 677 $ killdaemons.py
648 678
649 679 Clone not allowed when full bundles disabled and can't serve secrets
650 680
651 681 $ cd server
652 682 $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
653 683 $ cat hg.pid > $DAEMON_PIDS
654 684 $ cd ..
655 685
656 686 $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
657 687 warning: stream clone requested but server has them disabled
658 688 requesting all changes
659 689 remote: abort: server has pull-based clones disabled
660 690 abort: pull failed on remote
661 691 (remove --pull if specified or upgrade Mercurial)
662 692 [100]
663 693
664 694 Local stream clone with secrets involved
665 695 (This is just a test over behavior: if you have access to the repo's files,
666 696 there is no security so it isn't important to prevent a clone here.)
667 697
668 698 $ hg clone -U --stream server local-secret
669 699 warning: stream clone requested but server has them disabled
670 700 requesting all changes
671 701 adding changesets
672 702 adding manifests
673 703 adding file changes
674 704 added 2 changesets with 1025 changes to 1025 files
675 705 new changesets 96ee1d7354c4:c17445101a72
676 706
677 707 Stream clone while repo is changing:
678 708
679 709 $ mkdir changing
680 710 $ cd changing
681 711
682 712 prepare repo with small and big file to cover both code paths in emitrevlogdata
683 713
684 714 $ hg init repo
685 715 $ touch repo/f1
686 716 $ $TESTDIR/seq.py 50000 > repo/f2
687 717 $ hg -R repo ci -Aqm "0"
688 718 $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
689 719 $ export HG_TEST_STREAM_WALKED_FILE_1
690 720 $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
691 721 $ export HG_TEST_STREAM_WALKED_FILE_2
692 722 $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
693 723 $ export HG_TEST_STREAM_WALKED_FILE_3
694 724 # $ cat << EOF >> $HGRCPATH
695 725 # > [hooks]
696 726 # > pre-clone=rm -f "$TESTTMP/sync_file_walked_*"
697 727 # > EOF
698 728 $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py"
699 729 $ cat hg.pid >> $DAEMON_PIDS
700 730
701 731 clone while modifying the repo between stating file with write lock and
702 732 actually serving file content
703 733
704 734 $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") &
705 735 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
706 736 $ echo >> repo/f1
707 737 $ echo >> repo/f2
708 738 $ hg -R repo ci -m "1" --config ui.timeout.warn=-1
709 739 $ touch $HG_TEST_STREAM_WALKED_FILE_2
710 740 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
711 741 $ hg -R clone id
712 742 000000000000
713 743 $ cat errors.log
714 744 $ cd ..
715 745
716 746 Stream repository with bookmarks
717 747 --------------------------------
718 748
719 749 (revert introduction of secret changeset)
720 750
721 751 $ hg -R server phase --draft 'secret()'
722 752
723 753 add a bookmark
724 754
725 755 $ hg -R server bookmark -r tip some-bookmark
726 756
727 757 clone it
728 758
729 759 #if stream-legacy
730 760 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
731 761 streaming all changes
732 1090 files to transfer, 102 KB of data (no-zstd !)
762 1091 files to transfer, 102 KB of data (no-zstd !)
733 763 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
734 1090 files to transfer, 98.8 KB of data (zstd !)
764 1091 files to transfer, 98.8 KB of data (zstd !)
735 765 transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
736 766 searching for changes
737 767 no changes found
738 768 updating to branch default
739 769 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
740 770 #endif
741 771 #if stream-bundle2-v2
742 772 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
743 773 streaming all changes
744 1096 files to transfer, 102 KB of data (no-zstd !)
774 1097 files to transfer, 102 KB of data (no-zstd !)
745 775 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
746 1096 files to transfer, 99.1 KB of data (zstd !)
747 transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
776 1097 files to transfer, 99.1 KB of data (zstd no-rust !)
777 transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
778 1099 files to transfer, 99.2 KB of data (zstd rust !)
779 transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
748 780 updating to branch default
749 781 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
750 782 #endif
751 783 #if stream-bundle2-v3
752 784 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
753 785 streaming all changes
754 786 1096 entries to transfer
755 787 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
756 transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
788 transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
789 transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
757 790 updating to branch default
758 791 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
759 792 #endif
760 793 $ hg verify -R with-bookmarks -q
761 794 $ hg -R with-bookmarks bookmarks
762 795 some-bookmark 2:5223b5e3265f
763 796
764 797 Stream repository with phases
765 798 -----------------------------
766 799
767 800 Clone as publishing
768 801
769 802 $ hg -R server phase -r 'all()'
770 803 0: draft
771 804 1: draft
772 805 2: draft
773 806
774 807 #if stream-legacy
775 808 $ hg clone --stream http://localhost:$HGPORT phase-publish
776 809 streaming all changes
777 1090 files to transfer, 102 KB of data (no-zstd !)
810 1091 files to transfer, 102 KB of data (no-zstd !)
778 811 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
779 1090 files to transfer, 98.8 KB of data (zstd !)
812 1091 files to transfer, 98.8 KB of data (zstd !)
780 813 transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
781 814 searching for changes
782 815 no changes found
783 816 updating to branch default
784 817 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
785 818 #endif
786 819 #if stream-bundle2-v2
787 820 $ hg clone --stream http://localhost:$HGPORT phase-publish
788 821 streaming all changes
789 1096 files to transfer, 102 KB of data (no-zstd !)
822 1097 files to transfer, 102 KB of data (no-zstd !)
790 823 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
791 1096 files to transfer, 99.1 KB of data (zstd !)
792 transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
824 1097 files to transfer, 99.1 KB of data (zstd no-rust !)
825 transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
826 1099 files to transfer, 99.2 KB of data (zstd rust !)
827 transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
793 828 updating to branch default
794 829 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
795 830 #endif
796 831 #if stream-bundle2-v3
797 832 $ hg clone --stream http://localhost:$HGPORT phase-publish
798 833 streaming all changes
799 834 1096 entries to transfer
800 835 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
801 transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
836 transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
837 transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
802 838 updating to branch default
803 839 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
804 840 #endif
805 841 $ hg verify -R phase-publish -q
806 842 $ hg -R phase-publish phase -r 'all()'
807 843 0: public
808 844 1: public
809 845 2: public
810 846
811 847 Clone as non publishing
812 848
813 849 $ cat << EOF >> server/.hg/hgrc
814 850 > [phases]
815 851 > publish = False
816 852 > EOF
817 853 $ killdaemons.py
818 854 $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
819 855 $ cat hg.pid > $DAEMON_PIDS
820 856
821 857 #if stream-legacy
822 858
823 859 With v1 of the stream protocol, changeset are always cloned as public. It make
824 860 stream v1 unsuitable for non-publishing repository.
825 861
826 862 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
827 863 streaming all changes
828 1090 files to transfer, 102 KB of data (no-zstd !)
864 1091 files to transfer, 102 KB of data (no-zstd !)
829 865 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
830 1090 files to transfer, 98.8 KB of data (zstd !)
866 1091 files to transfer, 98.8 KB of data (zstd !)
831 867 transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
832 868 searching for changes
833 869 no changes found
834 870 updating to branch default
835 871 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
836 872 $ hg -R phase-no-publish phase -r 'all()'
837 873 0: public
838 874 1: public
839 875 2: public
840 876 #endif
841 877 #if stream-bundle2-v2
842 878 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
843 879 streaming all changes
844 1097 files to transfer, 102 KB of data (no-zstd !)
880 1098 files to transfer, 102 KB of data (no-zstd !)
845 881 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
846 1097 files to transfer, 99.1 KB of data (zstd !)
847 transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
882 1098 files to transfer, 99.1 KB of data (zstd no-rust !)
883 transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
884 1100 files to transfer, 99.2 KB of data (zstd rust !)
885 transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
848 886 updating to branch default
849 887 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
850 888 $ hg -R phase-no-publish phase -r 'all()'
851 889 0: draft
852 890 1: draft
853 891 2: draft
854 892 #endif
855 893 #if stream-bundle2-v3
856 894 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
857 895 streaming all changes
858 896 1097 entries to transfer
859 897 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
860 transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
898 transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
899 transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
861 900 updating to branch default
862 901 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
863 902 $ hg -R phase-no-publish phase -r 'all()'
864 903 0: draft
865 904 1: draft
866 905 2: draft
867 906 #endif
868 907 $ hg verify -R phase-no-publish -q
869 908
870 909 $ killdaemons.py
871 910
872 911 #if stream-legacy
873 912
874 913 With v1 of the stream protocol, changeset are always cloned as public. There's
875 914 no obsolescence markers exchange in stream v1.
876 915
877 916 #endif
878 917 #if stream-bundle2-v2
879 918
880 919 Stream repository with obsolescence
881 920 -----------------------------------
882 921
883 922 Clone non-publishing with obsolescence
884 923
885 924 $ cat >> $HGRCPATH << EOF
886 925 > [experimental]
887 926 > evolution=all
888 927 > EOF
889 928
890 929 $ cd server
891 930 $ echo foo > foo
892 931 $ hg -q commit -m 'about to be pruned'
893 932 $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
894 933 1 new obsolescence markers
895 934 obsoleted 1 changesets
896 935 $ hg up null -q
897 936 $ hg log -T '{rev}: {phase}\n'
898 937 2: draft
899 938 1: draft
900 939 0: draft
901 940 $ hg serve -p $HGPORT -d --pid-file=hg.pid
902 941 $ cat hg.pid > $DAEMON_PIDS
903 942 $ cd ..
904 943
905 944 $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
906 945 streaming all changes
907 1098 files to transfer, 102 KB of data (no-zstd !)
946 1099 files to transfer, 102 KB of data (no-zstd !)
908 947 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
909 1098 files to transfer, 99.5 KB of data (zstd !)
910 transferred 99.5 KB in * seconds (* */sec) (glob) (zstd !)
948 1099 files to transfer, 99.5 KB of data (zstd no-rust !)
949 transferred 99.5 KB in * seconds (* */sec) (glob) (zstd no-rust !)
950 1101 files to transfer, 99.6 KB of data (zstd rust !)
951 transferred 99.6 KB in * seconds (* */sec) (glob) (zstd rust !)
911 952 $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
912 953 2: draft
913 954 1: draft
914 955 0: draft
915 956 $ hg debugobsolete -R with-obsolescence
916 957 8c206a663911c1f97f2f9d7382e417ae55872cfa 0 {5223b5e3265f0df40bb743da62249413d74ac70f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
917 958 $ hg verify -R with-obsolescence -q
918 959
919 960 $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
920 961 streaming all changes
921 962 remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
922 963 abort: pull failed on remote
923 964 [100]
924 965
925 966 $ killdaemons.py
926 967
927 968 #endif
928 969 #if stream-bundle2-v3
929 970
930 971 Stream repository with obsolescence
931 972 -----------------------------------
932 973
933 974 Clone non-publishing with obsolescence
934 975
935 976 $ cat >> $HGRCPATH << EOF
936 977 > [experimental]
937 978 > evolution=all
938 979 > EOF
939 980
940 981 $ cd server
941 982 $ echo foo > foo
942 983 $ hg -q commit -m 'about to be pruned'
943 984 $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
944 985 1 new obsolescence markers
945 986 obsoleted 1 changesets
946 987 $ hg up null -q
947 988 $ hg log -T '{rev}: {phase}\n'
948 989 2: draft
949 990 1: draft
950 991 0: draft
951 992 $ hg serve -p $HGPORT -d --pid-file=hg.pid
952 993 $ cat hg.pid > $DAEMON_PIDS
953 994 $ cd ..
954 995
955 996 $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
956 997 streaming all changes
957 998 1098 entries to transfer
958 999 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
959 transferred 99.5 KB in * seconds (* */sec) (glob) (zstd !)
1000 transferred 99.5 KB in * seconds (* */sec) (glob) (zstd no-rust !)
1001 transferred 99.6 KB in * seconds (* */sec) (glob) (zstd rust !)
960 1002 $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
961 1003 2: draft
962 1004 1: draft
963 1005 0: draft
964 1006 $ hg debugobsolete -R with-obsolescence
965 1007 8c206a663911c1f97f2f9d7382e417ae55872cfa 0 {5223b5e3265f0df40bb743da62249413d74ac70f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
966 1008 $ hg verify -R with-obsolescence -q
967 1009
968 1010 $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
969 1011 streaming all changes
970 1012 remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
971 1013 abort: pull failed on remote
972 1014 [100]
973 1015
974 1016 $ killdaemons.py
975 1017
976 1018 #endif
977 1019
978 1020 Cloning a repo with no requirements doesn't give some obscure error
979 1021
980 1022 $ mkdir -p empty-repo/.hg
981 1023 $ hg clone -q --stream ssh://user@dummy/empty-repo empty-repo2
982 1024 $ hg --cwd empty-repo2 verify -q
983 1025
984 1026 Cloning a repo with an empty manifestlog doesn't give some weird error
985 1027
986 1028 $ rm -r empty-repo; hg init empty-repo
987 1029 $ (cd empty-repo; touch x; hg commit -Am empty; hg debugstrip -r 0) > /dev/null
988 1030 $ hg clone -q --stream ssh://user@dummy/empty-repo empty-repo3
989 1031 $ hg --cwd empty-repo3 verify -q 2>&1 | grep -v warning
990 1032 [1]
991 1033
992 1034 The warnings filtered out here are talking about zero-length 'orphan' data files.
993 1035 Those are harmless, so that's fine.
994 1036
@@ -1,1296 +1,1316 b''
1 1 Prepare repo a:
2 2
3 3 $ hg init a
4 4 $ cd a
5 5 $ echo a > a
6 6 $ hg add a
7 7 $ hg commit -m test
8 8 $ echo first line > b
9 9 $ hg add b
10 10
11 11 Create a non-inlined filelog:
12 12
13 13 $ "$PYTHON" -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
14 14 $ for j in 0 1 2 3 4 5 6 7 8 9; do
15 15 > cat data1 >> b
16 16 > hg commit -m test
17 17 > done
18 18
19 19 List files in store/data (should show a 'b.d'):
20 20
21 21 #if reporevlogstore
22 22 $ for i in .hg/store/data/*; do
23 23 > echo $i
24 24 > done
25 25 .hg/store/data/a.i
26 26 .hg/store/data/b.d
27 27 .hg/store/data/b.i
28 28 #endif
29 29
30 30 Trigger branchcache creation:
31 31
32 32 $ hg branches
33 33 default 10:a7949464abda
34 34 $ ls .hg/cache
35 35 branch2-served
36 36 rbc-names-v1
37 37 rbc-revs-v1
38 38
39 39 Default operation:
40 40
41 41 $ hg clone . ../b
42 42 updating to branch default
43 43 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
44 44 $ cd ../b
45 45
46 46 Ensure branchcache got copied over:
47 47
48 48 $ ls .hg/cache
49 49 branch2-base
50 50 branch2-immutable
51 51 branch2-served
52 52 branch2-served.hidden
53 53 branch2-visible
54 54 branch2-visible-hidden
55 55 rbc-names-v1
56 56 rbc-revs-v1
57 57 tags2
58 58 tags2-served
59 59
60 60 $ cat a
61 61 a
62 62 $ hg verify -q
63 63
64 64 Invalid dest '' must abort:
65 65
66 66 $ hg clone . ''
67 67 abort: empty destination path is not valid
68 68 [10]
69 69
70 70 No update, with debug option:
71 71
72 72 #if hardlink
73 73 $ hg --debug clone -U . ../c --config progress.debug=true
74 linking: 1/15 files (6.67%)
75 linking: 2/15 files (13.33%)
76 linking: 3/15 files (20.00%)
77 linking: 4/15 files (26.67%)
78 linking: 5/15 files (33.33%)
79 linking: 6/15 files (40.00%)
80 linking: 7/15 files (46.67%)
81 linking: 8/15 files (53.33%)
82 linking: 9/15 files (60.00%)
83 linking: 10/15 files (66.67%)
84 linking: 11/15 files (73.33%)
85 linking: 12/15 files (80.00%)
86 linking: 13/15 files (86.67%)
87 linking: 14/15 files (93.33%)
88 linking: 15/15 files (100.00%)
89 linked 15 files
74 linking: 1/16 files (6.25%) (no-rust !)
75 linking: 2/16 files (12.50%) (no-rust !)
76 linking: 3/16 files (18.75%) (no-rust !)
77 linking: 4/16 files (25.00%) (no-rust !)
78 linking: 5/16 files (31.25%) (no-rust !)
79 linking: 6/16 files (37.50%) (no-rust !)
80 linking: 7/16 files (43.75%) (no-rust !)
81 linking: 8/16 files (50.00%) (no-rust !)
82 linking: 9/16 files (56.25%) (no-rust !)
83 linking: 10/16 files (62.50%) (no-rust !)
84 linking: 11/16 files (68.75%) (no-rust !)
85 linking: 12/16 files (75.00%) (no-rust !)
86 linking: 13/16 files (81.25%) (no-rust !)
87 linking: 14/16 files (87.50%) (no-rust !)
88 linking: 15/16 files (93.75%) (no-rust !)
89 linking: 16/16 files (100.00%) (no-rust !)
90 linked 16 files (no-rust !)
91 linking: 1/18 files (5.56%) (rust !)
92 linking: 2/18 files (11.11%) (rust !)
93 linking: 3/18 files (16.67%) (rust !)
94 linking: 4/18 files (22.22%) (rust !)
95 linking: 5/18 files (27.78%) (rust !)
96 linking: 6/18 files (33.33%) (rust !)
97 linking: 7/18 files (38.89%) (rust !)
98 linking: 8/18 files (44.44%) (rust !)
99 linking: 9/18 files (50.00%) (rust !)
100 linking: 10/18 files (55.56%) (rust !)
101 linking: 11/18 files (61.11%) (rust !)
102 linking: 12/18 files (66.67%) (rust !)
103 linking: 13/18 files (72.22%) (rust !)
104 linking: 14/18 files (77.78%) (rust !)
105 linking: 15/18 files (83.33%) (rust !)
106 linking: 16/18 files (88.89%) (rust !)
107 linking: 17/18 files (94.44%) (rust !)
108 linking: 18/18 files (100.00%) (rust !)
109 linked 18 files (rust !)
90 110 updating the branch cache
91 111 #else
92 112 $ hg --debug clone -U . ../c --config progress.debug=true
93 113 linking: 1 files
94 114 copying: 2 files
95 115 copying: 3 files
96 116 copying: 4 files
97 117 copying: 5 files
98 118 copying: 6 files
99 119 copying: 7 files
100 120 copying: 8 files
101 121 #endif
102 122 $ cd ../c
103 123
104 124 Ensure branchcache got copied over:
105 125
106 126 $ ls .hg/cache
107 127 branch2-base
108 128 branch2-immutable
109 129 branch2-served
110 130 branch2-served.hidden
111 131 branch2-visible
112 132 branch2-visible-hidden
113 133 rbc-names-v1
114 134 rbc-revs-v1
115 135 tags2
116 136 tags2-served
117 137
118 138 $ cat a 2>/dev/null || echo "a not present"
119 139 a not present
120 140 $ hg verify -q
121 141
122 142 Default destination:
123 143
124 144 $ mkdir ../d
125 145 $ cd ../d
126 146 $ hg clone ../a
127 147 destination directory: a
128 148 updating to branch default
129 149 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
130 150 $ cd a
131 151 $ hg cat a
132 152 a
133 153 $ cd ../..
134 154
135 155 Check that we drop the 'file:' from the path before writing the .hgrc:
136 156
137 157 $ hg clone file:a e
138 158 updating to branch default
139 159 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
140 160 $ grep 'file:' e/.hg/hgrc
141 161 [1]
142 162
143 163 Check that path aliases are expanded:
144 164
145 165 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
146 166 $ hg -R f showconfig paths.default
147 167 $TESTTMP/a#0
148 168
149 169 Use --pull:
150 170
151 171 $ hg clone --pull a g
152 172 requesting all changes
153 173 adding changesets
154 174 adding manifests
155 175 adding file changes
156 176 added 11 changesets with 11 changes to 2 files
157 177 new changesets acb14030fe0a:a7949464abda
158 178 updating to branch default
159 179 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
160 180 $ hg -R g verify -q
161 181
162 182 Invalid dest '' with --pull must abort (issue2528):
163 183
164 184 $ hg clone --pull a ''
165 185 abort: empty destination path is not valid
166 186 [10]
167 187
168 188 Clone to '.':
169 189
170 190 $ mkdir h
171 191 $ cd h
172 192 $ hg clone ../a .
173 193 updating to branch default
174 194 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
175 195 $ cd ..
176 196
177 197
178 198 *** Tests for option -u ***
179 199
180 200 Adding some more history to repo a:
181 201
182 202 $ cd a
183 203 $ hg tag ref1
184 204 $ echo the quick brown fox >a
185 205 $ hg ci -m "hacked default"
186 206 $ hg up ref1
187 207 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
188 208 $ hg branch stable
189 209 marked working directory as branch stable
190 210 (branches are permanent and global, did you want a bookmark?)
191 211 $ echo some text >a
192 212 $ hg ci -m "starting branch stable"
193 213 $ hg tag ref2
194 214 $ echo some more text >a
195 215 $ hg ci -m "another change for branch stable"
196 216 $ hg up ref2
197 217 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
198 218 $ hg parents
199 219 changeset: 13:e8ece76546a6
200 220 branch: stable
201 221 tag: ref2
202 222 parent: 10:a7949464abda
203 223 user: test
204 224 date: Thu Jan 01 00:00:00 1970 +0000
205 225 summary: starting branch stable
206 226
207 227
208 228 Repo a has two heads:
209 229
210 230 $ hg heads
211 231 changeset: 15:0aae7cf88f0d
212 232 branch: stable
213 233 tag: tip
214 234 user: test
215 235 date: Thu Jan 01 00:00:00 1970 +0000
216 236 summary: another change for branch stable
217 237
218 238 changeset: 12:f21241060d6a
219 239 user: test
220 240 date: Thu Jan 01 00:00:00 1970 +0000
221 241 summary: hacked default
222 242
223 243
224 244 $ cd ..
225 245
226 246
227 247 Testing --noupdate with --updaterev (must abort):
228 248
229 249 $ hg clone --noupdate --updaterev 1 a ua
230 250 abort: cannot specify both --noupdate and --updaterev
231 251 [10]
232 252
233 253
234 254 Testing clone -u:
235 255
236 256 $ hg clone -u . a ua
237 257 updating to branch stable
238 258 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
239 259
240 260 Repo ua has both heads:
241 261
242 262 $ hg -R ua heads
243 263 changeset: 15:0aae7cf88f0d
244 264 branch: stable
245 265 tag: tip
246 266 user: test
247 267 date: Thu Jan 01 00:00:00 1970 +0000
248 268 summary: another change for branch stable
249 269
250 270 changeset: 12:f21241060d6a
251 271 user: test
252 272 date: Thu Jan 01 00:00:00 1970 +0000
253 273 summary: hacked default
254 274
255 275
256 276 Same revision checked out in repo a and ua:
257 277
258 278 $ hg -R a parents --template "{node|short}\n"
259 279 e8ece76546a6
260 280 $ hg -R ua parents --template "{node|short}\n"
261 281 e8ece76546a6
262 282
263 283 $ rm -r ua
264 284
265 285
266 286 Testing clone --pull -u:
267 287
268 288 $ hg clone --pull -u . a ua
269 289 requesting all changes
270 290 adding changesets
271 291 adding manifests
272 292 adding file changes
273 293 added 16 changesets with 16 changes to 3 files (+1 heads)
274 294 new changesets acb14030fe0a:0aae7cf88f0d
275 295 updating to branch stable
276 296 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
277 297
278 298 Repo ua has both heads:
279 299
280 300 $ hg -R ua heads
281 301 changeset: 15:0aae7cf88f0d
282 302 branch: stable
283 303 tag: tip
284 304 user: test
285 305 date: Thu Jan 01 00:00:00 1970 +0000
286 306 summary: another change for branch stable
287 307
288 308 changeset: 12:f21241060d6a
289 309 user: test
290 310 date: Thu Jan 01 00:00:00 1970 +0000
291 311 summary: hacked default
292 312
293 313
294 314 Same revision checked out in repo a and ua:
295 315
296 316 $ hg -R a parents --template "{node|short}\n"
297 317 e8ece76546a6
298 318 $ hg -R ua parents --template "{node|short}\n"
299 319 e8ece76546a6
300 320
301 321 $ rm -r ua
302 322
303 323
304 324 Testing clone -u <branch>:
305 325
306 326 $ hg clone -u stable a ua
307 327 updating to branch stable
308 328 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
309 329
310 330 Repo ua has both heads:
311 331
312 332 $ hg -R ua heads
313 333 changeset: 15:0aae7cf88f0d
314 334 branch: stable
315 335 tag: tip
316 336 user: test
317 337 date: Thu Jan 01 00:00:00 1970 +0000
318 338 summary: another change for branch stable
319 339
320 340 changeset: 12:f21241060d6a
321 341 user: test
322 342 date: Thu Jan 01 00:00:00 1970 +0000
323 343 summary: hacked default
324 344
325 345
326 346 Branch 'stable' is checked out:
327 347
328 348 $ hg -R ua parents
329 349 changeset: 15:0aae7cf88f0d
330 350 branch: stable
331 351 tag: tip
332 352 user: test
333 353 date: Thu Jan 01 00:00:00 1970 +0000
334 354 summary: another change for branch stable
335 355
336 356
337 357 $ rm -r ua
338 358
339 359
340 360 Testing default checkout:
341 361
342 362 $ hg clone a ua
343 363 updating to branch default
344 364 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
345 365
346 366 Repo ua has both heads:
347 367
348 368 $ hg -R ua heads
349 369 changeset: 15:0aae7cf88f0d
350 370 branch: stable
351 371 tag: tip
352 372 user: test
353 373 date: Thu Jan 01 00:00:00 1970 +0000
354 374 summary: another change for branch stable
355 375
356 376 changeset: 12:f21241060d6a
357 377 user: test
358 378 date: Thu Jan 01 00:00:00 1970 +0000
359 379 summary: hacked default
360 380
361 381
362 382 Branch 'default' is checked out:
363 383
364 384 $ hg -R ua parents
365 385 changeset: 12:f21241060d6a
366 386 user: test
367 387 date: Thu Jan 01 00:00:00 1970 +0000
368 388 summary: hacked default
369 389
370 390 Test clone with a branch named "@" (issue3677)
371 391
372 392 $ hg -R ua branch @
373 393 marked working directory as branch @
374 394 $ hg -R ua commit -m 'created branch @'
375 395 $ hg clone ua atbranch
376 396 updating to branch default
377 397 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
378 398 $ hg -R atbranch heads
379 399 changeset: 16:798b6d97153e
380 400 branch: @
381 401 tag: tip
382 402 parent: 12:f21241060d6a
383 403 user: test
384 404 date: Thu Jan 01 00:00:00 1970 +0000
385 405 summary: created branch @
386 406
387 407 changeset: 15:0aae7cf88f0d
388 408 branch: stable
389 409 user: test
390 410 date: Thu Jan 01 00:00:00 1970 +0000
391 411 summary: another change for branch stable
392 412
393 413 changeset: 12:f21241060d6a
394 414 user: test
395 415 date: Thu Jan 01 00:00:00 1970 +0000
396 416 summary: hacked default
397 417
398 418 $ hg -R atbranch parents
399 419 changeset: 12:f21241060d6a
400 420 user: test
401 421 date: Thu Jan 01 00:00:00 1970 +0000
402 422 summary: hacked default
403 423
404 424
405 425 $ rm -r ua atbranch
406 426
407 427
408 428 Testing #<branch>:
409 429
410 430 $ hg clone -u . a#stable ua
411 431 adding changesets
412 432 adding manifests
413 433 adding file changes
414 434 added 14 changesets with 14 changes to 3 files
415 435 new changesets acb14030fe0a:0aae7cf88f0d
416 436 updating to branch stable
417 437 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
418 438
419 439 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
420 440
421 441 $ hg -R ua heads
422 442 changeset: 13:0aae7cf88f0d
423 443 branch: stable
424 444 tag: tip
425 445 user: test
426 446 date: Thu Jan 01 00:00:00 1970 +0000
427 447 summary: another change for branch stable
428 448
429 449 changeset: 10:a7949464abda
430 450 user: test
431 451 date: Thu Jan 01 00:00:00 1970 +0000
432 452 summary: test
433 453
434 454
435 455 Same revision checked out in repo a and ua:
436 456
437 457 $ hg -R a parents --template "{node|short}\n"
438 458 e8ece76546a6
439 459 $ hg -R ua parents --template "{node|short}\n"
440 460 e8ece76546a6
441 461
442 462 $ rm -r ua
443 463
444 464
445 465 Testing -u -r <branch>:
446 466
447 467 $ hg clone -u . -r stable a ua
448 468 adding changesets
449 469 adding manifests
450 470 adding file changes
451 471 added 14 changesets with 14 changes to 3 files
452 472 new changesets acb14030fe0a:0aae7cf88f0d
453 473 updating to branch stable
454 474 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
455 475
456 476 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
457 477
458 478 $ hg -R ua heads
459 479 changeset: 13:0aae7cf88f0d
460 480 branch: stable
461 481 tag: tip
462 482 user: test
463 483 date: Thu Jan 01 00:00:00 1970 +0000
464 484 summary: another change for branch stable
465 485
466 486 changeset: 10:a7949464abda
467 487 user: test
468 488 date: Thu Jan 01 00:00:00 1970 +0000
469 489 summary: test
470 490
471 491
472 492 Same revision checked out in repo a and ua:
473 493
474 494 $ hg -R a parents --template "{node|short}\n"
475 495 e8ece76546a6
476 496 $ hg -R ua parents --template "{node|short}\n"
477 497 e8ece76546a6
478 498
479 499 $ rm -r ua
480 500
481 501
482 502 Testing -r <branch>:
483 503
484 504 $ hg clone -r stable a ua
485 505 adding changesets
486 506 adding manifests
487 507 adding file changes
488 508 added 14 changesets with 14 changes to 3 files
489 509 new changesets acb14030fe0a:0aae7cf88f0d
490 510 updating to branch stable
491 511 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
492 512
493 513 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
494 514
495 515 $ hg -R ua heads
496 516 changeset: 13:0aae7cf88f0d
497 517 branch: stable
498 518 tag: tip
499 519 user: test
500 520 date: Thu Jan 01 00:00:00 1970 +0000
501 521 summary: another change for branch stable
502 522
503 523 changeset: 10:a7949464abda
504 524 user: test
505 525 date: Thu Jan 01 00:00:00 1970 +0000
506 526 summary: test
507 527
508 528
509 529 Branch 'stable' is checked out:
510 530
511 531 $ hg -R ua parents
512 532 changeset: 13:0aae7cf88f0d
513 533 branch: stable
514 534 tag: tip
515 535 user: test
516 536 date: Thu Jan 01 00:00:00 1970 +0000
517 537 summary: another change for branch stable
518 538
519 539
520 540 $ rm -r ua
521 541
522 542
523 543 Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
524 544 iterable in addbranchrevs()
525 545
526 546 $ cat <<EOF > simpleclone.py
527 547 > from mercurial import hg, ui as uimod
528 548 > myui = uimod.ui.load()
529 549 > repo = hg.repository(myui, b'a')
530 550 > hg.clone(myui, {}, repo, dest=b"ua")
531 551 > EOF
532 552
533 553 $ "$PYTHON" simpleclone.py
534 554 updating to branch default
535 555 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
536 556
537 557 $ rm -r ua
538 558
539 559 $ cat <<EOF > branchclone.py
540 560 > from mercurial import extensions, hg, ui as uimod
541 561 > myui = uimod.ui.load()
542 562 > extensions.loadall(myui)
543 563 > extensions.populateui(myui)
544 564 > repo = hg.repository(myui, b'a')
545 565 > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable"])
546 566 > EOF
547 567
548 568 $ "$PYTHON" branchclone.py
549 569 adding changesets
550 570 adding manifests
551 571 adding file changes
552 572 added 14 changesets with 14 changes to 3 files
553 573 new changesets acb14030fe0a:0aae7cf88f0d
554 574 updating to branch stable
555 575 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
556 576 $ rm -r ua
557 577
558 578 Local clones don't get confused by unusual experimental.evolution options
559 579
560 580 $ hg clone \
561 581 > --config experimental.evolution=allowunstable,allowdivergence,exchange \
562 582 > a ua
563 583 updating to branch default
564 584 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
565 585 $ rm -r ua
566 586
567 587 $ hg clone \
568 588 > --config experimental.evolution.createmarkers=no \
569 589 > --config experimental.evolution.allowunstable=yes \
570 590 > --config experimental.evolution.allowdivergence=yes \
571 591 > --config experimental.evolution.exchange=yes \
572 592 > a ua
573 593 updating to branch default
574 594 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
575 595 $ rm -r ua
576 596
577 597 Test clone with special '@' bookmark:
578 598 $ cd a
579 599 $ hg bookmark -r a7949464abda @ # branch point of stable from default
580 600 $ hg clone . ../i
581 601 updating to bookmark @
582 602 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
583 603 $ hg id -i ../i
584 604 a7949464abda
585 605 $ rm -r ../i
586 606
587 607 $ hg bookmark -f -r stable @
588 608 $ hg bookmarks
589 609 @ 15:0aae7cf88f0d
590 610 $ hg clone . ../i
591 611 updating to bookmark @ on branch stable
592 612 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
593 613 $ hg id -i ../i
594 614 0aae7cf88f0d
595 615 $ cd "$TESTTMP"
596 616
597 617
598 618 Testing failures:
599 619
600 620 $ mkdir fail
601 621 $ cd fail
602 622
603 623 No local source
604 624
605 625 $ hg clone a b
606 626 abort: repository a not found
607 627 [255]
608 628
609 629 Invalid URL
610 630
611 631 $ hg clone http://invalid:url/a b
612 632 abort: error: nonnumeric port: 'url'
613 633 [100]
614 634
615 635 No remote source
616 636
617 637 #if windows
618 638 $ hg clone http://$LOCALIP:3121/a b
619 639 abort: error: * (glob)
620 640 [100]
621 641 #else
622 642 $ hg clone http://$LOCALIP:3121/a b
623 643 abort: error: *refused* (glob)
624 644 [100]
625 645 #endif
626 646 $ rm -rf b # work around bug with http clone
627 647
628 648
629 649 #if unix-permissions no-root
630 650
631 651 Inaccessible source
632 652
633 653 $ mkdir a
634 654 $ chmod 000 a
635 655 $ hg clone a b
636 656 abort: $EACCES$: *$TESTTMP/fail/a/.hg* (glob)
637 657 [255]
638 658
639 659 Inaccessible destination
640 660
641 661 $ hg init b
642 662 $ cd b
643 663 $ hg clone . ../a
644 664 abort: $EACCES$: *../a* (glob)
645 665 [255]
646 666 $ cd ..
647 667 $ chmod 700 a
648 668 $ rm -r a b
649 669
650 670 #endif
651 671
652 672
653 673 #if fifo
654 674
655 675 Source of wrong type
656 676
657 677 $ mkfifo a
658 678 $ hg clone a b
659 679 abort: $ENOTDIR$: *$TESTTMP/fail/a/.hg* (glob)
660 680 [255]
661 681 $ rm a
662 682
663 683 #endif
664 684
665 685 Default destination, same directory
666 686
667 687 $ hg init q
668 688 $ hg clone q
669 689 destination directory: q
670 690 abort: destination 'q' is not empty
671 691 [10]
672 692
673 693 destination directory not empty
674 694
675 695 $ mkdir a
676 696 $ echo stuff > a/a
677 697 $ hg clone q a
678 698 abort: destination 'a' is not empty
679 699 [10]
680 700
681 701
682 702 #if unix-permissions no-root
683 703
684 704 leave existing directory in place after clone failure
685 705
686 706 $ hg init c
687 707 $ cd c
688 708 $ echo c > c
689 709 $ hg commit -A -m test
690 710 adding c
691 711 $ chmod -rx .hg/store/data
692 712 $ cd ..
693 713 $ mkdir d
694 714 $ hg clone c d 2> err
695 715 [255]
696 716 $ test -d d
697 717 $ test -d d/.hg
698 718 [1]
699 719
700 720 re-enable perm to allow deletion
701 721
702 722 $ chmod +rx c/.hg/store/data
703 723
704 724 #endif
705 725
706 726 $ cd ..
707 727
708 728 Test clone from the repository in (emulated) revlog format 0 (issue4203):
709 729
710 730 $ mkdir issue4203
711 731 $ mkdir -p src/.hg
712 732 $ echo foo > src/foo
713 733 $ hg -R src add src/foo
714 734 $ hg -R src commit -m '#0'
715 735 $ hg -R src log -q
716 736 0:e1bab28bca43
717 737 $ hg -R src debugrevlog -c | grep -E 'format|flags'
718 738 format : 0
719 739 flags : (none)
720 740 $ hg root -R src -T json | sed 's|\\\\|\\|g'
721 741 [
722 742 {
723 743 "hgpath": "$TESTTMP/src/.hg",
724 744 "reporoot": "$TESTTMP/src",
725 745 "storepath": "$TESTTMP/src/.hg"
726 746 }
727 747 ]
728 748 $ hg clone -U -q src dst
729 749 $ hg -R dst log -q
730 750 0:e1bab28bca43
731 751
732 752 Create repositories to test auto sharing functionality
733 753
734 754 $ cat >> $HGRCPATH << EOF
735 755 > [extensions]
736 756 > share=
737 757 > EOF
738 758
739 759 $ hg init empty
740 760 $ hg init source1a
741 761 $ cd source1a
742 762 $ echo initial1 > foo
743 763 $ hg -q commit -A -m initial
744 764 $ echo second > foo
745 765 $ hg commit -m second
746 766 $ cd ..
747 767
748 768 $ hg init filteredrev0
749 769 $ cd filteredrev0
750 770 $ cat >> .hg/hgrc << EOF
751 771 > [experimental]
752 772 > evolution.createmarkers=True
753 773 > EOF
754 774 $ echo initial1 > foo
755 775 $ hg -q commit -A -m initial0
756 776 $ hg -q up -r null
757 777 $ echo initial2 > foo
758 778 $ hg -q commit -A -m initial1
759 779 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
760 780 1 new obsolescence markers
761 781 obsoleted 1 changesets
762 782 $ cd ..
763 783
764 784 $ hg -q clone --pull source1a source1b
765 785 $ cd source1a
766 786 $ hg bookmark bookA
767 787 $ echo 1a > foo
768 788 $ hg commit -m 1a
769 789 $ cd ../source1b
770 790 $ hg -q up -r 0
771 791 $ echo head1 > foo
772 792 $ hg commit -m head1
773 793 created new head
774 794 $ hg bookmark head1
775 795 $ hg -q up -r 0
776 796 $ echo head2 > foo
777 797 $ hg commit -m head2
778 798 created new head
779 799 $ hg bookmark head2
780 800 $ hg -q up -r 0
781 801 $ hg branch branch1
782 802 marked working directory as branch branch1
783 803 (branches are permanent and global, did you want a bookmark?)
784 804 $ echo branch1 > foo
785 805 $ hg commit -m branch1
786 806 $ hg -q up -r 0
787 807 $ hg branch branch2
788 808 marked working directory as branch branch2
789 809 $ echo branch2 > foo
790 810 $ hg commit -m branch2
791 811 $ cd ..
792 812 $ hg init source2
793 813 $ cd source2
794 814 $ echo initial2 > foo
795 815 $ hg -q commit -A -m initial2
796 816 $ echo second > foo
797 817 $ hg commit -m second
798 818 $ cd ..
799 819
800 820 Clone with auto share from an empty repo should not result in share
801 821
802 822 $ mkdir share
803 823 $ hg --config share.pool=share clone empty share-empty
804 824 (not using pooled storage: remote appears to be empty)
805 825 updating to branch default
806 826 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
807 827 $ ls share
808 828 $ test -d share-empty/.hg/store
809 829 $ test -f share-empty/.hg/sharedpath
810 830 [1]
811 831
812 832 Clone with auto share from a repo with filtered revision 0 should not result in share
813 833
814 834 $ hg --config share.pool=share clone filteredrev0 share-filtered
815 835 (not using pooled storage: unable to resolve identity of remote)
816 836 requesting all changes
817 837 adding changesets
818 838 adding manifests
819 839 adding file changes
820 840 added 1 changesets with 1 changes to 1 files
821 841 new changesets e082c1832e09
822 842 updating to branch default
823 843 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
824 844
825 845 Clone from repo with content should result in shared store being created
826 846
827 847 $ hg --config share.pool=share clone source1a share-dest1a
828 848 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
829 849 requesting all changes
830 850 adding changesets
831 851 adding manifests
832 852 adding file changes
833 853 added 3 changesets with 3 changes to 1 files
834 854 new changesets b5f04eac9d8f:e5bfe23c0b47
835 855 searching for changes
836 856 no changes found
837 857 adding remote bookmark bookA
838 858 updating working directory
839 859 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
840 860
841 861 The shared repo should have been created
842 862
843 863 $ ls share
844 864 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
845 865
846 866 The destination should point to it
847 867
848 868 $ cat share-dest1a/.hg/sharedpath; echo
849 869 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
850 870
851 871 The destination should have bookmarks
852 872
853 873 $ hg -R share-dest1a bookmarks
854 874 bookA 2:e5bfe23c0b47
855 875
856 876 The default path should be the remote, not the share
857 877
858 878 $ hg -R share-dest1a config paths.default
859 879 $TESTTMP/source1a
860 880
861 881 Clone with existing share dir should result in pull + share
862 882
863 883 $ hg --config share.pool=share clone source1b share-dest1b
864 884 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
865 885 searching for changes
866 886 adding changesets
867 887 adding manifests
868 888 adding file changes
869 889 adding remote bookmark head1
870 890 adding remote bookmark head2
871 891 added 4 changesets with 4 changes to 1 files (+4 heads)
872 892 new changesets 4a8dc1ab4c13:6bacf4683960
873 893 updating working directory
874 894 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
875 895
876 896 $ ls share
877 897 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
878 898
879 899 $ cat share-dest1b/.hg/sharedpath; echo
880 900 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
881 901
882 902 We only get bookmarks from the remote, not everything in the share
883 903
884 904 $ hg -R share-dest1b bookmarks
885 905 head1 3:4a8dc1ab4c13
886 906 head2 4:99f71071f117
887 907
888 908 Default path should be source, not share.
889 909
890 910 $ hg -R share-dest1b config paths.default
891 911 $TESTTMP/source1b
892 912
893 913 Checked out revision should be head of default branch
894 914
895 915 $ hg -R share-dest1b log -r .
896 916 changeset: 4:99f71071f117
897 917 bookmark: head2
898 918 parent: 0:b5f04eac9d8f
899 919 user: test
900 920 date: Thu Jan 01 00:00:00 1970 +0000
901 921 summary: head2
902 922
903 923
904 924 Clone from unrelated repo should result in new share
905 925
906 926 $ hg --config share.pool=share clone source2 share-dest2
907 927 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
908 928 requesting all changes
909 929 adding changesets
910 930 adding manifests
911 931 adding file changes
912 932 added 2 changesets with 2 changes to 1 files
913 933 new changesets 22aeff664783:63cf6c3dba4a
914 934 searching for changes
915 935 no changes found
916 936 updating working directory
917 937 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
918 938
919 939 $ ls share
920 940 22aeff664783fd44c6d9b435618173c118c3448e
921 941 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
922 942
923 943 remote naming mode works as advertised
924 944
925 945 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
926 946 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
927 947 requesting all changes
928 948 adding changesets
929 949 adding manifests
930 950 adding file changes
931 951 added 3 changesets with 3 changes to 1 files
932 952 new changesets b5f04eac9d8f:e5bfe23c0b47
933 953 searching for changes
934 954 no changes found
935 955 adding remote bookmark bookA
936 956 updating working directory
937 957 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
938 958
939 959 $ ls shareremote
940 960 195bb1fcdb595c14a6c13e0269129ed78f6debde
941 961
942 962 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
943 963 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
944 964 requesting all changes
945 965 adding changesets
946 966 adding manifests
947 967 adding file changes
948 968 added 6 changesets with 6 changes to 1 files (+4 heads)
949 969 new changesets b5f04eac9d8f:6bacf4683960
950 970 searching for changes
951 971 no changes found
952 972 adding remote bookmark head1
953 973 adding remote bookmark head2
954 974 updating working directory
955 975 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
956 976
957 977 $ ls shareremote
958 978 195bb1fcdb595c14a6c13e0269129ed78f6debde
959 979 c0d4f83847ca2a873741feb7048a45085fd47c46
960 980
961 981 request to clone a single revision is respected in sharing mode
962 982
963 983 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
964 984 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
965 985 adding changesets
966 986 adding manifests
967 987 adding file changes
968 988 added 2 changesets with 2 changes to 1 files
969 989 new changesets b5f04eac9d8f:4a8dc1ab4c13
970 990 no changes found
971 991 adding remote bookmark head1
972 992 updating working directory
973 993 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
974 994
975 995 $ hg -R share-1arev log -G
976 996 @ changeset: 1:4a8dc1ab4c13
977 997 | bookmark: head1
978 998 | tag: tip
979 999 | user: test
980 1000 | date: Thu Jan 01 00:00:00 1970 +0000
981 1001 | summary: head1
982 1002 |
983 1003 o changeset: 0:b5f04eac9d8f
984 1004 user: test
985 1005 date: Thu Jan 01 00:00:00 1970 +0000
986 1006 summary: initial
987 1007
988 1008
989 1009 making another clone should only pull down requested rev
990 1010
991 1011 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
992 1012 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
993 1013 searching for changes
994 1014 adding changesets
995 1015 adding manifests
996 1016 adding file changes
997 1017 adding remote bookmark head1
998 1018 adding remote bookmark head2
999 1019 added 1 changesets with 1 changes to 1 files (+1 heads)
1000 1020 new changesets 99f71071f117
1001 1021 updating working directory
1002 1022 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1003 1023
1004 1024 $ hg -R share-1brev log -G
1005 1025 @ changeset: 2:99f71071f117
1006 1026 | bookmark: head2
1007 1027 | tag: tip
1008 1028 | parent: 0:b5f04eac9d8f
1009 1029 | user: test
1010 1030 | date: Thu Jan 01 00:00:00 1970 +0000
1011 1031 | summary: head2
1012 1032 |
1013 1033 | o changeset: 1:4a8dc1ab4c13
1014 1034 |/ bookmark: head1
1015 1035 | user: test
1016 1036 | date: Thu Jan 01 00:00:00 1970 +0000
1017 1037 | summary: head1
1018 1038 |
1019 1039 o changeset: 0:b5f04eac9d8f
1020 1040 user: test
1021 1041 date: Thu Jan 01 00:00:00 1970 +0000
1022 1042 summary: initial
1023 1043
1024 1044
1025 1045 Request to clone a single branch is respected in sharing mode
1026 1046
1027 1047 $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
1028 1048 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1029 1049 adding changesets
1030 1050 adding manifests
1031 1051 adding file changes
1032 1052 added 2 changesets with 2 changes to 1 files
1033 1053 new changesets b5f04eac9d8f:5f92a6c1a1b1
1034 1054 no changes found
1035 1055 updating working directory
1036 1056 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1037 1057
1038 1058 $ hg -R share-1bbranch1 log -G
1039 1059 o changeset: 1:5f92a6c1a1b1
1040 1060 | branch: branch1
1041 1061 | tag: tip
1042 1062 | user: test
1043 1063 | date: Thu Jan 01 00:00:00 1970 +0000
1044 1064 | summary: branch1
1045 1065 |
1046 1066 @ changeset: 0:b5f04eac9d8f
1047 1067 user: test
1048 1068 date: Thu Jan 01 00:00:00 1970 +0000
1049 1069 summary: initial
1050 1070
1051 1071
1052 1072 $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
1053 1073 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1054 1074 searching for changes
1055 1075 adding changesets
1056 1076 adding manifests
1057 1077 adding file changes
1058 1078 added 1 changesets with 1 changes to 1 files (+1 heads)
1059 1079 new changesets 6bacf4683960
1060 1080 updating working directory
1061 1081 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1062 1082
1063 1083 $ hg -R share-1bbranch2 log -G
1064 1084 o changeset: 2:6bacf4683960
1065 1085 | branch: branch2
1066 1086 | tag: tip
1067 1087 | parent: 0:b5f04eac9d8f
1068 1088 | user: test
1069 1089 | date: Thu Jan 01 00:00:00 1970 +0000
1070 1090 | summary: branch2
1071 1091 |
1072 1092 | o changeset: 1:5f92a6c1a1b1
1073 1093 |/ branch: branch1
1074 1094 | user: test
1075 1095 | date: Thu Jan 01 00:00:00 1970 +0000
1076 1096 | summary: branch1
1077 1097 |
1078 1098 @ changeset: 0:b5f04eac9d8f
1079 1099 user: test
1080 1100 date: Thu Jan 01 00:00:00 1970 +0000
1081 1101 summary: initial
1082 1102
1083 1103
1084 1104 -U is respected in share clone mode
1085 1105
1086 1106 $ hg --config share.pool=share clone -U source1a share-1anowc
1087 1107 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1088 1108 searching for changes
1089 1109 no changes found
1090 1110 adding remote bookmark bookA
1091 1111
1092 1112 $ ls -A share-1anowc
1093 1113 .hg
1094 1114
1095 1115 Test that auto sharing doesn't cause failure of "hg clone local remote"
1096 1116
1097 1117 $ cd $TESTTMP
1098 1118 $ hg -R a id -r 0
1099 1119 acb14030fe0a
1100 1120 $ hg id -R remote -r 0
1101 1121 abort: repository remote not found
1102 1122 [255]
1103 1123 $ hg --config share.pool=share -q clone a ssh://user@dummy/remote
1104 1124 $ hg -R remote id -r 0
1105 1125 acb14030fe0a
1106 1126
1107 1127 Cloning into pooled storage doesn't race (issue5104)
1108 1128
1109 1129 $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
1110 1130 $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
1111 1131 $ wait
1112 1132
1113 1133 $ hg -R share-destrace1 log -r tip
1114 1134 changeset: 2:e5bfe23c0b47
1115 1135 bookmark: bookA
1116 1136 tag: tip
1117 1137 user: test
1118 1138 date: Thu Jan 01 00:00:00 1970 +0000
1119 1139 summary: 1a
1120 1140
1121 1141
1122 1142 $ hg -R share-destrace2 log -r tip
1123 1143 changeset: 2:e5bfe23c0b47
1124 1144 bookmark: bookA
1125 1145 tag: tip
1126 1146 user: test
1127 1147 date: Thu Jan 01 00:00:00 1970 +0000
1128 1148 summary: 1a
1129 1149
1130 1150 One repo should be new, the other should be shared from the pool. We
1131 1151 don't care which is which, so we just make sure we always print the
1132 1152 one containing "new pooled" first, then the one containing "existing
1133 1153 pooled".
1134 1154
1135 1155 $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1136 1156 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1137 1157 requesting all changes
1138 1158 adding changesets
1139 1159 adding manifests
1140 1160 adding file changes
1141 1161 added 3 changesets with 3 changes to 1 files
1142 1162 new changesets b5f04eac9d8f:e5bfe23c0b47
1143 1163 searching for changes
1144 1164 no changes found
1145 1165 adding remote bookmark bookA
1146 1166 updating working directory
1147 1167 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1148 1168
1149 1169 $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1150 1170 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1151 1171 searching for changes
1152 1172 no changes found
1153 1173 adding remote bookmark bookA
1154 1174 updating working directory
1155 1175 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1156 1176
1157 1177 SEC: check for unsafe ssh url
1158 1178
1159 1179 $ cat >> $HGRCPATH << EOF
1160 1180 > [ui]
1161 1181 > ssh = sh -c "read l; read l; read l"
1162 1182 > EOF
1163 1183
1164 1184 $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
1165 1185 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1166 1186 [255]
1167 1187 $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
1168 1188 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1169 1189 [255]
1170 1190 $ hg clone 'ssh://fakehost|touch%20owned/path'
1171 1191 abort: no suitable response from remote hg
1172 1192 [255]
1173 1193 $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
1174 1194 abort: no suitable response from remote hg
1175 1195 [255]
1176 1196
1177 1197 $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
1178 1198 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
1179 1199 [255]
1180 1200
1181 1201 #if windows
1182 1202 $ hg clone "ssh://%26touch%20owned%20/" --debug
1183 1203 running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
1184 1204 sending hello command
1185 1205 sending between command
1186 1206 abort: no suitable response from remote hg
1187 1207 [255]
1188 1208 $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
1189 1209 running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
1190 1210 sending hello command
1191 1211 sending between command
1192 1212 abort: no suitable response from remote hg
1193 1213 [255]
1194 1214 #else
1195 1215 $ hg clone "ssh://%3btouch%20owned%20/" --debug
1196 1216 running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
1197 1217 sending hello command
1198 1218 sending between command
1199 1219 abort: no suitable response from remote hg
1200 1220 [255]
1201 1221 $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
1202 1222 running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
1203 1223 sending hello command
1204 1224 sending between command
1205 1225 abort: no suitable response from remote hg
1206 1226 [255]
1207 1227 #endif
1208 1228
1209 1229 $ hg clone "ssh://v-alid.example.com/" --debug
1210 1230 running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
1211 1231 sending hello command
1212 1232 sending between command
1213 1233 abort: no suitable response from remote hg
1214 1234 [255]
1215 1235
1216 1236 We should not have created a file named owned - if it exists, the
1217 1237 attack succeeded.
1218 1238 $ if test -f owned; then echo 'you got owned'; fi
1219 1239
1220 1240 Cloning without fsmonitor enabled does not print a warning for small repos
1221 1241
1222 1242 $ hg clone a fsmonitor-default
1223 1243 updating to bookmark @ on branch stable
1224 1244 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1225 1245
1226 1246 Lower the warning threshold to simulate a large repo
1227 1247
1228 1248 $ cat >> $HGRCPATH << EOF
1229 1249 > [fsmonitor]
1230 1250 > warn_update_file_count = 2
1231 1251 > warn_update_file_count_rust = 2
1232 1252 > EOF
1233 1253
1234 1254 We should see a warning about no fsmonitor on supported platforms
1235 1255
1236 1256 #if linuxormacos no-fsmonitor
1237 1257 $ hg clone a nofsmonitor
1238 1258 updating to bookmark @ on branch stable
1239 1259 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1240 1260 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1241 1261 #else
1242 1262 $ hg clone a nofsmonitor
1243 1263 updating to bookmark @ on branch stable
1244 1264 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1245 1265 #endif
1246 1266
1247 1267 We should not see warning about fsmonitor when it is enabled
1248 1268
1249 1269 #if fsmonitor
1250 1270 $ hg clone a fsmonitor-enabled
1251 1271 updating to bookmark @ on branch stable
1252 1272 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1253 1273 #endif
1254 1274
1255 1275 We can disable the fsmonitor warning
1256 1276
1257 1277 $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
1258 1278 updating to bookmark @ on branch stable
1259 1279 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1260 1280
1261 1281 Loaded fsmonitor but disabled in config should still print warning
1262 1282
1263 1283 #if linuxormacos fsmonitor
1264 1284 $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
1265 1285 updating to bookmark @ on branch stable
1266 1286 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
1267 1287 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1268 1288 #endif
1269 1289
1270 1290 Warning not printed if working directory isn't empty
1271 1291
1272 1292 $ hg -q clone a fsmonitor-update
1273 1293 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
1274 1294 $ cd fsmonitor-update
1275 1295 $ hg up acb14030fe0a
1276 1296 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
1277 1297 (leaving bookmark @)
1278 1298 $ hg up cf0fe1914066
1279 1299 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1280 1300
1281 1301 `hg update` from null revision also prints
1282 1302
1283 1303 $ hg up null
1284 1304 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1285 1305
1286 1306 #if linuxormacos no-fsmonitor
1287 1307 $ hg up cf0fe1914066
1288 1308 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1289 1309 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1290 1310 #else
1291 1311 $ hg up cf0fe1914066
1292 1312 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1293 1313 #endif
1294 1314
1295 1315 $ cd ..
1296 1316
@@ -1,852 +1,869 b''
1 1 #require no-reposimplestore no-chg
2 2
3 3 Set up a server
4 4
5 5 $ hg init server
6 6 $ cd server
7 7 $ cat >> .hg/hgrc << EOF
8 8 > [extensions]
9 9 > clonebundles =
10 10 > EOF
11 11
12 12 $ touch foo
13 13 $ hg -q commit -A -m 'add foo'
14 14 $ touch bar
15 15 $ hg -q commit -A -m 'add bar'
16 16
17 17 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
18 18 $ cat hg.pid >> $DAEMON_PIDS
19 19 $ cd ..
20 20
21 21 Missing manifest should not result in server lookup
22 22
23 23 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
24 24 requesting all changes
25 25 adding changesets
26 26 adding manifests
27 27 adding file changes
28 28 added 2 changesets with 2 changes to 2 files
29 29 new changesets 53245c60e682:aaff8d2ffbbf
30 30 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
31 31
32 32 $ cat server/access.log
33 33 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
34 34 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
35 35 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
36 36
37 37 Empty manifest file results in retrieval
38 38 (the extension only checks if the manifest file exists)
39 39
40 40 $ touch server/.hg/clonebundles.manifest
41 41 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
42 42 no clone bundles available on remote; falling back to regular clone
43 43 requesting all changes
44 44 adding changesets
45 45 adding manifests
46 46 adding file changes
47 47 added 2 changesets with 2 changes to 2 files
48 48 new changesets 53245c60e682:aaff8d2ffbbf
49 49 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
50 50
51 51 Manifest file with invalid URL aborts
52 52
53 53 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
54 54 $ hg clone http://localhost:$HGPORT 404-url
55 55 applying clone bundle from http://does.not.exist/bundle.hg
56 56 error fetching bundle: (.* not known|(\[Errno -?\d+] )?([Nn]o address associated with (host)?name|Temporary failure in name resolution|Name does not resolve)) (re) (no-windows !)
57 57 error fetching bundle: [Errno 1100*] getaddrinfo failed (glob) (windows !)
58 58 abort: error applying bundle
59 59 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
60 60 [255]
61 61
62 62 Manifest file with URL with unknown scheme skips the URL
63 63 $ echo 'weirdscheme://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
64 64 $ hg clone http://localhost:$HGPORT unknown-scheme
65 65 no compatible clone bundles available on server; falling back to regular clone
66 66 (you may want to report this to the server operator)
67 67 requesting all changes
68 68 adding changesets
69 69 adding manifests
70 70 adding file changes
71 71 added 2 changesets with 2 changes to 2 files
72 72 new changesets 53245c60e682:aaff8d2ffbbf
73 73 updating to branch default
74 74 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
75 75
76 76 Server is not running aborts
77 77
78 78 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
79 79 $ hg clone http://localhost:$HGPORT server-not-runner
80 80 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
81 81 error fetching bundle: (.* refused.*|Protocol not supported|(.* )?\$EADDRNOTAVAIL\$|.* No route to host) (re)
82 82 abort: error applying bundle
83 83 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
84 84 [255]
85 85
86 86 Server returns 404
87 87
88 88 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
89 89 $ cat http.pid >> $DAEMON_PIDS
90 90 $ hg clone http://localhost:$HGPORT running-404
91 91 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
92 92 HTTP error fetching bundle: HTTP Error 404: File not found
93 93 abort: error applying bundle
94 94 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
95 95 [255]
96 96
97 97 We can override failure to fall back to regular clone
98 98
99 99 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
100 100 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
101 101 HTTP error fetching bundle: HTTP Error 404: File not found
102 102 falling back to normal clone
103 103 requesting all changes
104 104 adding changesets
105 105 adding manifests
106 106 adding file changes
107 107 added 2 changesets with 2 changes to 2 files
108 108 new changesets 53245c60e682:aaff8d2ffbbf
109 109
110 110 Bundle with partial content works
111 111
112 112 $ hg -R server bundle --type gzip-v1 --base null -r 53245c60e682 partial.hg
113 113 1 changesets found
114 114
115 115 We verify exact bundle content as an extra check against accidental future
116 116 changes. If this output changes, we could break old clients.
117 117
118 118 $ f --size --hexdump partial.hg
119 119 partial.hg: size=207
120 120 0000: 48 47 31 30 47 5a 78 9c 63 60 60 98 17 ac 12 93 |HG10GZx.c``.....|
121 121 0010: f0 ac a9 23 45 70 cb bf 0d 5f 59 4e 4a 7f 79 21 |...#Ep..._YNJ.y!|
122 122 0020: 9b cc 40 24 20 a0 d7 ce 2c d1 38 25 cd 24 25 d5 |..@$ ...,.8%.$%.|
123 123 0030: d8 c2 22 cd 38 d9 24 cd 22 d5 c8 22 cd 24 cd 32 |..".8.$."..".$.2|
124 124 0040: d1 c2 d0 c4 c8 d2 32 d1 38 39 29 c9 34 cd d4 80 |......2.89).4...|
125 125 0050: ab 24 b5 b8 84 cb 40 c1 80 2b 2d 3f 9f 8b 2b 31 |.$....@..+-?..+1|
126 126 0060: 25 45 01 c8 80 9a d2 9b 65 fb e5 9e 45 bf 8d 7f |%E......e...E...|
127 127 0070: 9f c6 97 9f 2b 44 34 67 d9 ec 8e 0f a0 92 0b 75 |....+D4g.......u|
128 128 0080: 41 d6 24 59 18 a4 a4 9a a6 18 1a 5b 98 9b 5a 98 |A.$Y.......[..Z.|
129 129 0090: 9a 18 26 9b a6 19 98 1a 99 99 26 a6 18 9a 98 24 |..&.......&....$|
130 130 00a0: 26 59 a6 25 5a 98 a5 18 a6 24 71 41 35 b1 43 dc |&Y.%Z....$qA5.C.|
131 131 00b0: 16 b2 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a |.....E..V....R..|
132 132 00c0: 78 ed fc d5 76 f1 36 35 dc 05 00 36 ed 5e c7 |x...v.65...6.^.|
133 133
134 134 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
135 135 $ hg clone -U http://localhost:$HGPORT partial-bundle
136 136 applying clone bundle from http://localhost:$HGPORT1/partial.hg
137 137 adding changesets
138 138 adding manifests
139 139 adding file changes
140 140 added 1 changesets with 1 changes to 1 files
141 141 finished applying clone bundle
142 142 searching for changes
143 143 adding changesets
144 144 adding manifests
145 145 adding file changes
146 146 added 1 changesets with 1 changes to 1 files
147 147 new changesets aaff8d2ffbbf
148 148 1 local changesets published
149 149
150 150 Incremental pull doesn't fetch bundle
151 151
152 152 $ hg clone -r 53245c60e682 -U http://localhost:$HGPORT partial-clone
153 153 adding changesets
154 154 adding manifests
155 155 adding file changes
156 156 added 1 changesets with 1 changes to 1 files
157 157 new changesets 53245c60e682
158 158
159 159 $ cd partial-clone
160 160 $ hg pull
161 161 pulling from http://localhost:$HGPORT/
162 162 searching for changes
163 163 adding changesets
164 164 adding manifests
165 165 adding file changes
166 166 added 1 changesets with 1 changes to 1 files
167 167 new changesets aaff8d2ffbbf
168 168 (run 'hg update' to get a working copy)
169 169 $ cd ..
170 170
171 171 Bundle with full content works
172 172
173 173 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
174 174 2 changesets found
175 175
176 176 Again, we perform an extra check against bundle content changes. If this content
177 177 changes, clone bundles produced by new Mercurial versions may not be readable
178 178 by old clients.
179 179
180 180 $ f --size --hexdump full.hg
181 181 full.hg: size=442
182 182 0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
183 183 0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 d0 e4 76 f6 70 |ion=GZx.c``..v.p|
184 184 0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 46 76 26 4e |.swu....`..FFv&N|
185 185 0030: c6 b2 d4 a2 e2 cc fc 3c 03 a3 bc a4 e4 8c c4 bc |.......<........|
186 186 0040: f4 d4 62 23 06 06 e6 19 40 f9 4d c1 2a 31 09 cf |..b#....@.M.*1..|
187 187 0050: 9a 3a 52 04 b7 fc db f0 95 e5 a4 f4 97 17 b2 c9 |.:R.............|
188 188 0060: 0c 14 00 02 e6 d9 99 25 1a a7 a4 99 a4 a4 1a 5b |.......%.......[|
189 189 0070: 58 a4 19 27 9b a4 59 a4 1a 59 a4 99 a4 59 26 5a |X..'..Y..Y...Y&Z|
190 190 0080: 18 9a 18 59 5a 26 1a 27 27 25 99 a6 99 1a 70 95 |...YZ&.''%....p.|
191 191 0090: a4 16 97 70 19 28 18 70 a5 e5 e7 73 71 25 a6 a4 |...p.(.p...sq%..|
192 192 00a0: 28 00 19 20 17 af fa df ab ff 7b 3f fb 92 dc 8b |(.. ......{?....|
193 193 00b0: 1f 62 bb 9e b7 d7 d9 87 3d 5a 44 89 2f b0 99 87 |.b......=ZD./...|
194 194 00c0: ec e2 54 63 43 e3 b4 64 43 73 23 33 43 53 0b 63 |..TcC..dCs#3CS.c|
195 195 00d0: d3 14 23 03 a0 fb 2c 2c 0c d3 80 1e 30 49 49 b1 |..#...,,....0II.|
196 196 00e0: 4c 4a 32 48 33 30 b0 34 42 b8 38 29 b1 08 e2 62 |LJ2H30.4B.8)...b|
197 197 00f0: 20 03 6a ca c2 2c db 2f f7 2c fa 6d fc fb 34 be | .j..,./.,.m..4.|
198 198 0100: fc 5c 21 a2 39 cb 66 77 7c 00 0d c3 59 17 14 58 |.\!.9.fw|...Y..X|
199 199 0110: 49 16 06 29 a9 a6 29 86 c6 16 e6 a6 16 a6 26 86 |I..)..).......&.|
200 200 0120: c9 a6 69 06 a6 46 66 a6 89 29 86 26 26 89 49 96 |..i..Ff..).&&.I.|
201 201 0130: 69 89 16 66 29 86 29 49 5c 20 07 3e 16 fe 23 ae |i..f).)I\ .>..#.|
202 202 0140: 26 da 1c ab 10 1f d1 f8 e3 b3 ef cd dd fc 0c 93 |&...............|
203 203 0150: 88 75 34 36 75 04 82 55 17 14 36 a4 38 10 04 d8 |.u46u..U..6.8...|
204 204 0160: 21 01 9a b1 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 |!......E..V....R|
205 205 0170: d7 8a 78 ed fc d5 76 f1 36 25 81 89 c7 ad ec 90 |..x...v.6%......|
206 206 0180: 54 47 75 2b 89 48 b1 b2 62 c9 89 c9 19 a9 56 45 |TGu+.H..b.....VE|
207 207 0190: a9 65 ba 49 45 89 79 c9 19 ba 60 01 a0 14 23 58 |.e.IE.y...`...#X|
208 208 01a0: 81 35 c8 7d 40 cc 04 e2 a4 a4 a6 25 96 e6 94 60 |.5.}@......%...`|
209 209 01b0: 33 17 5f 54 00 00 d3 1b 0d 4c |3._T.....L|
210 210
211 211 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
212 212 $ hg clone -U http://localhost:$HGPORT full-bundle
213 213 applying clone bundle from http://localhost:$HGPORT1/full.hg
214 214 adding changesets
215 215 adding manifests
216 216 adding file changes
217 217 added 2 changesets with 2 changes to 2 files
218 218 finished applying clone bundle
219 219 searching for changes
220 220 no changes found
221 221 2 local changesets published
222 222
223 223 Feature works over SSH
224 224
225 225 $ hg clone -U ssh://user@dummy/server ssh-full-clone
226 226 applying clone bundle from http://localhost:$HGPORT1/full.hg
227 227 adding changesets
228 228 adding manifests
229 229 adding file changes
230 230 added 2 changesets with 2 changes to 2 files
231 231 finished applying clone bundle
232 232 searching for changes
233 233 no changes found
234 234 2 local changesets published
235 235
236 236 Inline bundle
237 237 =============
238 238
239 239 Checking bundle retrieved over the wireprotocol
240 240
241 241 Feature works over SSH with inline bundle
242 242 -----------------------------------------
243 243
244 244 $ mkdir server/.hg/bundle-cache/
245 245 $ cp full.hg server/.hg/bundle-cache/
246 246 $ echo "peer-bundle-cache://full.hg" > server/.hg/clonebundles.manifest
247 247 $ hg clone -U ssh://user@dummy/server ssh-inline-clone
248 248 applying clone bundle from peer-bundle-cache://full.hg
249 249 adding changesets
250 250 adding manifests
251 251 adding file changes
252 252 added 2 changesets with 2 changes to 2 files
253 253 finished applying clone bundle
254 254 searching for changes
255 255 no changes found
256 256 2 local changesets published
257 257
258 258 HTTP Supports
259 259 -------------
260 260
261 261 $ hg clone -U http://localhost:$HGPORT http-inline-clone
262 262 applying clone bundle from peer-bundle-cache://full.hg
263 263 adding changesets
264 264 adding manifests
265 265 adding file changes
266 266 added 2 changesets with 2 changes to 2 files
267 267 finished applying clone bundle
268 268 searching for changes
269 269 no changes found
270 270 2 local changesets published
271 271
272 272
273 273 Check local behavior
274 274 --------------------
275 275
276 276 We don't use the clone bundle, but we do not crash either.
277 277
278 278 $ hg clone -U ./server local-inline-clone-default
279 279 $ hg clone -U ./server local-inline-clone-pull --pull
280 280 requesting all changes
281 281 adding changesets
282 282 adding manifests
283 283 adding file changes
284 284 added 2 changesets with 2 changes to 2 files
285 285 new changesets 53245c60e682:aaff8d2ffbbf
286 286
287 287 Pre-transmit Hook
288 288 -----------------
289 289
290 290 Hooks work with inline bundle
291 291
292 292 $ cp server/.hg/hgrc server/.hg/hgrc-beforeinlinehooks
293 293 $ echo "[hooks]" >> server/.hg/hgrc
294 294 $ echo "pretransmit-inline-clone-bundle=echo foo" >> server/.hg/hgrc
295 295 $ hg clone -U ssh://user@dummy/server ssh-inline-clone-hook
296 296 applying clone bundle from peer-bundle-cache://full.hg
297 297 remote: foo
298 298 adding changesets
299 299 adding manifests
300 300 adding file changes
301 301 added 2 changesets with 2 changes to 2 files
302 302 finished applying clone bundle
303 303 searching for changes
304 304 no changes found
305 305 2 local changesets published
306 306
307 307 Hooks can make an inline bundle fail
308 308
309 309 $ cp server/.hg/hgrc-beforeinlinehooks server/.hg/hgrc
310 310 $ echo "[hooks]" >> server/.hg/hgrc
311 311 $ echo "pretransmit-inline-clone-bundle=echo bar && false" >> server/.hg/hgrc
312 312 $ hg clone -U ssh://user@dummy/server ssh-inline-clone-hook-fail
313 313 applying clone bundle from peer-bundle-cache://full.hg
314 314 remote: bar
315 315 remote: abort: pretransmit-inline-clone-bundle hook exited with status 1
316 316 abort: stream ended unexpectedly (got 0 bytes, expected 1)
317 317 [255]
318 318 $ cp server/.hg/hgrc-beforeinlinehooks server/.hg/hgrc
319 319
320 320 Other tests
321 321 ===========
322 322
323 323 Entry with unknown BUNDLESPEC is filtered and not used
324 324
325 325 $ cat > server/.hg/clonebundles.manifest << EOF
326 326 > http://bad.entry1 BUNDLESPEC=UNKNOWN
327 327 > http://bad.entry2 BUNDLESPEC=xz-v1
328 328 > http://bad.entry3 BUNDLESPEC=none-v100
329 329 > http://localhost:$HGPORT1/full.hg BUNDLESPEC=gzip-v2
330 330 > EOF
331 331
332 332 $ hg clone -U http://localhost:$HGPORT filter-unknown-type
333 333 applying clone bundle from http://localhost:$HGPORT1/full.hg
334 334 adding changesets
335 335 adding manifests
336 336 adding file changes
337 337 added 2 changesets with 2 changes to 2 files
338 338 finished applying clone bundle
339 339 searching for changes
340 340 no changes found
341 341 2 local changesets published
342 342
343 343 Automatic fallback when all entries are filtered
344 344
345 345 $ cat > server/.hg/clonebundles.manifest << EOF
346 346 > http://bad.entry BUNDLESPEC=UNKNOWN
347 347 > EOF
348 348
349 349 $ hg clone -U http://localhost:$HGPORT filter-all
350 350 no compatible clone bundles available on server; falling back to regular clone
351 351 (you may want to report this to the server operator)
352 352 requesting all changes
353 353 adding changesets
354 354 adding manifests
355 355 adding file changes
356 356 added 2 changesets with 2 changes to 2 files
357 357 new changesets 53245c60e682:aaff8d2ffbbf
358 358
359 359 We require a Python version that supports SNI. Therefore, URLs requiring SNI
360 360 are not filtered.
361 361
362 362 $ cp full.hg sni.hg
363 363 $ cat > server/.hg/clonebundles.manifest << EOF
364 364 > http://localhost:$HGPORT1/sni.hg REQUIRESNI=true
365 365 > http://localhost:$HGPORT1/full.hg
366 366 > EOF
367 367
368 368 $ hg clone -U http://localhost:$HGPORT sni-supported
369 369 applying clone bundle from http://localhost:$HGPORT1/sni.hg
370 370 adding changesets
371 371 adding manifests
372 372 adding file changes
373 373 added 2 changesets with 2 changes to 2 files
374 374 finished applying clone bundle
375 375 searching for changes
376 376 no changes found
377 377 2 local changesets published
378 378
379 379 Stream clone bundles are supported
380 380
381 381 $ hg -R server debugcreatestreamclonebundle packed.hg
382 writing 613 bytes for 4 files
382 writing 613 bytes for 5 files (no-rust !)
383 writing 739 bytes for 7 files (rust !)
383 384 bundle requirements: generaldelta, revlogv1, sparserevlog (no-rust no-zstd !)
384 385 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog (no-rust zstd !)
385 386 bundle requirements: generaldelta, revlog-compression-zstd, revlogv1, sparserevlog (rust !)
386 387
387 388 No bundle spec should work
388 389
389 390 $ cat > server/.hg/clonebundles.manifest << EOF
390 391 > http://localhost:$HGPORT1/packed.hg
391 392 > EOF
392 393
393 394 $ hg clone -U http://localhost:$HGPORT stream-clone-no-spec
394 395 applying clone bundle from http://localhost:$HGPORT1/packed.hg
395 4 files to transfer, 613 bytes of data
396 transferred 613 bytes in *.* seconds (*) (glob)
396 5 files to transfer, 613 bytes of data (no-rust !)
397 transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
398 7 files to transfer, 739 bytes of data (rust !)
399 transferred 739 bytes in *.* seconds (*) (glob) (rust !)
397 400 finished applying clone bundle
398 401 searching for changes
399 402 no changes found
400 403
401 404 Bundle spec without parameters should work
402 405
403 406 $ cat > server/.hg/clonebundles.manifest << EOF
404 407 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
405 408 > EOF
406 409
407 410 $ hg clone -U http://localhost:$HGPORT stream-clone-vanilla-spec
408 411 applying clone bundle from http://localhost:$HGPORT1/packed.hg
409 4 files to transfer, 613 bytes of data
410 transferred 613 bytes in *.* seconds (*) (glob)
412 5 files to transfer, 613 bytes of data (no-rust !)
413 transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
414 7 files to transfer, 739 bytes of data (rust !)
415 transferred 739 bytes in *.* seconds (*) (glob) (rust !)
411 416 finished applying clone bundle
412 417 searching for changes
413 418 no changes found
414 419
415 420 Bundle spec with format requirements should work
416 421
417 422 $ cat > server/.hg/clonebundles.manifest << EOF
418 423 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
419 424 > EOF
420 425
421 426 $ hg clone -U http://localhost:$HGPORT stream-clone-supported-requirements
422 427 applying clone bundle from http://localhost:$HGPORT1/packed.hg
423 4 files to transfer, 613 bytes of data
424 transferred 613 bytes in *.* seconds (*) (glob)
428 5 files to transfer, 613 bytes of data (no-rust !)
429 transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
430 7 files to transfer, 739 bytes of data (rust !)
431 transferred 739 bytes in *.* seconds (*) (glob) (rust !)
425 432 finished applying clone bundle
426 433 searching for changes
427 434 no changes found
428 435
429 436 Stream bundle spec with unknown requirements should be filtered out
430 437
431 438 $ cat > server/.hg/clonebundles.manifest << EOF
432 439 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
433 440 > EOF
434 441
435 442 $ hg clone -U http://localhost:$HGPORT stream-clone-unsupported-requirements
436 443 no compatible clone bundles available on server; falling back to regular clone
437 444 (you may want to report this to the server operator)
438 445 requesting all changes
439 446 adding changesets
440 447 adding manifests
441 448 adding file changes
442 449 added 2 changesets with 2 changes to 2 files
443 450 new changesets 53245c60e682:aaff8d2ffbbf
444 451
445 452 Set up manifest for testing preferences
446 453 (Remember, the TYPE does not have to match reality - the URL is
447 454 important)
448 455
449 456 $ cp full.hg gz-a.hg
450 457 $ cp full.hg gz-b.hg
451 458 $ cp full.hg bz2-a.hg
452 459 $ cp full.hg bz2-b.hg
453 460 $ cat > server/.hg/clonebundles.manifest << EOF
454 461 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 extra=a
455 462 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2 extra=a
456 463 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
457 464 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
458 465 > EOF
459 466
460 467 Preferring an undefined attribute will take first entry
461 468
462 469 $ hg --config ui.clonebundleprefers=foo=bar clone -U http://localhost:$HGPORT prefer-foo
463 470 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
464 471 adding changesets
465 472 adding manifests
466 473 adding file changes
467 474 added 2 changesets with 2 changes to 2 files
468 475 finished applying clone bundle
469 476 searching for changes
470 477 no changes found
471 478 2 local changesets published
472 479
473 480 Preferring bz2 type will download first entry of that type
474 481
475 482 $ hg --config ui.clonebundleprefers=COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-bz
476 483 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
477 484 adding changesets
478 485 adding manifests
479 486 adding file changes
480 487 added 2 changesets with 2 changes to 2 files
481 488 finished applying clone bundle
482 489 searching for changes
483 490 no changes found
484 491 2 local changesets published
485 492
486 493 Preferring multiple values of an option works
487 494
488 495 $ hg --config ui.clonebundleprefers=COMPRESSION=unknown,COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-multiple-bz
489 496 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
490 497 adding changesets
491 498 adding manifests
492 499 adding file changes
493 500 added 2 changesets with 2 changes to 2 files
494 501 finished applying clone bundle
495 502 searching for changes
496 503 no changes found
497 504 2 local changesets published
498 505
499 506 Sorting multiple values should get us back to original first entry
500 507
501 508 $ hg --config ui.clonebundleprefers=BUNDLESPEC=unknown,BUNDLESPEC=gzip-v2,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-multiple-gz
502 509 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
503 510 adding changesets
504 511 adding manifests
505 512 adding file changes
506 513 added 2 changesets with 2 changes to 2 files
507 514 finished applying clone bundle
508 515 searching for changes
509 516 no changes found
510 517 2 local changesets published
511 518
512 519 Preferring multiple attributes has correct order
513 520
514 521 $ hg --config ui.clonebundleprefers=extra=b,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-separate-attributes
515 522 applying clone bundle from http://localhost:$HGPORT1/bz2-b.hg
516 523 adding changesets
517 524 adding manifests
518 525 adding file changes
519 526 added 2 changesets with 2 changes to 2 files
520 527 finished applying clone bundle
521 528 searching for changes
522 529 no changes found
523 530 2 local changesets published
524 531
525 532 Test where attribute is missing from some entries
526 533
527 534 $ cat > server/.hg/clonebundles.manifest << EOF
528 535 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
529 536 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2
530 537 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
531 538 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
532 539 > EOF
533 540
534 541 $ hg --config ui.clonebundleprefers=extra=b clone -U http://localhost:$HGPORT prefer-partially-defined-attribute
535 542 applying clone bundle from http://localhost:$HGPORT1/gz-b.hg
536 543 adding changesets
537 544 adding manifests
538 545 adding file changes
539 546 added 2 changesets with 2 changes to 2 files
540 547 finished applying clone bundle
541 548 searching for changes
542 549 no changes found
543 550 2 local changesets published
544 551
545 552 Test a bad attribute list
546 553
547 554 $ hg --config ui.clonebundleprefers=bad clone -U http://localhost:$HGPORT bad-input
548 555 abort: invalid ui.clonebundleprefers item: bad
549 556 (each comma separated item should be key=value pairs)
550 557 [255]
551 558 $ hg --config ui.clonebundleprefers=key=val,bad,key2=val2 clone \
552 559 > -U http://localhost:$HGPORT bad-input
553 560 abort: invalid ui.clonebundleprefers item: bad
554 561 (each comma separated item should be key=value pairs)
555 562 [255]
556 563
557 564
558 565 Test interaction between clone bundles and --stream
559 566
560 567 A manifest with just a gzip bundle
561 568
562 569 $ cat > server/.hg/clonebundles.manifest << EOF
563 570 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
564 571 > EOF
565 572
566 573 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip
567 574 no compatible clone bundles available on server; falling back to regular clone
568 575 (you may want to report this to the server operator)
569 576 streaming all changes
570 9 files to transfer, 816 bytes of data
571 transferred 816 bytes in * seconds (*) (glob)
577 10 files to transfer, 816 bytes of data (no-rust !)
578 transferred 816 bytes in * seconds (*) (glob) (no-rust !)
579 12 files to transfer, 942 bytes of data (rust !)
580 transferred 942 bytes in *.* seconds (*) (glob) (rust !)
572 581
573 582 A manifest with a stream clone but no BUNDLESPEC
574 583
575 584 $ cat > server/.hg/clonebundles.manifest << EOF
576 585 > http://localhost:$HGPORT1/packed.hg
577 586 > EOF
578 587
579 588 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-no-bundlespec
580 589 no compatible clone bundles available on server; falling back to regular clone
581 590 (you may want to report this to the server operator)
582 591 streaming all changes
583 9 files to transfer, 816 bytes of data
584 transferred 816 bytes in * seconds (*) (glob)
592 10 files to transfer, 816 bytes of data (no-rust !)
593 transferred 816 bytes in * seconds (*) (glob) (no-rust !)
594 12 files to transfer, 942 bytes of data (rust !)
595 transferred 942 bytes in *.* seconds (*) (glob) (rust !)
585 596
586 597 A manifest with a gzip bundle and a stream clone
587 598
588 599 $ cat > server/.hg/clonebundles.manifest << EOF
589 600 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
590 601 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
591 602 > EOF
592 603
593 604 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed
594 605 applying clone bundle from http://localhost:$HGPORT1/packed.hg
595 4 files to transfer, 613 bytes of data
596 transferred 613 bytes in * seconds (*) (glob)
606 5 files to transfer, 613 bytes of data (no-rust !)
607 transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
608 7 files to transfer, 739 bytes of data (rust !)
609 transferred 739 bytes in *.* seconds (*) (glob) (rust !)
597 610 finished applying clone bundle
598 611 searching for changes
599 612 no changes found
600 613
601 614 A manifest with a gzip bundle and stream clone with supported requirements
602 615
603 616 $ cat > server/.hg/clonebundles.manifest << EOF
604 617 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
605 618 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
606 619 > EOF
607 620
608 621 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-requirements
609 622 applying clone bundle from http://localhost:$HGPORT1/packed.hg
610 4 files to transfer, 613 bytes of data
611 transferred 613 bytes in * seconds (*) (glob)
623 5 files to transfer, 613 bytes of data (no-rust !)
624 transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
625 7 files to transfer, 739 bytes of data (rust !)
626 transferred 739 bytes in *.* seconds (*) (glob) (rust !)
612 627 finished applying clone bundle
613 628 searching for changes
614 629 no changes found
615 630
616 631 A manifest with a gzip bundle and a stream clone with unsupported requirements
617 632
618 633 $ cat > server/.hg/clonebundles.manifest << EOF
619 634 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
620 635 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
621 636 > EOF
622 637
623 638 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-unsupported-requirements
624 639 no compatible clone bundles available on server; falling back to regular clone
625 640 (you may want to report this to the server operator)
626 641 streaming all changes
627 9 files to transfer, 816 bytes of data
628 transferred 816 bytes in * seconds (*) (glob)
642 10 files to transfer, 816 bytes of data (no-rust !)
643 transferred 816 bytes in * seconds (*) (glob) (no-rust !)
644 12 files to transfer, 942 bytes of data (rust !)
645 transferred 942 bytes in *.* seconds (*) (glob) (rust !)
629 646
630 647 Test clone bundle retrieved through bundle2
631 648
632 649 $ cat << EOF >> $HGRCPATH
633 650 > [extensions]
634 651 > largefiles=
635 652 > EOF
636 653 $ killdaemons.py
637 654 $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
638 655 $ cat hg.pid >> $DAEMON_PIDS
639 656
640 657 $ hg -R server debuglfput gz-a.hg
641 658 1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
642 659
643 660 $ cat > server/.hg/clonebundles.manifest << EOF
644 661 > largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae BUNDLESPEC=gzip-v2
645 662 > EOF
646 663
647 664 $ hg clone -U http://localhost:$HGPORT largefile-provided --traceback
648 665 applying clone bundle from largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
649 666 adding changesets
650 667 adding manifests
651 668 adding file changes
652 669 added 2 changesets with 2 changes to 2 files
653 670 finished applying clone bundle
654 671 searching for changes
655 672 no changes found
656 673 2 local changesets published
657 674 $ killdaemons.py
658 675
659 676 A manifest with a gzip bundle requiring too much memory for a 16MB system and working
660 677 on a 32MB system.
661 678
662 679 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
663 680 $ cat http.pid >> $DAEMON_PIDS
664 681 $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
665 682 $ cat hg.pid >> $DAEMON_PIDS
666 683
667 684 $ cat > server/.hg/clonebundles.manifest << EOF
668 685 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 REQUIREDRAM=12MB
669 686 > EOF
670 687
671 688 $ hg clone -U --debug --config ui.available-memory=16MB http://localhost:$HGPORT gzip-too-large
672 689 using http://localhost:$HGPORT/
673 690 sending capabilities command
674 691 sending clonebundles_manifest command
675 692 filtering http://localhost:$HGPORT1/gz-a.hg as it needs more than 2/3 of system memory
676 693 no compatible clone bundles available on server; falling back to regular clone
677 694 (you may want to report this to the server operator)
678 695 query 1; heads
679 696 sending batch command
680 697 requesting all changes
681 698 sending getbundle command
682 699 bundle2-input-bundle: with-transaction
683 700 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
684 701 adding changesets
685 702 add changeset 53245c60e682
686 703 add changeset aaff8d2ffbbf
687 704 adding manifests
688 705 adding file changes
689 706 adding bar revisions
690 707 adding foo revisions
691 708 bundle2-input-part: total payload size 936
692 709 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
693 710 bundle2-input-part: "phase-heads" supported
694 711 bundle2-input-part: total payload size 24
695 712 bundle2-input-bundle: 3 parts total
696 713 checking for updated bookmarks
697 714 updating the branch cache
698 715 added 2 changesets with 2 changes to 2 files
699 716 new changesets 53245c60e682:aaff8d2ffbbf
700 717 calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles
701 718 updating the branch cache
702 719 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
703 720
704 721 $ hg clone -U --debug --config ui.available-memory=32MB http://localhost:$HGPORT gzip-too-large2
705 722 using http://localhost:$HGPORT/
706 723 sending capabilities command
707 724 sending clonebundles_manifest command
708 725 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
709 726 bundle2-input-bundle: 1 params with-transaction
710 727 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
711 728 adding changesets
712 729 add changeset 53245c60e682
713 730 add changeset aaff8d2ffbbf
714 731 adding manifests
715 732 adding file changes
716 733 adding bar revisions
717 734 adding foo revisions
718 735 bundle2-input-part: total payload size 920
719 736 bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
720 737 bundle2-input-part: total payload size 59
721 738 bundle2-input-bundle: 2 parts total
722 739 updating the branch cache
723 740 added 2 changesets with 2 changes to 2 files
724 741 finished applying clone bundle
725 742 query 1; heads
726 743 sending batch command
727 744 searching for changes
728 745 all remote heads known locally
729 746 no changes found
730 747 sending getbundle command
731 748 bundle2-input-bundle: with-transaction
732 749 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
733 750 bundle2-input-part: "phase-heads" supported
734 751 bundle2-input-part: total payload size 24
735 752 bundle2-input-bundle: 2 parts total
736 753 checking for updated bookmarks
737 754 2 local changesets published
738 755 calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles
739 756 updating the branch cache
740 757 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
741 758 $ killdaemons.py
742 759
743 760 Testing a clone bundles that involves revlog splitting (issue6811)
744 761 ==================================================================
745 762
746 763 $ cat >> $HGRCPATH << EOF
747 764 > [format]
748 765 > revlog-compression=none
749 766 > use-persistent-nodemap=no
750 767 > EOF
751 768
752 769 $ hg init server-revlog-split/
753 770 $ cd server-revlog-split
754 771 $ cat >> .hg/hgrc << EOF
755 772 > [extensions]
756 773 > clonebundles =
757 774 > EOF
758 775 $ echo foo > A
759 776 $ hg add A
760 777 $ hg commit -m 'initial commit'
761 778 IMPORTANT: the revlogs must not be split
762 779 $ ls -1 .hg/store/00manifest.*
763 780 .hg/store/00manifest.i
764 781 $ ls -1 .hg/store/data/_a.*
765 782 .hg/store/data/_a.i
766 783
767 784 do big enough update to split the revlogs
768 785
769 786 $ $TESTDIR/seq.py 100000 > A
770 787 $ mkdir foo
771 788 $ cd foo
772 789 $ touch `$TESTDIR/seq.py 10000`
773 790 $ cd ..
774 791 $ hg add -q foo
775 792 $ hg commit -m 'split the manifest and one filelog'
776 793
777 794 IMPORTANT: now the revlogs must be split
778 795 $ ls -1 .hg/store/00manifest.*
779 796 .hg/store/00manifest.d
780 797 .hg/store/00manifest.i
781 798 $ ls -1 .hg/store/data/_a.*
782 799 .hg/store/data/_a.d
783 800 .hg/store/data/_a.i
784 801
785 802 Add an extra commit on top of that
786 803
787 804 $ echo foo >> A
788 805 $ hg commit -m 'one extra commit'
789 806
790 807 $ cd ..
791 808
792 809 Do a bundle that contains the split, but not the update
793 810
794 811 $ hg bundle --exact --rev '::(default~1)' -R server-revlog-split/ --type gzip-v2 split-test.hg
795 812 2 changesets found
796 813
797 814 $ cat > server-revlog-split/.hg/clonebundles.manifest << EOF
798 815 > http://localhost:$HGPORT1/split-test.hg BUNDLESPEC=gzip-v2
799 816 > EOF
800 817
801 818 start the necessary server
802 819
803 820 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
804 821 $ cat http.pid >> $DAEMON_PIDS
805 822 $ hg -R server-revlog-split serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
806 823 $ cat hg.pid >> $DAEMON_PIDS
807 824
808 825 Check that clone works fine
809 826 ===========================
810 827
811 828 Here, the initial clone will trigger a revlog split (which is a bit clowny it
812 829 itself, but whatever). The split revlogs will see additionnal data added to
813 830 them in the subsequent pull. This should not be a problem
814 831
815 832 $ hg clone http://localhost:$HGPORT revlog-split-in-the-bundle
816 833 applying clone bundle from http://localhost:$HGPORT1/split-test.hg
817 834 adding changesets
818 835 adding manifests
819 836 adding file changes
820 837 added 2 changesets with 10002 changes to 10001 files
821 838 finished applying clone bundle
822 839 searching for changes
823 840 adding changesets
824 841 adding manifests
825 842 adding file changes
826 843 added 1 changesets with 1 changes to 1 files
827 844 new changesets e3879eaa1db7
828 845 2 local changesets published
829 846 updating to branch default
830 847 10001 files updated, 0 files merged, 0 files removed, 0 files unresolved
831 848
832 849 check the results
833 850
834 851 $ cd revlog-split-in-the-bundle
835 852 $ f --size .hg/store/00manifest.*
836 853 .hg/store/00manifest.d: size=499037
837 854 .hg/store/00manifest.i: size=192
838 855 $ f --size .hg/store/data/_a.*
839 856 .hg/store/data/_a.d: size=588917
840 857 .hg/store/data/_a.i: size=192
841 858
842 859 manifest should work
843 860
844 861 $ hg files -r tip | wc -l
845 862 \s*10001 (re)
846 863
847 864 file content should work
848 865
849 866 $ hg cat -r tip A | wc -l
850 867 \s*100001 (re)
851 868
852 869
@@ -1,39 +1,39 b''
1 1 #require rust
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [format]
5 5 > use-dirstate-v2=1
6 6 > [storage]
7 7 > dirstate-v2.slow-path=allow
8 8 > EOF
9 9
10 10 $ hg init t
11 11 $ cd t
12 12
13 13 $ for i in 1 2 3 4 5 6 7 8 9 10; do touch foobar$i; done
14 14 $ hg add .
15 15 adding foobar1
16 16 adding foobar10
17 17 adding foobar2
18 18 adding foobar3
19 19 adding foobar4
20 20 adding foobar5
21 21 adding foobar6
22 22 adding foobar7
23 23 adding foobar8
24 24 adding foobar9
25 25 $ hg commit -m "1"
26 26
27 27 Check that there's no space leak on debugrebuilddirstate
28 28
29 29 $ f --size .hg/dirstate*
30 30 .hg/dirstate: size=133
31 .hg/dirstate.b870a51b: size=511
32 $ hg debugrebuilddirstate
33 $ f --size .hg/dirstate*
34 .hg/dirstate: size=133
35 31 .hg/dirstate.88698448: size=511
36 32 $ hg debugrebuilddirstate
37 33 $ f --size .hg/dirstate*
38 34 .hg/dirstate: size=133
39 35 .hg/dirstate.6b8ab34b: size=511
36 $ hg debugrebuilddirstate
37 $ f --size .hg/dirstate*
38 .hg/dirstate: size=133
39 .hg/dirstate.b875dfc5: size=511
@@ -1,818 +1,818 b''
1 1 $ cat << EOF >> $HGRCPATH
2 2 > [ui]
3 3 > interactive=yes
4 4 > EOF
5 5
6 6 $ hg init debugrevlog
7 7 $ cd debugrevlog
8 8 $ echo a > a
9 9 $ hg ci -Am adda
10 10 adding a
11 11 $ hg rm .
12 12 removing a
13 13 $ hg ci -Am make-it-empty
14 14 $ hg revert --all -r 0
15 15 adding a
16 16 $ hg ci -Am make-it-full
17 17 #if reporevlogstore
18 18 $ hg debugrevlog -c
19 19 format : 1
20 flags : inline
20 flags : (none)
21 21
22 22 revisions : 3
23 23 merges : 0 ( 0.00%)
24 24 normal : 3 (100.00%)
25 25 revisions : 3
26 26 empty : 0 ( 0.00%)
27 27 text : 0 (100.00%)
28 28 delta : 0 (100.00%)
29 29 snapshot : 3 (100.00%)
30 30 lvl-0 : 3 (100.00%)
31 31 deltas : 0 ( 0.00%)
32 32 revision size : 191
33 33 snapshot : 191 (100.00%)
34 34 lvl-0 : 191 (100.00%)
35 35 deltas : 0 ( 0.00%)
36 36
37 37 chunks : 3
38 38 0x75 (u) : 3 (100.00%)
39 39 chunks size : 191
40 40 0x75 (u) : 191 (100.00%)
41 41
42 42
43 43 total-stored-content: 188 bytes
44 44
45 45 avg chain length : 0
46 46 max chain length : 0
47 47 max chain reach : 67
48 48 compression ratio : 0
49 49
50 50 uncompressed data size (min/max/avg) : 57 / 66 / 62
51 51 full revision size (min/max/avg) : 58 / 67 / 63
52 52 inter-snapshot size (min/max/avg) : 0 / 0 / 0
53 53 delta size (min/max/avg) : 0 / 0 / 0
54 54 $ hg debugrevlog -m
55 55 format : 1
56 56 flags : inline, generaldelta
57 57
58 58 revisions : 3
59 59 merges : 0 ( 0.00%)
60 60 normal : 3 (100.00%)
61 61 revisions : 3
62 62 empty : 1 (33.33%)
63 63 text : 1 (100.00%)
64 64 delta : 0 ( 0.00%)
65 65 snapshot : 2 (66.67%)
66 66 lvl-0 : 2 (66.67%)
67 67 deltas : 0 ( 0.00%)
68 68 revision size : 88
69 69 snapshot : 88 (100.00%)
70 70 lvl-0 : 88 (100.00%)
71 71 deltas : 0 ( 0.00%)
72 72
73 73 chunks : 3
74 74 empty : 1 (33.33%)
75 75 0x75 (u) : 2 (66.67%)
76 76 chunks size : 88
77 77 empty : 0 ( 0.00%)
78 78 0x75 (u) : 88 (100.00%)
79 79
80 80
81 81 total-stored-content: 86 bytes
82 82
83 83 avg chain length : 0
84 84 max chain length : 0
85 85 max chain reach : 44
86 86 compression ratio : 0
87 87
88 88 uncompressed data size (min/max/avg) : 0 / 43 / 28
89 89 full revision size (min/max/avg) : 44 / 44 / 44
90 90 inter-snapshot size (min/max/avg) : 0 / 0 / 0
91 91 delta size (min/max/avg) : 0 / 0 / 0
92 92 $ hg debugrevlog a
93 93 format : 1
94 94 flags : inline, generaldelta
95 95
96 96 revisions : 1
97 97 merges : 0 ( 0.00%)
98 98 normal : 1 (100.00%)
99 99 revisions : 1
100 100 empty : 0 ( 0.00%)
101 101 text : 0 (100.00%)
102 102 delta : 0 (100.00%)
103 103 snapshot : 1 (100.00%)
104 104 lvl-0 : 1 (100.00%)
105 105 deltas : 0 ( 0.00%)
106 106 revision size : 3
107 107 snapshot : 3 (100.00%)
108 108 lvl-0 : 3 (100.00%)
109 109 deltas : 0 ( 0.00%)
110 110
111 111 chunks : 1
112 112 0x75 (u) : 1 (100.00%)
113 113 chunks size : 3
114 114 0x75 (u) : 3 (100.00%)
115 115
116 116
117 117 total-stored-content: 2 bytes
118 118
119 119 avg chain length : 0
120 120 max chain length : 0
121 121 max chain reach : 3
122 122 compression ratio : 0
123 123
124 124 uncompressed data size (min/max/avg) : 2 / 2 / 2
125 125 full revision size (min/max/avg) : 3 / 3 / 3
126 126 inter-snapshot size (min/max/avg) : 0 / 0 / 0
127 127 delta size (min/max/avg) : 0 / 0 / 0
128 128 #endif
129 129
130 130 Test debugindex, with and without the --verbose/--debug flag
131 131 $ hg debugrevlogindex a
132 132 rev linkrev nodeid p1 p2
133 133 0 0 b789fdd96dc2 000000000000 000000000000
134 134
135 135 #if no-reposimplestore
136 136 $ hg --verbose debugrevlogindex a
137 137 rev offset length linkrev nodeid p1 p2
138 138 0 0 3 0 b789fdd96dc2 000000000000 000000000000
139 139
140 140 $ hg --debug debugrevlogindex a
141 141 rev offset length linkrev nodeid p1 p2
142 142 0 0 3 0 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
143 143 #endif
144 144
145 145 $ hg debugrevlogindex -f 1 a
146 146 rev flag size link p1 p2 nodeid
147 147 0 0000 2 0 -1 -1 b789fdd96dc2
148 148
149 149 #if no-reposimplestore
150 150 $ hg --verbose debugrevlogindex -f 1 a
151 151 rev flag offset length size link p1 p2 nodeid
152 152 0 0000 0 3 2 0 -1 -1 b789fdd96dc2
153 153
154 154 $ hg --debug debugrevlogindex -f 1 a
155 155 rev flag offset length size link p1 p2 nodeid
156 156 0 0000 0 3 2 0 -1 -1 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
157 157 #endif
158 158
159 159 $ hg debugindex -c
160 160 rev linkrev nodeid p1-nodeid p2-nodeid
161 161 0 0 07f494440405 000000000000 000000000000
162 162 1 1 8cccb4b5fec2 07f494440405 000000000000
163 163 2 2 b1e228c512c5 8cccb4b5fec2 000000000000
164 164 $ hg debugindex -c --debug
165 165 rev rank linkrev nodeid p1-rev p1-nodeid p2-rev p2-nodeid full-size delta-base flags comp-mode data-offset chunk-size sd-comp-mode sidedata-offset sd-chunk-size
166 166 0 -1 0 07f4944404050f47db2e5c5071e0e84e7a27bba9 -1 0000000000000000000000000000000000000000 -1 0000000000000000000000000000000000000000 57 0 0 2 0 58 inline 0 0
167 167 1 -1 1 8cccb4b5fec20cafeb99dd01c26d4dee8ea4388a 0 07f4944404050f47db2e5c5071e0e84e7a27bba9 -1 0000000000000000000000000000000000000000 66 1 0 2 58 67 inline 0 0
168 168 2 -1 2 b1e228c512c5d7066d70562ed839c3323a62d6d2 1 8cccb4b5fec20cafeb99dd01c26d4dee8ea4388a -1 0000000000000000000000000000000000000000 65 2 0 2 125 66 inline 0 0
169 169 $ hg debugindex -m
170 170 rev linkrev nodeid p1-nodeid p2-nodeid
171 171 0 0 a0c8bcbbb45c 000000000000 000000000000
172 172 1 1 57faf8a737ae a0c8bcbbb45c 000000000000
173 173 2 2 a35b10320954 57faf8a737ae 000000000000
174 174 $ hg debugindex -m --debug
175 175 rev rank linkrev nodeid p1-rev p1-nodeid p2-rev p2-nodeid full-size delta-base flags comp-mode data-offset chunk-size sd-comp-mode sidedata-offset sd-chunk-size
176 176 0 -1 0 a0c8bcbbb45c63b90b70ad007bf38961f64f2af0 -1 0000000000000000000000000000000000000000 -1 0000000000000000000000000000000000000000 43 0 0 2 0 44 inline 0 0
177 177 1 -1 1 57faf8a737ae7faf490582941a82319ba6529dca 0 a0c8bcbbb45c63b90b70ad007bf38961f64f2af0 -1 0000000000000000000000000000000000000000 0 1 0 2 44 0 inline 0 0
178 178 2 -1 2 a35b103209548032201c16c7688cb2657f037a38 1 57faf8a737ae7faf490582941a82319ba6529dca -1 0000000000000000000000000000000000000000 43 2 0 2 44 44 inline 0 0
179 179 $ hg debugindex a
180 180 rev linkrev nodeid p1-nodeid p2-nodeid
181 181 0 0 b789fdd96dc2 000000000000 000000000000
182 182 $ hg debugindex --debug a
183 183 rev rank linkrev nodeid p1-rev p1-nodeid p2-rev p2-nodeid full-size delta-base flags comp-mode data-offset chunk-size sd-comp-mode sidedata-offset sd-chunk-size
184 184 0 -1 0 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 -1 0000000000000000000000000000000000000000 -1 0000000000000000000000000000000000000000 2 0 0 2 0 3 inline 0 0
185 185
186 186 debugdelta chain basic output
187 187
188 188 #if reporevlogstore pure
189 189 $ hg debugindexstats
190 190 abort: debugindexstats only works with native code
191 191 [255]
192 192 #endif
193 193 #if reporevlogstore no-pure
194 194 $ hg debugindexstats
195 195 node trie capacity: 4
196 196 node trie count: 2
197 197 node trie depth: 1
198 198 node trie last rev scanned: -1 (no-rust !)
199 199 node trie last rev scanned: 3 (rust !)
200 200 node trie lookups: 4 (no-rust !)
201 201 node trie lookups: 2 (rust !)
202 202 node trie misses: 1
203 203 node trie splits: 1
204 204 revs in memory: 3
205 205 #endif
206 206
207 207 #if reporevlogstore no-pure
208 208 $ hg debugdeltachain -m --all-info
209 209 rev p1 p2 chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
210 210 0 -1 -1 1 1 -1 base 44 43 44 1.02326 44 0 0.00000 44 44 1.00000 1
211 211 1 0 -1 2 1 -1 base 0 0 0 0.00000 0 0 0.00000 0 0 1.00000 1
212 212 2 1 -1 3 1 -1 base 44 43 44 1.02326 44 0 0.00000 44 44 1.00000 1
213 213
214 214 $ hg debugdeltachain -m -T '{rev} {chainid} {chainlen}\n'
215 215 0 1 1
216 216 1 2 1
217 217 2 3 1
218 218
219 219 $ hg debugdeltachain -m -Tjson --size-info
220 220 [
221 221 {
222 222 "chainid": 1,
223 223 "chainlen": 1,
224 224 "chainratio": 1.0232558139534884,
225 225 "chainsize": 44,
226 226 "compsize": 44,
227 227 "deltatype": "base",
228 228 "p1": -1,
229 229 "p2": -1,
230 230 "prevrev": -1,
231 231 "rev": 0,
232 232 "uncompsize": 43
233 233 },
234 234 {
235 235 "chainid": 2,
236 236 "chainlen": 1,
237 237 "chainratio": 0,
238 238 "chainsize": 0,
239 239 "compsize": 0,
240 240 "deltatype": "base",
241 241 "p1": 0,
242 242 "p2": -1,
243 243 "prevrev": -1,
244 244 "rev": 1,
245 245 "uncompsize": 0
246 246 },
247 247 {
248 248 "chainid": 3,
249 249 "chainlen": 1,
250 250 "chainratio": 1.0232558139534884,
251 251 "chainsize": 44,
252 252 "compsize": 44,
253 253 "deltatype": "base",
254 254 "p1": 1,
255 255 "p2": -1,
256 256 "prevrev": -1,
257 257 "rev": 2,
258 258 "uncompsize": 43
259 259 }
260 260 ]
261 261
262 262 $ hg debugdeltachain -m -Tjson --all-info
263 263 [
264 264 {
265 265 "chainid": 1,
266 266 "chainlen": 1,
267 267 "chainratio": 1.0232558139534884,
268 268 "chainsize": 44,
269 269 "compsize": 44,
270 270 "deltatype": "base",
271 271 "extradist": 0,
272 272 "extraratio": 0.0,
273 273 "largestblock": 44,
274 274 "lindist": 44,
275 275 "p1": -1,
276 276 "p2": -1,
277 277 "prevrev": -1,
278 278 "readdensity": 1.0,
279 279 "readsize": 44,
280 280 "rev": 0,
281 281 "srchunks": 1,
282 282 "uncompsize": 43
283 283 },
284 284 {
285 285 "chainid": 2,
286 286 "chainlen": 1,
287 287 "chainratio": 0,
288 288 "chainsize": 0,
289 289 "compsize": 0,
290 290 "deltatype": "base",
291 291 "extradist": 0,
292 292 "extraratio": 0,
293 293 "largestblock": 0,
294 294 "lindist": 0,
295 295 "p1": 0,
296 296 "p2": -1,
297 297 "prevrev": -1,
298 298 "readdensity": 1,
299 299 "readsize": 0,
300 300 "rev": 1,
301 301 "srchunks": 1,
302 302 "uncompsize": 0
303 303 },
304 304 {
305 305 "chainid": 3,
306 306 "chainlen": 1,
307 307 "chainratio": 1.0232558139534884,
308 308 "chainsize": 44,
309 309 "compsize": 44,
310 310 "deltatype": "base",
311 311 "extradist": 0,
312 312 "extraratio": 0.0,
313 313 "largestblock": 44,
314 314 "lindist": 44,
315 315 "p1": 1,
316 316 "p2": -1,
317 317 "prevrev": -1,
318 318 "readdensity": 1.0,
319 319 "readsize": 44,
320 320 "rev": 2,
321 321 "srchunks": 1,
322 322 "uncompsize": 43
323 323 }
324 324 ]
325 325
326 326 debugdelta chain with sparse read enabled
327 327
328 328 $ cat >> $HGRCPATH <<EOF
329 329 > [experimental]
330 330 > sparse-read = True
331 331 > EOF
332 332 $ hg debugdeltachain -m --all-info
333 333 rev p1 p2 chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
334 334 0 -1 -1 1 1 -1 base 44 43 44 1.02326 44 0 0.00000 44 44 1.00000 1
335 335 1 0 -1 2 1 -1 base 0 0 0 0.00000 0 0 0.00000 0 0 1.00000 1
336 336 2 1 -1 3 1 -1 base 44 43 44 1.02326 44 0 0.00000 44 44 1.00000 1
337 337
338 338 $ hg debugdeltachain -m --sparse-info -T '{rev} {chainid} {chainlen} {readsize} {largestblock} {readdensity}\n'
339 339 0 1 1 44 44 1.0
340 340 1 2 1 0 0 1
341 341 2 3 1 44 44 1.0
342 342
343 343 $ hg debugdeltachain -m -Tjson --sparse-info
344 344 [
345 345 {
346 346 "chainid": 1,
347 347 "chainlen": 1,
348 348 "deltatype": "base",
349 349 "largestblock": 44,
350 350 "p1": -1,
351 351 "p2": -1,
352 352 "prevrev": -1,
353 353 "readdensity": 1.0,
354 354 "readsize": 44,
355 355 "rev": 0,
356 356 "srchunks": 1
357 357 },
358 358 {
359 359 "chainid": 2,
360 360 "chainlen": 1,
361 361 "deltatype": "base",
362 362 "largestblock": 0,
363 363 "p1": 0,
364 364 "p2": -1,
365 365 "prevrev": -1,
366 366 "readdensity": 1,
367 367 "readsize": 0,
368 368 "rev": 1,
369 369 "srchunks": 1
370 370 },
371 371 {
372 372 "chainid": 3,
373 373 "chainlen": 1,
374 374 "deltatype": "base",
375 375 "largestblock": 44,
376 376 "p1": 1,
377 377 "p2": -1,
378 378 "prevrev": -1,
379 379 "readdensity": 1.0,
380 380 "readsize": 44,
381 381 "rev": 2,
382 382 "srchunks": 1
383 383 }
384 384 ]
385 385
386 386 $ hg debugdeltachain -m -Tjson --all-info
387 387 [
388 388 {
389 389 "chainid": 1,
390 390 "chainlen": 1,
391 391 "chainratio": 1.0232558139534884,
392 392 "chainsize": 44,
393 393 "compsize": 44,
394 394 "deltatype": "base",
395 395 "extradist": 0,
396 396 "extraratio": 0.0,
397 397 "largestblock": 44,
398 398 "lindist": 44,
399 399 "p1": -1,
400 400 "p2": -1,
401 401 "prevrev": -1,
402 402 "readdensity": 1.0,
403 403 "readsize": 44,
404 404 "rev": 0,
405 405 "srchunks": 1,
406 406 "uncompsize": 43
407 407 },
408 408 {
409 409 "chainid": 2,
410 410 "chainlen": 1,
411 411 "chainratio": 0,
412 412 "chainsize": 0,
413 413 "compsize": 0,
414 414 "deltatype": "base",
415 415 "extradist": 0,
416 416 "extraratio": 0,
417 417 "largestblock": 0,
418 418 "lindist": 0,
419 419 "p1": 0,
420 420 "p2": -1,
421 421 "prevrev": -1,
422 422 "readdensity": 1,
423 423 "readsize": 0,
424 424 "rev": 1,
425 425 "srchunks": 1,
426 426 "uncompsize": 0
427 427 },
428 428 {
429 429 "chainid": 3,
430 430 "chainlen": 1,
431 431 "chainratio": 1.0232558139534884,
432 432 "chainsize": 44,
433 433 "compsize": 44,
434 434 "deltatype": "base",
435 435 "extradist": 0,
436 436 "extraratio": 0.0,
437 437 "largestblock": 44,
438 438 "lindist": 44,
439 439 "p1": 1,
440 440 "p2": -1,
441 441 "prevrev": -1,
442 442 "readdensity": 1.0,
443 443 "readsize": 44,
444 444 "rev": 2,
445 445 "srchunks": 1,
446 446 "uncompsize": 43
447 447 }
448 448 ]
449 449
450 450 $ printf "This test checks things.\n" >> a
451 451 $ hg ci -m a
452 452 $ hg branch other
453 453 marked working directory as branch other
454 454 (branches are permanent and global, did you want a bookmark?)
455 455 $ for i in `$TESTDIR/seq.py 5`; do
456 456 > printf "shorter ${i}" >> a
457 457 > hg ci -m "a other:$i"
458 458 > hg up -q default
459 459 > printf "for the branch default we want longer chains: ${i}" >> a
460 460 > hg ci -m "a default:$i"
461 461 > hg up -q other
462 462 > done
463 463 $ hg debugdeltachain a -T '{rev} {srchunks}\n' --all-info\
464 464 > --config experimental.sparse-read.density-threshold=0.50 \
465 465 > --config experimental.sparse-read.min-gap-size=0
466 466 0 1
467 467 1 1
468 468 2 1
469 469 3 1
470 470 4 1
471 471 5 1
472 472 6 1
473 473 7 1
474 474 8 1
475 475 9 1
476 476 10 2 (no-zstd !)
477 477 10 1 (zstd !)
478 478 11 1
479 479 $ hg --config extensions.strip= strip --no-backup -r 1
480 480 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
481 481
482 482 Test max chain len
483 483 $ cat >> $HGRCPATH << EOF
484 484 > [format]
485 485 > maxchainlen=4
486 486 > EOF
487 487
488 488 $ printf "This test checks if maxchainlen config value is respected also it can serve as basic test for debugrevlog -d <file>.\n" >> a
489 489 $ hg ci -m a
490 490 $ printf "b\n" >> a
491 491 $ hg ci -m a
492 492 $ printf "c\n" >> a
493 493 $ hg ci -m a
494 494 $ printf "d\n" >> a
495 495 $ hg ci -m a
496 496 $ printf "e\n" >> a
497 497 $ hg ci -m a
498 498 $ printf "f\n" >> a
499 499 $ hg ci -m a
500 500 $ printf 'g\n' >> a
501 501 $ hg ci -m a
502 502 $ printf 'h\n' >> a
503 503 $ hg ci -m a
504 504
505 505 $ hg debugrevlog -d a
506 506 # rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
507 507 0 -1 -1 0 ??? 0 0 0 0 ??? ???? ? 1 0 (glob)
508 508 1 0 -1 ??? ??? 0 0 0 0 ??? ???? ? 1 1 (glob)
509 509 2 1 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob)
510 510 3 2 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob)
511 511 4 3 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 4 (glob)
512 512 5 4 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 0 (glob)
513 513 6 5 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 1 (glob)
514 514 7 6 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob)
515 515 8 7 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob)
516 516 #endif
517 517
518 518 Test debuglocks command:
519 519
520 520 $ hg debuglocks
521 521 lock: free
522 522 wlock: free
523 523
524 524 * Test setting the lock
525 525
526 526 waitlock <file> will wait for file to be created. If it isn't in a reasonable
527 527 amount of time, displays error message and returns 1
528 528 $ waitlock() {
529 529 > start=`date +%s`
530 530 > timeout=5
531 531 > while [ \( ! -f $1 \) -a \( ! -L $1 \) ]; do
532 532 > now=`date +%s`
533 533 > if [ "`expr $now - $start`" -gt $timeout ]; then
534 534 > echo "timeout: $1 was not created in $timeout seconds"
535 535 > return 1
536 536 > fi
537 537 > sleep 0.1
538 538 > done
539 539 > }
540 540 $ dolock() {
541 541 > {
542 542 > waitlock .hg/unlock
543 543 > rm -f .hg/unlock
544 544 > echo y
545 545 > } | hg debuglocks "$@" > /dev/null
546 546 > }
547 547 $ dolock -s &
548 548 $ waitlock .hg/store/lock
549 549
550 550 $ hg debuglocks
551 551 lock: user *, process * (*s) (glob)
552 552 wlock: free
553 553 [1]
554 554 $ touch .hg/unlock
555 555 $ wait
556 556 $ [ -f .hg/store/lock ] || echo "There is no lock"
557 557 There is no lock
558 558
559 559 * Test setting the wlock
560 560
561 561 $ dolock -S &
562 562 $ waitlock .hg/wlock
563 563
564 564 $ hg debuglocks
565 565 lock: free
566 566 wlock: user *, process * (*s) (glob)
567 567 [1]
568 568 $ touch .hg/unlock
569 569 $ wait
570 570 $ [ -f .hg/wlock ] || echo "There is no wlock"
571 571 There is no wlock
572 572
573 573 * Test setting both locks
574 574
575 575 $ dolock -Ss &
576 576 $ waitlock .hg/wlock && waitlock .hg/store/lock
577 577
578 578 $ hg debuglocks
579 579 lock: user *, process * (*s) (glob)
580 580 wlock: user *, process * (*s) (glob)
581 581 [2]
582 582
583 583 * Test failing to set a lock
584 584
585 585 $ hg debuglocks -s
586 586 abort: lock is already held
587 587 [255]
588 588
589 589 $ hg debuglocks -S
590 590 abort: wlock is already held
591 591 [255]
592 592
593 593 $ touch .hg/unlock
594 594 $ wait
595 595
596 596 $ hg debuglocks
597 597 lock: free
598 598 wlock: free
599 599
600 600 * Test forcing the lock
601 601
602 602 $ dolock -s &
603 603 $ waitlock .hg/store/lock
604 604
605 605 $ hg debuglocks
606 606 lock: user *, process * (*s) (glob)
607 607 wlock: free
608 608 [1]
609 609
610 610 $ hg debuglocks -L
611 611
612 612 $ hg debuglocks
613 613 lock: free
614 614 wlock: free
615 615
616 616 $ touch .hg/unlock
617 617 $ wait
618 618
619 619 * Test forcing the wlock
620 620
621 621 $ dolock -S &
622 622 $ waitlock .hg/wlock
623 623
624 624 $ hg debuglocks
625 625 lock: free
626 626 wlock: user *, process * (*s) (glob)
627 627 [1]
628 628
629 629 $ hg debuglocks -W
630 630
631 631 $ hg debuglocks
632 632 lock: free
633 633 wlock: free
634 634
635 635 $ touch .hg/unlock
636 636 $ wait
637 637
638 638 Test WdirUnsupported exception
639 639
640 640 $ hg debugdata -c ffffffffffffffffffffffffffffffffffffffff
641 641 abort: working directory revision cannot be specified
642 642 [255]
643 643
644 644 Test cache warming command
645 645
646 646 $ rm -rf .hg/cache/
647 647 $ hg debugupdatecaches --debug
648 648 updating the branch cache
649 649 $ ls -r .hg/cache/*
650 650 .hg/cache/tags2-served
651 651 .hg/cache/tags2
652 652 .hg/cache/rbc-revs-v1
653 653 .hg/cache/rbc-names-v1
654 654 .hg/cache/hgtagsfnodes1
655 655 .hg/cache/branch2-visible-hidden
656 656 .hg/cache/branch2-visible
657 657 .hg/cache/branch2-served.hidden
658 658 .hg/cache/branch2-served
659 659 .hg/cache/branch2-immutable
660 660 .hg/cache/branch2-base
661 661
662 662 Test debugcolor
663 663
664 664 #if no-windows
665 665 $ hg debugcolor --style --color always | grep -E 'mode|style|log\.'
666 666 color mode: 'ansi'
667 667 available style:
668 668 \x1b[0;33mlog.changeset\x1b[0m: \x1b[0;33myellow\x1b[0m (esc)
669 669 #endif
670 670
671 671 $ hg debugcolor --style --color never
672 672 color mode: None
673 673 available style:
674 674
675 675 $ cd ..
676 676
677 677 Test internal debugstacktrace command
678 678
679 679 $ cat > debugstacktrace.py << EOF
680 680 > from mercurial import (
681 681 > util,
682 682 > )
683 683 > from mercurial.utils import (
684 684 > procutil,
685 685 > )
686 686 > def f():
687 687 > util.debugstacktrace(f=procutil.stdout)
688 688 > g()
689 689 > def g():
690 690 > util.dst(b'hello from g\\n', skip=1)
691 691 > h()
692 692 > def h():
693 693 > util.dst(b'hi ...\\nfrom h hidden in g', 1, depth=2)
694 694 > f()
695 695 > EOF
696 696 $ "$PYTHON" debugstacktrace.py
697 697 stacktrace at:
698 698 *debugstacktrace.py:15 in * (glob)
699 699 *debugstacktrace.py:8 in f (glob)
700 700 hello from g at:
701 701 *debugstacktrace.py:15 in * (glob)
702 702 *debugstacktrace.py:9 in f (glob)
703 703 hi ...
704 704 from h hidden in g at:
705 705 *debugstacktrace.py:9 in f (glob)
706 706 *debugstacktrace.py:12 in g (glob)
707 707
708 708 Test debugcapabilities command:
709 709
710 710 $ hg debugcapabilities ./debugrevlog/
711 711 Main capabilities:
712 712 branchmap
713 713 $USUAL_BUNDLE2_CAPS$
714 714 getbundle
715 715 known
716 716 lookup
717 717 pushkey
718 718 unbundle
719 719 Bundle2 capabilities:
720 720 HG20
721 721 bookmarks
722 722 changegroup
723 723 01
724 724 02
725 725 03
726 726 checkheads
727 727 related
728 728 digests
729 729 md5
730 730 sha1
731 731 sha512
732 732 error
733 733 abort
734 734 unsupportedcontent
735 735 pushraced
736 736 pushkey
737 737 hgtagsfnodes
738 738 listkeys
739 739 phases
740 740 heads
741 741 pushkey
742 742 remote-changegroup
743 743 http
744 744 https
745 745 stream
746 746 v2
747 747
748 748 Test debugpeer
749 749
750 750 $ hg debugpeer ssh://user@dummy/debugrevlog
751 751 url: ssh://user@dummy/debugrevlog
752 752 local: no
753 753 pushable: yes
754 754
755 755 #if rust
756 756
757 757 $ hg --debug debugpeer ssh://user@dummy/debugrevlog
758 758 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re)
759 759 devel-peer-request: hello+between
760 760 devel-peer-request: pairs: 81 bytes
761 761 sending hello command
762 762 sending between command
763 763 remote: 473
764 764 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlog-compression-zstd,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
765 765 remote: 1
766 766 devel-peer-request: protocaps
767 767 devel-peer-request: caps: * bytes (glob)
768 768 sending protocaps command
769 769 url: ssh://user@dummy/debugrevlog
770 770 local: no
771 771 pushable: yes
772 772
773 773 #endif
774 774
775 775 #if no-rust zstd
776 776
777 777 $ hg --debug debugpeer ssh://user@dummy/debugrevlog
778 778 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re)
779 779 devel-peer-request: hello+between
780 780 devel-peer-request: pairs: 81 bytes
781 781 sending hello command
782 782 sending between command
783 783 remote: 473
784 784 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlog-compression-zstd,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
785 785 remote: 1
786 786 devel-peer-request: protocaps
787 787 devel-peer-request: caps: * bytes (glob)
788 788 sending protocaps command
789 789 url: ssh://user@dummy/debugrevlog
790 790 local: no
791 791 pushable: yes
792 792
793 793 #endif
794 794
795 795 #if no-rust no-zstd
796 796
797 797 $ hg --debug debugpeer ssh://user@dummy/debugrevlog
798 798 running .* ".*[/\\]dummyssh" ['"]user@dummy['"] ['"]hg -R debugrevlog serve --stdio['"] (re)
799 799 devel-peer-request: hello+between
800 800 devel-peer-request: pairs: 81 bytes
801 801 sending hello command
802 802 sending between command
803 803 remote: 449
804 804 remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
805 805 remote: 1
806 806 devel-peer-request: protocaps
807 807 devel-peer-request: caps: * bytes (glob)
808 808 sending protocaps command
809 809 url: ssh://user@dummy/debugrevlog
810 810 local: no
811 811 pushable: yes
812 812
813 813 #endif
814 814
815 815 Test debugshell
816 816
817 817 $ hg debugshell -c 'ui.write(b"%s\n" % ui.username())'
818 818 test
@@ -1,121 +1,122 b''
1 1 Testing cloning with the EOL extension
2 2
3 3 $ cat >> $HGRCPATH <<EOF
4 4 > [extensions]
5 5 > eol =
6 6 >
7 7 > [eol]
8 8 > native = CRLF
9 9 > EOF
10 10
11 11 setup repository
12 12
13 13 $ hg init repo
14 14 $ cd repo
15 15 $ cat > .hgeol <<EOF
16 16 > [patterns]
17 17 > **.txt = native
18 18 > EOF
19 19 $ printf "first\r\nsecond\r\nthird\r\n" > a.txt
20 20 $ hg commit --addremove -m 'checkin'
21 21 adding .hgeol
22 22 adding a.txt
23 23
24 24 Test commit of removed .hgeol and how it immediately makes the automatic
25 25 changes explicit and committable.
26 26
27 27 $ cd ..
28 28 $ hg clone repo repo-2
29 29 updating to branch default
30 30 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
31 31 $ cd repo-2
32 32 $ cat a.txt
33 33 first\r (esc)
34 34 second\r (esc)
35 35 third\r (esc)
36 36 $ hg cat a.txt
37 37 first
38 38 second
39 39 third
40 40 $ hg remove .hgeol
41 41 $ touch a.txt * # ensure consistent st dirtyness checks, ignoring dirstate timing
42 42 $ hg st -v --debug
43 43 M a.txt
44 44 R .hgeol
45 45 $ hg commit -m 'remove eol'
46 46 $ hg exp
47 47 # HG changeset patch
48 48 # User test
49 49 # Date 0 0
50 50 # Thu Jan 01 00:00:00 1970 +0000
51 51 # Node ID 3c20c2d90333b6ecdc8f7aa8f9b73223c7c7a608
52 52 # Parent 90f94e2cf4e24628afddd641688dfe4cd476d6e4
53 53 remove eol
54 54
55 55 diff -r 90f94e2cf4e2 -r 3c20c2d90333 .hgeol
56 56 --- a/.hgeol Thu Jan 01 00:00:00 1970 +0000
57 57 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
58 58 @@ -1,2 +0,0 @@
59 59 -[patterns]
60 60 -**.txt = native
61 61 diff -r 90f94e2cf4e2 -r 3c20c2d90333 a.txt
62 62 --- a/a.txt Thu Jan 01 00:00:00 1970 +0000
63 63 +++ b/a.txt Thu Jan 01 00:00:00 1970 +0000
64 64 @@ -1,3 +1,3 @@
65 65 -first
66 66 -second
67 67 -third
68 68 +first\r (esc)
69 69 +second\r (esc)
70 70 +third\r (esc)
71 71 $ hg push --quiet
72 72 $ cd ..
73 73
74 74 Test clone of repo with .hgeol in working dir, but no .hgeol in default
75 75 checkout revision tip. The repo is correctly updated to be consistent and have
76 76 the exact content checked out without filtering, ignoring the current .hgeol in
77 77 the source repo:
78 78
79 79 $ cat repo/.hgeol
80 80 [patterns]
81 81 **.txt = native
82 82 $ hg clone repo repo-3 -v --debug
83 linked 7 files
83 linked 8 files (no-rust !)
84 linked 10 files (rust !)
84 85 updating to branch default
85 86 resolving manifests
86 87 branchmerge: False, force: False, partial: False
87 88 ancestor: 000000000000, local: 000000000000+, remote: 3c20c2d90333
88 89 calling hook preupdate.eol: hgext.eol.preupdate
89 90 a.txt: remote created -> g
90 91 getting a.txt
91 92 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
92 93 updating the branch cache
93 94 $ cd repo-3
94 95
95 96 $ cat a.txt
96 97 first\r (esc)
97 98 second\r (esc)
98 99 third\r (esc)
99 100
100 101 Test clone of revision with .hgeol
101 102
102 103 $ cd ..
103 104 $ hg clone -r 0 repo repo-4
104 105 adding changesets
105 106 adding manifests
106 107 adding file changes
107 108 added 1 changesets with 2 changes to 2 files
108 109 new changesets 90f94e2cf4e2
109 110 updating to branch default
110 111 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
111 112 $ cd repo-4
112 113 $ cat .hgeol
113 114 [patterns]
114 115 **.txt = native
115 116
116 117 $ cat a.txt
117 118 first\r (esc)
118 119 second\r (esc)
119 120 third\r (esc)
120 121
121 122 $ cd ..
@@ -1,532 +1,538 b''
1 1 #require repofncache
2 2
3 3 An extension which will set fncache chunksize to 1 byte to make sure that logic
4 4 does not break
5 5
6 6 $ cat > chunksize.py <<EOF
7 7 > from mercurial import store
8 8 > store.fncache_chunksize = 1
9 9 > EOF
10 10
11 11 $ cat >> $HGRCPATH <<EOF
12 12 > [extensions]
13 13 > chunksize = $TESTTMP/chunksize.py
14 14 > EOF
15 15
16 16 Init repo1:
17 17
18 18 $ hg init repo1
19 19 $ cd repo1
20 20 $ echo "some text" > a
21 21 $ hg add
22 22 adding a
23 23 $ hg ci -m first
24 24 $ cat .hg/store/fncache | sort
25 25 data/a.i
26 26
27 27 Testing a.i/b:
28 28
29 29 $ mkdir a.i
30 30 $ echo "some other text" > a.i/b
31 31 $ hg add
32 32 adding a.i/b
33 33 $ hg ci -m second
34 34 $ cat .hg/store/fncache | sort
35 35 data/a.i
36 36 data/a.i.hg/b.i
37 37
38 38 Testing a.i.hg/c:
39 39
40 40 $ mkdir a.i.hg
41 41 $ echo "yet another text" > a.i.hg/c
42 42 $ hg add
43 43 adding a.i.hg/c
44 44 $ hg ci -m third
45 45 $ cat .hg/store/fncache | sort
46 46 data/a.i
47 47 data/a.i.hg.hg/c.i
48 48 data/a.i.hg/b.i
49 49
50 50 Testing verify:
51 51
52 52 $ hg verify -q
53 53
54 54 $ rm .hg/store/fncache
55 55
56 56 $ hg verify
57 57 checking changesets
58 58 checking manifests
59 59 crosschecking files in changesets and manifests
60 60 checking files
61 61 warning: revlog 'data/a.i' not in fncache!
62 62 warning: revlog 'data/a.i.hg/c.i' not in fncache!
63 63 warning: revlog 'data/a.i/b.i' not in fncache!
64 64 checking dirstate
65 65 checked 3 changesets with 3 changes to 3 files
66 66 3 warnings encountered!
67 67 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
68 68
69 69 Follow the hint to make sure it works
70 70
71 71 $ hg debugrebuildfncache
72 72 adding data/a.i
73 73 adding data/a.i.hg/c.i
74 74 adding data/a.i/b.i
75 75 3 items added, 0 removed from fncache
76 76
77 77 $ hg verify -q
78 78
79 79 $ cd ..
80 80
81 81 Non store repo:
82 82
83 83 $ hg --config format.usestore=False init foo
84 84 $ cd foo
85 85 $ mkdir tst.d
86 86 $ echo foo > tst.d/foo
87 87 $ hg ci -Amfoo
88 88 adding tst.d/foo
89 89 $ find .hg | sort
90 90 .hg
91 .hg/00changelog-6b8ab34b.nd (rust !)
92 .hg/00changelog.d
91 93 .hg/00changelog.i
94 .hg/00changelog.n (rust !)
92 95 .hg/00manifest.i
93 96 .hg/branch
94 97 .hg/cache
95 98 .hg/cache/branch2-served
96 99 .hg/cache/rbc-names-v1
97 100 .hg/cache/rbc-revs-v1
98 101 .hg/data
99 102 .hg/data/tst.d.hg
100 103 .hg/data/tst.d.hg/foo.i
101 104 .hg/dirstate
102 105 .hg/fsmonitor.state (fsmonitor !)
103 106 .hg/last-message.txt
104 107 .hg/phaseroots
105 108 .hg/requires
106 109 .hg/undo
107 110 .hg/undo.backup.branch.bck
108 111 .hg/undo.backupfiles
109 112 .hg/undo.desc
110 113 .hg/wcache
111 114 .hg/wcache/checkisexec (execbit !)
112 115 .hg/wcache/checklink (symlink !)
113 116 .hg/wcache/checklink-target (symlink !)
114 117 .hg/wcache/manifestfulltextcache (reporevlogstore !)
115 118 $ cd ..
116 119
117 120 Non fncache repo:
118 121
119 122 $ hg --config format.usefncache=False init bar
120 123 $ cd bar
121 124 $ mkdir tst.d
122 125 $ echo foo > tst.d/Foo
123 126 $ hg ci -Amfoo
124 127 adding tst.d/Foo
125 128 $ find .hg | sort
126 129 .hg
127 130 .hg/00changelog.i
128 131 .hg/branch
129 132 .hg/cache
130 133 .hg/cache/branch2-served
131 134 .hg/cache/rbc-names-v1
132 135 .hg/cache/rbc-revs-v1
133 136 .hg/dirstate
134 137 .hg/fsmonitor.state (fsmonitor !)
135 138 .hg/last-message.txt
136 139 .hg/requires
137 140 .hg/store
141 .hg/store/00changelog-b875dfc5.nd (rust !)
142 .hg/store/00changelog.d
138 143 .hg/store/00changelog.i
144 .hg/store/00changelog.n (rust !)
139 145 .hg/store/00manifest.i
140 146 .hg/store/data
141 147 .hg/store/data/tst.d.hg
142 148 .hg/store/data/tst.d.hg/_foo.i
143 149 .hg/store/phaseroots
144 150 .hg/store/requires
145 151 .hg/store/undo
146 152 .hg/store/undo.backupfiles
147 153 .hg/undo.backup.branch.bck
148 154 .hg/undo.desc
149 155 .hg/wcache
150 156 .hg/wcache/checkisexec (execbit !)
151 157 .hg/wcache/checklink (symlink !)
152 158 .hg/wcache/checklink-target (symlink !)
153 159 .hg/wcache/manifestfulltextcache (reporevlogstore !)
154 160 $ cd ..
155 161
156 162 Encoding of reserved / long paths in the store
157 163
158 164 $ hg init r2
159 165 $ cd r2
160 166 $ cat <<EOF > .hg/hgrc
161 167 > [ui]
162 168 > portablefilenames = ignore
163 169 > EOF
164 170
165 171 $ hg import -q --bypass - <<EOF
166 172 > # HG changeset patch
167 173 > # User test
168 174 > # Date 0 0
169 175 > # Node ID 1c7a2f7cb77be1a0def34e4c7cabc562ad98fbd7
170 176 > # Parent 0000000000000000000000000000000000000000
171 177 > 1
172 178 >
173 179 > diff --git a/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
174 180 > new file mode 100644
175 181 > --- /dev/null
176 182 > +++ b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
177 183 > @@ -0,0 +1,1 @@
178 184 > +foo
179 185 > diff --git a/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
180 186 > new file mode 100644
181 187 > --- /dev/null
182 188 > +++ b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
183 189 > @@ -0,0 +1,1 @@
184 190 > +foo
185 191 > diff --git a/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
186 192 > new file mode 100644
187 193 > --- /dev/null
188 194 > +++ b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
189 195 > @@ -0,0 +1,1 @@
190 196 > +foo
191 197 > diff --git a/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
192 198 > new file mode 100644
193 199 > --- /dev/null
194 200 > +++ b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
195 201 > @@ -0,0 +1,1 @@
196 202 > +foo
197 203 > diff --git a/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
198 204 > new file mode 100644
199 205 > --- /dev/null
200 206 > +++ b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
201 207 > @@ -0,0 +1,1 @@
202 208 > +foo
203 209 > EOF
204 210
205 211 $ find .hg/store -name *.i | sort
206 212 .hg/store/00changelog.i
207 213 .hg/store/00manifest.i
208 214 .hg/store/data/bla.aux/pr~6e/_p_r_n/lpt/co~6d3/nu~6c/coma/foo._n_u_l/normal.c.i
209 215 .hg/store/dh/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxx168e07b38e65eff86ab579afaaa8e30bfbe0f35f.i
210 216 .hg/store/dh/au~78/second/x.prn/fourth/fi~3afth/sixth/seventh/eighth/nineth/tenth/loremia20419e358ddff1bf8751e38288aff1d7c32ec05.i
211 217 .hg/store/dh/enterpri/openesba/contrib-/corba-bc/netbeans/wsdlexte/src/main/java/org.net7018f27961fdf338a598a40c4683429e7ffb9743.i
212 218 .hg/store/dh/project_/resource/anotherl/followed/andanoth/andthenanextremelylongfilename0d8e1f4187c650e2f1fdca9fd90f786bc0976b6b.i
213 219
214 220 $ cd ..
215 221
216 222 Aborting lock does not prevent fncache writes
217 223
218 224 $ cat > exceptionext.py <<EOF
219 225 > import os
220 226 > from mercurial import commands, error, extensions
221 227 >
222 228 > def lockexception(orig, vfs, lockname, wait, releasefn, *args, **kwargs):
223 229 > def releasewrap():
224 230 > l.held = False # ensure __del__ is a noop
225 231 > raise error.Abort(b"forced lock failure")
226 232 > l = orig(vfs, lockname, wait, releasewrap, *args, **kwargs)
227 233 > return l
228 234 >
229 235 > def reposetup(ui, repo):
230 236 > extensions.wrapfunction(repo, '_lock', lockexception)
231 237 >
232 238 > cmdtable = {}
233 239 >
234 240 > # wrap "commit" command to prevent wlock from being '__del__()'-ed
235 241 > # at the end of dispatching (for intentional "forced lcok failure")
236 242 > def commitwrap(orig, ui, repo, *pats, **opts):
237 243 > repo = repo.unfiltered() # to use replaced repo._lock certainly
238 244 > wlock = repo.wlock()
239 245 > try:
240 246 > return orig(ui, repo, *pats, **opts)
241 247 > finally:
242 248 > # multiple 'relase()' is needed for complete releasing wlock,
243 249 > # because "forced" abort at last releasing store lock
244 250 > # prevents wlock from being released at same 'lockmod.release()'
245 251 > for i in range(wlock.held):
246 252 > wlock.release()
247 253 >
248 254 > def extsetup(ui):
249 255 > extensions.wrapcommand(commands.table, b"commit", commitwrap)
250 256 > EOF
251 257 $ extpath=`pwd`/exceptionext.py
252 258 $ hg init fncachetxn
253 259 $ cd fncachetxn
254 260 $ printf "[extensions]\nexceptionext=$extpath\n" >> .hg/hgrc
255 261 $ touch y
256 262 $ hg ci -qAm y
257 263 abort: forced lock failure
258 264 [255]
259 265 $ cat .hg/store/fncache
260 266 data/y.i
261 267
262 268 Aborting transaction prevents fncache change
263 269
264 270 $ cat > ../exceptionext.py <<EOF
265 271 > import os
266 272 > from mercurial import commands, error, extensions, localrepo
267 273 >
268 274 > def wrapper(orig, self, *args, **kwargs):
269 275 > tr = orig(self, *args, **kwargs)
270 276 > def fail(tr):
271 277 > raise error.Abort(b"forced transaction failure")
272 278 > # zzz prefix to ensure it sorted after store.write
273 279 > tr.addfinalize(b'zzz-forcefails', fail)
274 280 > return tr
275 281 >
276 282 > def uisetup(ui):
277 283 > extensions.wrapfunction(
278 284 > localrepo.localrepository, 'transaction', wrapper)
279 285 >
280 286 > cmdtable = {}
281 287 >
282 288 > EOF
283 289
284 290 Clean cached version
285 291 $ rm -f "${extpath}c"
286 292 $ rm -Rf "`dirname $extpath`/__pycache__"
287 293
288 294 $ touch z
289 295 $ hg ci -qAm z
290 296 transaction abort!
291 297 rollback completed
292 298 abort: forced transaction failure
293 299 [255]
294 300 $ cat .hg/store/fncache
295 301 data/y.i
296 302
297 303 Aborted transactions can be recovered later
298 304
299 305 $ cat > ../exceptionext.py <<EOF
300 306 > import os
301 307 > import signal
302 308 > from mercurial import (
303 309 > commands,
304 310 > error,
305 311 > extensions,
306 312 > localrepo,
307 313 > transaction,
308 314 > )
309 315 >
310 316 > def trwrapper(orig, self, *args, **kwargs):
311 317 > tr = orig(self, *args, **kwargs)
312 318 > def fail(tr):
313 319 > os.kill(os.getpid(), signal.SIGKILL)
314 320 > # zzz prefix to ensure it sorted after store.write
315 321 > tr.addfinalize(b'zzz-forcefails', fail)
316 322 > return tr
317 323 >
318 324 > def uisetup(ui):
319 325 > extensions.wrapfunction(localrepo.localrepository, 'transaction',
320 326 > trwrapper)
321 327 >
322 328 > cmdtable = {}
323 329 >
324 330 > EOF
325 331
326 332 Clean cached versions
327 333 $ rm -f "${extpath}c"
328 334 $ rm -Rf "`dirname $extpath`/__pycache__"
329 335
330 336 $ hg up -q 1
331 337 $ touch z
332 338 # Cannot rely on the return code value as chg use a different one.
333 339 # So we use a `|| echo` trick
334 340 # XXX-CHG fixing chg behavior would be nice here.
335 341 $ hg ci -qAm z || echo "He's Dead, Jim." 2>/dev/null
336 342 *Killed* (glob) (?)
337 343 He's Dead, Jim.
338 344 $ cat .hg/store/fncache | sort
339 345 data/y.i
340 346 data/z.i
341 347 $ hg recover --verify
342 348 rolling back interrupted transaction
343 349 checking changesets
344 350 checking manifests
345 351 crosschecking files in changesets and manifests
346 352 checking files
347 353 checking dirstate
348 354 checked 1 changesets with 1 changes to 1 files
349 355 $ cat .hg/store/fncache
350 356 data/y.i
351 357
352 358 $ cd ..
353 359
354 360 debugrebuildfncache does nothing unless repo has fncache requirement
355 361
356 362 $ hg --config format.usefncache=false init nofncache
357 363 $ cd nofncache
358 364 $ hg debugrebuildfncache
359 365 (not rebuilding fncache because repository does not support fncache)
360 366
361 367 $ cd ..
362 368
363 369 debugrebuildfncache works on empty repository
364 370
365 371 $ hg init empty
366 372 $ cd empty
367 373 $ hg debugrebuildfncache
368 374 fncache already up to date
369 375 $ cd ..
370 376
371 377 debugrebuildfncache on an up to date repository no-ops
372 378
373 379 $ hg init repo
374 380 $ cd repo
375 381 $ echo initial > foo
376 382 $ echo initial > .bar
377 383 $ hg commit -A -m initial
378 384 adding .bar
379 385 adding foo
380 386
381 387 $ cat .hg/store/fncache | sort
382 388 data/.bar.i
383 389 data/foo.i
384 390
385 391 $ hg debugrebuildfncache
386 392 fncache already up to date
387 393
388 394 debugrebuildfncache restores deleted fncache file
389 395
390 396 $ rm -f .hg/store/fncache
391 397 $ hg debugrebuildfncache
392 398 adding data/.bar.i
393 399 adding data/foo.i
394 400 2 items added, 0 removed from fncache
395 401
396 402 $ cat .hg/store/fncache | sort
397 403 data/.bar.i
398 404 data/foo.i
399 405
400 406 Rebuild after rebuild should no-op
401 407
402 408 $ hg debugrebuildfncache
403 409 fncache already up to date
404 410
405 411 A single missing file should get restored, an extra file should be removed
406 412
407 413 $ cat > .hg/store/fncache << EOF
408 414 > data/foo.i
409 415 > data/bad-entry.i
410 416 > EOF
411 417
412 418 $ hg debugrebuildfncache
413 419 removing data/bad-entry.i
414 420 adding data/.bar.i
415 421 1 items added, 1 removed from fncache
416 422
417 423 $ cat .hg/store/fncache | sort
418 424 data/.bar.i
419 425 data/foo.i
420 426
421 427 debugrebuildfncache recovers from truncated line in fncache
422 428
423 429 $ printf a > .hg/store/fncache
424 430 $ hg debugrebuildfncache
425 431 fncache does not ends with a newline
426 432 adding data/.bar.i
427 433 adding data/foo.i
428 434 2 items added, 0 removed from fncache
429 435
430 436 $ cat .hg/store/fncache | sort
431 437 data/.bar.i
432 438 data/foo.i
433 439
434 440 $ cd ..
435 441
436 442 Try a simple variation without dotencode to ensure fncache is ignorant of encoding
437 443
438 444 $ hg --config format.dotencode=false init nodotencode
439 445 $ cd nodotencode
440 446 $ echo initial > foo
441 447 $ echo initial > .bar
442 448 $ hg commit -A -m initial
443 449 adding .bar
444 450 adding foo
445 451
446 452 $ cat .hg/store/fncache | sort
447 453 data/.bar.i
448 454 data/foo.i
449 455
450 456 $ rm .hg/store/fncache
451 457 $ hg debugrebuildfncache
452 458 adding data/.bar.i
453 459 adding data/foo.i
454 460 2 items added, 0 removed from fncache
455 461
456 462 $ cat .hg/store/fncache | sort
457 463 data/.bar.i
458 464 data/foo.i
459 465
460 466 $ cd ..
461 467
462 468 In repositories that have accumulated a large number of files over time, the
463 469 fncache file is going to be large. If we possibly can avoid loading it, so much the better.
464 470 The cache should not loaded when committing changes to existing files, or when unbundling
465 471 changesets that only contain changes to existing files:
466 472
467 473 $ cat > fncacheloadwarn.py << EOF
468 474 > from mercurial import extensions, localrepo
469 475 >
470 476 > def extsetup(ui):
471 477 > def wrapstore(orig, requirements, *args):
472 478 > store = orig(requirements, *args)
473 479 > if b'store' in requirements and b'fncache' in requirements:
474 480 > instrumentfncachestore(store, ui)
475 481 > return store
476 482 > extensions.wrapfunction(localrepo, 'makestore', wrapstore)
477 483 >
478 484 > def instrumentfncachestore(fncachestore, ui):
479 485 > class instrumentedfncache(type(fncachestore.fncache)):
480 486 > def _load(self):
481 487 > ui.warn(b'fncache load triggered!\n')
482 488 > super(instrumentedfncache, self)._load()
483 489 > fncachestore.fncache.__class__ = instrumentedfncache
484 490 > EOF
485 491
486 492 $ fncachextpath=`pwd`/fncacheloadwarn.py
487 493 $ hg init nofncacheload
488 494 $ cd nofncacheload
489 495 $ printf "[extensions]\nfncacheloadwarn=$fncachextpath\n" >> .hg/hgrc
490 496
491 497 A new file should trigger a load, as we'd want to update the fncache set in that case:
492 498
493 499 $ touch foo
494 500 $ hg ci -qAm foo
495 501 fncache load triggered!
496 502
497 503 But modifying that file should not:
498 504
499 505 $ echo bar >> foo
500 506 $ hg ci -qm foo
501 507
502 508 If a transaction has been aborted, the zero-size truncated index file will
503 509 not prevent the fncache from being loaded; rather than actually abort
504 510 a transaction, we simulate the situation by creating a zero-size index file:
505 511
506 512 $ touch .hg/store/data/bar.i
507 513 $ touch bar
508 514 $ hg ci -qAm bar
509 515 fncache load triggered!
510 516
511 517 Unbundling should follow the same rules; existing files should not cause a load:
512 518
513 519 (loading during the clone is expected)
514 520 $ hg clone -q . tobundle
515 521 fncache load triggered!
516 522 fncache load triggered!
517 523 fncache load triggered!
518 524
519 525 $ echo 'new line' > tobundle/bar
520 526 $ hg -R tobundle ci -qm bar
521 527 $ hg -R tobundle bundle -q barupdated.hg
522 528 $ hg unbundle -q barupdated.hg
523 529
524 530 but adding new files should:
525 531
526 532 $ touch tobundle/newfile
527 533 $ hg -R tobundle ci -qAm newfile
528 534 $ hg -R tobundle bundle -q newfile.hg
529 535 $ hg unbundle -q newfile.hg
530 536 fncache load triggered!
531 537
532 538 $ cd ..
@@ -1,425 +1,469 b''
1 1 #require hardlink reporevlogstore
2 2
3 3 $ cat > nlinks.py <<EOF
4 4 > import sys
5 5 > from mercurial import pycompat, util
6 6 > for f in sorted(sys.stdin.readlines()):
7 7 > f = f[:-1]
8 8 > print(util.nlinks(pycompat.fsencode(f)), f)
9 9 > EOF
10 10
11 11 $ nlinksdir()
12 12 > {
13 13 > find "$@" -type f | "$PYTHON" $TESTTMP/nlinks.py
14 14 > }
15 15
16 16 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
17 17
18 18 $ cat > linkcp.py <<EOF
19 19 > import sys
20 20 > from mercurial import pycompat, util
21 21 > util.copyfiles(pycompat.fsencode(sys.argv[1]),
22 22 > pycompat.fsencode(sys.argv[2]), hardlink=True)
23 23 > EOF
24 24
25 25 $ linkcp()
26 26 > {
27 27 > "$PYTHON" $TESTTMP/linkcp.py $1 $2
28 28 > }
29 29
30 30 Prepare repo r1:
31 31
32 32 $ hg init r1
33 33 $ cd r1
34 34
35 35 $ echo c1 > f1
36 36 $ hg add f1
37 37 $ hg ci -m0
38 38
39 39 $ mkdir d1
40 40 $ cd d1
41 41 $ echo c2 > f2
42 42 $ hg add f2
43 43 $ hg ci -m1
44 44 $ cd ../..
45 45
46 46 $ nlinksdir r1/.hg/store
47 1 r1/.hg/store/00changelog-b870a51b.nd (rust !)
48 1 r1/.hg/store/00changelog.d
47 49 1 r1/.hg/store/00changelog.i
50 1 r1/.hg/store/00changelog.n (rust !)
48 51 1 r1/.hg/store/00manifest.i
49 52 1 r1/.hg/store/data/d1/f2.i
50 53 1 r1/.hg/store/data/f1.i
51 54 1 r1/.hg/store/fncache (repofncache !)
52 55 1 r1/.hg/store/phaseroots
53 56 1 r1/.hg/store/requires
54 57 1 r1/.hg/store/undo
58 1 r1/.hg/store/undo.backup.00changelog.n.bck (rust !)
55 59 1 r1/.hg/store/undo.backup.fncache.bck (repofncache !)
56 60 1 r1/.hg/store/undo.backupfiles
57 61
58 62
59 63 Create hardlinked clone r2:
60 64
61 65 $ hg clone -U --debug r1 r2 --config progress.debug=true
62 linking: 1/7 files (14.29%)
63 linking: 2/7 files (28.57%)
64 linking: 3/7 files (42.86%)
65 linking: 4/7 files (57.14%)
66 linking: 5/7 files (71.43%)
67 linking: 6/7 files (85.71%)
68 linking: 7/7 files (100.00%)
69 linked 7 files
66 linking: 1/8 files (12.50%) (no-rust !)
67 linking: 2/8 files (25.00%) (no-rust !)
68 linking: 3/8 files (37.50%) (no-rust !)
69 linking: 4/8 files (50.00%) (no-rust !)
70 linking: 5/8 files (62.50%) (no-rust !)
71 linking: 6/8 files (75.00%) (no-rust !)
72 linking: 7/8 files (87.50%) (no-rust !)
73 linking: 8/8 files (100.00%) (no-rust !)
74 linked 8 files (no-rust !)
75 linking: 1/10 files (10.00%) (rust !)
76 linking: 2/10 files (20.00%) (rust !)
77 linking: 3/10 files (30.00%) (rust !)
78 linking: 4/10 files (40.00%) (rust !)
79 linking: 5/10 files (50.00%) (rust !)
80 linking: 6/10 files (60.00%) (rust !)
81 linking: 7/10 files (70.00%) (rust !)
82 linking: 8/10 files (80.00%) (rust !)
83 linking: 9/10 files (90.00%) (rust !)
84 linking: 10/10 files (100.00%) (rust !)
85 linked 10 files (rust !)
70 86 updating the branch cache
71 87
72 88 Create non-hardlinked clone r3:
73 89
74 90 $ hg clone --pull r1 r3
75 91 requesting all changes
76 92 adding changesets
77 93 adding manifests
78 94 adding file changes
79 95 added 2 changesets with 2 changes to 2 files
80 96 new changesets 40d85e9847f2:7069c422939c
81 97 updating to branch default
82 98 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
83 99
84 100
85 101 Repos r1 and r2 should now contain hardlinked files:
86 102
87 103 $ nlinksdir r1/.hg/store
104 1 r1/.hg/store/00changelog-b870a51b.nd (rust !)
105 2 r1/.hg/store/00changelog.d
88 106 2 r1/.hg/store/00changelog.i
107 1 r1/.hg/store/00changelog.n (rust !)
89 108 2 r1/.hg/store/00manifest.i
90 109 2 r1/.hg/store/data/d1/f2.i
91 110 2 r1/.hg/store/data/f1.i
92 111 1 r1/.hg/store/fncache (repofncache !)
93 112 1 r1/.hg/store/phaseroots
94 113 1 r1/.hg/store/requires
95 114 1 r1/.hg/store/undo
115 1 r1/.hg/store/undo.backup.00changelog.n.bck (rust !)
96 116 1 r1/.hg/store/undo.backup.fncache.bck (repofncache !)
97 117 1 r1/.hg/store/undo.backupfiles
98 118
99 119 $ nlinksdir r2/.hg/store
120 1 r2/.hg/store/00changelog-b870a51b.nd (rust !)
121 2 r2/.hg/store/00changelog.d
100 122 2 r2/.hg/store/00changelog.i
123 1 r2/.hg/store/00changelog.n (rust !)
101 124 2 r2/.hg/store/00manifest.i
102 125 2 r2/.hg/store/data/d1/f2.i
103 126 2 r2/.hg/store/data/f1.i
104 127 1 r2/.hg/store/fncache (repofncache !)
105 128 1 r2/.hg/store/requires
106 129
107 130 Repo r3 should not be hardlinked:
108 131
109 132 $ nlinksdir r3/.hg/store
133 1 r3/.hg/store/00changelog-88698448.nd (rust !)
134 1 r3/.hg/store/00changelog.d
110 135 1 r3/.hg/store/00changelog.i
136 1 r3/.hg/store/00changelog.n (rust !)
111 137 1 r3/.hg/store/00manifest.i
112 138 1 r3/.hg/store/data/d1/f2.i
113 139 1 r3/.hg/store/data/f1.i
114 140 1 r3/.hg/store/fncache (repofncache !)
115 141 1 r3/.hg/store/phaseroots
116 142 1 r3/.hg/store/requires
117 143 1 r3/.hg/store/undo
118 144 1 r3/.hg/store/undo.backupfiles
119 145
120 146
121 147 Create a non-inlined filelog in r3:
122 148
123 149 $ cd r3/d1
124 150 >>> f = open('data1', 'wb')
125 151 >>> for x in range(10000):
126 152 ... f.write(b"%d\n" % x) and None
127 153 >>> f.close()
128 154 $ for j in 0 1 2 3 4 5 6 7 8 9; do
129 155 > cat data1 >> f2
130 156 > hg commit -m$j
131 157 > done
132 158 $ cd ../..
133 159
134 160 $ nlinksdir r3/.hg/store
161 1 r3/.hg/store/00changelog-ea337809.nd (rust !)
162 1 r3/.hg/store/00changelog.d
135 163 1 r3/.hg/store/00changelog.i
164 1 r3/.hg/store/00changelog.n (rust !)
136 165 1 r3/.hg/store/00manifest.i
137 166 1 r3/.hg/store/data/d1/f2.d
138 167 1 r3/.hg/store/data/d1/f2.i
139 168 1 r3/.hg/store/data/f1.i
140 169 1 r3/.hg/store/fncache (repofncache !)
141 170 1 r3/.hg/store/phaseroots
142 171 1 r3/.hg/store/requires
143 172 1 r3/.hg/store/undo
173 1 r3/.hg/store/undo.backup.00changelog.n.bck (rust !)
144 174 1 r3/.hg/store/undo.backupfiles
145 175
146 176 Push to repo r1 should break up most hardlinks in r2:
147 177
148 178 $ hg -R r2 verify -q
149 179
150 180 $ cd r3
151 181 $ hg push
152 182 pushing to $TESTTMP/r1
153 183 searching for changes
154 184 adding changesets
155 185 adding manifests
156 186 adding file changes
157 187 added 10 changesets with 10 changes to 1 files
158 188
159 189 $ cd ..
160 190
161 191 $ nlinksdir r2/.hg/store
192 1 r2/.hg/store/00changelog-b870a51b.nd (rust !)
193 1 r2/.hg/store/00changelog.d
162 194 1 r2/.hg/store/00changelog.i
195 1 r2/.hg/store/00changelog.n (rust !)
163 196 1 r2/.hg/store/00manifest.i
164 197 1 r2/.hg/store/data/d1/f2.i
165 198 2 r2/.hg/store/data/f1.i
166 199 [12] r2/\.hg/store/fncache (re) (repofncache !)
167 200 1 r2/.hg/store/requires
168 201
169 202 #if hardlink-whitelisted repofncache
170 203 $ nlinksdir r2/.hg/store/fncache
171 204 1 r2/.hg/store/fncache
172 205 #endif
173 206
174 207 $ hg -R r2 verify -q
175 208
176 209 $ cd r1
177 210 $ hg up
178 211 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
179 212
180 213 Committing a change to f1 in r1 must break up hardlink f1.i in r2:
181 214
182 215 $ echo c1c1 >> f1
183 216 $ hg ci -m00
184 217 $ cd ..
185 218
186 219 $ nlinksdir r2/.hg/store
220 1 r2/.hg/store/00changelog-b870a51b.nd (rust !)
221 1 r2/.hg/store/00changelog.d
187 222 1 r2/.hg/store/00changelog.i
223 1 r2/.hg/store/00changelog.n (rust !)
188 224 1 r2/.hg/store/00manifest.i
189 225 1 r2/.hg/store/data/d1/f2.i
190 226 1 r2/.hg/store/data/f1.i
191 227 1 r2/.hg/store/fncache (repofncache !)
192 228 1 r2/.hg/store/requires
193 229
194 230 #if hardlink-whitelisted repofncache
195 231 $ nlinksdir r2/.hg/store/fncache
196 232 1 r2/.hg/store/fncache
197 233 #endif
198 234
199 235 Create a file which exec permissions we will change
200 236 $ cd r3
201 237 $ echo "echo hello world" > f3
202 238 $ hg add f3
203 239 $ hg ci -mf3
204 240 $ cd ..
205 241
206 242 $ cd r3
207 243 $ hg tip --template '{rev}:{node|short}\n'
208 244 12:d3b77733a28a
209 245 $ echo bla > f1
210 246 $ chmod +x f3
211 247 $ hg ci -m1
212 248 $ cd ..
213 249
214 250 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
215 251
216 252 $ linkcp r3 r4
217 253
218 254 'checklink' is produced by hardlinking a symlink, which is undefined whether
219 255 the symlink should be followed or not. It does behave differently on Linux and
220 256 BSD. Just remove it so the test pass on both platforms.
221 257
222 258 $ rm -f r4/.hg/wcache/checklink
223 259
224 260 r4 has hardlinks in the working dir (not just inside .hg):
225 261
226 262 $ nlinksdir r4
227 263 2 r4/.hg/00changelog.i
228 264 [24] r4/.hg/branch (re)
229 265 2 r4/.hg/cache/branch2-base
230 266 2 r4/.hg/cache/branch2-immutable
231 267 2 r4/.hg/cache/branch2-served
232 268 2 r4/.hg/cache/branch2-served.hidden
233 269 2 r4/.hg/cache/branch2-visible
234 270 2 r4/.hg/cache/branch2-visible-hidden
235 271 2 r4/.hg/cache/rbc-names-v1
236 272 2 r4/.hg/cache/rbc-revs-v1
237 273 2 r4/.hg/cache/tags2
238 274 2 r4/.hg/cache/tags2-served
239 275 2 r4/.hg/dirstate
240 276 2 r4/.hg/fsmonitor.state (fsmonitor !)
241 277 2 r4/.hg/hgrc
242 278 2 r4/.hg/last-message.txt
243 279 2 r4/.hg/requires
280 2 r4/.hg/store/00changelog-7f2eb713.nd (rust !)
281 2 r4/.hg/store/00changelog.d
244 282 2 r4/.hg/store/00changelog.i
283 2 r4/.hg/store/00changelog.n (rust !)
245 284 2 r4/.hg/store/00manifest.i
246 285 2 r4/.hg/store/data/d1/f2.d
247 286 2 r4/.hg/store/data/d1/f2.i
248 287 2 r4/.hg/store/data/f1.i
249 288 2 r4/.hg/store/data/f3.i
250 289 2 r4/.hg/store/fncache (repofncache !)
251 290 2 r4/.hg/store/phaseroots
252 291 2 r4/.hg/store/requires
253 292 2 r4/.hg/store/undo
293 2 r4/.hg/store/undo.backup.00changelog.n.bck (rust !)
254 294 2 r4/.hg/store/undo.backupfiles
255 295 [24] r4/.hg/undo.backup.branch.bck (re)
256 296 2 r4/\.hg/undo\.backup\.dirstate.bck (re)
257 297 2 r4/.hg/undo.desc
258 298 2 r4/.hg/wcache/checkisexec (execbit !)
259 299 2 r4/.hg/wcache/checklink-target (symlink !)
260 300 2 r4/.hg/wcache/checknoexec (execbit !)
261 301 2 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !)
262 302 2 r4/d1/data1
263 303 2 r4/d1/f2
264 304 2 r4/f1
265 305 2 r4/f3
266 306
267 307 Update back to revision 12 in r4 should break hardlink of file f1 and f3:
268 308 #if hardlink-whitelisted
269 309 $ nlinksdir r4/.hg/undo.backup.dirstate.bck r4/.hg/dirstate
270 310 2 r4/.hg/dirstate
271 311 2 r4/.hg/undo.backup.dirstate.bck
272 312 #endif
273 313
274 314
275 315 $ hg -R r4 up 12
276 316 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (execbit !)
277 317 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-execbit !)
278 318
279 319 $ nlinksdir r4
280 320 2 r4/.hg/00changelog.i
281 321 1 r4/.hg/branch
282 322 2 r4/.hg/cache/branch2-base
283 323 2 r4/.hg/cache/branch2-immutable
284 324 2 r4/.hg/cache/branch2-served
285 325 2 r4/.hg/cache/branch2-served.hidden
286 326 2 r4/.hg/cache/branch2-visible
287 327 2 r4/.hg/cache/branch2-visible-hidden
288 328 2 r4/.hg/cache/rbc-names-v1
289 329 2 r4/.hg/cache/rbc-revs-v1
290 330 2 r4/.hg/cache/tags2
291 331 2 r4/.hg/cache/tags2-served
292 332 1 r4/.hg/dirstate
293 333 1 r4/.hg/fsmonitor.state (fsmonitor !)
294 334 2 r4/.hg/hgrc
295 335 2 r4/.hg/last-message.txt
296 336 2 r4/.hg/requires
337 2 r4/.hg/store/00changelog-7f2eb713.nd (rust !)
338 2 r4/.hg/store/00changelog.d
297 339 2 r4/.hg/store/00changelog.i
340 2 r4/.hg/store/00changelog.n (rust !)
298 341 2 r4/.hg/store/00manifest.i
299 342 2 r4/.hg/store/data/d1/f2.d
300 343 2 r4/.hg/store/data/d1/f2.i
301 344 2 r4/.hg/store/data/f1.i
302 345 2 r4/.hg/store/data/f3.i
303 346 2 r4/.hg/store/fncache
304 347 2 r4/.hg/store/phaseroots
305 348 2 r4/.hg/store/requires
306 349 2 r4/.hg/store/undo
350 2 r4/.hg/store/undo.backup.00changelog.n.bck (rust !)
307 351 2 r4/.hg/store/undo.backupfiles
308 352 [23] r4/.hg/undo.backup.branch.bck (re)
309 353 2 r4/\.hg/undo\.backup\.dirstate.bck (re)
310 354 2 r4/.hg/undo.desc
311 355 2 r4/.hg/wcache/checkisexec (execbit !)
312 356 2 r4/.hg/wcache/checklink-target (symlink !)
313 357 2 r4/.hg/wcache/checknoexec (execbit !)
314 358 1 r4/.hg/wcache/manifestfulltextcache (reporevlogstore !)
315 359 2 r4/d1/data1
316 360 2 r4/d1/f2
317 361 1 r4/f1
318 362 1 r4/f3 (execbit !)
319 363 2 r4/f3 (no-execbit !)
320 364
321 365 #if hardlink-whitelisted
322 366 $ nlinksdir r4/.hg/undo.backup.dirstate.bck r4/.hg/dirstate
323 367 1 r4/.hg/dirstate
324 368 2 r4/.hg/undo.backup.dirstate.bck
325 369 #endif
326 370
327 371 Test hardlinking outside hg:
328 372
329 373 $ mkdir x
330 374 $ echo foo > x/a
331 375
332 376 $ linkcp x y
333 377 $ echo bar >> y/a
334 378
335 379 No diff if hardlink:
336 380
337 381 $ diff x/a y/a
338 382
339 383 Test mq hardlinking:
340 384
341 385 $ echo "[extensions]" >> $HGRCPATH
342 386 $ echo "mq=" >> $HGRCPATH
343 387
344 388 $ hg init a
345 389 $ cd a
346 390
347 391 $ hg qimport -n foo - << EOF
348 392 > # HG changeset patch
349 393 > # Date 1 0
350 394 > diff -r 2588a8b53d66 a
351 395 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
352 396 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
353 397 > @@ -0,0 +1,1 @@
354 398 > +a
355 399 > EOF
356 400 adding foo to series file
357 401
358 402 $ hg qpush
359 403 applying foo
360 404 now at: foo
361 405
362 406 $ cd ..
363 407 $ linkcp a b
364 408 $ cd b
365 409
366 410 $ hg qimport -n bar - << EOF
367 411 > # HG changeset patch
368 412 > # Date 2 0
369 413 > diff -r 2588a8b53d66 a
370 414 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
371 415 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
372 416 > @@ -0,0 +1,1 @@
373 417 > +b
374 418 > EOF
375 419 adding bar to series file
376 420
377 421 $ hg qpush
378 422 applying bar
379 423 now at: bar
380 424
381 425 $ cat .hg/patches/status
382 426 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
383 427 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
384 428
385 429 $ cat .hg/patches/series
386 430 foo
387 431 bar
388 432
389 433 $ cat ../a/.hg/patches/status
390 434 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
391 435
392 436 $ cat ../a/.hg/patches/series
393 437 foo
394 438
395 439 Test tags hardlinking:
396 440
397 441 $ hg qdel -r qbase:qtip
398 442 patch foo finalized without changeset message
399 443 patch bar finalized without changeset message
400 444
401 445 $ hg tag -l lfoo
402 446 $ hg tag foo
403 447
404 448 $ cd ..
405 449 $ linkcp b c
406 450 $ cd c
407 451
408 452 $ hg tag -l -r 0 lbar
409 453 $ hg tag -r 0 bar
410 454
411 455 $ cat .hgtags
412 456 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
413 457 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
414 458
415 459 $ cat .hg/localtags
416 460 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
417 461 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
418 462
419 463 $ cat ../b/.hgtags
420 464 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
421 465
422 466 $ cat ../b/.hg/localtags
423 467 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
424 468
425 469 $ cd ..
@@ -1,1463 +1,1467 b''
1 1 commit hooks can see env vars
2 2 (and post-transaction one are run unlocked)
3 3
4 4
5 5 $ cat > $TESTTMP/txnabort.checkargs.py <<EOF
6 6 > from mercurial import pycompat
7 7 > def showargs(ui, repo, hooktype, **kwargs):
8 8 > kwargs = pycompat.byteskwargs(kwargs)
9 9 > ui.write(b'%s Python hook: %s\n' % (hooktype,
10 10 > b','.join(sorted(kwargs))))
11 11 > EOF
12 12
13 13 $ hg init a
14 14 $ cd a
15 15 $ cat > .hg/hgrc <<EOF
16 16 > [hooks]
17 17 > commit = sh -c "HG_LOCAL= HG_TAG= printenv.py --line commit"
18 18 > commit.b = sh -c "HG_LOCAL= HG_TAG= printenv.py --line commit.b"
19 19 > precommit = sh -c "HG_LOCAL= HG_NODE= HG_TAG= printenv.py --line precommit"
20 20 > pretxncommit = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxncommit"
21 21 > pretxncommit.tip = hg -q tip
22 22 > pre-identify = sh -c "printenv.py --line pre-identify 1"
23 23 > pre-cat = sh -c "printenv.py --line pre-cat"
24 24 > post-cat = sh -c "printenv.py --line post-cat"
25 25 > pretxnopen = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxnopen"
26 26 > pretxnclose = sh -c "HG_LOCAL= HG_TAG= printenv.py --line pretxnclose"
27 27 > txnclose = sh -c "HG_LOCAL= HG_TAG= printenv.py --line txnclose"
28 28 > txnabort.0 = python:$TESTTMP/txnabort.checkargs.py:showargs
29 29 > txnabort.1 = sh -c "HG_LOCAL= HG_TAG= printenv.py --line txnabort"
30 30 > txnclose.checklock = sh -c "hg debuglock > /dev/null"
31 31 > EOF
32 32 $ echo a > a
33 33 $ hg add a
34 34 $ hg commit -m a
35 35 precommit hook: HG_HOOKNAME=precommit
36 36 HG_HOOKTYPE=precommit
37 37 HG_PARENT1=0000000000000000000000000000000000000000
38 38
39 39 pretxnopen hook: HG_HOOKNAME=pretxnopen
40 40 HG_HOOKTYPE=pretxnopen
41 41 HG_TXNID=TXN:$ID$
42 42 HG_TXNNAME=commit
43 43
44 44 pretxncommit hook: HG_HOOKNAME=pretxncommit
45 45 HG_HOOKTYPE=pretxncommit
46 46 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
47 47 HG_PARENT1=0000000000000000000000000000000000000000
48 48 HG_PENDING=$TESTTMP/a
49 49
50 50 0:cb9a9f314b8b
51 51 pretxnclose hook: HG_HOOKNAME=pretxnclose
52 52 HG_HOOKTYPE=pretxnclose
53 53 HG_PENDING=$TESTTMP/a
54 54 HG_PHASES_MOVED=1
55 55 HG_TXNID=TXN:$ID$
56 56 HG_TXNNAME=commit
57 57
58 58 txnclose hook: HG_HOOKNAME=txnclose
59 59 HG_HOOKTYPE=txnclose
60 60 HG_PHASES_MOVED=1
61 61 HG_TXNID=TXN:$ID$
62 62 HG_TXNNAME=commit
63 63
64 64 commit hook: HG_HOOKNAME=commit
65 65 HG_HOOKTYPE=commit
66 66 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
67 67 HG_PARENT1=0000000000000000000000000000000000000000
68 68
69 69 commit.b hook: HG_HOOKNAME=commit.b
70 70 HG_HOOKTYPE=commit
71 71 HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
72 72 HG_PARENT1=0000000000000000000000000000000000000000
73 73
74 74
75 75 $ hg clone . ../b
76 76 updating to branch default
77 77 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
78 78 $ cd ../b
79 79
80 80 changegroup hooks can see env vars
81 81
82 82 $ cat > .hg/hgrc <<EOF
83 83 > [hooks]
84 84 > prechangegroup = sh -c "printenv.py --line prechangegroup"
85 85 > changegroup = sh -c "printenv.py --line changegroup"
86 86 > incoming = sh -c "printenv.py --line incoming"
87 87 > EOF
88 88
89 89 pretxncommit and commit hooks can see both parents of merge
90 90
91 91 $ cd ../a
92 92 $ echo b >> a
93 93 $ hg commit -m a1 -d "1 0"
94 94 precommit hook: HG_HOOKNAME=precommit
95 95 HG_HOOKTYPE=precommit
96 96 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
97 97
98 98 pretxnopen hook: HG_HOOKNAME=pretxnopen
99 99 HG_HOOKTYPE=pretxnopen
100 100 HG_TXNID=TXN:$ID$
101 101 HG_TXNNAME=commit
102 102
103 103 pretxncommit hook: HG_HOOKNAME=pretxncommit
104 104 HG_HOOKTYPE=pretxncommit
105 105 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
106 106 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
107 107 HG_PENDING=$TESTTMP/a
108 108
109 109 1:ab228980c14d
110 110 pretxnclose hook: HG_HOOKNAME=pretxnclose
111 111 HG_HOOKTYPE=pretxnclose
112 112 HG_PENDING=$TESTTMP/a
113 113 HG_TXNID=TXN:$ID$
114 114 HG_TXNNAME=commit
115 115
116 116 txnclose hook: HG_HOOKNAME=txnclose
117 117 HG_HOOKTYPE=txnclose
118 118 HG_TXNID=TXN:$ID$
119 119 HG_TXNNAME=commit
120 120
121 121 commit hook: HG_HOOKNAME=commit
122 122 HG_HOOKTYPE=commit
123 123 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
124 124 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
125 125
126 126 commit.b hook: HG_HOOKNAME=commit.b
127 127 HG_HOOKTYPE=commit
128 128 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
129 129 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
130 130
131 131 $ hg update -C 0
132 132 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
133 133 $ echo b > b
134 134 $ hg add b
135 135 $ hg commit -m b -d '1 0'
136 136 precommit hook: HG_HOOKNAME=precommit
137 137 HG_HOOKTYPE=precommit
138 138 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
139 139
140 140 pretxnopen hook: HG_HOOKNAME=pretxnopen
141 141 HG_HOOKTYPE=pretxnopen
142 142 HG_TXNID=TXN:$ID$
143 143 HG_TXNNAME=commit
144 144
145 145 pretxncommit hook: HG_HOOKNAME=pretxncommit
146 146 HG_HOOKTYPE=pretxncommit
147 147 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
148 148 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
149 149 HG_PENDING=$TESTTMP/a
150 150
151 151 2:ee9deb46ab31
152 152 pretxnclose hook: HG_HOOKNAME=pretxnclose
153 153 HG_HOOKTYPE=pretxnclose
154 154 HG_PENDING=$TESTTMP/a
155 155 HG_TXNID=TXN:$ID$
156 156 HG_TXNNAME=commit
157 157
158 158 created new head
159 159 txnclose hook: HG_HOOKNAME=txnclose
160 160 HG_HOOKTYPE=txnclose
161 161 HG_TXNID=TXN:$ID$
162 162 HG_TXNNAME=commit
163 163
164 164 commit hook: HG_HOOKNAME=commit
165 165 HG_HOOKTYPE=commit
166 166 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
167 167 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
168 168
169 169 commit.b hook: HG_HOOKNAME=commit.b
170 170 HG_HOOKTYPE=commit
171 171 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
172 172 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
173 173
174 174 $ hg merge 1
175 175 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
176 176 (branch merge, don't forget to commit)
177 177 $ hg commit -m merge -d '2 0'
178 178 precommit hook: HG_HOOKNAME=precommit
179 179 HG_HOOKTYPE=precommit
180 180 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
181 181 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
182 182
183 183 pretxnopen hook: HG_HOOKNAME=pretxnopen
184 184 HG_HOOKTYPE=pretxnopen
185 185 HG_TXNID=TXN:$ID$
186 186 HG_TXNNAME=commit
187 187
188 188 pretxncommit hook: HG_HOOKNAME=pretxncommit
189 189 HG_HOOKTYPE=pretxncommit
190 190 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
191 191 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
192 192 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
193 193 HG_PENDING=$TESTTMP/a
194 194
195 195 3:07f3376c1e65
196 196 pretxnclose hook: HG_HOOKNAME=pretxnclose
197 197 HG_HOOKTYPE=pretxnclose
198 198 HG_PENDING=$TESTTMP/a
199 199 HG_TXNID=TXN:$ID$
200 200 HG_TXNNAME=commit
201 201
202 202 txnclose hook: HG_HOOKNAME=txnclose
203 203 HG_HOOKTYPE=txnclose
204 204 HG_TXNID=TXN:$ID$
205 205 HG_TXNNAME=commit
206 206
207 207 commit hook: HG_HOOKNAME=commit
208 208 HG_HOOKTYPE=commit
209 209 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
210 210 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
211 211 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
212 212
213 213 commit.b hook: HG_HOOKNAME=commit.b
214 214 HG_HOOKTYPE=commit
215 215 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
216 216 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
217 217 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
218 218
219 219
220 220 test generic hooks
221 221
222 222 $ hg id
223 223 pre-identify hook: HG_ARGS=id
224 224 HG_HOOKNAME=pre-identify
225 225 HG_HOOKTYPE=pre-identify
226 226 HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None, 'template': ''}
227 227 HG_PATS=[]
228 228
229 229 abort: pre-identify hook exited with status 1
230 230 [40]
231 231 $ hg cat b
232 232 pre-cat hook: HG_ARGS=cat b
233 233 HG_HOOKNAME=pre-cat
234 234 HG_HOOKTYPE=pre-cat
235 235 HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': '', 'template': ''}
236 236 HG_PATS=['b']
237 237
238 238 b
239 239 post-cat hook: HG_ARGS=cat b
240 240 HG_HOOKNAME=post-cat
241 241 HG_HOOKTYPE=post-cat
242 242 HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': '', 'template': ''}
243 243 HG_PATS=['b']
244 244 HG_RESULT=0
245 245
246 246
247 247 $ cd ../b
248 248 $ hg pull ../a
249 249 pulling from ../a
250 250 searching for changes
251 251 prechangegroup hook: HG_HOOKNAME=prechangegroup
252 252 HG_HOOKTYPE=prechangegroup
253 253 HG_SOURCE=pull
254 254 HG_TXNID=TXN:$ID$
255 255 HG_TXNNAME=pull
256 256 file:/*/$TESTTMP/a (glob)
257 257 HG_URL=file:$TESTTMP/a
258 258
259 259 adding changesets
260 260 adding manifests
261 261 adding file changes
262 262 added 3 changesets with 2 changes to 2 files
263 263 new changesets ab228980c14d:07f3376c1e65
264 264 changegroup hook: HG_HOOKNAME=changegroup
265 265 HG_HOOKTYPE=changegroup
266 266 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
267 267 HG_NODE_LAST=07f3376c1e655977439df2a814e3cc14b27abac2
268 268 HG_SOURCE=pull
269 269 HG_TXNID=TXN:$ID$
270 270 HG_TXNNAME=pull
271 271 file:/*/$TESTTMP/a (glob)
272 272 HG_URL=file:$TESTTMP/a
273 273
274 274 incoming hook: HG_HOOKNAME=incoming
275 275 HG_HOOKTYPE=incoming
276 276 HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd
277 277 HG_SOURCE=pull
278 278 HG_TXNID=TXN:$ID$
279 279 HG_TXNNAME=pull
280 280 file:/*/$TESTTMP/a (glob)
281 281 HG_URL=file:$TESTTMP/a
282 282
283 283 incoming hook: HG_HOOKNAME=incoming
284 284 HG_HOOKTYPE=incoming
285 285 HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2
286 286 HG_SOURCE=pull
287 287 HG_TXNID=TXN:$ID$
288 288 HG_TXNNAME=pull
289 289 file:/*/$TESTTMP/a (glob)
290 290 HG_URL=file:$TESTTMP/a
291 291
292 292 incoming hook: HG_HOOKNAME=incoming
293 293 HG_HOOKTYPE=incoming
294 294 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
295 295 HG_SOURCE=pull
296 296 HG_TXNID=TXN:$ID$
297 297 HG_TXNNAME=pull
298 298 file:/*/$TESTTMP/a (glob)
299 299 HG_URL=file:$TESTTMP/a
300 300
301 301 (run 'hg update' to get a working copy)
302 302
303 303 tag hooks can see env vars
304 304
305 305 $ cd ../a
306 306 $ cat >> .hg/hgrc <<EOF
307 307 > pretag = sh -c "printenv.py --line pretag"
308 308 > tag = sh -c "HG_PARENT1= HG_PARENT2= printenv.py --line tag"
309 309 > EOF
310 310 $ hg tag -d '3 0' a
311 311 pretag hook: HG_HOOKNAME=pretag
312 312 HG_HOOKTYPE=pretag
313 313 HG_LOCAL=0
314 314 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
315 315 HG_TAG=a
316 316
317 317 precommit hook: HG_HOOKNAME=precommit
318 318 HG_HOOKTYPE=precommit
319 319 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
320 320
321 321 pretxnopen hook: HG_HOOKNAME=pretxnopen
322 322 HG_HOOKTYPE=pretxnopen
323 323 HG_TXNID=TXN:$ID$
324 324 HG_TXNNAME=commit
325 325
326 326 pretxncommit hook: HG_HOOKNAME=pretxncommit
327 327 HG_HOOKTYPE=pretxncommit
328 328 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
329 329 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
330 330 HG_PENDING=$TESTTMP/a
331 331
332 332 4:539e4b31b6dc
333 333 pretxnclose hook: HG_HOOKNAME=pretxnclose
334 334 HG_HOOKTYPE=pretxnclose
335 335 HG_PENDING=$TESTTMP/a
336 336 HG_TXNID=TXN:$ID$
337 337 HG_TXNNAME=commit
338 338
339 339 tag hook: HG_HOOKNAME=tag
340 340 HG_HOOKTYPE=tag
341 341 HG_LOCAL=0
342 342 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2
343 343 HG_TAG=a
344 344
345 345 txnclose hook: HG_HOOKNAME=txnclose
346 346 HG_HOOKTYPE=txnclose
347 347 HG_TXNID=TXN:$ID$
348 348 HG_TXNNAME=commit
349 349
350 350 commit hook: HG_HOOKNAME=commit
351 351 HG_HOOKTYPE=commit
352 352 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
353 353 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
354 354
355 355 commit.b hook: HG_HOOKNAME=commit.b
356 356 HG_HOOKTYPE=commit
357 357 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
358 358 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
359 359
360 360 $ hg tag -l la
361 361 pretag hook: HG_HOOKNAME=pretag
362 362 HG_HOOKTYPE=pretag
363 363 HG_LOCAL=1
364 364 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
365 365 HG_TAG=la
366 366
367 367 tag hook: HG_HOOKNAME=tag
368 368 HG_HOOKTYPE=tag
369 369 HG_LOCAL=1
370 370 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
371 371 HG_TAG=la
372 372
373 373
374 374 pretag hook can forbid tagging
375 375
376 376 $ cat >> .hg/hgrc <<EOF
377 377 > pretag.forbid = sh -c "printenv.py --line pretag.forbid 1"
378 378 > EOF
379 379 $ hg tag -d '4 0' fa
380 380 pretag hook: HG_HOOKNAME=pretag
381 381 HG_HOOKTYPE=pretag
382 382 HG_LOCAL=0
383 383 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
384 384 HG_TAG=fa
385 385
386 386 pretag.forbid hook: HG_HOOKNAME=pretag.forbid
387 387 HG_HOOKTYPE=pretag
388 388 HG_LOCAL=0
389 389 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
390 390 HG_TAG=fa
391 391
392 392 abort: pretag.forbid hook exited with status 1
393 393 [40]
394 394 $ hg tag -l fla
395 395 pretag hook: HG_HOOKNAME=pretag
396 396 HG_HOOKTYPE=pretag
397 397 HG_LOCAL=1
398 398 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
399 399 HG_TAG=fla
400 400
401 401 pretag.forbid hook: HG_HOOKNAME=pretag.forbid
402 402 HG_HOOKTYPE=pretag
403 403 HG_LOCAL=1
404 404 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
405 405 HG_TAG=fla
406 406
407 407 abort: pretag.forbid hook exited with status 1
408 408 [40]
409 409
410 410 pretxncommit hook can see changeset, can roll back txn, changeset no
411 411 more there after
412 412
413 413 $ cat >> .hg/hgrc <<EOF
414 414 > pretxncommit.forbid0 = sh -c "hg tip -q"
415 415 > pretxncommit.forbid1 = sh -c "printenv.py --line pretxncommit.forbid 1"
416 416 > EOF
417 417 $ echo z > z
418 418 $ hg add z
419 419 $ hg -q tip
420 420 4:539e4b31b6dc
421 421 $ hg commit -m 'fail' -d '4 0'
422 422 precommit hook: HG_HOOKNAME=precommit
423 423 HG_HOOKTYPE=precommit
424 424 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
425 425
426 426 pretxnopen hook: HG_HOOKNAME=pretxnopen
427 427 HG_HOOKTYPE=pretxnopen
428 428 HG_TXNID=TXN:$ID$
429 429 HG_TXNNAME=commit
430 430
431 431 pretxncommit hook: HG_HOOKNAME=pretxncommit
432 432 HG_HOOKTYPE=pretxncommit
433 433 HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567
434 434 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
435 435 HG_PENDING=$TESTTMP/a
436 436
437 437 5:6f611f8018c1
438 438 5:6f611f8018c1
439 439 pretxncommit.forbid hook: HG_HOOKNAME=pretxncommit.forbid1
440 440 HG_HOOKTYPE=pretxncommit
441 441 HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567
442 442 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
443 443 HG_PENDING=$TESTTMP/a
444 444
445 445 transaction abort!
446 446 txnabort Python hook: changes,txnid,txnname
447 447 txnabort hook: HG_HOOKNAME=txnabort.1
448 448 HG_HOOKTYPE=txnabort
449 449 HG_TXNID=TXN:$ID$
450 450 HG_TXNNAME=commit
451 451
452 452 rollback completed
453 453 abort: pretxncommit.forbid1 hook exited with status 1
454 454 [40]
455 455 $ hg -q tip
456 456 4:539e4b31b6dc
457 457
458 458 (Check that no 'changelog.i.a' file were left behind)
459 459
460 460 $ ls -1 .hg/store/
461 00changelog-1335303a.nd (rust !)
462 00changelog.d
461 463 00changelog.i
464 00changelog.n (rust !)
462 465 00manifest.i
463 466 data
464 fncache (repofncache !)
467 fncache
465 468 phaseroots
466 469 requires
467 470 undo
468 undo.backup.fncache.bck (repofncache !)
471 undo.backup.00changelog.n.bck (rust !)
472 undo.backup.fncache.bck
469 473 undo.backupfiles
470 474
471 475
472 476 precommit hook can prevent commit
473 477
474 478 $ cat >> .hg/hgrc <<EOF
475 479 > precommit.forbid = sh -c "printenv.py --line precommit.forbid 1"
476 480 > EOF
477 481 $ hg commit -m 'fail' -d '4 0'
478 482 precommit hook: HG_HOOKNAME=precommit
479 483 HG_HOOKTYPE=precommit
480 484 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
481 485
482 486 precommit.forbid hook: HG_HOOKNAME=precommit.forbid
483 487 HG_HOOKTYPE=precommit
484 488 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
485 489
486 490 abort: precommit.forbid hook exited with status 1
487 491 [40]
488 492 $ hg -q tip
489 493 4:539e4b31b6dc
490 494
491 495 preupdate hook can prevent update
492 496
493 497 $ cat >> .hg/hgrc <<EOF
494 498 > preupdate = sh -c "printenv.py --line preupdate"
495 499 > EOF
496 500 $ hg update 1
497 501 preupdate hook: HG_HOOKNAME=preupdate
498 502 HG_HOOKTYPE=preupdate
499 503 HG_PARENT1=ab228980c14d
500 504
501 505 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
502 506
503 507 update hook
504 508
505 509 $ cat >> .hg/hgrc <<EOF
506 510 > update = sh -c "printenv.py --line update"
507 511 > EOF
508 512 $ hg update
509 513 preupdate hook: HG_HOOKNAME=preupdate
510 514 HG_HOOKTYPE=preupdate
511 515 HG_PARENT1=539e4b31b6dc
512 516
513 517 update hook: HG_ERROR=0
514 518 HG_HOOKNAME=update
515 519 HG_HOOKTYPE=update
516 520 HG_PARENT1=539e4b31b6dc
517 521
518 522 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
519 523
520 524 pushkey hook
521 525
522 526 $ cat >> .hg/hgrc <<EOF
523 527 > pushkey = sh -c "printenv.py --line pushkey"
524 528 > EOF
525 529 $ cd ../b
526 530 $ hg bookmark -r null foo
527 531 $ hg push -B foo ../a
528 532 pushing to ../a
529 533 searching for changes
530 534 no changes found
531 535 pretxnopen hook: HG_HOOKNAME=pretxnopen
532 536 HG_HOOKTYPE=pretxnopen
533 537 HG_TXNID=TXN:$ID$
534 538 HG_TXNNAME=push
535 539
536 540 pretxnclose hook: HG_BOOKMARK_MOVED=1
537 541 HG_BUNDLE2=1
538 542 HG_HOOKNAME=pretxnclose
539 543 HG_HOOKTYPE=pretxnclose
540 544 HG_PENDING=$TESTTMP/a
541 545 HG_SOURCE=push
542 546 HG_TXNID=TXN:$ID$
543 547 HG_TXNNAME=push
544 548 HG_URL=file:$TESTTMP/a
545 549
546 550 pushkey hook: HG_BUNDLE2=1
547 551 HG_HOOKNAME=pushkey
548 552 HG_HOOKTYPE=pushkey
549 553 HG_KEY=foo
550 554 HG_NAMESPACE=bookmarks
551 555 HG_NEW=0000000000000000000000000000000000000000
552 556 HG_PUSHKEYCOMPAT=1
553 557 HG_SOURCE=push
554 558 HG_TXNID=TXN:$ID$
555 559 HG_TXNNAME=push
556 560 HG_URL=file:$TESTTMP/a
557 561
558 562 txnclose hook: HG_BOOKMARK_MOVED=1
559 563 HG_BUNDLE2=1
560 564 HG_HOOKNAME=txnclose
561 565 HG_HOOKTYPE=txnclose
562 566 HG_SOURCE=push
563 567 HG_TXNID=TXN:$ID$
564 568 HG_TXNNAME=push
565 569 HG_URL=file:$TESTTMP/a
566 570
567 571 exporting bookmark foo
568 572 [1]
569 573 $ cd ../a
570 574
571 575 listkeys hook
572 576
573 577 $ cat >> .hg/hgrc <<EOF
574 578 > listkeys = sh -c "printenv.py --line listkeys"
575 579 > EOF
576 580 $ hg bookmark -r null bar
577 581 pretxnopen hook: HG_HOOKNAME=pretxnopen
578 582 HG_HOOKTYPE=pretxnopen
579 583 HG_TXNID=TXN:$ID$
580 584 HG_TXNNAME=bookmark
581 585
582 586 pretxnclose hook: HG_BOOKMARK_MOVED=1
583 587 HG_HOOKNAME=pretxnclose
584 588 HG_HOOKTYPE=pretxnclose
585 589 HG_PENDING=$TESTTMP/a
586 590 HG_TXNID=TXN:$ID$
587 591 HG_TXNNAME=bookmark
588 592
589 593 txnclose hook: HG_BOOKMARK_MOVED=1
590 594 HG_HOOKNAME=txnclose
591 595 HG_HOOKTYPE=txnclose
592 596 HG_TXNID=TXN:$ID$
593 597 HG_TXNNAME=bookmark
594 598
595 599 $ cd ../b
596 600 $ hg pull -B bar ../a
597 601 pulling from ../a
598 602 listkeys hook: HG_HOOKNAME=listkeys
599 603 HG_HOOKTYPE=listkeys
600 604 HG_NAMESPACE=bookmarks
601 605 HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
602 606
603 607 no changes found
604 608 adding remote bookmark bar
605 609 $ cd ../a
606 610
607 611 test that prepushkey can prevent incoming keys
608 612
609 613 $ cat >> .hg/hgrc <<EOF
610 614 > prepushkey = sh -c "printenv.py --line prepushkey.forbid 1"
611 615 > EOF
612 616 $ cd ../b
613 617 $ hg bookmark -r null baz
614 618 $ hg push -B baz ../a
615 619 pushing to ../a
616 620 searching for changes
617 621 listkeys hook: HG_HOOKNAME=listkeys
618 622 HG_HOOKTYPE=listkeys
619 623 HG_NAMESPACE=phases
620 624 HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
621 625
622 626 listkeys hook: HG_HOOKNAME=listkeys
623 627 HG_HOOKTYPE=listkeys
624 628 HG_NAMESPACE=bookmarks
625 629 HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
626 630
627 631 no changes found
628 632 pretxnopen hook: HG_HOOKNAME=pretxnopen
629 633 HG_HOOKTYPE=pretxnopen
630 634 HG_TXNID=TXN:$ID$
631 635 HG_TXNNAME=push
632 636
633 637 prepushkey.forbid hook: HG_BUNDLE2=1
634 638 HG_HOOKNAME=prepushkey
635 639 HG_HOOKTYPE=prepushkey
636 640 HG_KEY=baz
637 641 HG_NAMESPACE=bookmarks
638 642 HG_NEW=0000000000000000000000000000000000000000
639 643 HG_PUSHKEYCOMPAT=1
640 644 HG_SOURCE=push
641 645 HG_TXNID=TXN:$ID$
642 646 HG_TXNNAME=push
643 647 HG_URL=file:$TESTTMP/a
644 648
645 649 txnabort Python hook: bundle2,changes,source,txnid,txnname,url
646 650 txnabort hook: HG_BUNDLE2=1
647 651 HG_HOOKNAME=txnabort.1
648 652 HG_HOOKTYPE=txnabort
649 653 HG_SOURCE=push
650 654 HG_TXNID=TXN:$ID$
651 655 HG_TXNNAME=push
652 656 HG_URL=file:$TESTTMP/a
653 657
654 658 abort: prepushkey hook exited with status 1
655 659 [40]
656 660 $ cd ../a
657 661
658 662 test that prelistkeys can prevent listing keys
659 663
660 664 $ cat >> .hg/hgrc <<EOF
661 665 > prelistkeys = sh -c "printenv.py --line prelistkeys.forbid 1"
662 666 > EOF
663 667 $ hg bookmark -r null quux
664 668 pretxnopen hook: HG_HOOKNAME=pretxnopen
665 669 HG_HOOKTYPE=pretxnopen
666 670 HG_TXNID=TXN:$ID$
667 671 HG_TXNNAME=bookmark
668 672
669 673 pretxnclose hook: HG_BOOKMARK_MOVED=1
670 674 HG_HOOKNAME=pretxnclose
671 675 HG_HOOKTYPE=pretxnclose
672 676 HG_PENDING=$TESTTMP/a
673 677 HG_TXNID=TXN:$ID$
674 678 HG_TXNNAME=bookmark
675 679
676 680 txnclose hook: HG_BOOKMARK_MOVED=1
677 681 HG_HOOKNAME=txnclose
678 682 HG_HOOKTYPE=txnclose
679 683 HG_TXNID=TXN:$ID$
680 684 HG_TXNNAME=bookmark
681 685
682 686 $ cd ../b
683 687 $ hg pull -B quux ../a
684 688 pulling from ../a
685 689 prelistkeys.forbid hook: HG_HOOKNAME=prelistkeys
686 690 HG_HOOKTYPE=prelistkeys
687 691 HG_NAMESPACE=bookmarks
688 692
689 693 abort: prelistkeys hook exited with status 1
690 694 [40]
691 695 $ cd ../a
692 696 $ rm .hg/hgrc
693 697
694 698 prechangegroup hook can prevent incoming changes
695 699
696 700 $ cd ../b
697 701 $ hg -q tip
698 702 3:07f3376c1e65
699 703 $ cat > .hg/hgrc <<EOF
700 704 > [hooks]
701 705 > prechangegroup.forbid = sh -c "printenv.py --line prechangegroup.forbid 1"
702 706 > EOF
703 707 $ hg pull ../a
704 708 pulling from ../a
705 709 searching for changes
706 710 prechangegroup.forbid hook: HG_HOOKNAME=prechangegroup.forbid
707 711 HG_HOOKTYPE=prechangegroup
708 712 HG_SOURCE=pull
709 713 HG_TXNID=TXN:$ID$
710 714 HG_TXNNAME=pull
711 715 file:/*/$TESTTMP/a (glob)
712 716 HG_URL=file:$TESTTMP/a
713 717
714 718 abort: prechangegroup.forbid hook exited with status 1
715 719 [40]
716 720
717 721 pretxnchangegroup hook can see incoming changes, can roll back txn,
718 722 incoming changes no longer there after
719 723
720 724 $ cat > .hg/hgrc <<EOF
721 725 > [hooks]
722 726 > pretxnchangegroup.forbid0 = hg tip -q
723 727 > pretxnchangegroup.forbid1 = sh -c "printenv.py --line pretxnchangegroup.forbid 1"
724 728 > EOF
725 729 $ hg pull ../a
726 730 pulling from ../a
727 731 searching for changes
728 732 adding changesets
729 733 adding manifests
730 734 adding file changes
731 735 4:539e4b31b6dc
732 736 pretxnchangegroup.forbid hook: HG_HOOKNAME=pretxnchangegroup.forbid1
733 737 HG_HOOKTYPE=pretxnchangegroup
734 738 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
735 739 HG_NODE_LAST=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
736 740 HG_PENDING=$TESTTMP/b
737 741 HG_SOURCE=pull
738 742 HG_TXNID=TXN:$ID$
739 743 HG_TXNNAME=pull
740 744 file:/*/$TESTTMP/a (glob)
741 745 HG_URL=file:$TESTTMP/a
742 746
743 747 transaction abort!
744 748 rollback completed
745 749 abort: pretxnchangegroup.forbid1 hook exited with status 1
746 750 [40]
747 751 $ hg -q tip
748 752 3:07f3376c1e65
749 753
750 754 outgoing hooks can see env vars
751 755
752 756 $ rm .hg/hgrc
753 757 $ cat > ../a/.hg/hgrc <<EOF
754 758 > [hooks]
755 759 > preoutgoing = sh -c "printenv.py --line preoutgoing"
756 760 > outgoing = sh -c "printenv.py --line outgoing"
757 761 > EOF
758 762 $ hg pull ../a
759 763 pulling from ../a
760 764 searching for changes
761 765 preoutgoing hook: HG_HOOKNAME=preoutgoing
762 766 HG_HOOKTYPE=preoutgoing
763 767 HG_SOURCE=pull
764 768
765 769 outgoing hook: HG_HOOKNAME=outgoing
766 770 HG_HOOKTYPE=outgoing
767 771 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
768 772 HG_SOURCE=pull
769 773
770 774 adding changesets
771 775 adding manifests
772 776 adding file changes
773 777 adding remote bookmark quux
774 778 added 1 changesets with 1 changes to 1 files
775 779 new changesets 539e4b31b6dc
776 780 (run 'hg update' to get a working copy)
777 781 $ hg rollback
778 782 repository tip rolled back to revision 3 (undo pull)
779 783
780 784 preoutgoing hook can prevent outgoing changes
781 785
782 786 $ cat >> ../a/.hg/hgrc <<EOF
783 787 > preoutgoing.forbid = sh -c "printenv.py --line preoutgoing.forbid 1"
784 788 > EOF
785 789 $ hg pull ../a
786 790 pulling from ../a
787 791 searching for changes
788 792 preoutgoing hook: HG_HOOKNAME=preoutgoing
789 793 HG_HOOKTYPE=preoutgoing
790 794 HG_SOURCE=pull
791 795
792 796 preoutgoing.forbid hook: HG_HOOKNAME=preoutgoing.forbid
793 797 HG_HOOKTYPE=preoutgoing
794 798 HG_SOURCE=pull
795 799
796 800 abort: preoutgoing.forbid hook exited with status 1
797 801 [40]
798 802
799 803 outgoing hooks work for local clones
800 804
801 805 $ cd ..
802 806 $ cat > a/.hg/hgrc <<EOF
803 807 > [hooks]
804 808 > preoutgoing = sh -c "printenv.py --line preoutgoing"
805 809 > outgoing = sh -c "printenv.py --line outgoing"
806 810 > EOF
807 811 $ hg clone a c
808 812 preoutgoing hook: HG_HOOKNAME=preoutgoing
809 813 HG_HOOKTYPE=preoutgoing
810 814 HG_SOURCE=clone
811 815
812 816 outgoing hook: HG_HOOKNAME=outgoing
813 817 HG_HOOKTYPE=outgoing
814 818 HG_NODE=0000000000000000000000000000000000000000
815 819 HG_SOURCE=clone
816 820
817 821 updating to branch default
818 822 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
819 823 $ rm -rf c
820 824
821 825 preoutgoing hook can prevent outgoing changes for local clones
822 826
823 827 $ cat >> a/.hg/hgrc <<EOF
824 828 > preoutgoing.forbid = sh -c "printenv.py --line preoutgoing.forbid 1"
825 829 > EOF
826 830 $ hg clone a zzz
827 831 preoutgoing hook: HG_HOOKNAME=preoutgoing
828 832 HG_HOOKTYPE=preoutgoing
829 833 HG_SOURCE=clone
830 834
831 835 preoutgoing.forbid hook: HG_HOOKNAME=preoutgoing.forbid
832 836 HG_HOOKTYPE=preoutgoing
833 837 HG_SOURCE=clone
834 838
835 839 abort: preoutgoing.forbid hook exited with status 1
836 840 [40]
837 841
838 842 $ cd "$TESTTMP/b"
839 843
840 844 $ cat > hooktests.py <<EOF
841 845 > from mercurial import (
842 846 > error,
843 847 > pycompat,
844 848 > )
845 849 >
846 850 > uncallable = 0
847 851 >
848 852 > def printargs(ui, args):
849 853 > a = list(pycompat.byteskwargs(args).items())
850 854 > a.sort()
851 855 > ui.write(b'hook args:\n')
852 856 > for k, v in a:
853 857 > ui.write(b' %s %s\n' % (k, v))
854 858 >
855 859 > def passhook(ui, repo, **args):
856 860 > printargs(ui, args)
857 861 >
858 862 > def failhook(ui, repo, **args):
859 863 > printargs(ui, args)
860 864 > return True
861 865 >
862 866 > class LocalException(Exception):
863 867 > pass
864 868 >
865 869 > def raisehook(**args):
866 870 > raise LocalException('exception from hook')
867 871 >
868 872 > def aborthook(**args):
869 873 > raise error.Abort(b'raise abort from hook')
870 874 >
871 875 > def brokenhook(**args):
872 876 > return 1 + {}
873 877 >
874 878 > def verbosehook(ui, **args):
875 879 > ui.note(b'verbose output from hook\n')
876 880 >
877 881 > def printtags(ui, repo, **args):
878 882 > ui.write(b'[%s]\n' % b', '.join(sorted(repo.tags())))
879 883 >
880 884 > class container(object):
881 885 > unreachable = 1
882 886 > EOF
883 887
884 888 $ cat > syntaxerror.py << NO_CHECK_EOF
885 889 > (foo
886 890 > NO_CHECK_EOF
887 891
888 892 test python hooks
889 893
890 894 #if windows
891 895 $ PYTHONPATH="$TESTTMP/b;$PYTHONPATH"
892 896 #else
893 897 $ PYTHONPATH="$TESTTMP/b:$PYTHONPATH"
894 898 #endif
895 899 $ export PYTHONPATH
896 900
897 901 $ echo '[hooks]' > ../a/.hg/hgrc
898 902 $ echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
899 903 $ hg pull ../a 2>&1 | grep 'raised an exception'
900 904 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
901 905
902 906 $ echo '[hooks]' > ../a/.hg/hgrc
903 907 $ echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
904 908 $ hg pull ../a 2>&1 | grep 'raised an exception'
905 909 error: preoutgoing.raise hook raised an exception: exception from hook
906 910
907 911 $ echo '[hooks]' > ../a/.hg/hgrc
908 912 $ echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
909 913 $ hg pull ../a
910 914 pulling from ../a
911 915 searching for changes
912 916 error: preoutgoing.abort hook failed: raise abort from hook
913 917 abort: raise abort from hook
914 918 [255]
915 919
916 920 $ echo '[hooks]' > ../a/.hg/hgrc
917 921 $ echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
918 922 $ hg pull ../a
919 923 pulling from ../a
920 924 searching for changes
921 925 hook args:
922 926 hooktype preoutgoing
923 927 source pull
924 928 abort: preoutgoing.fail hook failed
925 929 [40]
926 930
927 931 $ echo '[hooks]' > ../a/.hg/hgrc
928 932 $ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
929 933 $ hg pull ../a
930 934 pulling from ../a
931 935 searching for changes
932 936 abort: preoutgoing.uncallable hook is invalid: "hooktests.uncallable" is not callable
933 937 [255]
934 938
935 939 $ echo '[hooks]' > ../a/.hg/hgrc
936 940 $ echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
937 941 $ hg pull ../a
938 942 pulling from ../a
939 943 searching for changes
940 944 abort: preoutgoing.nohook hook is invalid: "hooktests.nohook" is not defined
941 945 [255]
942 946
943 947 $ echo '[hooks]' > ../a/.hg/hgrc
944 948 $ echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
945 949 $ hg pull ../a
946 950 pulling from ../a
947 951 searching for changes
948 952 abort: preoutgoing.nomodule hook is invalid: "nomodule" not in a module
949 953 [255]
950 954
951 955 $ echo '[hooks]' > ../a/.hg/hgrc
952 956 $ echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
953 957 $ hg pull ../a
954 958 pulling from ../a
955 959 searching for changes
956 960 abort: preoutgoing.badmodule hook is invalid: import of "nomodule" failed
957 961 (run with --traceback for stack trace)
958 962 [255]
959 963
960 964 $ echo '[hooks]' > ../a/.hg/hgrc
961 965 $ echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
962 966 $ hg pull ../a
963 967 pulling from ../a
964 968 searching for changes
965 969 abort: preoutgoing.unreachable hook is invalid: import of "hooktests.container" failed
966 970 (run with --traceback for stack trace)
967 971 [255]
968 972
969 973 $ echo '[hooks]' > ../a/.hg/hgrc
970 974 $ echo 'preoutgoing.syntaxerror = python:syntaxerror.syntaxerror' >> ../a/.hg/hgrc
971 975 $ hg pull ../a
972 976 pulling from ../a
973 977 searching for changes
974 978 abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
975 979 (run with --traceback for stack trace)
976 980 [255]
977 981
978 982 $ hg pull ../a --traceback 2>&1 | grep -E 'pulling|searching|^exception|Traceback|SyntaxError|ImportError|ModuleNotFoundError|HookLoadError|abort'
979 983 pulling from ../a
980 984 searching for changes
981 985 exception from first failed import attempt:
982 986 Traceback (most recent call last):
983 987 SyntaxError: * (glob)
984 988 exception from second failed import attempt:
985 989 Traceback (most recent call last):
986 990 SyntaxError: * (glob)
987 991 Traceback (most recent call last):
988 992 ModuleNotFoundError: No module named 'hgext_syntaxerror'
989 993 Traceback (most recent call last):
990 994 SyntaxError: * (glob)
991 995 Traceback (most recent call last):
992 996 ModuleNotFoundError: No module named 'hgext_syntaxerror'
993 997 Traceback (most recent call last):
994 998 raise error.HookLoadError(msg, hint=tracebackhint) (py37 !)
995 999 mercurial.error.HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
996 1000 abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
997 1001
998 1002 $ echo '[hooks]' > ../a/.hg/hgrc
999 1003 $ echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
1000 1004 $ hg pull ../a
1001 1005 pulling from ../a
1002 1006 searching for changes
1003 1007 hook args:
1004 1008 hooktype preoutgoing
1005 1009 source pull
1006 1010 adding changesets
1007 1011 adding manifests
1008 1012 adding file changes
1009 1013 adding remote bookmark quux
1010 1014 added 1 changesets with 1 changes to 1 files
1011 1015 new changesets 539e4b31b6dc
1012 1016 (run 'hg update' to get a working copy)
1013 1017
1014 1018 post- python hooks that fail to *run* don't cause an abort
1015 1019 $ rm ../a/.hg/hgrc
1016 1020 $ echo '[hooks]' > .hg/hgrc
1017 1021 $ echo 'post-pull.broken = python:hooktests.brokenhook' >> .hg/hgrc
1018 1022 $ hg pull ../a
1019 1023 pulling from ../a
1020 1024 searching for changes
1021 1025 no changes found
1022 1026 error: post-pull.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
1023 1027 (run with --traceback for stack trace)
1024 1028
1025 1029 but post- python hooks that fail to *load* do
1026 1030 $ echo '[hooks]' > .hg/hgrc
1027 1031 $ echo 'post-pull.nomodule = python:nomodule' >> .hg/hgrc
1028 1032 $ hg pull ../a
1029 1033 pulling from ../a
1030 1034 searching for changes
1031 1035 no changes found
1032 1036 abort: post-pull.nomodule hook is invalid: "nomodule" not in a module
1033 1037 [255]
1034 1038
1035 1039 $ echo '[hooks]' > .hg/hgrc
1036 1040 $ echo 'post-pull.badmodule = python:nomodule.nowhere' >> .hg/hgrc
1037 1041 $ hg pull ../a
1038 1042 pulling from ../a
1039 1043 searching for changes
1040 1044 no changes found
1041 1045 abort: post-pull.badmodule hook is invalid: import of "nomodule" failed
1042 1046 (run with --traceback for stack trace)
1043 1047 [255]
1044 1048
1045 1049 $ echo '[hooks]' > .hg/hgrc
1046 1050 $ echo 'post-pull.nohook = python:hooktests.nohook' >> .hg/hgrc
1047 1051 $ hg pull ../a
1048 1052 pulling from ../a
1049 1053 searching for changes
1050 1054 no changes found
1051 1055 abort: post-pull.nohook hook is invalid: "hooktests.nohook" is not defined
1052 1056 [255]
1053 1057
1054 1058 make sure --traceback works
1055 1059
1056 1060 $ echo '[hooks]' > .hg/hgrc
1057 1061 $ echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc
1058 1062
1059 1063 $ echo aa > a
1060 1064 $ hg --traceback commit -d '0 0' -ma 2>&1 | grep '^Traceback'
1061 1065 Traceback (most recent call last):
1062 1066
1063 1067 $ cd ..
1064 1068 $ hg init c
1065 1069 $ cd c
1066 1070
1067 1071 $ cat > hookext.py <<EOF
1068 1072 > def autohook(ui, **args):
1069 1073 > ui.write(b'Automatically installed hook\n')
1070 1074 >
1071 1075 > def reposetup(ui, repo):
1072 1076 > repo.ui.setconfig(b"hooks", b"commit.auto", autohook)
1073 1077 > EOF
1074 1078 $ echo '[extensions]' >> .hg/hgrc
1075 1079 $ echo 'hookext = hookext.py' >> .hg/hgrc
1076 1080
1077 1081 $ touch foo
1078 1082 $ hg add foo
1079 1083 $ hg ci -d '0 0' -m 'add foo'
1080 1084 Automatically installed hook
1081 1085 $ echo >> foo
1082 1086 $ hg ci --debug -d '0 0' -m 'change foo'
1083 1087 committing files:
1084 1088 foo
1085 1089 committing manifest
1086 1090 committing changelog
1087 1091 updating the branch cache
1088 1092 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
1089 1093 calling hook commit.auto: hgext_hookext.autohook
1090 1094 Automatically installed hook
1091 1095
1092 1096 $ hg showconfig hooks
1093 1097 hooks.commit.auto=<function autohook at *> (glob)
1094 1098
1095 1099 test python hook configured with python:[file]:[hook] syntax
1096 1100
1097 1101 $ cd ..
1098 1102 $ mkdir d
1099 1103 $ cd d
1100 1104 $ hg init repo
1101 1105 $ mkdir hooks
1102 1106
1103 1107 $ cd hooks
1104 1108 $ cat > testhooks.py <<EOF
1105 1109 > def testhook(ui, **args):
1106 1110 > ui.write(b'hook works\n')
1107 1111 > EOF
1108 1112 $ echo '[hooks]' > ../repo/.hg/hgrc
1109 1113 $ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
1110 1114
1111 1115 $ cd ../repo
1112 1116 $ hg commit -d '0 0'
1113 1117 hook works
1114 1118 nothing changed
1115 1119 [1]
1116 1120
1117 1121 $ echo '[hooks]' > .hg/hgrc
1118 1122 $ echo "update.ne = python:`pwd`/nonexistent.py:testhook" >> .hg/hgrc
1119 1123 $ echo "pre-identify.npmd = python:`pwd`/:no_python_module_dir" >> .hg/hgrc
1120 1124
1121 1125 $ hg up null
1122 1126 loading update.ne hook failed:
1123 1127 abort: $ENOENT$: '$TESTTMP/d/repo/nonexistent.py'
1124 1128 [255]
1125 1129
1126 1130 $ hg id
1127 1131 loading pre-identify.npmd hook failed:
1128 1132 abort: No module named 'repo'
1129 1133 [255]
1130 1134
1131 1135 $ cd ../../b
1132 1136
1133 1137 make sure --traceback works on hook import failure
1134 1138
1135 1139 $ cat > importfail.py <<EOF
1136 1140 > import somebogusmodule
1137 1141 > # dereference something in the module to force demandimport to load it
1138 1142 > somebogusmodule.whatever
1139 1143 > EOF
1140 1144
1141 1145 $ echo '[hooks]' > .hg/hgrc
1142 1146 $ echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc
1143 1147
1144 1148 $ echo a >> a
1145 1149 $ hg --traceback commit -ma 2>&1 | grep -E '^exception|ImportError|ModuleNotFoundError|Traceback|HookLoadError|abort'
1146 1150 exception from first failed import attempt:
1147 1151 Traceback (most recent call last):
1148 1152 ModuleNotFoundError: No module named 'somebogusmodule'
1149 1153 exception from second failed import attempt:
1150 1154 Traceback (most recent call last):
1151 1155 ModuleNotFoundError: No module named 'somebogusmodule'
1152 1156 Traceback (most recent call last):
1153 1157 ModuleNotFoundError: No module named 'hgext_importfail'
1154 1158 Traceback (most recent call last):
1155 1159 ModuleNotFoundError: No module named 'somebogusmodule'
1156 1160 Traceback (most recent call last):
1157 1161 ModuleNotFoundError: No module named 'hgext_importfail'
1158 1162 Traceback (most recent call last):
1159 1163 raise error.HookLoadError(msg, hint=tracebackhint) (py37 !)
1160 1164 mercurial.error.HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed
1161 1165 abort: precommit.importfail hook is invalid: import of "importfail" failed
1162 1166
1163 1167 Issue1827: Hooks Update & Commit not completely post operation
1164 1168
1165 1169 commit and update hooks should run after command completion. The largefiles
1166 1170 use demonstrates a recursive wlock, showing the hook doesn't run until the
1167 1171 final release (and dirstate flush).
1168 1172
1169 1173 $ echo '[hooks]' > .hg/hgrc
1170 1174 $ echo 'commit = hg id' >> .hg/hgrc
1171 1175 $ echo 'update = hg id' >> .hg/hgrc
1172 1176 $ echo bb > a
1173 1177 $ hg ci -ma
1174 1178 223eafe2750c tip
1175 1179 $ hg up 0 --config extensions.largefiles=
1176 1180 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
1177 1181 cb9a9f314b8b
1178 1182 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1179 1183
1180 1184 make sure --verbose (and --quiet/--debug etc.) are propagated to the local ui
1181 1185 that is passed to pre/post hooks
1182 1186
1183 1187 $ echo '[hooks]' > .hg/hgrc
1184 1188 $ echo 'pre-identify = python:hooktests.verbosehook' >> .hg/hgrc
1185 1189 $ hg id
1186 1190 cb9a9f314b8b
1187 1191 $ hg id --verbose
1188 1192 calling hook pre-identify: hooktests.verbosehook
1189 1193 verbose output from hook
1190 1194 cb9a9f314b8b
1191 1195
1192 1196 Ensure hooks can be prioritized
1193 1197
1194 1198 $ echo '[hooks]' > .hg/hgrc
1195 1199 $ echo 'pre-identify.a = python:hooktests.verbosehook' >> .hg/hgrc
1196 1200 $ echo 'pre-identify.b = python:hooktests.verbosehook' >> .hg/hgrc
1197 1201 $ echo 'priority.pre-identify.b = 1' >> .hg/hgrc
1198 1202 $ echo 'pre-identify.c = python:hooktests.verbosehook' >> .hg/hgrc
1199 1203 $ hg id --verbose
1200 1204 calling hook pre-identify.b: hooktests.verbosehook
1201 1205 verbose output from hook
1202 1206 calling hook pre-identify.a: hooktests.verbosehook
1203 1207 verbose output from hook
1204 1208 calling hook pre-identify.c: hooktests.verbosehook
1205 1209 verbose output from hook
1206 1210 cb9a9f314b8b
1207 1211
1208 1212 new tags must be visible in pretxncommit (issue3210)
1209 1213
1210 1214 $ echo 'pretxncommit.printtags = python:hooktests.printtags' >> .hg/hgrc
1211 1215 $ hg tag -f foo
1212 1216 [a, foo, tip]
1213 1217
1214 1218 post-init hooks must not crash (issue4983)
1215 1219 This also creates the `to` repo for the next test block.
1216 1220
1217 1221 $ cd ..
1218 1222 $ cat << EOF >> hgrc-with-post-init-hook
1219 1223 > [hooks]
1220 1224 > post-init = sh -c "printenv.py --line post-init"
1221 1225 > EOF
1222 1226 $ HGRCPATH=hgrc-with-post-init-hook hg init to
1223 1227 post-init hook: HG_ARGS=init to
1224 1228 HG_HOOKNAME=post-init
1225 1229 HG_HOOKTYPE=post-init
1226 1230 HG_OPTS={'insecure': None, 'remotecmd': '', 'ssh': ''}
1227 1231 HG_PATS=['to']
1228 1232 HG_RESULT=0
1229 1233
1230 1234
1231 1235 new commits must be visible in pretxnchangegroup (issue3428)
1232 1236
1233 1237 $ echo '[hooks]' >> to/.hg/hgrc
1234 1238 $ echo 'prechangegroup = hg --traceback tip' >> to/.hg/hgrc
1235 1239 $ echo 'pretxnchangegroup = hg --traceback tip' >> to/.hg/hgrc
1236 1240 $ echo a >> to/a
1237 1241 $ hg --cwd to ci -Ama
1238 1242 adding a
1239 1243 $ hg clone to from
1240 1244 updating to branch default
1241 1245 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1242 1246 $ echo aa >> from/a
1243 1247 $ hg --cwd from ci -mb
1244 1248 $ hg --cwd from push
1245 1249 pushing to $TESTTMP/to
1246 1250 searching for changes
1247 1251 changeset: 0:cb9a9f314b8b
1248 1252 tag: tip
1249 1253 user: test
1250 1254 date: Thu Jan 01 00:00:00 1970 +0000
1251 1255 summary: a
1252 1256
1253 1257 adding changesets
1254 1258 adding manifests
1255 1259 adding file changes
1256 1260 changeset: 1:9836a07b9b9d
1257 1261 tag: tip
1258 1262 user: test
1259 1263 date: Thu Jan 01 00:00:00 1970 +0000
1260 1264 summary: b
1261 1265
1262 1266 added 1 changesets with 1 changes to 1 files
1263 1267
1264 1268 pretxnclose hook failure should abort the transaction
1265 1269
1266 1270 $ hg init txnfailure
1267 1271 $ cd txnfailure
1268 1272 $ touch a && hg commit -Aqm a
1269 1273 $ cat >> .hg/hgrc <<EOF
1270 1274 > [hooks]
1271 1275 > pretxnclose.error = exit 1
1272 1276 > EOF
1273 1277 $ hg strip -r 0 --config extensions.strip=
1274 1278 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1275 1279 saved backup bundle to * (glob)
1276 1280 transaction abort!
1277 1281 rollback completed
1278 1282 strip failed, backup bundle stored in * (glob)
1279 1283 abort: pretxnclose.error hook exited with status 1
1280 1284 [40]
1281 1285 $ hg recover
1282 1286 no interrupted transaction available
1283 1287 [1]
1284 1288 $ cd ..
1285 1289
1286 1290 check whether HG_PENDING makes pending changes only in related
1287 1291 repositories visible to an external hook.
1288 1292
1289 1293 (emulate a transaction running concurrently by copied
1290 1294 .hg/store/00changelog.i.a in subsequent test)
1291 1295
1292 1296 $ cat > $TESTTMP/savepending.sh <<EOF
1293 1297 > cp .hg/store/00changelog.i.a .hg/store/00changelog.i.a.saved
1294 1298 > exit 1 # to avoid adding new revision for subsequent tests
1295 1299 > EOF
1296 1300 $ cd a
1297 1301 $ hg tip -q
1298 1302 4:539e4b31b6dc
1299 1303 $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" commit -m "invisible"
1300 1304 transaction abort!
1301 1305 rollback completed
1302 1306 abort: pretxnclose hook exited with status 1
1303 1307 [40]
1304 1308 $ cp .hg/store/00changelog.i.a.saved .hg/store/00changelog.i.a
1305 1309
1306 1310 (check (in)visibility of new changeset while transaction running in
1307 1311 repo)
1308 1312
1309 1313 $ cat > $TESTTMP/checkpending.sh <<EOF
1310 1314 > echo '@a'
1311 1315 > hg -R "$TESTTMP/a" tip -q
1312 1316 > echo '@a/nested'
1313 1317 > hg -R "$TESTTMP/a/nested" tip -q
1314 1318 > exit 1 # to avoid adding new revision for subsequent tests
1315 1319 > EOF
1316 1320 $ hg init nested
1317 1321 $ cd nested
1318 1322 $ echo a > a
1319 1323 $ hg add a
1320 1324 $ hg --config hooks.pretxnclose="sh $TESTTMP/checkpending.sh" commit -m '#0'
1321 1325 @a
1322 1326 4:539e4b31b6dc
1323 1327 @a/nested
1324 1328 0:bf5e395ced2c
1325 1329 transaction abort!
1326 1330 rollback completed
1327 1331 abort: pretxnclose hook exited with status 1
1328 1332 [40]
1329 1333
1330 1334 Hook from untrusted hgrc are reported as failure
1331 1335 ================================================
1332 1336
1333 1337 $ cat << EOF > $TESTTMP/untrusted.py
1334 1338 > from mercurial import scmutil, util
1335 1339 > def uisetup(ui):
1336 1340 > class untrustedui(ui.__class__):
1337 1341 > def _trusted(self, fp, f):
1338 1342 > if util.normpath(fp.name).endswith(b'untrusted/.hg/hgrc'):
1339 1343 > return False
1340 1344 > return super(untrustedui, self)._trusted(fp, f)
1341 1345 > ui.__class__ = untrustedui
1342 1346 > EOF
1343 1347 $ cat << EOF >> $HGRCPATH
1344 1348 > [extensions]
1345 1349 > untrusted=$TESTTMP/untrusted.py
1346 1350 > EOF
1347 1351 $ hg init untrusted
1348 1352 $ cd untrusted
1349 1353
1350 1354 Non-blocking hook
1351 1355 -----------------
1352 1356
1353 1357 $ cat << EOF >> .hg/hgrc
1354 1358 > [hooks]
1355 1359 > txnclose.testing=echo txnclose hook called
1356 1360 > EOF
1357 1361 $ touch a && hg commit -Aqm a
1358 1362 warning: untrusted hook txnclose.testing not executed
1359 1363 $ hg log
1360 1364 changeset: 0:3903775176ed
1361 1365 tag: tip
1362 1366 user: test
1363 1367 date: Thu Jan 01 00:00:00 1970 +0000
1364 1368 summary: a
1365 1369
1366 1370
1367 1371 Non-blocking hook
1368 1372 -----------------
1369 1373
1370 1374 $ cat << EOF >> .hg/hgrc
1371 1375 > [hooks]
1372 1376 > pretxnclose.testing=echo pre-txnclose hook called
1373 1377 > EOF
1374 1378 $ touch b && hg commit -Aqm a
1375 1379 transaction abort!
1376 1380 rollback completed
1377 1381 abort: untrusted hook pretxnclose.testing not executed
1378 1382 (see 'hg help config.trusted')
1379 1383 [40]
1380 1384 $ hg log
1381 1385 changeset: 0:3903775176ed
1382 1386 tag: tip
1383 1387 user: test
1384 1388 date: Thu Jan 01 00:00:00 1970 +0000
1385 1389 summary: a
1386 1390
1387 1391
1388 1392 unsetup the test
1389 1393 ----------------
1390 1394
1391 1395 # touch the file to unconfuse chg with a diffrent mtime
1392 1396 $ sleep 1
1393 1397 $ touch $TESTTMP/untrusted.py
1394 1398 $ cat << EOF >> $HGRCPATH
1395 1399 > [extensions]
1396 1400 > untrusted=!
1397 1401 > EOF
1398 1402
1399 1403 HGPLAIN setting in hooks
1400 1404 ========================
1401 1405
1402 1406 $ cat << EOF >> .hg/hgrc
1403 1407 > [hooks]
1404 1408 > pre-version.testing-default=sh -c "echo '### default ###' plain: \${HGPLAIN:-'<unset>'}"
1405 1409 > pre-version.testing-yes=sh -c "echo '### yes #######' plain: \${HGPLAIN:-'<unset>'}"
1406 1410 > pre-version.testing-yes:run-with-plain=yes
1407 1411 > pre-version.testing-no=sh -c "echo '### no ########' plain: \${HGPLAIN:-'<unset>'}"
1408 1412 > pre-version.testing-no:run-with-plain=no
1409 1413 > pre-version.testing-auto=sh -c "echo '### auto ######' plain: \${HGPLAIN:-'<unset>'}"
1410 1414 > pre-version.testing-auto:run-with-plain=auto
1411 1415 > EOF
1412 1416
1413 1417 $ (unset HGPLAIN; hg version --quiet)
1414 1418 ### default ### plain: 1
1415 1419 ### yes ####### plain: 1
1416 1420 ### no ######## plain: <unset>
1417 1421 ### auto ###### plain: <unset>
1418 1422 Mercurial Distributed SCM (*) (glob)
1419 1423
1420 1424 $ HGPLAIN=1 hg version --quiet
1421 1425 ### default ### plain: 1
1422 1426 ### yes ####### plain: 1
1423 1427 ### no ######## plain: <unset>
1424 1428 ### auto ###### plain: 1
1425 1429 Mercurial Distributed SCM (*) (glob)
1426 1430
1427 1431 Test hook that change the underlying repo
1428 1432 =========================================
1429 1433
1430 1434 blackbox access the dirstate afterward and can see a changelog / dirstate
1431 1435 desync.
1432 1436
1433 1437
1434 1438 $ cd $TESTTMP
1435 1439 $ cat <<EOF >> $HGRCPATH
1436 1440 > [extensions]
1437 1441 > blackbox=
1438 1442 > [hooks]
1439 1443 > post-merge = hg commit -m "auto merge"
1440 1444 > EOF
1441 1445
1442 1446 $ hg init t
1443 1447 $ cd t
1444 1448 $ touch ".hgignore"
1445 1449 $ hg commit -Am "initial" -d'0 0'
1446 1450 adding .hgignore
1447 1451
1448 1452 $ echo This is file a1 > a
1449 1453 $ hg commit -Am "commit #1" -d'0 0'
1450 1454 adding a
1451 1455
1452 1456 $ hg update 0
1453 1457 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1454 1458 $ echo This is file b1 > b
1455 1459 $ hg commit -Am "commit #2" -d'0 0'
1456 1460 adding b
1457 1461 created new head
1458 1462
1459 1463 $ hg merge 1
1460 1464 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1461 1465 (branch merge, don't forget to commit)
1462 1466
1463 1467 $ cd ..
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now