##// END OF EJS Templates
bundlerepo: make baserevision return raw text...
Jun Wu -
r31834:433ab46f default
parent child Browse files
Show More
@@ -1,556 +1,556
1 1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 2 #
3 3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Repository class for viewing uncompressed bundles.
9 9
10 10 This provides a read-only repository interface to bundles as if they
11 11 were part of the actual repository.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import os
17 17 import shutil
18 18 import tempfile
19 19
20 20 from .i18n import _
21 21 from .node import nullid
22 22
23 23 from . import (
24 24 bundle2,
25 25 changegroup,
26 26 changelog,
27 27 cmdutil,
28 28 discovery,
29 29 error,
30 30 exchange,
31 31 filelog,
32 32 localrepo,
33 33 manifest,
34 34 mdiff,
35 35 node as nodemod,
36 36 pathutil,
37 37 phases,
38 38 pycompat,
39 39 revlog,
40 40 util,
41 41 vfs as vfsmod,
42 42 )
43 43
class bundlerevlog(revlog.revlog):
    """A revlog that overlays revisions from a bundle on a local revlog.

    Revisions with rev <= repotiprev come from the on-disk revlog; higher
    revs are reconstructed on demand from delta chunks read out of the
    bundle (an unbundle object).
    """

    def __init__(self, opener, indexfile, bundle, linkmapper):
        # How it works:
        # To retrieve a revision, we need to know the offset of the revision in
        # the bundle (an unbundle object). We store this offset in the index
        # (start). The base of the delta is stored in the base field.
        #
        # To differentiate a rev in the bundle from a rev in the revlog, we
        # check revision against repotiprev.
        opener = vfsmod.readonlyvfs(opener)
        revlog.revlog.__init__(self, opener, indexfile)
        self.bundle = bundle
        n = len(self)
        self.repotiprev = n - 1
        chain = None
        self.bundlerevs = set() # used by 'bundle()' revset expression
        getchunk = lambda: bundle.deltachunk(chain)
        for chunkdata in iter(getchunk, {}):
            node = chunkdata['node']
            p1 = chunkdata['p1']
            p2 = chunkdata['p2']
            cs = chunkdata['cs']
            deltabase = chunkdata['deltabase']
            delta = chunkdata['delta']

            size = len(delta)
            start = bundle.tell() - size

            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                chain = node
                self.bundlerevs.add(self.nodemap[node])
                continue

            for p in (p1, p2):
                if p not in self.nodemap:
                    raise error.LookupError(p, self.indexfile,
                                            _("unknown parent"))

            if deltabase not in self.nodemap:
                # use error.LookupError (not the builtin LookupError) so the
                # error carries node/index context like the parent check above
                raise error.LookupError(deltabase, self.indexfile,
                                        _('unknown delta base'))

            baserev = self.rev(deltabase)
            # start, size, full unc. size, base (unused), link, p1, p2, node
            e = (revlog.offset_type(start, 0), size, -1, baserev, link,
                 self.rev(p1), self.rev(p2), node)
            self.index.insert(-1, e)
            self.nodemap[node] = n
            self.bundlerevs.add(n)
            chain = node
            n += 1

    def _chunk(self, rev):
        # Warning: in case of bundle, the diff is against what we stored as
        # delta base, not against rev - 1
        # XXX: could use some caching
        if rev <= self.repotiprev:
            return revlog.revlog._chunk(self, rev)
        self.bundle.seek(self.start(rev))
        return self.bundle.read(self.length(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 > self.repotiprev and rev2 > self.repotiprev:
            # hot path for bundle: the stored chunk already is the delta
            # when its recorded base is rev1
            revb = self.index[rev2][3]
            if revb == rev1:
                return self._chunk(rev2)
        elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
            return revlog.revlog.revdiff(self, rev1, rev2)

        return mdiff.textdiff(self.revision(rev1), self.revision(rev2))

    def revision(self, nodeorrev, raw=False):
        """return an uncompressed revision of a given node or revision
        number.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = self.rev(node)

        if node == nullid:
            return ""

        text = None
        chain = []
        iterrev = rev
        # reconstruct the revision if it is from a changegroup: walk delta
        # bases until we hit a cached rev or a rev stored in the base revlog
        while iterrev > self.repotiprev:
            if self._cache and self._cache[1] == iterrev:
                text = self._cache[2]
                break
            chain.append(iterrev)
            iterrev = self.index[iterrev][3]
        if text is None:
            text = self.baserevision(iterrev)

        while chain:
            delta = self._chunk(chain.pop())
            text = mdiff.patches(text, [delta])

        text, validatehash = self._processflags(text, self.flags(rev),
                                                'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)
        self._cache = (node, rev, text)
        return text

    def baserevision(self, nodeorrev):
        # Revlog subclasses may override 'revision' method to modify format of
        # content retrieved from revlog. To use bundlerevlog with such class one
        # needs to override 'baserevision' and make more specific call here.
        return revlog.revlog.revision(self, nodeorrev, raw=True)

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        # bundles are read-only; all mutators are unsupported
        raise NotImplementedError
    def addgroup(self, revs, linkmapper, transaction):
        raise NotImplementedError
    def strip(self, rev, minlink):
        raise NotImplementedError
    def checksize(self):
        raise NotImplementedError
171 171
class bundlechangelog(bundlerevlog, changelog.changelog):
    """Changelog variant that reads trailing revisions from a bundle."""

    def __init__(self, opener, bundle):
        changelog.changelog.__init__(self, opener)
        # changelog revisions link to themselves
        linkmapper = lambda x: x
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              linkmapper)

    def baserevision(self, nodeorrev):
        # Although changelog doesn't override 'revision' method, some extensions
        # may replace this class with another that does. Same story with
        # manifest and filelog classes.

        # This bypasses filtering on changelog.node() and rev() because we need
        # revision text of the bundle base even if it is hidden.
        oldfilter = self.filteredrevs
        self.filteredrevs = ()
        try:
            return changelog.changelog.revision(self, nodeorrev, raw=True)
        finally:
            # always restore the caller's filter
            self.filteredrevs = oldfilter
192 192
class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
    """Manifest revlog variant that reads trailing revisions from a bundle."""

    def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
        manifest.manifestrevlog.__init__(self, opener, dir=dir)
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              linkmapper)
        if dirlogstarts is None:
            dirlogstarts = {}
            # tree manifests ("03" changegroups) carry per-directory logs
            if self.bundle.version == "03":
                dirlogstarts = _getfilestarts(self.bundle)
        self._dirlogstarts = dirlogstarts
        self._linkmapper = linkmapper

    def baserevision(self, nodeorrev):
        node = nodeorrev
        if isinstance(node, int):
            node = self.node(node)

        # prefer the fulltext cache; fall back to the raw revlog text
        if node not in self.fulltextcache:
            return manifest.manifestrevlog.revision(self, nodeorrev, raw=True)
        return '%s' % self.fulltextcache[node]

    def dirlog(self, d):
        if d not in self._dirlogstarts:
            return super(bundlemanifest, self).dirlog(d)
        # position the bundle at the directory's chunk before constructing
        self.bundle.seek(self._dirlogstarts[d])
        return bundlemanifest(
            self.opener, self.bundle, self._linkmapper,
            self._dirlogstarts, dir=d)
223 223
class bundlefilelog(bundlerevlog, filelog.filelog):
    """Filelog variant that reads trailing revisions from a bundle."""

    def __init__(self, opener, path, bundle, linkmapper):
        filelog.filelog.__init__(self, opener, path)
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              linkmapper)

    def baserevision(self, nodeorrev):
        # raw text: flag processors must not be applied to the delta base
        return filelog.filelog.revision(self, nodeorrev, raw=True)
232 232
class bundlepeer(localrepo.localpeer):
    """Peer interface for a bundle repository."""

    def canpush(self):
        # a bundle repository is read-only; pushing is never possible
        return False
236 236
class bundlephasecache(phases.phasecache):
    """Phase cache for bundle repos: in-memory only, never persisted."""

    def __init__(self, *args, **kwargs):
        super(bundlephasecache, self).__init__(*args, **kwargs)
        # make sure we cannot accidentally write through the opener
        if util.safehasattr(self, 'opener'):
            self.opener = vfsmod.readonlyvfs(self.opener)

    def write(self):
        # phases of a bundle repo must never hit disk
        raise NotImplementedError

    def _write(self, fp):
        raise NotImplementedError

    def _updateroots(self, phase, newroots, tr):
        # update in memory only; mark dirty but never flush (see write())
        self.phaseroots[phase] = newroots
        self.invalidate()
        self.dirty = True
253 253
254 254 def _getfilestarts(bundle):
255 255 bundlefilespos = {}
256 256 for chunkdata in iter(bundle.filelogheader, {}):
257 257 fname = chunkdata['filename']
258 258 bundlefilespos[fname] = bundle.tell()
259 259 for chunk in iter(lambda: bundle.deltachunk(None), {}):
260 260 pass
261 261 return bundlefilespos
262 262
class bundlerepository(localrepo.localrepository):
    """A read-only repository view over a local repo plus a bundle file.

    Changesets in the bundle appear as regular (draft) revisions on top of
    the revisions of the underlying repository.  If no repository exists at
    *path*, a throwaway empty one is created in a temp directory.
    """

    def __init__(self, ui, path, bundlename):
        def _writetempbundle(read, suffix, header=''):
            """Write a temporary file to disk

            This is closure because we need to make sure this tracked by
            self.tempfile for cleanup purposes."""
            # BUG FIX: previously hard-coded suffix=".hg10un", ignoring the
            # caller-supplied suffix (e.g. ".cg02un" for bundle2 parts)
            fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
                                            suffix=suffix)
            self.tempfile = temp

            with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp:
                fptemp.write(header)
                while True:
                    chunk = read(2**18)
                    if not chunk:
                        break
                    fptemp.write(chunk)

            return self.vfs.open(self.tempfile, mode="rb")
        self._tempparent = None
        try:
            localrepo.localrepository.__init__(self, ui, path)
        except error.RepoError:
            # no repo at 'path': back the bundle with a throwaway empty repo
            self._tempparent = tempfile.mkdtemp()
            localrepo.instance(ui, self._tempparent, 1)
            localrepo.localrepository.__init__(self, ui, self._tempparent)
        self.ui.setconfig('phases', 'publish', False, 'bundlerepo')

        if path:
            self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
        else:
            self._url = 'bundle:' + bundlename

        self.tempfile = None
        f = util.posixfile(bundlename, "rb")
        self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)

        if isinstance(self.bundle, bundle2.unbundle20):
            # extract the single changegroup part from a bundle2 stream
            cgstream = None
            for part in self.bundle.iterparts():
                if part.type == 'changegroup':
                    if cgstream is not None:
                        raise NotImplementedError("can't process "
                                                  "multiple changegroups")
                    cgstream = part
                    version = part.params.get('version', '01')
                    legalcgvers = changegroup.supportedincomingversions(self)
                    if version not in legalcgvers:
                        msg = _('Unsupported changegroup version: %s')
                        raise error.Abort(msg % version)
                    if self.bundle.compressed():
                        # decompress to a seekable temp file
                        cgstream = _writetempbundle(part.read,
                                                    ".cg%sun" % version)

            if cgstream is None:
                raise error.Abort(_('No changegroups found'))
            cgstream.seek(0)

            self.bundle = changegroup.getunbundler(version, cgstream, 'UN')

        elif self.bundle.compressed():
            # bundle1: decompress once to a temp file and re-read it
            f = _writetempbundle(self.bundle.read, '.hg10un', header='HG10UN')
            self.bundlefile = self.bundle = exchange.readbundle(ui, f,
                                                                bundlename,
                                                                self.vfs)

        # dict with the mapping 'filename' -> position in the bundle
        self.bundlefilespos = {}

        self.firstnewrev = self.changelog.repotiprev + 1
        phases.retractboundary(self, None, phases.draft,
                               [ctx.node() for ctx in self[self.firstnewrev:]])

    @localrepo.unfilteredpropertycache
    def _phasecache(self):
        return bundlephasecache(self, self._phasedefaults)

    @localrepo.unfilteredpropertycache
    def changelog(self):
        # consume the header if it exists
        self.bundle.changelogheader()
        c = bundlechangelog(self.svfs, self.bundle)
        # remember where the manifest section starts (propertycache
        # replaces the manstart property below with this plain value)
        self.manstart = self.bundle.tell()
        return c

    def _constructmanifest(self):
        self.bundle.seek(self.manstart)
        # consume the header if it exists
        self.bundle.manifestheader()
        linkmapper = self.unfiltered().changelog.rev
        m = bundlemanifest(self.svfs, self.bundle, linkmapper)
        self.filestart = self.bundle.tell()
        return m

    @localrepo.unfilteredpropertycache
    def manstart(self):
        # force changelog parsing, which records the real manstart offset
        self.changelog
        return self.manstart

    @localrepo.unfilteredpropertycache
    def filestart(self):
        # force manifest parsing, which records the real filestart offset
        self.manifestlog
        return self.filestart

    def url(self):
        return self._url

    def file(self, f):
        """Return the filelog for *f*, backed by the bundle when it
        contains revisions for that file."""
        if not self.bundlefilespos:
            # lazily index the positions of all filelogs in the bundle
            self.bundle.seek(self.filestart)
            self.bundlefilespos = _getfilestarts(self.bundle)

        if f in self.bundlefilespos:
            self.bundle.seek(self.bundlefilespos[f])
            linkmapper = self.unfiltered().changelog.rev
            return bundlefilelog(self.svfs, f, self.bundle, linkmapper)
        else:
            return filelog.filelog(self.svfs, f)

    def close(self):
        """Close assigned bundle file immediately."""
        self.bundlefile.close()
        if self.tempfile is not None:
            self.vfs.unlink(self.tempfile)
        if self._tempparent:
            shutil.rmtree(self._tempparent, True)

    def cancopy(self):
        return False

    def peer(self):
        return bundlepeer(self)

    def getcwd(self):
        return pycompat.getcwd() # always outside the repo

    # Check if parents exist in localrepo before setting
    def setparents(self, p1, p2=nullid):
        p1rev = self.changelog.rev(p1)
        p2rev = self.changelog.rev(p2)
        msg = _("setting parent to node %s that only exists in the bundle\n")
        if self.changelog.repotiprev < p1rev:
            self.ui.warn(msg % nodemod.hex(p1))
        if self.changelog.repotiprev < p2rev:
            self.ui.warn(msg % nodemod.hex(p2))
        return super(bundlerepository, self).setparents(p1, p2)
410 410
def instance(ui, path, create):
    """Repository factory: open *path* as a bundle repository.

    Raises error.Abort when *create* is requested, since bundle repos are
    read-only.
    """
    if create:
        raise error.Abort(_('cannot create new bundle repository'))
    # internal config: bundle.mainreporoot
    parentpath = ui.config("bundle", "mainreporoot", "")
    if not parentpath:
        # try to find the correct path to the working directory repo
        found = cmdutil.findrepo(pycompat.getcwd())
        parentpath = found if found is not None else ''
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = pycompat.getcwd()
        if parentpath == cwd:
            parentpath = ''
        else:
            cwd = pathutil.normasprefix(cwd)
            if parentpath.startswith(cwd):
                parentpath = parentpath[len(cwd):]
    u = util.url(path)
    path = u.localpath()
    if u.scheme != 'bundle':
        repopath, bundlename = parentpath, path
    else:
        # "bundle:repo+file" names both the parent repo and the bundle;
        # a bare "bundle:file" falls back on the discovered parent
        parts = path.split("+", 1)
        if len(parts) == 2:
            repopath, bundlename = parts
        else:
            repopath, bundlename = parentpath, parts[0]
    return bundlerepository(ui, repopath, bundlename)
442 442
class bundletransactionmanager(object):
    """No-op transaction manager for bundle repos.

    Bundle repos are never written, so there is no transaction to open;
    close/release must never be reached.
    """

    def transaction(self):
        # nothing to manage
        return None

    def close(self):
        raise NotImplementedError

    def release(self):
        raise NotImplementedError
452 452
def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
                     force=False):
    '''obtains a bundle of changes incoming from other

    "onlyheads" restricts the returned changes to those reachable from the
    specified heads.
    "bundlename", if given, stores the bundle to this file path permanently;
    otherwise it's stored to a temp file and gets deleted again when you call
    the returned "cleanupfn".
    "force" indicates whether to proceed on unrelated repos.

    Returns a tuple (local, csets, cleanupfn):

    "local" is a local repo from which to obtain the actual incoming
    changesets; it is a bundlerepo for the obtained bundle when the
    original "other" is remote.
    "csets" lists the incoming changeset node ids.
    "cleanupfn" must be called without arguments when you're done processing
    the changes; it closes both the original "other" and the one returned
    here.
    '''
    tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
                                       force=force)
    common, incoming, rheads = tmp
    if not incoming:
        # nothing incoming: remove any stale bundle file and hand the
        # caller the local repo itself
        try:
            if bundlename:
                os.unlink(bundlename)
        except OSError:
            pass
        return repo, [], other.close

    commonset = set(common)
    # only keep remote heads not already known locally
    rheads = [x for x in rheads if x not in commonset]

    bundle = None
    bundlerepo = None
    # NOTE: 'localrepo' here shadows the module-level localrepo import for
    # the rest of this function
    localrepo = other.local()
    if bundlename or not localrepo:
        # create a bundle (uncompressed if other repo is not local)

        # developer config: devel.legacy.exchange
        legexc = ui.configlist('devel', 'legacy.exchange')
        forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
        canbundle2 = (not forcebundle1
                      and other.capable('getbundle')
                      and other.capable('bundle2'))
        if canbundle2:
            kwargs = {}
            kwargs['common'] = common
            kwargs['heads'] = rheads
            kwargs['bundlecaps'] = exchange.caps20to10(repo)
            kwargs['cg'] = True
            b2 = other.getbundle('incoming', **kwargs)
            # stream the bundle2 payload straight to disk without re-parsing
            fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(),
                                                     bundlename)
        else:
            # bundle1 fallback: pick the newest protocol the peer supports
            if other.capable('getbundle'):
                cg = other.getbundle('incoming', common=common, heads=rheads)
            elif onlyheads is None and not other.capable('changegroupsubset'):
                # compat with older servers when pulling all remote heads
                cg = other.changegroup(incoming, "incoming")
                rheads = None
            else:
                cg = other.changegroupsubset(incoming, rheads, 'incoming')
            if localrepo:
                bundletype = "HG10BZ"
            else:
                bundletype = "HG10UN"
            fname = bundle = bundle2.writebundle(ui, cg, bundlename,
                                                 bundletype)
        # keep written bundle?
        if bundlename:
            bundle = None
        if not localrepo:
            # use the created uncompressed bundlerepo
            localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
                                                      fname)
            # this repo contains local and other now, so filter out local again
            common = repo.heads()
    if localrepo:
        # Part of common may be remotely filtered
        # So use an unfiltered version
        # The discovery process probably need cleanup to avoid that
        localrepo = localrepo.unfiltered()

    csets = localrepo.changelog.findmissing(common, rheads)

    if bundlerepo:
        # mirror the remote's phase information onto the bundle revisions
        reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
        remotephases = other.listkeys('phases')

        pullop = exchange.pulloperation(bundlerepo, other, heads=reponodes)
        pullop.trmanager = bundletransactionmanager()
        exchange._pullapplyphases(pullop, remotephases)

    def cleanup():
        # close the bundle repo first, then remove its temp file, then the peer
        if bundlerepo:
            bundlerepo.close()
        if bundle:
            os.unlink(bundle)
        other.close()

    return (localrepo, csets, cleanup)
@@ -1,198 +1,197
1 1 # Create server
2 2 $ hg init server
3 3 $ cd server
4 4 $ cat >> .hg/hgrc << EOF
5 5 > [extensions]
6 6 > extension=$TESTDIR/flagprocessorext.py
7 7 > EOF
8 8 $ cd ../
9 9
10 10 # Clone server and enable extensions
11 11 $ hg clone -q server client
12 12 $ cd client
13 13 $ cat >> .hg/hgrc << EOF
14 14 > [extensions]
15 15 > extension=$TESTDIR/flagprocessorext.py
16 16 > EOF
17 17
18 18 # Commit file that will trigger the noop extension
19 19 $ echo '[NOOP]' > noop
20 20 $ hg commit -Aqm "noop"
21 21
22 22 # Commit file that will trigger the base64 extension
23 23 $ echo '[BASE64]' > base64
24 24 $ hg commit -Aqm 'base64'
25 25
26 26 # Commit file that will trigger the gzip extension
27 27 $ echo '[GZIP]' > gzip
28 28 $ hg commit -Aqm 'gzip'
29 29
30 30 # Commit file that will trigger noop and base64
31 31 $ echo '[NOOP][BASE64]' > noop-base64
32 32 $ hg commit -Aqm 'noop+base64'
33 33
34 34 # Commit file that will trigger noop and gzip
35 35 $ echo '[NOOP][GZIP]' > noop-gzip
36 36 $ hg commit -Aqm 'noop+gzip'
37 37
38 38 # Commit file that will trigger base64 and gzip
39 39 $ echo '[BASE64][GZIP]' > base64-gzip
40 40 $ hg commit -Aqm 'base64+gzip'
41 41
42 42 # Commit file that will trigger base64, gzip and noop
43 43 $ echo '[BASE64][GZIP][NOOP]' > base64-gzip-noop
44 44 $ hg commit -Aqm 'base64+gzip+noop'
45 45
46 46 # TEST: ensure the revision data is consistent
47 47 $ hg cat noop
48 48 [NOOP]
49 49 $ hg debugdata noop 0
50 50 [NOOP]
51 51
52 52 $ hg cat -r . base64
53 53 [BASE64]
54 54 $ hg debugdata base64 0
55 55 W0JBU0U2NF0K (no-eol)
56 56
57 57 $ hg cat -r . gzip
58 58 [GZIP]
59 59 $ hg debugdata gzip 0
60 60 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
61 61
62 62 $ hg cat -r . noop-base64
63 63 [NOOP][BASE64]
64 64 $ hg debugdata noop-base64 0
65 65 W05PT1BdW0JBU0U2NF0K (no-eol)
66 66
67 67 $ hg cat -r . noop-gzip
68 68 [NOOP][GZIP]
69 69 $ hg debugdata noop-gzip 0
70 70 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
71 71
72 72 $ hg cat -r . base64-gzip
73 73 [BASE64][GZIP]
74 74 $ hg debugdata base64-gzip 0
75 75 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
76 76
77 77 $ hg cat -r . base64-gzip-noop
78 78 [BASE64][GZIP][NOOP]
79 79 $ hg debugdata base64-gzip-noop 0
80 80 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
81 81
82 82 # Push to the server
83 83 $ hg push
84 84 pushing to $TESTTMP/server (glob)
85 85 searching for changes
86 86 adding changesets
87 87 adding manifests
88 88 adding file changes
89 89 added 7 changesets with 7 changes to 7 files
90 90
91 91 # Initialize new client (not cloning) and setup extension
92 92 $ cd ..
93 93 $ hg init client2
94 94 $ cd client2
95 95 $ cat >> .hg/hgrc << EOF
96 96 > [paths]
97 97 > default = $TESTTMP/server
98 98 > [extensions]
99 99 > extension=$TESTDIR/flagprocessorext.py
100 100 > EOF
101 101
102 102 # Pull from server and update to latest revision
103 103 $ hg pull default
104 104 pulling from $TESTTMP/server (glob)
105 105 requesting all changes
106 106 adding changesets
107 107 adding manifests
108 108 adding file changes
109 109 added 7 changesets with 7 changes to 7 files
110 110 (run 'hg update' to get a working copy)
111 111 $ hg update
112 112 7 files updated, 0 files merged, 0 files removed, 0 files unresolved
113 113
114 114 # TEST: ensure the revision data is consistent
115 115 $ hg cat noop
116 116 [NOOP]
117 117 $ hg debugdata noop 0
118 118 [NOOP]
119 119
120 120 $ hg cat -r . base64
121 121 [BASE64]
122 122 $ hg debugdata base64 0
123 123 W0JBU0U2NF0K (no-eol)
124 124
125 125 $ hg cat -r . gzip
126 126 [GZIP]
127 127 $ hg debugdata gzip 0
128 128 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
129 129
130 130 $ hg cat -r . noop-base64
131 131 [NOOP][BASE64]
132 132 $ hg debugdata noop-base64 0
133 133 W05PT1BdW0JBU0U2NF0K (no-eol)
134 134
135 135 $ hg cat -r . noop-gzip
136 136 [NOOP][GZIP]
137 137 $ hg debugdata noop-gzip 0
138 138 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
139 139
140 140 $ hg cat -r . base64-gzip
141 141 [BASE64][GZIP]
142 142 $ hg debugdata base64-gzip 0
143 143 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
144 144
145 145 $ hg cat -r . base64-gzip-noop
146 146 [BASE64][GZIP][NOOP]
147 147 $ hg debugdata base64-gzip-noop 0
148 148 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
149 149
150 150 # TEST: ensure a missing processor is handled
151 151 $ echo '[FAIL][BASE64][GZIP][NOOP]' > fail-base64-gzip-noop
152 152 $ hg commit -Aqm 'fail+base64+gzip+noop'
153 153 abort: missing processor for flag '0x1'!
154 154 [255]
155 155
156 156 # TEST: ensure we cannot register several flag processors on the same flag
157 157 $ cat >> .hg/hgrc << EOF
158 158 > [extensions]
159 159 > extension=$TESTDIR/flagprocessorext.py
160 160 > duplicate=$TESTDIR/flagprocessorext.py
161 161 > EOF
162 162 $ echo 'this should fail' > file
163 163 $ hg commit -Aqm 'add file'
164 164 abort: cannot register multiple processors on flag '0x8'.
165 165 [255]
166 166
167 167 $ cd ..
168 168
169 169 # TEST: bundle repo
170 170 $ hg init bundletest
171 171 $ cd bundletest
172 172
173 173 $ cat >> .hg/hgrc << EOF
174 174 > [extensions]
175 175 > flagprocessor=$TESTDIR/flagprocessorext.py
176 176 > EOF
177 177
178 178 $ for i in 0 single two three 4; do
179 179 > echo '[BASE64]a-bit-longer-'$i > base64
180 180 > hg commit -m base64-$i -A base64
181 181 > done
182 182
183 183 $ hg update 2 -q
184 184 $ echo '[BASE64]a-bit-longer-branching' > base64
185 185 $ hg commit -q -m branching
186 186
187 187 $ hg bundle --base 1 bundle.hg
188 188 4 changesets found
189 189 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
190 190 $ hg -R bundle.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
191 Traceback (most recent call last):
192 mercurial.mpatch.mpatchError: invalid patch
191 abort: integrity check failed on data/base64.i:2!
193 192
194 193 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q 2>&1 | egrep -v '^(\*\*| )'
195 194 Traceback (most recent call last):
196 TypeError: Incorrect padding
195 mercurial.mpatch.mpatchError: invalid patch
197 196 $ hg -R bundle-again.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
198 197 abort: repository bundle-again.hg not found!
General Comments 0
You need to be logged in to leave comments. Login now