bundlerepo: use raw revision in revdiff()...
Jun Wu
r31837:37e79391 default
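
The functional change below is in bundlerevlog.revdiff(): its fallback path now diffs the raw stored texts (revision(rev, raw=True)) instead of the processed texts. Consumers treat revdiff() output as a raw delta and apply it on top of raw text, as revision() below does with mdiff.patches(rawtext, [delta]), so computing it from processed text breaks revlogs that use flag processors, such as the base64 test extension exercised further down. A minimal sketch of the invariant the fix restores, assuming Mercurial's mdiff module is importable; the texts and the base64 transform are illustrative, not part of the patch:

import base64

from mercurial import mdiff  # assumes a Mercurial install/checkout on sys.path

# Processed (user-visible) texts and their raw stored forms under a
# base64-style flag processor, modelled on the test extension below.
text1 = b'[BASE64]a-bit-longer-0\n'
text2 = b'[BASE64]a-bit-longer-single\n'
raw1 = base64.b64encode(text1)
raw2 = base64.b64encode(text2)

# What revdiff() now computes: a delta between the raw forms, which
# patches raw storage back to the expected raw text.
delta = mdiff.textdiff(raw1, raw2)
assert mdiff.patches(raw1, [delta]) == raw2
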
@@ -1,557 +1,558 @@
1 1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 2 #
3 3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Repository class for viewing uncompressed bundles.
9 9
10 10 This provides a read-only repository interface to bundles as if they
11 11 were part of the actual repository.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import os
17 17 import shutil
18 18 import tempfile
19 19
20 20 from .i18n import _
21 21 from .node import nullid
22 22
23 23 from . import (
24 24 bundle2,
25 25 changegroup,
26 26 changelog,
27 27 cmdutil,
28 28 discovery,
29 29 error,
30 30 exchange,
31 31 filelog,
32 32 localrepo,
33 33 manifest,
34 34 mdiff,
35 35 node as nodemod,
36 36 pathutil,
37 37 phases,
38 38 pycompat,
39 39 revlog,
40 40 util,
41 41 vfs as vfsmod,
42 42 )
43 43
44 44 class bundlerevlog(revlog.revlog):
45 45 def __init__(self, opener, indexfile, bundle, linkmapper):
46 46 # How it works:
47 47 # To retrieve a revision, we need to know the offset of the revision in
48 48 # the bundle (an unbundle object). We store this offset in the index
49 49 # (start). The base of the delta is stored in the base field.
50 50 #
51 51 # To differentiate a rev in the bundle from a rev in the revlog, we
52 52 # check revision against repotiprev.
53 53 opener = vfsmod.readonlyvfs(opener)
54 54 revlog.revlog.__init__(self, opener, indexfile)
55 55 self.bundle = bundle
56 56 n = len(self)
57 57 self.repotiprev = n - 1
58 58 chain = None
59 59 self.bundlerevs = set() # used by 'bundle()' revset expression
60 60 getchunk = lambda: bundle.deltachunk(chain)
61 61 for chunkdata in iter(getchunk, {}):
62 62 node = chunkdata['node']
63 63 p1 = chunkdata['p1']
64 64 p2 = chunkdata['p2']
65 65 cs = chunkdata['cs']
66 66 deltabase = chunkdata['deltabase']
67 67 delta = chunkdata['delta']
68 68 flags = chunkdata['flags']
69 69
70 70 size = len(delta)
71 71 start = bundle.tell() - size
72 72
73 73 link = linkmapper(cs)
74 74 if node in self.nodemap:
75 75 # this can happen if two branches make the same change
76 76 chain = node
77 77 self.bundlerevs.add(self.nodemap[node])
78 78 continue
79 79
80 80 for p in (p1, p2):
81 81 if p not in self.nodemap:
82 82 raise error.LookupError(p, self.indexfile,
83 83 _("unknown parent"))
84 84
85 85 if deltabase not in self.nodemap:
86 86 raise LookupError(deltabase, self.indexfile,
87 87 _('unknown delta base'))
88 88
89 89 baserev = self.rev(deltabase)
90 90 # start, size, full unc. size, base (unused), link, p1, p2, node
91 91 e = (revlog.offset_type(start, flags), size, -1, baserev, link,
92 92 self.rev(p1), self.rev(p2), node)
93 93 self.index.insert(-1, e)
94 94 self.nodemap[node] = n
95 95 self.bundlerevs.add(n)
96 96 chain = node
97 97 n += 1
98 98
99 99 def _chunk(self, rev):
100 100 # Warning: in case of bundle, the diff is against what we stored as
101 101 # delta base, not against rev - 1
102 102 # XXX: could use some caching
103 103 if rev <= self.repotiprev:
104 104 return revlog.revlog._chunk(self, rev)
105 105 self.bundle.seek(self.start(rev))
106 106 return self.bundle.read(self.length(rev))
107 107
108 108 def revdiff(self, rev1, rev2):
109 109 """return or calculate a delta between two revisions"""
110 110 if rev1 > self.repotiprev and rev2 > self.repotiprev:
111 111 # hot path for bundle
112 112 revb = self.index[rev2][3]
113 113 if revb == rev1:
114 114 return self._chunk(rev2)
115 115 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
116 116 return revlog.revlog.revdiff(self, rev1, rev2)
117 117
118 return mdiff.textdiff(self.revision(rev1), self.revision(rev2))
118 return mdiff.textdiff(self.revision(rev1, raw=True),
119 self.revision(rev2, raw=True))
119 120
120 121 def revision(self, nodeorrev, raw=False):
121 122 """return an uncompressed revision of a given node or revision
122 123 number.
123 124 """
124 125 if isinstance(nodeorrev, int):
125 126 rev = nodeorrev
126 127 node = self.node(rev)
127 128 else:
128 129 node = nodeorrev
129 130 rev = self.rev(node)
130 131
131 132 if node == nullid:
132 133 return ""
133 134
134 135 rawtext = None
135 136 chain = []
136 137 iterrev = rev
137 138 # reconstruct the revision if it is from a changegroup
138 139 while iterrev > self.repotiprev:
139 140 if self._cache and self._cache[1] == iterrev:
140 141 rawtext = self._cache[2]
141 142 break
142 143 chain.append(iterrev)
143 144 iterrev = self.index[iterrev][3]
144 145 if rawtext is None:
145 146 rawtext = self.baserevision(iterrev)
146 147
147 148 while chain:
148 149 delta = self._chunk(chain.pop())
149 150 rawtext = mdiff.patches(rawtext, [delta])
150 151
151 152 text, validatehash = self._processflags(rawtext, self.flags(rev),
152 153 'read', raw=raw)
153 154 if validatehash:
154 155 self.checkhash(text, node, rev=rev)
155 156 self._cache = (node, rev, rawtext)
156 157 return text
157 158
158 159 def baserevision(self, nodeorrev):
159 160 # Revlog subclasses may override 'revision' method to modify format of
160 161 # content retrieved from revlog. To use bundlerevlog with such class one
161 162 # needs to override 'baserevision' and make more specific call here.
162 163 return revlog.revlog.revision(self, nodeorrev, raw=True)
163 164
164 165 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
165 166 raise NotImplementedError
166 167 def addgroup(self, revs, linkmapper, transaction):
167 168 raise NotImplementedError
168 169 def strip(self, rev, minlink):
169 170 raise NotImplementedError
170 171 def checksize(self):
171 172 raise NotImplementedError
172 173
173 174 class bundlechangelog(bundlerevlog, changelog.changelog):
174 175 def __init__(self, opener, bundle):
175 176 changelog.changelog.__init__(self, opener)
176 177 linkmapper = lambda x: x
177 178 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
178 179 linkmapper)
179 180
180 181 def baserevision(self, nodeorrev):
181 182 # Although changelog doesn't override 'revision' method, some extensions
182 183 # may replace this class with another that does. Same story with
183 184 # manifest and filelog classes.
184 185
185 186 # This bypasses filtering on changelog.node() and rev() because we need
186 187 # revision text of the bundle base even if it is hidden.
187 188 oldfilter = self.filteredrevs
188 189 try:
189 190 self.filteredrevs = ()
190 191 return changelog.changelog.revision(self, nodeorrev, raw=True)
191 192 finally:
192 193 self.filteredrevs = oldfilter
193 194
194 195 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
195 196 def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
196 197 manifest.manifestrevlog.__init__(self, opener, dir=dir)
197 198 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
198 199 linkmapper)
199 200 if dirlogstarts is None:
200 201 dirlogstarts = {}
201 202 if self.bundle.version == "03":
202 203 dirlogstarts = _getfilestarts(self.bundle)
203 204 self._dirlogstarts = dirlogstarts
204 205 self._linkmapper = linkmapper
205 206
206 207 def baserevision(self, nodeorrev):
207 208 node = nodeorrev
208 209 if isinstance(node, int):
209 210 node = self.node(node)
210 211
211 212 if node in self.fulltextcache:
212 213 result = '%s' % self.fulltextcache[node]
213 214 else:
214 215 result = manifest.manifestrevlog.revision(self, nodeorrev, raw=True)
215 216 return result
216 217
217 218 def dirlog(self, d):
218 219 if d in self._dirlogstarts:
219 220 self.bundle.seek(self._dirlogstarts[d])
220 221 return bundlemanifest(
221 222 self.opener, self.bundle, self._linkmapper,
222 223 self._dirlogstarts, dir=d)
223 224 return super(bundlemanifest, self).dirlog(d)
224 225
225 226 class bundlefilelog(bundlerevlog, filelog.filelog):
226 227 def __init__(self, opener, path, bundle, linkmapper):
227 228 filelog.filelog.__init__(self, opener, path)
228 229 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
229 230 linkmapper)
230 231
231 232 def baserevision(self, nodeorrev):
232 233 return filelog.filelog.revision(self, nodeorrev, raw=True)
233 234
234 235 class bundlepeer(localrepo.localpeer):
235 236 def canpush(self):
236 237 return False
237 238
238 239 class bundlephasecache(phases.phasecache):
239 240 def __init__(self, *args, **kwargs):
240 241 super(bundlephasecache, self).__init__(*args, **kwargs)
241 242 if util.safehasattr(self, 'opener'):
242 243 self.opener = vfsmod.readonlyvfs(self.opener)
243 244
244 245 def write(self):
245 246 raise NotImplementedError
246 247
247 248 def _write(self, fp):
248 249 raise NotImplementedError
249 250
250 251 def _updateroots(self, phase, newroots, tr):
251 252 self.phaseroots[phase] = newroots
252 253 self.invalidate()
253 254 self.dirty = True
254 255
255 256 def _getfilestarts(bundle):
256 257 bundlefilespos = {}
257 258 for chunkdata in iter(bundle.filelogheader, {}):
258 259 fname = chunkdata['filename']
259 260 bundlefilespos[fname] = bundle.tell()
260 261 for chunk in iter(lambda: bundle.deltachunk(None), {}):
261 262 pass
262 263 return bundlefilespos
263 264
264 265 class bundlerepository(localrepo.localrepository):
265 266 def __init__(self, ui, path, bundlename):
266 267 def _writetempbundle(read, suffix, header=''):
267 268 """Write a temporary file to disk
268 269
269 270 This is closure because we need to make sure this tracked by
270 271 self.tempfile for cleanup purposes."""
271 272 fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
272 273 suffix=".hg10un")
273 274 self.tempfile = temp
274 275
275 276 with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp:
276 277 fptemp.write(header)
277 278 while True:
278 279 chunk = read(2**18)
279 280 if not chunk:
280 281 break
281 282 fptemp.write(chunk)
282 283
283 284 return self.vfs.open(self.tempfile, mode="rb")
284 285 self._tempparent = None
285 286 try:
286 287 localrepo.localrepository.__init__(self, ui, path)
287 288 except error.RepoError:
288 289 self._tempparent = tempfile.mkdtemp()
289 290 localrepo.instance(ui, self._tempparent, 1)
290 291 localrepo.localrepository.__init__(self, ui, self._tempparent)
291 292 self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
292 293
293 294 if path:
294 295 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
295 296 else:
296 297 self._url = 'bundle:' + bundlename
297 298
298 299 self.tempfile = None
299 300 f = util.posixfile(bundlename, "rb")
300 301 self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)
301 302
302 303 if isinstance(self.bundle, bundle2.unbundle20):
303 304 cgstream = None
304 305 for part in self.bundle.iterparts():
305 306 if part.type == 'changegroup':
306 307 if cgstream is not None:
307 308 raise NotImplementedError("can't process "
308 309 "multiple changegroups")
309 310 cgstream = part
310 311 version = part.params.get('version', '01')
311 312 legalcgvers = changegroup.supportedincomingversions(self)
312 313 if version not in legalcgvers:
313 314 msg = _('Unsupported changegroup version: %s')
314 315 raise error.Abort(msg % version)
315 316 if self.bundle.compressed():
316 317 cgstream = _writetempbundle(part.read,
317 318 ".cg%sun" % version)
318 319
319 320 if cgstream is None:
320 321 raise error.Abort(_('No changegroups found'))
321 322 cgstream.seek(0)
322 323
323 324 self.bundle = changegroup.getunbundler(version, cgstream, 'UN')
324 325
325 326 elif self.bundle.compressed():
326 327 f = _writetempbundle(self.bundle.read, '.hg10un', header='HG10UN')
327 328 self.bundlefile = self.bundle = exchange.readbundle(ui, f,
328 329 bundlename,
329 330 self.vfs)
330 331
331 332 # dict with the mapping 'filename' -> position in the bundle
332 333 self.bundlefilespos = {}
333 334
334 335 self.firstnewrev = self.changelog.repotiprev + 1
335 336 phases.retractboundary(self, None, phases.draft,
336 337 [ctx.node() for ctx in self[self.firstnewrev:]])
337 338
338 339 @localrepo.unfilteredpropertycache
339 340 def _phasecache(self):
340 341 return bundlephasecache(self, self._phasedefaults)
341 342
342 343 @localrepo.unfilteredpropertycache
343 344 def changelog(self):
344 345 # consume the header if it exists
345 346 self.bundle.changelogheader()
346 347 c = bundlechangelog(self.svfs, self.bundle)
347 348 self.manstart = self.bundle.tell()
348 349 return c
349 350
350 351 def _constructmanifest(self):
351 352 self.bundle.seek(self.manstart)
352 353 # consume the header if it exists
353 354 self.bundle.manifestheader()
354 355 linkmapper = self.unfiltered().changelog.rev
355 356 m = bundlemanifest(self.svfs, self.bundle, linkmapper)
356 357 self.filestart = self.bundle.tell()
357 358 return m
358 359
359 360 @localrepo.unfilteredpropertycache
360 361 def manstart(self):
361 362 self.changelog
362 363 return self.manstart
363 364
364 365 @localrepo.unfilteredpropertycache
365 366 def filestart(self):
366 367 self.manifestlog
367 368 return self.filestart
368 369
369 370 def url(self):
370 371 return self._url
371 372
372 373 def file(self, f):
373 374 if not self.bundlefilespos:
374 375 self.bundle.seek(self.filestart)
375 376 self.bundlefilespos = _getfilestarts(self.bundle)
376 377
377 378 if f in self.bundlefilespos:
378 379 self.bundle.seek(self.bundlefilespos[f])
379 380 linkmapper = self.unfiltered().changelog.rev
380 381 return bundlefilelog(self.svfs, f, self.bundle, linkmapper)
381 382 else:
382 383 return filelog.filelog(self.svfs, f)
383 384
384 385 def close(self):
385 386 """Close assigned bundle file immediately."""
386 387 self.bundlefile.close()
387 388 if self.tempfile is not None:
388 389 self.vfs.unlink(self.tempfile)
389 390 if self._tempparent:
390 391 shutil.rmtree(self._tempparent, True)
391 392
392 393 def cancopy(self):
393 394 return False
394 395
395 396 def peer(self):
396 397 return bundlepeer(self)
397 398
398 399 def getcwd(self):
399 400 return pycompat.getcwd() # always outside the repo
400 401
401 402 # Check if parents exist in localrepo before setting
402 403 def setparents(self, p1, p2=nullid):
403 404 p1rev = self.changelog.rev(p1)
404 405 p2rev = self.changelog.rev(p2)
405 406 msg = _("setting parent to node %s that only exists in the bundle\n")
406 407 if self.changelog.repotiprev < p1rev:
407 408 self.ui.warn(msg % nodemod.hex(p1))
408 409 if self.changelog.repotiprev < p2rev:
409 410 self.ui.warn(msg % nodemod.hex(p2))
410 411 return super(bundlerepository, self).setparents(p1, p2)
411 412
412 413 def instance(ui, path, create):
413 414 if create:
414 415 raise error.Abort(_('cannot create new bundle repository'))
415 416 # internal config: bundle.mainreporoot
416 417 parentpath = ui.config("bundle", "mainreporoot", "")
417 418 if not parentpath:
418 419 # try to find the correct path to the working directory repo
419 420 parentpath = cmdutil.findrepo(pycompat.getcwd())
420 421 if parentpath is None:
421 422 parentpath = ''
422 423 if parentpath:
423 424 # Try to make the full path relative so we get a nice, short URL.
424 425 # In particular, we don't want temp dir names in test outputs.
425 426 cwd = pycompat.getcwd()
426 427 if parentpath == cwd:
427 428 parentpath = ''
428 429 else:
429 430 cwd = pathutil.normasprefix(cwd)
430 431 if parentpath.startswith(cwd):
431 432 parentpath = parentpath[len(cwd):]
432 433 u = util.url(path)
433 434 path = u.localpath()
434 435 if u.scheme == 'bundle':
435 436 s = path.split("+", 1)
436 437 if len(s) == 1:
437 438 repopath, bundlename = parentpath, s[0]
438 439 else:
439 440 repopath, bundlename = s
440 441 else:
441 442 repopath, bundlename = parentpath, path
442 443 return bundlerepository(ui, repopath, bundlename)
443 444
444 445 class bundletransactionmanager(object):
445 446 def transaction(self):
446 447 return None
447 448
448 449 def close(self):
449 450 raise NotImplementedError
450 451
451 452 def release(self):
452 453 raise NotImplementedError
453 454
454 455 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
455 456 force=False):
456 457 '''obtains a bundle of changes incoming from other
457 458
458 459 "onlyheads" restricts the returned changes to those reachable from the
459 460 specified heads.
460 461 "bundlename", if given, stores the bundle to this file path permanently;
461 462 otherwise it's stored to a temp file and gets deleted again when you call
462 463 the returned "cleanupfn".
463 464 "force" indicates whether to proceed on unrelated repos.
464 465
465 466 Returns a tuple (local, csets, cleanupfn):
466 467
467 468 "local" is a local repo from which to obtain the actual incoming
468 469 changesets; it is a bundlerepo for the obtained bundle when the
469 470 original "other" is remote.
470 471 "csets" lists the incoming changeset node ids.
471 472 "cleanupfn" must be called without arguments when you're done processing
472 473 the changes; it closes both the original "other" and the one returned
473 474 here.
474 475 '''
475 476 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
476 477 force=force)
477 478 common, incoming, rheads = tmp
478 479 if not incoming:
479 480 try:
480 481 if bundlename:
481 482 os.unlink(bundlename)
482 483 except OSError:
483 484 pass
484 485 return repo, [], other.close
485 486
486 487 commonset = set(common)
487 488 rheads = [x for x in rheads if x not in commonset]
488 489
489 490 bundle = None
490 491 bundlerepo = None
491 492 localrepo = other.local()
492 493 if bundlename or not localrepo:
493 494 # create a bundle (uncompressed if other repo is not local)
494 495
495 496 # developer config: devel.legacy.exchange
496 497 legexc = ui.configlist('devel', 'legacy.exchange')
497 498 forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
498 499 canbundle2 = (not forcebundle1
499 500 and other.capable('getbundle')
500 501 and other.capable('bundle2'))
501 502 if canbundle2:
502 503 kwargs = {}
503 504 kwargs['common'] = common
504 505 kwargs['heads'] = rheads
505 506 kwargs['bundlecaps'] = exchange.caps20to10(repo)
506 507 kwargs['cg'] = True
507 508 b2 = other.getbundle('incoming', **kwargs)
508 509 fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(),
509 510 bundlename)
510 511 else:
511 512 if other.capable('getbundle'):
512 513 cg = other.getbundle('incoming', common=common, heads=rheads)
513 514 elif onlyheads is None and not other.capable('changegroupsubset'):
514 515 # compat with older servers when pulling all remote heads
515 516 cg = other.changegroup(incoming, "incoming")
516 517 rheads = None
517 518 else:
518 519 cg = other.changegroupsubset(incoming, rheads, 'incoming')
519 520 if localrepo:
520 521 bundletype = "HG10BZ"
521 522 else:
522 523 bundletype = "HG10UN"
523 524 fname = bundle = bundle2.writebundle(ui, cg, bundlename,
524 525 bundletype)
525 526 # keep written bundle?
526 527 if bundlename:
527 528 bundle = None
528 529 if not localrepo:
529 530 # use the created uncompressed bundlerepo
530 531 localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
531 532 fname)
532 533 # this repo contains local and other now, so filter out local again
533 534 common = repo.heads()
534 535 if localrepo:
535 536 # Part of common may be remotely filtered
536 537 # So use an unfiltered version
537 538 # The discovery process probably need cleanup to avoid that
538 539 localrepo = localrepo.unfiltered()
539 540
540 541 csets = localrepo.changelog.findmissing(common, rheads)
541 542
542 543 if bundlerepo:
543 544 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
544 545 remotephases = other.listkeys('phases')
545 546
546 547 pullop = exchange.pulloperation(bundlerepo, other, heads=reponodes)
547 548 pullop.trmanager = bundletransactionmanager()
548 549 exchange._pullapplyphases(pullop, remotephases)
549 550
550 551 def cleanup():
551 552 if bundlerepo:
552 553 bundlerepo.close()
553 554 if bundle:
554 555 os.unlink(bundle)
555 556 other.close()
556 557
557 558 return (localrepo, csets, cleanup)
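
The second hunk updates the flag-processor test. Before this fix, bundling again from the bundle repository (hg bundle -R bundle.hg ...) could emit deltas computed from processed text, and reading the resulting bundle back then failed while base64-decoding the reconstructed raw text, which is the removed "TypeError: Incorrect padding" traceback in the old output below. A hedged sketch of that failure mode, with illustrative texts and assuming Mercurial's mdiff is importable; this toy example only demonstrates the corruption, not the exact error message:

import base64

from mercurial import mdiff  # assumes a Mercurial install/checkout on sys.path

text1 = b'[BASE64]a-bit-longer-0\n'
text2 = b'[BASE64]a-bit-longer-single\n'
raw1 = base64.b64encode(text1)           # what the revlog actually stores

# Old behaviour: delta computed from the *processed* texts...
bad_delta = mdiff.textdiff(text1, text2)
# ...but revdiff() output gets applied on top of *raw* storage.
corrupt = mdiff.patches(raw1, [bad_delta])

decoded = None
try:
    decoded = base64.b64decode(corrupt)  # raises or decodes to garbage
except Exception:
    pass
assert decoded != text2                  # the raw round trip no longer holds
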
@@ -1,220 +1,242 @@
1 1 # Create server
2 2 $ hg init server
3 3 $ cd server
4 4 $ cat >> .hg/hgrc << EOF
5 5 > [extensions]
6 6 > extension=$TESTDIR/flagprocessorext.py
7 7 > EOF
8 8 $ cd ../
9 9
10 10 # Clone server and enable extensions
11 11 $ hg clone -q server client
12 12 $ cd client
13 13 $ cat >> .hg/hgrc << EOF
14 14 > [extensions]
15 15 > extension=$TESTDIR/flagprocessorext.py
16 16 > EOF
17 17
18 18 # Commit file that will trigger the noop extension
19 19 $ echo '[NOOP]' > noop
20 20 $ hg commit -Aqm "noop"
21 21
22 22 # Commit file that will trigger the base64 extension
23 23 $ echo '[BASE64]' > base64
24 24 $ hg commit -Aqm 'base64'
25 25
26 26 # Commit file that will trigger the gzip extension
27 27 $ echo '[GZIP]' > gzip
28 28 $ hg commit -Aqm 'gzip'
29 29
30 30 # Commit file that will trigger noop and base64
31 31 $ echo '[NOOP][BASE64]' > noop-base64
32 32 $ hg commit -Aqm 'noop+base64'
33 33
34 34 # Commit file that will trigger noop and gzip
35 35 $ echo '[NOOP][GZIP]' > noop-gzip
36 36 $ hg commit -Aqm 'noop+gzip'
37 37
38 38 # Commit file that will trigger base64 and gzip
39 39 $ echo '[BASE64][GZIP]' > base64-gzip
40 40 $ hg commit -Aqm 'base64+gzip'
41 41
42 42 # Commit file that will trigger base64, gzip and noop
43 43 $ echo '[BASE64][GZIP][NOOP]' > base64-gzip-noop
44 44 $ hg commit -Aqm 'base64+gzip+noop'
45 45
46 46 # TEST: ensure the revision data is consistent
47 47 $ hg cat noop
48 48 [NOOP]
49 49 $ hg debugdata noop 0
50 50 [NOOP]
51 51
52 52 $ hg cat -r . base64
53 53 [BASE64]
54 54 $ hg debugdata base64 0
55 55 W0JBU0U2NF0K (no-eol)
56 56
57 57 $ hg cat -r . gzip
58 58 [GZIP]
59 59 $ hg debugdata gzip 0
60 60 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
61 61
62 62 $ hg cat -r . noop-base64
63 63 [NOOP][BASE64]
64 64 $ hg debugdata noop-base64 0
65 65 W05PT1BdW0JBU0U2NF0K (no-eol)
66 66
67 67 $ hg cat -r . noop-gzip
68 68 [NOOP][GZIP]
69 69 $ hg debugdata noop-gzip 0
70 70 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
71 71
72 72 $ hg cat -r . base64-gzip
73 73 [BASE64][GZIP]
74 74 $ hg debugdata base64-gzip 0
75 75 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
76 76
77 77 $ hg cat -r . base64-gzip-noop
78 78 [BASE64][GZIP][NOOP]
79 79 $ hg debugdata base64-gzip-noop 0
80 80 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
81 81
82 82 # Push to the server
83 83 $ hg push
84 84 pushing to $TESTTMP/server (glob)
85 85 searching for changes
86 86 adding changesets
87 87 adding manifests
88 88 adding file changes
89 89 added 7 changesets with 7 changes to 7 files
90 90
91 91 # Initialize new client (not cloning) and setup extension
92 92 $ cd ..
93 93 $ hg init client2
94 94 $ cd client2
95 95 $ cat >> .hg/hgrc << EOF
96 96 > [paths]
97 97 > default = $TESTTMP/server
98 98 > [extensions]
99 99 > extension=$TESTDIR/flagprocessorext.py
100 100 > EOF
101 101
102 102 # Pull from server and update to latest revision
103 103 $ hg pull default
104 104 pulling from $TESTTMP/server (glob)
105 105 requesting all changes
106 106 adding changesets
107 107 adding manifests
108 108 adding file changes
109 109 added 7 changesets with 7 changes to 7 files
110 110 (run 'hg update' to get a working copy)
111 111 $ hg update
112 112 7 files updated, 0 files merged, 0 files removed, 0 files unresolved
113 113
114 114 # TEST: ensure the revision data is consistent
115 115 $ hg cat noop
116 116 [NOOP]
117 117 $ hg debugdata noop 0
118 118 [NOOP]
119 119
120 120 $ hg cat -r . base64
121 121 [BASE64]
122 122 $ hg debugdata base64 0
123 123 W0JBU0U2NF0K (no-eol)
124 124
125 125 $ hg cat -r . gzip
126 126 [GZIP]
127 127 $ hg debugdata gzip 0
128 128 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
129 129
130 130 $ hg cat -r . noop-base64
131 131 [NOOP][BASE64]
132 132 $ hg debugdata noop-base64 0
133 133 W05PT1BdW0JBU0U2NF0K (no-eol)
134 134
135 135 $ hg cat -r . noop-gzip
136 136 [NOOP][GZIP]
137 137 $ hg debugdata noop-gzip 0
138 138 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
139 139
140 140 $ hg cat -r . base64-gzip
141 141 [BASE64][GZIP]
142 142 $ hg debugdata base64-gzip 0
143 143 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
144 144
145 145 $ hg cat -r . base64-gzip-noop
146 146 [BASE64][GZIP][NOOP]
147 147 $ hg debugdata base64-gzip-noop 0
148 148 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
149 149
150 150 # TEST: ensure a missing processor is handled
151 151 $ echo '[FAIL][BASE64][GZIP][NOOP]' > fail-base64-gzip-noop
152 152 $ hg commit -Aqm 'fail+base64+gzip+noop'
153 153 abort: missing processor for flag '0x1'!
154 154 [255]
155 155
156 156 # TEST: ensure we cannot register several flag processors on the same flag
157 157 $ cat >> .hg/hgrc << EOF
158 158 > [extensions]
159 159 > extension=$TESTDIR/flagprocessorext.py
160 160 > duplicate=$TESTDIR/flagprocessorext.py
161 161 > EOF
162 162 $ echo 'this should fail' > file
163 163 $ hg commit -Aqm 'add file'
164 164 abort: cannot register multiple processors on flag '0x8'.
165 165 [255]
166 166
167 167 $ cd ..
168 168
169 169 # TEST: bundle repo
170 170 $ hg init bundletest
171 171 $ cd bundletest
172 172
173 173 $ cat >> .hg/hgrc << EOF
174 174 > [extensions]
175 175 > flagprocessor=$TESTDIR/flagprocessorext.py
176 176 > EOF
177 177
178 178 $ for i in 0 single two three 4; do
179 179 > echo '[BASE64]a-bit-longer-'$i > base64
180 180 > hg commit -m base64-$i -A base64
181 181 > done
182 182
183 183 $ hg update 2 -q
184 184 $ echo '[BASE64]a-bit-longer-branching' > base64
185 185 $ hg commit -q -m branching
186 186
187 187 $ hg bundle --base 1 bundle.hg
188 188 4 changesets found
189 189 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
190 190 $ hg -R bundle.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
191 191 5 branching
192 192 base64 | 2 +-
193 193 1 files changed, 1 insertions(+), 1 deletions(-)
194 194
195 195 4 base64-4
196 196 base64 | 2 +-
197 197 1 files changed, 1 insertions(+), 1 deletions(-)
198 198
199 199 3 base64-three
200 200 base64 | 2 +-
201 201 1 files changed, 1 insertions(+), 1 deletions(-)
202 202
203 203 2 base64-two
204 204 base64 | 2 +-
205 205 1 files changed, 1 insertions(+), 1 deletions(-)
206 206
207 207 1 base64-single
208 208 base64 | 2 +-
209 209 1 files changed, 1 insertions(+), 1 deletions(-)
210 210
211 211 0 base64-0
212 212 base64 | 1 +
213 213 1 files changed, 1 insertions(+), 0 deletions(-)
214 214
215 215
216 216 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q 2>&1 | egrep -v '^(\*\*| )'
217 217 [1]
218 218 $ hg -R bundle-again.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
219 Traceback (most recent call last):
220 TypeError: Incorrect padding
219 5 branching
220 base64 | 2 +-
221 1 files changed, 1 insertions(+), 1 deletions(-)
222
223 4 base64-4
224 base64 | 2 +-
225 1 files changed, 1 insertions(+), 1 deletions(-)
226
227 3 base64-three
228 base64 | 2 +-
229 1 files changed, 1 insertions(+), 1 deletions(-)
230
231 2 base64-two
232 base64 | 2 +-
233 1 files changed, 1 insertions(+), 1 deletions(-)
234
235 1 base64-single
236 base64 | 2 +-
237 1 files changed, 1 insertions(+), 1 deletions(-)
238
239 0 base64-0
240 base64 | 1 +
241 1 files changed, 1 insertions(+), 0 deletions(-)
242