##// END OF EJS Templates
shelve: use cg3 for treemanifests...
Martin von Zweigbergk -
r27931:1289a122 stable
parent child Browse files
Show More
@@ -1,859 +1,859 b''
1 1 # shelve.py - save/restore working directory state
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """save and restore changes to the working directory
9 9
10 10 The "hg shelve" command saves changes made to the working directory
11 11 and reverts those changes, resetting the working directory to a clean
12 12 state.
13 13
14 14 Later on, the "hg unshelve" command restores the changes saved by "hg
15 15 shelve". Changes can be restored even after updating to a different
16 16 parent, in which case Mercurial's merge machinery will resolve any
17 17 conflicts if necessary.
18 18
19 19 You can have more than one shelved change outstanding at a time; each
20 20 shelved change has a distinct name. For details, see the help for "hg
21 21 shelve".
22 22 """
23 23
24 24 import collections
25 25 import itertools
26 26 from mercurial.i18n import _
27 27 from mercurial.node import nullid, nullrev, bin, hex
28 28 from mercurial import changegroup, cmdutil, scmutil, phases, commands
29 29 from mercurial import error, hg, mdiff, merge, patch, repair, util
30 30 from mercurial import templatefilters, exchange, bundlerepo, bundle2
31 31 from mercurial import lock as lockmod
32 32 from hgext import rebase
33 33 import errno
34 34
cmdtable = {}
command = cmdutil.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'internal' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'internal'

# directory (under .hg/) where deleted/replaced shelve files are preserved
backupdir = 'shelve-backup'
44 44
class shelvedfile(object):
    """Helper for the file storing a single shelve

    Handles common functions on shelve files (.hg/.patch) using
    the vfs layer"""
    def __init__(self, repo, name, filetype=None):
        self.repo = repo
        self.name = name
        self.vfs = scmutil.vfs(repo.join('shelved'))
        self.backupvfs = scmutil.vfs(repo.join(backupdir))
        self.ui = self.repo.ui
        # with no filetype the name is used verbatim (e.g. for directory
        # listings); otherwise store as "<name>.<filetype>"
        if filetype:
            self.fname = name + '.' + filetype
        else:
            self.fname = name

    def exists(self):
        return self.vfs.exists(self.fname)

    def filename(self):
        return self.vfs.join(self.fname)

    def backupfilename(self):
        # find the first unused "<name>.ext" / "<name>-N.ext" path in the
        # backup directory so existing backups are never overwritten
        def gennames(base):
            yield base
            base, ext = base.rsplit('.', 1)
            for i in itertools.count(1):
                yield '%s-%d.%s' % (base, i, ext)

        name = self.backupvfs.join(self.fname)
        for n in gennames(name):
            if not self.backupvfs.exists(n):
                return n

    def movetobackup(self):
        if not self.backupvfs.isdir():
            self.backupvfs.makedir()
        util.rename(self.filename(), self.backupfilename())

    def stat(self):
        return self.vfs.stat(self.fname)

    def opener(self, mode='rb'):
        try:
            return self.vfs(self.fname, mode)
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            raise error.Abort(_("shelved change '%s' not found") % self.name)

    def applybundle(self):
        fp = self.opener()
        try:
            gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
            # bundle1 and bundle2 objects have different application APIs
            if not isinstance(gen, bundle2.unbundle20):
                gen.apply(self.repo, 'unshelve',
                          'bundle:' + self.vfs.join(self.fname),
                          targetphase=phases.secret)
            if isinstance(gen, bundle2.unbundle20):
                bundle2.applybundle(self.repo, gen,
                                    self.repo.currenttransaction(),
                                    source='unshelve',
                                    url='bundle:' + self.vfs.join(self.fname))
        finally:
            fp.close()

    def bundlerepo(self):
        return bundlerepo.bundlerepository(self.repo.baseui, self.repo.root,
                                           self.vfs.join(self.fname))
    def writebundle(self, bases, node):
        # pick the changegroup version the repo requires (e.g. cg3 for
        # treemanifest repos); cg1 ('01') fits in a plain HG10 bundle,
        # anything newer needs a bundle2 ('HG20') container
        cgversion = changegroup.safeversion(self.repo)
        if cgversion == '01':
            btype = 'HG10BZ'
            compression = None
        else:
            btype = 'HG20'
            compression = 'BZ'

        cg = changegroup.changegroupsubset(self.repo, bases, [node], 'shelve',
                                           version=cgversion)
        changegroup.writebundle(self.ui, cg, self.fname, btype, self.vfs,
                                compression=compression)
class shelvedstate(object):
    """Handle persistence during unshelving operations.

    Handles saving and restoring a shelved state. Ensures that different
    versions of a shelved state are possible and handles them appropriately.
    """
    _version = 1
    _filename = 'shelvedstate'

    @classmethod
    def load(cls, repo):
        """Read the saved unshelve state file and return a populated
        shelvedstate instance.

        The on-disk format is line-oriented; the line order here must
        mirror save() exactly: version, name, wctx, pendingctx, parents,
        stripnodes.
        """
        fp = repo.vfs(cls._filename)
        try:
            version = int(fp.readline().strip())

            if version != cls._version:
                raise error.Abort(_('this version of shelve is incompatible '
                                    'with the version used in this repo'))
            name = fp.readline().strip()
            wctx = fp.readline().strip()
            pendingctx = fp.readline().strip()
            parents = [bin(h) for h in fp.readline().split()]
            stripnodes = [bin(h) for h in fp.readline().split()]
        finally:
            fp.close()

        obj = cls()
        obj.name = name
        obj.wctx = repo[bin(wctx)]
        obj.pendingctx = repo[bin(pendingctx)]
        obj.parents = parents
        obj.stripnodes = stripnodes

        return obj

    @classmethod
    def save(cls, repo, name, originalwctx, pendingctx, stripnodes):
        """Persist the state of an interrupted unshelve; see load() for
        the line format."""
        fp = repo.vfs(cls._filename, 'wb')
        fp.write('%i\n' % cls._version)
        fp.write('%s\n' % name)
        fp.write('%s\n' % hex(originalwctx.node()))
        fp.write('%s\n' % hex(pendingctx.node()))
        fp.write('%s\n' % ' '.join([hex(p) for p in repo.dirstate.parents()]))
        fp.write('%s\n' % ' '.join([hex(n) for n in stripnodes]))
        fp.close()

    @classmethod
    def clear(cls, repo):
        # missing file is fine: there may be no unshelve in progress
        util.unlinkpath(repo.join(cls._filename), ignoremissing=True)
177 177
def cleanupoldbackups(repo):
    """Prune the shelve backup directory down to the configured limit.

    Keeps the ``shelve.maxbackups`` most recent backups (default 10),
    deleting older .hg/.patch pairs, but never removes a backup whose
    mtime ties with the newest one being kept (timestamps alone can't
    order them reliably).
    """
    backupvfs = scmutil.vfs(repo.join(backupdir))
    maxbackups = repo.ui.configint('shelve', 'maxbackups', 10)
    bundles = [entry for entry in backupvfs.listdir() if entry.endswith('.hg')]
    # oldest first, keyed by modification time
    byage = sorted((backupvfs.stat(entry).st_mtime, entry)
                   for entry in bundles)
    bordermtime = None
    if 0 < maxbackups < len(byage):
        bordermtime = byage[-maxbackups][0]
    for mtime, entry in byage[:len(byage) - maxbackups]:
        if mtime == bordermtime:
            # keep it, because timestamp can't decide exact order of backups
            continue
        prefix = entry[:-3]
        for suffix in ('hg', 'patch'):
            try:
                backupvfs.unlink(prefix + '.' + suffix)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
198 198
def _aborttransaction(repo):
    '''Abort current transaction for shelve/unshelve, but keep dirstate
    '''
    backupname = 'dirstate.shelve'
    dirstatebackup = None
    try:
        # create backup of (un)shelved dirstate, because aborting transaction
        # should restore dirstate to one at the beginning of the
        # transaction, which doesn't include the result of (un)shelving
        fp = repo.vfs.open(backupname, "w")
        dirstatebackup = backupname
        # clearing _dirty/_dirtypl of dirstate by _writedirstate below
        # is unintentional. but it doesn't cause problem in this case,
        # because no code path refers them until transaction is aborted.
        repo.dirstate._writedirstate(fp) # write in-memory changes forcibly

        tr = repo.currenttransaction()
        tr.abort()

        # restore to backuped dirstate
        repo.vfs.rename(dirstatebackup, 'dirstate')
        # rename succeeded: nothing left for the finally clause to remove
        dirstatebackup = None
    finally:
        if dirstatebackup:
            # we failed before the rename above; drop the stale backup file
            repo.vfs.unlink(dirstatebackup)
224 224
def createcmd(ui, repo, pats, opts):
    """subcommand that creates a new shelve"""
    with repo.wlock():
        # refuse to shelve while another multi-step operation
        # (rebase, graft, another unshelve, ...) is in progress
        cmdutil.checkunfinished(repo)
        return _docreatecmd(ui, repo, pats, opts)
230 230
def _docreatecmd(ui, repo, pats, opts):
    """Create a new shelve: commit the dirty working directory as a
    temporary secret commit, write it out as a bundle + patch pair under
    .hg/shelved, then abort the transaction so the commit never becomes
    permanent while the dirstate is kept clean."""
    def mutableancestors(ctx):
        """return all mutable ancestors for ctx (included)

        Much faster than the revset ancestors(ctx) & draft()"""
        seen = set([nullrev])
        visit = collections.deque()
        visit.append(ctx)
        while visit:
            ctx = visit.popleft()
            yield ctx.node()
            for parent in ctx.parents():
                rev = parent.rev()
                if rev not in seen:
                    seen.add(rev)
                    if parent.mutable():
                        visit.append(parent)

    wctx = repo[None]
    parents = wctx.parents()
    if len(parents) > 1:
        raise error.Abort(_('cannot shelve while merging'))
    parent = parents[0]

    # we never need the user, so we use a generic user for all shelve operations
    user = 'shelve@localhost'
    label = repo._activebookmark or parent.branch() or 'default'

    # slashes aren't allowed in filenames, therefore we rename it
    label = label.replace('/', '_')

    def gennames():
        # candidate shelve names: the label itself, then label-01 .. label-99
        yield label
        for i in xrange(1, 100):
            yield '%s-%02d' % (label, i)

    if parent.node() != nullid:
        desc = "changes to: %s" % parent.description().split('\n', 1)[0]
    else:
        desc = '(changes in empty repository)'

    if not opts['message']:
        opts['message'] = desc

    name = opts['name']

    lock = tr = None
    try:
        lock = repo.lock()

        # use an uncommitted transaction to generate the bundle to avoid
        # pull races. ensure we don't print the abort message to stderr.
        tr = repo.transaction('commit', report=lambda x: None)

        if name:
            if shelvedfile(repo, name, 'hg').exists():
                raise error.Abort(_("a shelved change named '%s' already exists"
                                   ) % name)
        else:
            for n in gennames():
                if not shelvedfile(repo, n, 'hg').exists():
                    name = n
                    break
            else:
                raise error.Abort(_("too many shelved changes named '%s'") %
                                  label)

        # ensure we are not creating a subdirectory or a hidden file
        if '/' in name or '\\' in name:
            raise error.Abort(_('shelved change names may not contain slashes'))
        if name.startswith('.'):
            raise error.Abort(_("shelved change names may not start with '.'"))
        interactive = opts.get('interactive', False)
        # --addremove already stages unknown files, so skip the extra pass
        includeunknown = (opts.get('unknown', False) and
                          not opts.get('addremove', False))

        extra = {}
        if includeunknown:
            s = repo.status(match=scmutil.match(repo[None], pats, opts),
                            unknown=True)
            if s.unknown:
                # record which files were unknown so unshelve can
                # un-add them afterwards (see _dounshelve)
                extra['shelve_unknown'] = '\0'.join(s.unknown)
                repo[None].add(s.unknown)

        def commitfunc(ui, repo, message, match, opts):
            # commit as secret so the temporary commit is never exchanged;
            # temporarily disable mq's applied-patch check if mq is loaded
            hasmq = util.safehasattr(repo, 'mq')
            if hasmq:
                saved, repo.mq.checkapplied = repo.mq.checkapplied, False
            backup = repo.ui.backupconfig('phases', 'new-commit')
            try:
                repo.ui.setconfig('phases', 'new-commit', phases.secret)
                editor = cmdutil.getcommiteditor(editform='shelve.shelve',
                                                 **opts)
                return repo.commit(message, user, opts.get('date'), match,
                                   editor=editor, extra=extra)
            finally:
                repo.ui.restoreconfig(backup)
                if hasmq:
                    repo.mq.checkapplied = saved

        def interactivecommitfunc(ui, repo, *pats, **opts):
            match = scmutil.match(repo['.'], pats, {})
            message = opts['message']
            return commitfunc(ui, repo, message, match, opts)
        if not interactive:
            node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
        else:
            node = cmdutil.dorecord(ui, repo, interactivecommitfunc, None,
                                    False, cmdutil.recordfilter, *pats, **opts)
        if not node:
            # nothing was committed, so there is nothing to shelve
            stat = repo.status(match=scmutil.match(repo[None], pats, opts))
            if stat.deleted:
                ui.status(_("nothing changed (%d missing files, see "
                            "'hg status')\n") % len(stat.deleted))
            else:
                ui.status(_("nothing changed\n"))
            return 1

        bases = list(mutableancestors(repo[node]))
        shelvedfile(repo, name, 'hg').writebundle(bases, node)
        cmdutil.export(repo, [node],
                       fp=shelvedfile(repo, name, 'patch').opener('wb'),
                       opts=mdiff.diffopts(git=True))


        if ui.formatted():
            desc = util.ellipsis(desc, ui.termwidth())
        ui.status(_('shelved as %s\n') % name)
        # revert the working directory to the pre-shelve parent
        hg.update(repo, parent.node())

        # discard the temporary commit but keep the reverted dirstate
        _aborttransaction(repo)
    finally:
        lockmod.release(tr, lock)
364 364
def cleanupcmd(ui, repo):
    """subcommand that deletes all shelves"""

    with repo.wlock():
        for entry, _kind in repo.vfs.readdir('shelved'):
            if entry.rsplit('.', 1)[-1] in ('hg', 'patch'):
                shelvedfile(repo, entry).movetobackup()
        cleanupoldbackups(repo)
374 374
def deletecmd(ui, repo, pats):
    """subcommand that deletes a specific shelve"""
    if not pats:
        raise error.Abort(_('no shelved changes specified!'))
    with repo.wlock():
        try:
            for name in pats:
                for filetype in ('hg', 'patch'):
                    shelvedfile(repo, name, filetype).movetobackup()
            cleanupoldbackups(repo)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            raise error.Abort(_("shelved change '%s' not found") % name)
389 389
def listshelves(repo):
    """return all shelves in repo as list of (time, filename)"""
    try:
        entries = repo.vfs.readdir('shelved')
    except OSError as err:
        if err.errno == errno.ENOENT:
            # no shelved directory yet means no shelves at all
            return []
        raise
    results = []
    for entry, _kind in entries:
        base, ext = entry.rsplit('.', 1)
        if not base or ext != 'patch':
            continue
        mtime = shelvedfile(repo, entry).stat().st_mtime
        results.append((mtime, shelvedfile(repo, base).filename()))
    return sorted(results, reverse=True)
406 406
def listcmd(ui, repo, pats, opts):
    """subcommand that displays the list of shelves"""
    pats = set(pats)
    width = 80
    if not ui.plain():
        width = ui.termwidth()
    # the first (most recent) shelve gets a distinct label
    namelabel = 'shelve.newest'
    for mtime, name in listshelves(repo):
        sname = util.split(name)[1]
        if pats and sname not in pats:
            continue
        ui.write(sname, label=namelabel)
        namelabel = 'shelve.name'
        if ui.quiet:
            ui.write('\n')
            continue
        # pad the name to a fixed 16-column field, then the age to 12
        ui.write(' ' * (16 - len(sname)))
        used = 16
        age = '(%s)' % templatefilters.age(util.makedate(mtime), abbrev=True)
        ui.write(age, label='shelve.age')
        ui.write(' ' * (12 - len(age)))
        used += 12
        with open(name + '.patch', 'rb') as fp:
            # first non-header line of the patch is the description
            while True:
                line = fp.readline()
                if not line:
                    break
                if not line.startswith('#'):
                    desc = line.rstrip()
                    if ui.formatted():
                        desc = util.ellipsis(desc, width - used)
                    ui.write(desc)
                    break
            ui.write('\n')
            if not (opts['patch'] or opts['stat']):
                continue
            # the rest of the file is the diff itself
            difflines = fp.readlines()
            if opts['patch']:
                for chunk, label in patch.difflabel(iter, difflines):
                    ui.write(chunk, label=label)
            if opts['stat']:
                for chunk, label in patch.diffstatui(difflines, width=width,
                                                     git=True):
                    ui.write(chunk, label=label)
451 451
def singlepatchcmds(ui, repo, pats, opts, subcommand):
    """subcommand that displays a single shelf"""
    if len(pats) != 1:
        raise error.Abort(_("--%s expects a single shelf") % subcommand)
    name = pats[0]
    if not shelvedfile(repo, name, 'patch').exists():
        raise error.Abort(_("cannot find shelf %s") % name)
    # delegate the actual rendering to the list subcommand
    listcmd(ui, repo, pats, opts)
462 462
def checkparents(repo, state):
    """check parent while resuming an unshelve"""
    if repo.dirstate.parents() != state.parents:
        raise error.Abort(_('working directory parents do not match unshelve '
                           'state'))
468 468
def pathtofiles(repo, files):
    """Convert repo-relative paths in files to paths relative to the cwd."""
    cwd = repo.getcwd()
    return [repo.pathto(path, cwd) for path in files]
472 472
def unshelveabort(ui, repo, state, opts):
    """subcommand that abort an in-progress unshelve"""
    with repo.lock():
        try:
            checkparents(repo, state)

            # put the stashed rebase state back so rebase --abort can see it
            util.rename(repo.join('unshelverebasestate'),
                        repo.join('rebasestate'))
            try:
                rebase.rebase(ui, repo, **{
                    'abort' : True
                })
            except Exception:
                # abort failed: re-stash the rebase state so a later
                # attempt can still find it
                util.rename(repo.join('rebasestate'),
                            repo.join('unshelverebasestate'))
                raise

            mergefiles(ui, repo, state.wctx, state.pendingctx)
            # remove the commits created during the interrupted unshelve
            repair.strip(ui, repo, state.stripnodes, backup=False,
                         topic='shelve')
        finally:
            shelvedstate.clear(repo)
            ui.warn(_("unshelve of '%s' aborted\n") % state.name)
496 496
def mergefiles(ui, repo, wctx, shelvectx):
    """updates to wctx and merges the changes from shelvectx into the
    dirstate."""
    oldquiet = ui.quiet
    try:
        ui.quiet = True
        hg.update(repo, wctx.node())
        # files touched by the shelved commit or by its parent (the
        # temporary pending-changes commit, if any)
        files = []
        files.extend(shelvectx.files())
        files.extend(shelvectx.parents()[0].files())

        # revert will overwrite unknown files, so move them out of the way
        for file in repo.status(unknown=True).unknown:
            if file in files:
                util.rename(file, scmutil.origpath(ui, repo, file))
        # swallow revert's output; we only want the resulting dirstate
        ui.pushbuffer(True)
        cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
                       *pathtofiles(repo, files),
                       **{'no_backup': True})
        ui.popbuffer()
    finally:
        ui.quiet = oldquiet
519 519
def unshelvecleanup(ui, repo, name, opts):
    """remove related files after an unshelve"""
    if opts['keep']:
        # --keep: leave the shelve files in place for reuse
        return
    for filetype in ('hg', 'patch'):
        shelvedfile(repo, name, filetype).movetobackup()
    cleanupoldbackups(repo)
526 526
def unshelvecontinue(ui, repo, state, opts):
    """subcommand to continue an in-progress unshelve"""
    # We're finishing off a merge. First parent is our original
    # parent, second is the temporary "fake" commit we're unshelving.
    with repo.lock():
        checkparents(repo, state)
        ms = merge.mergestate.read(repo)
        if [f for f in ms if ms[f] == 'u']:
            raise error.Abort(
                _("unresolved conflicts, can't continue"),
                hint=_("see 'hg resolve', then 'hg unshelve --continue'"))

        # restore the stashed rebase state so rebase --continue can run
        util.rename(repo.join('unshelverebasestate'),
                    repo.join('rebasestate'))
        try:
            rebase.rebase(ui, repo, **{
                'continue' : True
            })
        except Exception:
            util.rename(repo.join('rebasestate'),
                        repo.join('unshelverebasestate'))
            raise

        shelvectx = repo['tip']
        if not shelvectx in state.pendingctx.children():
            # rebase was a no-op, so it produced no child commit
            shelvectx = state.pendingctx
        else:
            # only strip the shelvectx if the rebase produced it
            state.stripnodes.append(shelvectx.node())

        mergefiles(ui, repo, state.wctx, shelvectx)

        repair.strip(ui, repo, state.stripnodes, backup=False, topic='shelve')
        shelvedstate.clear(repo)
        unshelvecleanup(ui, repo, state.name, opts)
        ui.status(_("unshelve of '%s' complete\n") % state.name)
564 564
@command('unshelve',
         [('a', 'abort', None,
           _('abort an incomplete unshelve operation')),
          ('c', 'continue', None,
           _('continue an incomplete unshelve operation')),
          ('k', 'keep', None,
           _('keep shelve after unshelving')),
          ('t', 'tool', '', _('specify merge tool')),
          ('', 'date', '',
           _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
         _('hg unshelve [SHELVED]'))
def unshelve(ui, repo, *shelved, **opts):
    """restore a shelved change to the working directory

    This command accepts an optional name of a shelved change to
    restore. If none is given, the most recent shelved change is used.

    If a shelved change is applied successfully, the bundle that
    contains the shelved changes is moved to a backup location
    (.hg/shelve-backup).

    Since you can restore a shelved change on top of an arbitrary
    commit, it is possible that unshelving will result in a conflict
    between your changes and the commits you are unshelving onto. If
    this occurs, you must resolve the conflict, then use
    ``--continue`` to complete the unshelve operation. (The bundle
    will not be moved until you successfully complete the unshelve.)

    (Alternatively, you can use ``--abort`` to abandon an unshelve
    that causes a conflict. This reverts the unshelved changes, and
    leaves the bundle in place.)

    After a successful unshelve, the shelved changes are stored in a
    backup directory. Only the N most recent backups are kept. N
    defaults to 10 but can be overridden using the ``shelve.maxbackups``
    configuration option.

    .. container:: verbose

       Timestamp in seconds is used to decide order of backups. More
       than ``maxbackups`` backups are kept, if same timestamp
       prevents from deciding exact order of them, for safety.
    """
    # hold the wlock for the whole operation; the real work is in
    # _dounshelve
    with repo.wlock():
        return _dounshelve(ui, repo, *shelved, **opts)
610 610
def _dounshelve(ui, repo, *shelved, **opts):
    """Worker for unshelve: handles --abort/--continue dispatch, then
    applies the selected shelve bundle on top of the working directory,
    rebasing and merging as necessary. Caller holds the wlock."""
    abortf = opts['abort']
    continuef = opts['continue']
    if not abortf and not continuef:
        cmdutil.checkunfinished(repo)

    if abortf or continuef:
        if abortf and continuef:
            raise error.Abort(_('cannot use both abort and continue'))
        if shelved:
            raise error.Abort(_('cannot combine abort/continue with '
                               'naming a shelved change'))
        if abortf and opts.get('tool', False):
            ui.warn(_('tool option will be ignored\n'))

        try:
            state = shelvedstate.load(repo)
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            raise error.Abort(_('no unshelve operation underway'))

        if abortf:
            return unshelveabort(ui, repo, state, opts)
        elif continuef:
            return unshelvecontinue(ui, repo, state, opts)
    elif len(shelved) > 1:
        raise error.Abort(_('can only unshelve one change at a time'))
    elif not shelved:
        # no name given: pick the most recent shelve
        shelved = listshelves(repo)
        if not shelved:
            raise error.Abort(_('no shelved changes to apply!'))
        basename = util.split(shelved[0][1])[1]
        ui.status(_("unshelving change '%s'\n") % basename)
    else:
        basename = shelved[0]

    if not shelvedfile(repo, basename, 'patch').exists():
        raise error.Abort(_("shelved change '%s' not found") % basename)

    oldquiet = ui.quiet
    lock = tr = None
    forcemerge = ui.backupconfig('ui', 'forcemerge')
    try:
        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), 'unshelve')
        lock = repo.lock()

        # the transaction is aborted at the end (via _aborttransaction),
        # stripping the temporary commits created below
        tr = repo.transaction('unshelve', report=lambda x: None)
        oldtiprev = len(repo)

        pctx = repo['.']
        tmpwctx = pctx
        # The goal is to have a commit structure like so:
        # ...-> pctx -> tmpwctx -> shelvectx
        # where tmpwctx is an optional commit with the user's pending changes
        # and shelvectx is the unshelved changes. Then we merge it all down
        # to the original pctx.

        # Store pending changes in a commit and remember added in case a shelve
        # contains unknown files that are part of the pending change
        s = repo.status()
        addedbefore = frozenset(s.added)
        if s.modified or s.added or s.removed or s.deleted:
            ui.status(_("temporarily committing pending changes "
                        "(restore with 'hg unshelve --abort')\n"))
            def commitfunc(ui, repo, message, match, opts):
                # commit as secret so the temporary commit is never
                # exchanged; disable mq's applied-patch check if present
                hasmq = util.safehasattr(repo, 'mq')
                if hasmq:
                    saved, repo.mq.checkapplied = repo.mq.checkapplied, False

                backup = repo.ui.backupconfig('phases', 'new-commit')
                try:
                    repo.ui.setconfig('phases', 'new-commit', phases.secret)
                    return repo.commit(message, 'shelve@localhost',
                                       opts.get('date'), match)
                finally:
                    repo.ui.restoreconfig(backup)
                    if hasmq:
                        repo.mq.checkapplied = saved

            tempopts = {}
            tempopts['message'] = "pending changes temporary commit"
            tempopts['date'] = opts.get('date')
            ui.quiet = True
            node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
            tmpwctx = repo[node]

        ui.quiet = True
        shelvedfile(repo, basename, 'hg').applybundle()

        ui.quiet = oldquiet

        # tip is now the shelved commit restored from the bundle
        shelvectx = repo['tip']

        # If the shelve is not immediately on top of the commit
        # we'll be merging with, rebase it to be on top.
        if tmpwctx.node() != shelvectx.parents()[0].node():
            ui.status(_('rebasing shelved changes\n'))
            try:
                rebase.rebase(ui, repo, **{
                    'rev' : [shelvectx.rev()],
                    'dest' : str(tmpwctx.rev()),
                    'keep' : True,
                    'tool' : opts.get('tool', ''),
                })
            except error.InterventionRequired:
                # conflicts: keep the partial state alive so the user can
                # resolve and run 'hg unshelve --continue'
                tr.close()

                stripnodes = [repo.changelog.node(rev)
                              for rev in xrange(oldtiprev, len(repo))]
                shelvedstate.save(repo, basename, pctx, tmpwctx, stripnodes)

                # stash the rebase state out of the way so other commands
                # see an unshelve in progress, not a rebase
                util.rename(repo.join('rebasestate'),
                            repo.join('unshelverebasestate'))
                raise error.InterventionRequired(
                    _("unresolved conflicts (see 'hg resolve', then "
                      "'hg unshelve --continue')"))

            # refresh ctx after rebase completes
            shelvectx = repo['tip']

            if not shelvectx in tmpwctx.children():
                # rebase was a no-op, so it produced no child commit
                shelvectx = tmpwctx

        mergefiles(ui, repo, pctx, shelvectx)

        # Forget any files that were unknown before the shelve, unknown before
        # unshelve started, but are now added.
        shelveunknown = shelvectx.extra().get('shelve_unknown')
        if shelveunknown:
            shelveunknown = frozenset(shelveunknown.split('\0'))
            addedafter = frozenset(repo.status().added)
            toforget = (addedafter & shelveunknown) - addedbefore
            repo[None].forget(toforget)

        shelvedstate.clear(repo)

        # The transaction aborting will strip all the commits for us,
        # but it doesn't update the inmemory structures, so addchangegroup
        # hooks still fire and try to operate on the missing commits.
        # Clean up manually to prevent this.
        repo.unfiltered().changelog.strip(oldtiprev, tr)

        unshelvecleanup(ui, repo, basename, opts)

        _aborttransaction(repo)
    finally:
        ui.quiet = oldquiet
        if tr:
            tr.release()
        lockmod.release(lock)
        ui.restoreconfig(forcemerge)
764 764
@command('shelve',
         [('A', 'addremove', None,
           _('mark new/missing files as added/removed before shelving')),
          ('u', 'unknown', None,
           _('store unknown files in the shelve')),
          ('', 'cleanup', None,
           _('delete all shelved changes')),
          ('', 'date', '',
           _('shelve with the specified commit date'), _('DATE')),
          ('d', 'delete', None,
           _('delete the named shelved change(s)')),
          ('e', 'edit', False,
           _('invoke editor on commit messages')),
          ('l', 'list', None,
           _('list current shelves')),
          ('m', 'message', '',
           _('use text as shelve message'), _('TEXT')),
          ('n', 'name', '',
           _('use the given name for the shelved commit'), _('NAME')),
          ('p', 'patch', None,
           _('show patch')),
          ('i', 'interactive', None,
           _('interactive mode, only works while creating a shelve')),
          ('', 'stat', None,
           _('output diffstat-style summary of changes'))] + commands.walkopts,
         _('hg shelve [OPTION]... [FILE]...'))
def shelvecmd(ui, repo, *pats, **opts):
    '''save and set aside changes from the working directory

    Shelving takes files that "hg status" reports as not clean, saves
    the modifications to a bundle (a shelved change), and reverts the
    files so that their state in the working directory becomes clean.

    To restore these changes to the working directory, using "hg
    unshelve"; this will work even if you switch to a different
    commit.

    When no files are specified, "hg shelve" saves all not-clean
    files. If specific files or directories are named, only changes to
    those files are shelved.

    Each shelved change has a name that makes it easier to find later.
    The name of a shelved change defaults to being based on the active
    bookmark, or if there is no active bookmark, the current named
    branch. To specify a different name, use ``--name``.

    To see a list of existing shelved changes, use the ``--list``
    option. For each shelved change, this will print its name, age,
    and description; use ``--patch`` or ``--stat`` for more details.

    To delete specific shelved changes, use ``--delete``. To delete
    all shelved changes, use ``--cleanup``.
    '''
    # map each option to the set of subcommand "actions" it may be
    # combined with; anything else is a usage error
    allowables = [
        ('addremove', set(['create'])), # 'create' is pseudo action
        ('unknown', set(['create'])),
        ('cleanup', set(['cleanup'])),
#       ('date', set(['create'])), # ignored for passing '--date "0 0"' in tests
        ('delete', set(['delete'])),
        ('edit', set(['create'])),
        ('list', set(['list'])),
        ('message', set(['create'])),
        ('name', set(['create'])),
        ('patch', set(['patch', 'list'])),
        ('stat', set(['stat', 'list'])),
    ]
    def checkopt(opt):
        # return True if opt was given, aborting first if it conflicts
        # with any other given option
        if opts[opt]:
            for i, allowable in allowables:
                if opts[i] and opt not in allowable:
                    raise error.Abort(_("options '--%s' and '--%s' may not be "
                                       "used together") % (opt, i))
            return True
    if checkopt('cleanup'):
        if pats:
            raise error.Abort(_("cannot specify names when using '--cleanup'"))
        return cleanupcmd(ui, repo)
    elif checkopt('delete'):
        return deletecmd(ui, repo, pats)
    elif checkopt('list'):
        return listcmd(ui, repo, pats, opts)
    elif checkopt('patch'):
        return singlepatchcmds(ui, repo, pats, opts, subcommand='patch')
    elif checkopt('stat'):
        return singlepatchcmds(ui, repo, pats, opts, subcommand='stat')
    else:
        return createcmd(ui, repo, pats, opts)
852 852
def extsetup(ui):
    # register the unshelve state file so other multi-step commands
    # refuse to start while an unshelve is interrupted, and so
    # 'hg resolve' can hint at 'hg unshelve --continue'
    cmdutil.unfinishedstates.append(
        [shelvedstate._filename, False, False,
         _('unshelve already in progress'),
         _("use 'hg unshelve --continue' or 'hg unshelve --abort'")])
    cmdutil.afterresolvedstates.append(
        [shelvedstate._filename, _('hg unshelve --continue')])
@@ -1,1136 +1,1137 b''
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11 import struct
12 12 import tempfile
13 13 import weakref
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullid,
19 19 nullrev,
20 20 short,
21 21 )
22 22
23 23 from . import (
24 24 branchmap,
25 25 dagutil,
26 26 discovery,
27 27 error,
28 28 mdiff,
29 29 phases,
30 30 util,
31 31 )
32 32
# struct format strings for the per-revision delta headers.
# cg1: node, p1, p2, linknode (delta base is implicit: the previous rev)
_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
# cg2: adds an explicit delta-base node (generaldelta support)
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
# cg3: adds a 16-bit field for revlog flags (note explicit big-endian '>')
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
36 36
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) == n:
        return data
    # A short read means the peer hung up or the bundle is truncated.
    raise error.Abort(_("stream ended unexpectedly"
                        " (got %d bytes, expected %d)")
                      % (len(data), n))
45 45
def getchunk(stream):
    """return the next chunk from stream as a string"""
    lengthdata = readexactly(stream, 4)
    chunklen = struct.unpack(">l", lengthdata)[0]
    if chunklen > 4:
        # stored length includes the 4-byte length field itself
        return readexactly(stream, chunklen - 4)
    if chunklen:
        # 0 marks end-of-group; anything else below header size is corrupt
        raise error.Abort(_("invalid chunk length %d") % chunklen)
    return ""
55 55
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # the on-wire length counts the 4-byte length field itself
    return struct.pack(">l", 4 + length)
59 59
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # a zero length terminates the current chunk group
    return struct.pack(">l", 0)
63 63
def combineresults(results):
    """logic to combine 0 or more addchangegroup results into one

    Return value follows the cg1unpacker.apply() convention:
    0 = nothing changed, 1 = same number of heads, 1+n = n heads added,
    -1-n = n heads removed.
    """
    changedheads = 0
    for ret in results:
        # If any changegroup result is 0, return 0.
        # (Previously this only set result = 0 and broke out of the loop,
        # so a non-zero head delta accumulated from an *earlier* result
        # silently overrode the 0 below - contradicting this comment.)
        if ret == 0:
            return 0
        if ret < -1:
            changedheads += ret + 1
        elif ret > 1:
            changedheads += ret - 1
    if changedheads > 0:
        return 1 + changedheads
    elif changedheads < 0:
        return -1 + changedheads
    return 1
82 82
# Map of bundle-file type name -> (on-disk header, compression algorithm).
bundletypes = {
    "": ("", None),       # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG20": (),           # special-cased below: bundle2 does its own framing
                          # and compression, so no (header, comp) pair applies
    "HG10UN": ("HG10UN", None),
    "HG10BZ": ("HG10", 'BZ'),
    "HG10GZ": ("HG10GZ", 'GZ'),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
95 95
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if not filename:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
            # only unlink files we created ourselves on failure;
            # caller-supplied paths are left alone
            cleanup = filename
        elif vfs:
            fh = vfs.open(filename, "wb")
        else:
            fh = open(filename, "wb")
        for piece in chunks:
            fh.write(piece)
        # success: disarm the cleanup below
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
127 127
def writebundle(ui, cg, filename, bundletype, vfs=None, compression=None):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    if bundletype == "HG20":
        # bundle2 does its own framing: wrap the changegroup in a
        # 'changegroup' part and let bundle2 handle compression.
        from . import bundle2
        bundle = bundle2.bundle20(ui)
        bundle.setcompression(compression)
        part = bundle.newpart('changegroup', data=cg.getchunks())
        part.addparam('version', cg.version)
        chunkiter = bundle.getchunks()
    else:
        # compression argument is only for the bundle2 case
        assert compression is None
        if cg.version != '01':
            raise error.Abort(_('old bundle types only supports v1 '
                                'changegroups'))
        # legacy bundle: magic header followed by the compressed stream
        header, comp = bundletypes[bundletype]
        if comp not in util.compressors:
            raise error.Abort(_('unknown stream compression type: %s')
                              % comp)
        z = util.compressors[comp]()
        subchunkiter = cg.getchunks()
        def chunkiter():
            yield header
            for chunk in subchunkiter:
                yield z.compress(chunk)
            yield z.flush()
        chunkiter = chunkiter()

    # parse the changegroup data, otherwise we will block
    # in case of sshrepo because we don't know the end of the stream

    # an empty chunkgroup is the end of the changegroup
    # a changegroup has at least 2 chunkgroups (changelog and manifest).
    # after that, an empty chunkgroup is the end of the changegroup
    return writechunks(ui, chunkiter, filename, vfs=vfs)
170 170
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg):
        # 'alg' names the stream compression ('UN', 'BZ', 'GZ'); it must
        # have a matching entry in util.decompressors.
        if alg == 'UN':
            alg = None # get more modern without breaking too much
        if not alg in util.decompressors:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            # legacy 'BZ' streams have their magic stripped already; use
            # the decompressor that re-inserts it
            alg = '_truncatedBZ'
        self._stream = util.decompressors[alg](fh)
        self._type = alg
        # optional per-chunk progress callback, set by apply()
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        # Read the 4-byte big-endian length (which counts itself) and
        # return the payload size; 0 means end of the current group.
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        # cg1 has no explicit delta base: it is implicitly the previous
        # node in the stream, or p1 for the first chunk of a group.
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        # Return the next delta as a dict, or {} at the end of the group.
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta, 'flags': flags}

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, changegroup versions 1 and 2 have a series of groups
        # with one group per file. changegroup 3 has a series of directory
        # manifests before the files.
        count = 0
        emptycount = 0
        while emptycount < self._grouplistcount:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # only groups after changelog+manifest (count > 2) may
                    # mark the end of a group list
                    if empty and count > 2:
                        emptycount += 1
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                # re-emit payload in 1MB slices to bound memory per yield
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # We know that we'll never have more manifests than we had
        # changesets.
        self.callback = prog(_('manifests'), numchanges)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        repo.manifest.addgroup(self, revmap, trp)
        repo.ui.progress(_('manifests'), None)

    def apply(self, repo, srctype, url, emptyok=False,
              targetphase=phases.draft, expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            with repo.transaction("\n".join([srctype,
                                             util.hidepassword(url)])) as tr:
                # The transaction could have been created before and already
                # carries source information. In this case we use the top
                # level data. We overwrite the argument because we need to use
                # the top level value (if they exist) in this function.
                srctype = tr.hookargs.setdefault('source', srctype)
                url = tr.hookargs.setdefault('url', url)
                repo.hook('prechangegroup', throw=True, **tr.hookargs)

                # write changelog data to temp files so concurrent readers
                # will not see an inconsistent view
                cl = repo.changelog
                cl.delayupdate(tr)
                oldheads = cl.heads()

                trp = weakref.proxy(tr)
                # pull off the changeset group
                repo.ui.status(_("adding changesets\n"))
                clstart = len(cl)
                class prog(object):
                    # incrementing per-chunk progress reporter
                    def __init__(self, step, total):
                        self._step = step
                        self._total = total
                        self._count = 1
                    def __call__(self):
                        repo.ui.progress(self._step, self._count,
                                         unit=_('chunks'), total=self._total)
                        self._count += 1
                self.callback = prog(_('changesets'), expectedtotal)

                efiles = set()
                def onchangelog(cl, node):
                    # collect the union of files touched, for the 'files'
                    # progress total below
                    efiles.update(cl.read(node)[3])

                self.changelogheader()
                srccontent = cl.addgroup(self, csmap, trp,
                                         addrevisioncb=onchangelog)
                efiles = len(efiles)

                if not (srccontent or emptyok):
                    raise error.Abort(_("received changelog group is empty"))
                clend = len(cl)
                changesets = clend - clstart
                repo.ui.progress(_('changesets'), None)

                # pull off the manifest group
                repo.ui.status(_("adding manifests\n"))
                self._unpackmanifests(repo, revmap, trp, prog, changesets)

                needfiles = {}
                if repo.ui.configbool('server', 'validate', default=False):
                    # validate incoming csets have their manifests
                    for cset in xrange(clstart, clend):
                        mfnode = repo.changelog.read(
                            repo.changelog.node(cset))[0]
                        mfest = repo.manifest.readdelta(mfnode)
                        # store file nodes we must see
                        for f, n in mfest.iteritems():
                            needfiles.setdefault(f, set()).add(n)

                # process the files
                repo.ui.status(_("adding file changes\n"))
                self.callback = None
                pr = prog(_('files'), efiles)
                newrevs, newfiles = _addchangegroupfiles(
                    repo, self, revmap, trp, pr, needfiles)
                revisions += newrevs
                files += newfiles

                dh = 0
                if oldheads:
                    heads = cl.heads()
                    dh = len(heads) - len(oldheads)
                    for h in heads:
                        # closed branch heads don't count towards the delta
                        if h not in oldheads and repo[h].closesbranch():
                            dh -= 1
                htext = ""
                if dh:
                    htext = _(" (%+d heads)") % dh

                repo.ui.status(_("added %d changesets"
                                 " with %d changes to %d files%s\n")
                               % (changesets, revisions, files, htext))
                repo.invalidatevolatilesets()

                if changesets > 0:
                    if 'node' not in tr.hookargs:
                        tr.hookargs['node'] = hex(cl.node(clstart))
                        tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                        hookargs = dict(tr.hookargs)
                    else:
                        hookargs = dict(tr.hookargs)
                        hookargs['node'] = hex(cl.node(clstart))
                        hookargs['node_last'] = hex(cl.node(clend - 1))
                    repo.hook('pretxnchangegroup', throw=True, **hookargs)

                added = [cl.node(r) for r in xrange(clstart, clend)]
                publishing = repo.publishing()
                if srctype in ('push', 'serve'):
                    # Old servers can not push the boundary themselves.
                    # New servers won't push the boundary if changeset already
                    # exists locally as secret
                    #
                    # We should not use added here but the list of all change in
                    # the bundle
                    if publishing:
                        phases.advanceboundary(repo, tr, phases.public,
                                               srccontent)
                    else:
                        # Those changesets have been pushed from the
                        # outside, their phases are going to be pushed
                        # alongside. Therefor `targetphase` is
                        # ignored.
                        phases.advanceboundary(repo, tr, phases.draft,
                                               srccontent)
                        phases.retractboundary(repo, tr, phases.draft, added)
                elif srctype != 'strip':
                    # publishing only alter behavior during push
                    #
                    # strip should not touch boundary at all
                    phases.retractboundary(repo, tr, targetphase, added)

                if changesets > 0:
                    if srctype != 'strip':
                        # During strip, branchcache is invalid but
                        # coming call to `destroyed` will repair it.
                        # In other case we can safely update cache on
                        # disk.
                        branchmap.updatecache(repo.filtered('served'))

                    def runhooks():
                        # These hooks run when the lock releases, not when the
                        # transaction closes. So it's possible for the changelog
                        # to have changed since we last saw it.
                        if clstart >= len(repo):
                            return

                        # forcefully update the on-disk branch cache
                        repo.ui.debug("updating the branch cache\n")
                        repo.hook("changegroup", **hookargs)

                        for n in added:
                            args = hookargs.copy()
                            args['node'] = hex(n)
                            del args['node_last']
                            repo.hook("incoming", **args)

                        newheads = [h for h in repo.heads()
                                    if h not in oldheads]
                        repo.ui.log("incoming",
                                    "%s incoming changes - new heads: %s\n",
                                    len(added),
                                    ', '.join([hex(c[:6]) for c in newheads]))

                    tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                    lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
497 497
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # cg2 carries the delta base explicitly; prevnode is unused and
        # revlog flags do not exist yet, so they are always 0.
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs, 0
513 513
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # cg3 carries both an explicit delta base and the revlog flags
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # unpack the root manifests exactly like cg1/cg2 ...
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        # ... then consume the extra groups of directory (tree) manifests,
        # each introduced by a filelog-style header carrying the directory
        # name, until the empty separator chunk.
        while True:
            chunkdata = self.filelogheader()
            if not chunkdata:
                break
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifest.dirlog(d)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
543 543
class headerlessfixup(object):
    """File-like wrapper that logically puts back already-read header bytes.

    Reads are served from the saved header first; once it is exhausted,
    they fall through to the wrapped stream.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        buffered = self._h
        if not buffered:
            return readexactly(self._fh, n)
        served, self._h = buffered[:n], buffered[n:]
        if len(served) < n:
            # header ran out mid-request: top up from the real stream
            served += readexactly(self._fh, n - len(served))
        return served
555 555
556 556 def _moddirs(files):
557 557 """Given a set of modified files, find the list of modified directories.
558 558
559 559 This returns a list of (path to changed dir, changed dir) tuples,
560 560 as that's what the one client needs anyway.
561 561
562 562 >>> _moddirs(['a/b/c.py', 'a/b/c.txt', 'a/d/e/f/g.txt', 'i.txt', ])
563 563 [('/', 'a/'), ('a/', 'b/'), ('a/', 'd/'), ('a/d/', 'e/'), ('a/d/e/', 'f/')]
564 564
565 565 """
566 566 alldirs = set()
567 567 for f in files:
568 568 path = f.split('/')[:-1]
569 569 for i in xrange(len(path) - 1, -1, -1):
570 570 dn = '/'.join(path[:i])
571 571 current = dn + '/', path[i] + '/'
572 572 if current in alldirs:
573 573 break
574 574 alldirs.add(current)
575 575 return sorted(alldirs)
576 576
class cg1packer(object):
    """Packer producing version '01' changegroup streams."""
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        # uncompressed-size notes are only interesting in verbose mode
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        """Return the chunk that terminates the current group."""
        return closechunk()

    def fileheader(self, fname):
        """Return the header chunk announcing filelog *fname*."""
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, mfnodes, tmfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        ml = self._repo.manifest
        size = 0
        for chunk in self.group(
                mfnodes, ml, lookuplinknode, units=_('manifests')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (manifests)\n') % size)
        # It looks odd to assert this here, but tmfnodes doesn't get
        # filled in until after we've called lookuplinknode for
        # sending root manifests, so the only way to tell the streams
        # got crossed is to check after we've done all the work.
        assert not tmfnodes

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog
        ml = repo.manifest

        clrevorder = {}
        mfs = {} # needed manifests
        tmfnodes = {}
        fnodes = {} # needed file nodes
        # maps manifest node id -> set(changed files)
        mfchangedfiles = {}

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            mfchangedfiles.setdefault(n, set()).update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)
        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        if fastpathlinkrev:
            lookupmflinknode = mfs.__getitem__
        else:
            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = mfs[x]
                # We no longer actually care about reading deltas of
                # the manifest here, because we already know the list
                # of changed files, so for treemanifests (which
                # lazily-load anyway to *generate* a readdelta) we can
                # just load them with read() and then we'll actually
                # be able to correctly load node IDs from the
                # submanifest entries.
                if 'treemanifest' in repo.requirements:
                    mdata = ml.read(x)
                else:
                    mdata = ml.readfast(x)
                for f in mfchangedfiles[x]:
                    try:
                        n = mdata[f]
                    except KeyError:
                        continue
                    # record the first changeset introducing this filelog
                    # version
                    fclnodes = fnodes.setdefault(f, {})
                    fclnode = fclnodes.setdefault(n, clnode)
                    if clrevorder[clnode] < clrevorder[fclnode]:
                        fclnodes[n] = clnode
                # gather list of changed treemanifest nodes
                if 'treemanifest' in repo.requirements:
                    submfs = {'/': mdata}
                    for dn, bn in _moddirs(mfchangedfiles[x]):
                        submf = submfs[dn]
                        submf = submf._dirs[bn]
                        submfs[submf.dir()] = submf
                        # BUG FIX: this previously compared against
                        # 'fclnode' and assigned to key 'n' - stale
                        # values left over from the *file* loop above
                        # (and unbound if every file hit KeyError). We
                        # must track the earliest linkrev per
                        # treemanifest node, keyed by that node.
                        tmfclnodes = tmfnodes.setdefault(submf.dir(), {})
                        tmfclnode = tmfclnodes.setdefault(submf._node, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[submf._node] = clnode
                return clnode

        mfnodes = self.prune(ml, mfs, commonrevs)
        for x in self._packmanifests(
                mfnodes, tmfnodes, lookupmflinknode):
            yield x

        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        changedfiles = set()
        for x in mfchangedfiles.itervalues():
            changedfiles.update(x)
        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """Yield the file-group portion of the changegroup stream."""
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        # cg1 deltas are always against the previous rev in the stream
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        """Yield the chunks (header, meta, delta) encoding one revision."""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            # censored revisions are shipped as full texts (possibly the
            # tombstone) rather than deltas against censored data
            try:
                delta = revlog.revision(node)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
891 891
class cg2packer(cg1packer):
    """Packer for version '02' changegroups.

    cg2 understands generaldelta, so a revision may be deltaed against
    either parent or the previously sent revision; the chosen base node
    travels explicitly in the delta header.
    """
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        # Reuse the stored delta base only when it isn't a full snapshot
        # and the remote can be assumed to have it (a parent or the
        # previously sent revision); otherwise fall back to prev.
        if dp != nullrev and dp in (p1, p2, prev):
            return dp
        return prev

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        fields = (node, p1n, p2n, basenode, linknode)
        return struct.pack(self.deltaheader, *fields)
915 915
class cg3packer(cg2packer):
    """Packer for version '03' changegroups.

    cg3 extends cg2 with revlog flags in the delta header and with
    treemanifest support: after the root-manifest group, one group per
    changed directory is emitted.
    """
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, mfnodes, tmfnodes, lookuplinknode):
        # Note that debug prints are super confusing in this code, as
        # tmfnodes gets populated by the calls to lookuplinknode in
        # the superclass's manifest packer. In the future we should
        # probably see if we can refactor this somehow to be less
        # confusing.
        rootchunks = super(cg3packer, self)._packmanifests(
            mfnodes, {}, lookuplinknode)
        for chunk in rootchunks:
            yield chunk
        dirlog = self._repo.manifest.dirlog
        for name, nodes in tmfnodes.iteritems():
            # For now, directory headers are simply file headers with
            # a trailing '/' on the path (already in the name).
            yield self.fileheader(name)
            for chunk in self.group(nodes, dirlog(name), nodes.get):
                yield chunk
        yield self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        fields = (node, p1n, p2n, basenode, linknode, flags)
        return struct.pack(self.deltaheader, *fields)
941 941
# Maps a changegroup wire-format version string to its (packer, unpacker)
# class pair.
_packermap = {'01': (cg1packer, cg1unpacker),
             # cg2 adds support for exchanging generaldelta
             '02': (cg2packer, cg2unpacker),
             # cg3 adds support for exchanging revlog flags and treemanifests
             '03': (cg3packer, cg3unpacker),
             }
948 948
def supportedversions(repo):
    """Return the set of changegroup versions this repo can exchange."""
    versions = set(_packermap.keys())
    usetreemanifest = ('treemanifest' in repo.requirements or
                       repo.ui.configbool('experimental', 'treemanifest'))
    if usetreemanifest:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
    elif not repo.ui.configbool('experimental', 'changegroup3'):
        versions.discard('03')
    return versions
963 963
def safeversion(repo):
    """Return the smallest changegroup version it's safe to assume
    clients of the repo will support. For example, all hg versions that
    support generaldelta also support changegroup 02."""
    versions = supportedversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)
972 973
def getbundler(version, repo, bundlecaps=None):
    """Instantiate the packer class registered for *version*."""
    assert version in supportedversions(repo)
    packercls = _packermap[version][0]
    return packercls(repo, bundlecaps)
976 977
def getunbundler(version, fh, alg):
    """Instantiate the unpacker class registered for *version*."""
    unpackercls = _packermap[version][1]
    return unpackercls(fh, alg)
979 980
def _changegroupinfo(repo, nodes, source):
    """Report which changesets are being bundled (verbose/debug only)."""
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if not repo.ui.debugflag:
        return
    repo.ui.debug("list of changesets:\n")
    for n in nodes:
        repo.ui.debug("%s\n" % hex(n))
987 988
def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    """Run the outgoing hooks and return the raw changegroup generator
    produced by *bundler* for the revisions described by *outgoing*."""
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1003 1004
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Like getsubsetraw(), but wrap the raw stream in an unbundler."""
    raw = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return getunbundler(bundler.version, util.chunkbuffer(raw), None)
1007 1008
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = repo.changelog
    if not roots:
        roots = [nullid]
    # Discovery bases are the parents of the roots that sit outside the
    # requested range.
    discbases = [p for n in roots
                 for p in cl.parents(n) if p != nullid]
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    included = set(csets)
    discbases = [n for n in discbases if n not in included]
    outgoing = discovery.outgoing(cl, discbases, heads)
    bundler = getbundler(version, repo)
    return getsubset(repo, outgoing, bundler, source)
1034 1035
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    if outgoing.missing:
        bundler = getbundler(version, repo, bundlecaps)
        return getsubsetraw(repo, outgoing, bundler, source)
    # Nothing to send.
    return None
1045 1046
def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
                        version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if outgoing.missing:
        bundler = getbundler(version, repo, bundlecaps)
        return getsubset(repo, outgoing, bundler, source)
    # Nothing to send.
    return None
1056 1057
def computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if not common:
        common = [nullid]
    else:
        hasnode = cl.hasnode
        # Drop common nodes the local repo doesn't actually know about.
        common = [n for n in common if hasnode(n)]
    heads = heads or cl.heads()
    return discovery.outgoing(cl, common, heads)
1075 1076
def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None,
                   version='01'):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    return getlocalchangegroup(repo, source,
                               computeoutgoing(repo, heads, common),
                               bundlecaps=bundlecaps, version=version)
1089 1090
def changegroup(repo, basenodes, source):
    """Return a changegroup of everything from *basenodes* to the heads."""
    # to avoid a race we use changegroupsubset() (issue1320)
    heads = repo.heads()
    return changegroupsubset(repo, basenodes, heads, source)
1093 1094
def _addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
    """Apply the filelog section of a changegroup to the repo.

    Reads (file header, delta group) pairs from *source* until an empty
    header ends the stream, adds each group to the matching filelog, and
    checks off the expected file nodes listed in *needfiles*. Returns a
    (revisions, files) tuple of how much was added.

    Raises error.Abort when a received group is empty, references a
    censored delta base, contains an entry no changeset needs, or when
    nodes listed in *needfiles* are still missing at the end.
    """
    revisions = 0
    files = 0
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            # Empty header terminates the filelog section.
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()
        fl = repo.file(f)
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        files += 1
        if f in needfiles:
            needs = needfiles[f]
            # Every newly added node must be one we were expecting.
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    # Anything still listed in needfiles was never received; verify it
    # already exists locally, otherwise the repo would be incomplete.
    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
@@ -1,455 +1,471 b''
1 1 $ cat << EOF >> $HGRCPATH
2 2 > [format]
3 3 > usegeneraldelta=yes
4 4 > EOF
5 5
6 6 Set up repo
7 7
8 8 $ hg --config experimental.treemanifest=True init repo
9 9 $ cd repo
10 10
11 11 Requirements get set on init
12 12
13 13 $ grep treemanifest .hg/requires
14 14 treemanifest
15 15
16 16 Without directories, looks like any other repo
17 17
18 18 $ echo 0 > a
19 19 $ echo 0 > b
20 20 $ hg ci -Aqm initial
21 21 $ hg debugdata -m 0
22 22 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
23 23 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
24 24
25 25 Submanifest is stored in separate revlog
26 26
27 27 $ mkdir dir1
28 28 $ echo 1 > dir1/a
29 29 $ echo 1 > dir1/b
30 30 $ echo 1 > e
31 31 $ hg ci -Aqm 'add dir1'
32 32 $ hg debugdata -m 1
33 33 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
34 34 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
35 35 dir1\x008b3ffd73f901e83304c83d33132c8e774ceac44et (esc)
36 36 e\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
37 37 $ hg debugdata --dir dir1 0
38 38 a\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
39 39 b\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
40 40
41 41 Can add nested directories
42 42
43 43 $ mkdir dir1/dir1
44 44 $ echo 2 > dir1/dir1/a
45 45 $ echo 2 > dir1/dir1/b
46 46 $ mkdir dir1/dir2
47 47 $ echo 2 > dir1/dir2/a
48 48 $ echo 2 > dir1/dir2/b
49 49 $ hg ci -Aqm 'add dir1/dir1'
50 50 $ hg files -r .
51 51 a
52 52 b
53 53 dir1/a (glob)
54 54 dir1/b (glob)
55 55 dir1/dir1/a (glob)
56 56 dir1/dir1/b (glob)
57 57 dir1/dir2/a (glob)
58 58 dir1/dir2/b (glob)
59 59 e
60 60
61 61 Revision is not created for unchanged directory
62 62
63 63 $ mkdir dir2
64 64 $ echo 3 > dir2/a
65 65 $ hg add dir2
66 66 adding dir2/a (glob)
67 67 $ hg debugindex --dir dir1 > before
68 68 $ hg ci -qm 'add dir2'
69 69 $ hg debugindex --dir dir1 > after
70 70 $ diff before after
71 71 $ rm before after
72 72
73 73 Removing directory does not create a revlog entry
74 74
75 75 $ hg rm dir1/dir1
76 76 removing dir1/dir1/a (glob)
77 77 removing dir1/dir1/b (glob)
78 78 $ hg debugindex --dir dir1/dir1 > before
79 79 $ hg ci -qm 'remove dir1/dir1'
80 80 $ hg debugindex --dir dir1/dir1 > after
81 81 $ diff before after
82 82 $ rm before after
83 83
84 84 Check that hg files (calls treemanifest.walk()) works
85 85 without loading all directory revlogs
86 86
87 87 $ hg co 'desc("add dir2")'
88 88 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
89 89 $ mv .hg/store/meta/dir2 .hg/store/meta/dir2-backup
90 90 $ hg files -r . dir1
91 91 dir1/a (glob)
92 92 dir1/b (glob)
93 93 dir1/dir1/a (glob)
94 94 dir1/dir1/b (glob)
95 95 dir1/dir2/a (glob)
96 96 dir1/dir2/b (glob)
97 97
98 98 Check that status between revisions works (calls treemanifest.matches())
99 99 without loading all directory revlogs
100 100
101 101 $ hg status --rev 'desc("add dir1")' --rev . dir1
102 102 A dir1/dir1/a
103 103 A dir1/dir1/b
104 104 A dir1/dir2/a
105 105 A dir1/dir2/b
106 106 $ mv .hg/store/meta/dir2-backup .hg/store/meta/dir2
107 107
108 108 Merge creates 2-parent revision of directory revlog
109 109
110 110 $ echo 5 > dir1/a
111 111 $ hg ci -Aqm 'modify dir1/a'
112 112 $ hg co '.^'
113 113 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
114 114 $ echo 6 > dir1/b
115 115 $ hg ci -Aqm 'modify dir1/b'
116 116 $ hg merge 'desc("modify dir1/a")'
117 117 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
118 118 (branch merge, don't forget to commit)
119 119 $ hg ci -m 'conflict-free merge involving dir1/'
120 120 $ cat dir1/a
121 121 5
122 122 $ cat dir1/b
123 123 6
124 124 $ hg debugindex --dir dir1
125 125 rev offset length delta linkrev nodeid p1 p2
126 126 0 0 54 -1 1 8b3ffd73f901 000000000000 000000000000
127 127 1 54 68 0 2 68e9d057c5a8 8b3ffd73f901 000000000000
128 128 2 122 12 1 4 4698198d2624 68e9d057c5a8 000000000000
129 129 3 134 55 1 5 44844058ccce 68e9d057c5a8 000000000000
130 130 4 189 55 1 6 bf3d9b744927 68e9d057c5a8 000000000000
131 131 5 244 55 4 7 dde7c0af2a03 bf3d9b744927 44844058ccce
132 132
133 133 Merge keeping directory from parent 1 does not create revlog entry. (Note that
134 134 dir1's manifest does change, but only because dir1/a's filelog changes.)
135 135
136 136 $ hg co 'desc("add dir2")'
137 137 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
138 138 $ echo 8 > dir2/a
139 139 $ hg ci -m 'modify dir2/a'
140 140 created new head
141 141
142 142 $ hg debugindex --dir dir2 > before
143 143 $ hg merge 'desc("modify dir1/a")'
144 144 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
145 145 (branch merge, don't forget to commit)
146 146 $ hg revert -r 'desc("modify dir2/a")' .
147 147 reverting dir1/a (glob)
148 148 $ hg ci -m 'merge, keeping parent 1'
149 149 $ hg debugindex --dir dir2 > after
150 150 $ diff before after
151 151 $ rm before after
152 152
153 153 Merge keeping directory from parent 2 does not create revlog entry. (Note that
154 154 dir2's manifest does change, but only because dir2/a's filelog changes.)
155 155
156 156 $ hg co 'desc("modify dir2/a")'
157 157 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
158 158 $ hg debugindex --dir dir1 > before
159 159 $ hg merge 'desc("modify dir1/a")'
160 160 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
161 161 (branch merge, don't forget to commit)
162 162 $ hg revert -r 'desc("modify dir1/a")' .
163 163 reverting dir2/a (glob)
164 164 $ hg ci -m 'merge, keeping parent 2'
165 165 created new head
166 166 $ hg debugindex --dir dir1 > after
167 167 $ diff before after
168 168 $ rm before after
169 169
170 170 Create flat source repo for tests with mixed flat/tree manifests
171 171
172 172 $ cd ..
173 173 $ hg init repo-flat
174 174 $ cd repo-flat
175 175
176 176 Create a few commits with flat manifest
177 177
178 178 $ echo 0 > a
179 179 $ echo 0 > b
180 180 $ echo 0 > e
181 181 $ for d in dir1 dir1/dir1 dir1/dir2 dir2
182 182 > do
183 183 > mkdir $d
184 184 > echo 0 > $d/a
185 185 > echo 0 > $d/b
186 186 > done
187 187 $ hg ci -Aqm initial
188 188
189 189 $ echo 1 > a
190 190 $ echo 1 > dir1/a
191 191 $ echo 1 > dir1/dir1/a
192 192 $ hg ci -Aqm 'modify on branch 1'
193 193
194 194 $ hg co 0
195 195 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
196 196 $ echo 2 > b
197 197 $ echo 2 > dir1/b
198 198 $ echo 2 > dir1/dir1/b
199 199 $ hg ci -Aqm 'modify on branch 2'
200 200
201 201 $ hg merge 1
202 202 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
203 203 (branch merge, don't forget to commit)
204 204 $ hg ci -m 'merge of flat manifests to new flat manifest'
205 205
206 206 Create clone with tree manifests enabled
207 207
208 208 $ cd ..
209 209 $ hg clone --pull --config experimental.treemanifest=1 repo-flat repo-mixed
210 210 requesting all changes
211 211 adding changesets
212 212 adding manifests
213 213 adding file changes
214 214 added 4 changesets with 17 changes to 11 files
215 215 updating to branch default
216 216 11 files updated, 0 files merged, 0 files removed, 0 files unresolved
217 217 $ cd repo-mixed
218 218 $ test -f .hg/store/meta
219 219 [1]
220 220 $ grep treemanifest .hg/requires
221 221 treemanifest
222 222
223 223 Commit should store revlog per directory
224 224
225 225 $ hg co 1
226 226 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
227 227 $ echo 3 > a
228 228 $ echo 3 > dir1/a
229 229 $ echo 3 > dir1/dir1/a
230 230 $ hg ci -m 'first tree'
231 231 created new head
232 232 $ find .hg/store/meta | sort
233 233 .hg/store/meta
234 234 .hg/store/meta/dir1
235 235 .hg/store/meta/dir1/00manifest.i
236 236 .hg/store/meta/dir1/dir1
237 237 .hg/store/meta/dir1/dir1/00manifest.i
238 238 .hg/store/meta/dir1/dir2
239 239 .hg/store/meta/dir1/dir2/00manifest.i
240 240 .hg/store/meta/dir2
241 241 .hg/store/meta/dir2/00manifest.i
242 242
243 243 Merge of two trees
244 244
245 245 $ hg co 2
246 246 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
247 247 $ hg merge 1
248 248 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
249 249 (branch merge, don't forget to commit)
250 250 $ hg ci -m 'merge of flat manifests to new tree manifest'
251 251 created new head
252 252 $ hg diff -r 3
253 253
254 254 Parent of tree root manifest should be flat manifest, and two for merge
255 255
256 256 $ hg debugindex -m
257 257 rev offset length delta linkrev nodeid p1 p2
258 258 0 0 80 -1 0 40536115ed9e 000000000000 000000000000
259 259 1 80 83 0 1 f3376063c255 40536115ed9e 000000000000
260 260 2 163 89 0 2 5d9b9da231a2 40536115ed9e 000000000000
261 261 3 252 83 2 3 d17d663cbd8a 5d9b9da231a2 f3376063c255
262 262 4 335 124 1 4 51e32a8c60ee f3376063c255 000000000000
263 263 5 459 126 2 5 cc5baa78b230 5d9b9da231a2 f3376063c255
264 264
265 265
266 266 Status across flat/tree boundary should work
267 267
268 268 $ hg status --rev '.^' --rev .
269 269 M a
270 270 M dir1/a
271 271 M dir1/dir1/a
272 272
273 273
274 274 Turning off treemanifest config has no effect
275 275
276 276 $ hg debugindex .hg/store/meta/dir1/00manifest.i
277 277 rev offset length delta linkrev nodeid p1 p2
278 278 0 0 127 -1 4 064927a0648a 000000000000 000000000000
279 279 1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
280 280 $ echo 2 > dir1/a
281 281 $ hg --config experimental.treemanifest=False ci -qm 'modify dir1/a'
282 282 $ hg debugindex .hg/store/meta/dir1/00manifest.i
283 283 rev offset length delta linkrev nodeid p1 p2
284 284 0 0 127 -1 4 064927a0648a 000000000000 000000000000
285 285 1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
286 286 2 238 55 1 6 5b16163a30c6 25ecb8cb8618 000000000000
287 287
288 288 Stripping and recovering changes should work
289 289
290 290 $ hg st --change tip
291 291 M dir1/a
292 292 $ hg --config extensions.strip= strip tip
293 293 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
294 294 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/51cfd7b1e13b-78a2f3ed-backup.hg (glob)
295 295 $ hg unbundle -q .hg/strip-backup/*
296 296 $ hg st --change tip
297 297 M dir1/a
298 298
299 Shelving and unshelving should work
300
301 $ echo foo >> dir1/a
302 $ hg --config extensions.shelve= shelve
303 shelved as default
304 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
305 $ hg --config extensions.shelve= unshelve
306 unshelving change 'default'
307 $ hg diff --nodates
308 diff -r 708a273da119 dir1/a
309 --- a/dir1/a
310 +++ b/dir1/a
311 @@ -1,1 +1,2 @@
312 1
313 +foo
314
299 315 Create deeper repo with tree manifests.
300 316
301 317 $ cd ..
302 318 $ hg --config experimental.treemanifest=True init deeprepo
303 319 $ cd deeprepo
304 320
305 321 $ mkdir a
306 322 $ mkdir b
307 323 $ mkdir b/bar
308 324 $ mkdir b/bar/orange
309 325 $ mkdir b/bar/orange/fly
310 326 $ mkdir b/foo
311 327 $ mkdir b/foo/apple
312 328 $ mkdir b/foo/apple/bees
313 329
314 330 $ touch a/one.txt
315 331 $ touch a/two.txt
316 332 $ touch b/bar/fruits.txt
317 333 $ touch b/bar/orange/fly/gnat.py
318 334 $ touch b/bar/orange/fly/housefly.txt
319 335 $ touch b/foo/apple/bees/flower.py
320 336 $ touch c.txt
321 337 $ touch d.py
322 338
323 339 $ hg ci -Aqm 'initial'
324 340
325 341 We'll see that visitdir works by removing some treemanifest revlogs and running
326 342 the files command with various parameters.
327 343
328 344 Test files from the root.
329 345
330 346 $ hg files -r .
331 347 a/one.txt (glob)
332 348 a/two.txt (glob)
333 349 b/bar/fruits.txt (glob)
334 350 b/bar/orange/fly/gnat.py (glob)
335 351 b/bar/orange/fly/housefly.txt (glob)
336 352 b/foo/apple/bees/flower.py (glob)
337 353 c.txt
338 354 d.py
339 355
340 356 Excludes with a glob should not exclude everything from the glob's root
341 357
342 358 $ hg files -r . -X 'b/fo?' b
343 359 b/bar/fruits.txt (glob)
344 360 b/bar/orange/fly/gnat.py (glob)
345 361 b/bar/orange/fly/housefly.txt (glob)
346 362
347 363 Test files for a subdirectory.
348 364
349 365 $ mv .hg/store/meta/a oldmf
350 366 $ hg files -r . b
351 367 b/bar/fruits.txt (glob)
352 368 b/bar/orange/fly/gnat.py (glob)
353 369 b/bar/orange/fly/housefly.txt (glob)
354 370 b/foo/apple/bees/flower.py (glob)
355 371 $ mv oldmf .hg/store/meta/a
356 372
357 373 Test files with just includes and excludes.
358 374
359 375 $ mv .hg/store/meta/a oldmf
360 376 $ mv .hg/store/meta/b/bar/orange/fly oldmf2
361 377 $ mv .hg/store/meta/b/foo/apple/bees oldmf3
362 378 $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
363 379 b/bar/fruits.txt (glob)
364 380 $ mv oldmf .hg/store/meta/a
365 381 $ mv oldmf2 .hg/store/meta/b/bar/orange/fly
366 382 $ mv oldmf3 .hg/store/meta/b/foo/apple/bees
367 383
368 384 Test files for a subdirectory, excluding a directory within it.
369 385
370 386 $ mv .hg/store/meta/a oldmf
371 387 $ mv .hg/store/meta/b/foo oldmf2
372 388 $ hg files -r . -X path:b/foo b
373 389 b/bar/fruits.txt (glob)
374 390 b/bar/orange/fly/gnat.py (glob)
375 391 b/bar/orange/fly/housefly.txt (glob)
376 392 $ mv oldmf .hg/store/meta/a
377 393 $ mv oldmf2 .hg/store/meta/b/foo
378 394
379 395 Test files for a sub directory, including only a directory within it, and
380 396 including an unrelated directory.
381 397
382 398 $ mv .hg/store/meta/a oldmf
383 399 $ mv .hg/store/meta/b/foo oldmf2
384 400 $ hg files -r . -I path:b/bar/orange -I path:a b
385 401 b/bar/orange/fly/gnat.py (glob)
386 402 b/bar/orange/fly/housefly.txt (glob)
387 403 $ mv oldmf .hg/store/meta/a
388 404 $ mv oldmf2 .hg/store/meta/b/foo
389 405
390 406 Test files for a pattern, including a directory, and excluding a directory
391 407 within that.
392 408
393 409 $ mv .hg/store/meta/a oldmf
394 410 $ mv .hg/store/meta/b/foo oldmf2
395 411 $ mv .hg/store/meta/b/bar/orange oldmf3
396 412 $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange
397 413 b/bar/fruits.txt (glob)
398 414 $ mv oldmf .hg/store/meta/a
399 415 $ mv oldmf2 .hg/store/meta/b/foo
400 416 $ mv oldmf3 .hg/store/meta/b/bar/orange
401 417
402 418 Add some more changes to the deep repo
403 419 $ echo narf >> b/bar/fruits.txt
404 420 $ hg ci -m narf
405 421 $ echo troz >> b/bar/orange/fly/gnat.py
406 422 $ hg ci -m troz
407 423
408 424 Test cloning a treemanifest repo over http.
409 425 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
410 426 $ cat hg.pid >> $DAEMON_PIDS
411 427 $ cd ..
412 428 We can clone even with the knob turned off and we'll get a treemanifest repo.
413 429 $ hg clone --config experimental.treemanifest=False \
414 430 > --config experimental.changegroup3=True \
415 431 > http://localhost:$HGPORT deepclone
416 432 requesting all changes
417 433 adding changesets
418 434 adding manifests
419 435 adding file changes
420 436 added 3 changesets with 10 changes to 8 files
421 437 updating to branch default
422 438 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
423 439 No server errors.
424 440 $ cat deeprepo/errors.log
425 441 requires got updated to include treemanifest
426 442 $ cat deepclone/.hg/requires | grep treemanifest
427 443 treemanifest
428 444 Tree manifest revlogs exist.
429 445 $ find deepclone/.hg/store/meta | sort
430 446 deepclone/.hg/store/meta
431 447 deepclone/.hg/store/meta/a
432 448 deepclone/.hg/store/meta/a/00manifest.i
433 449 deepclone/.hg/store/meta/b
434 450 deepclone/.hg/store/meta/b/00manifest.i
435 451 deepclone/.hg/store/meta/b/bar
436 452 deepclone/.hg/store/meta/b/bar/00manifest.i
437 453 deepclone/.hg/store/meta/b/bar/orange
438 454 deepclone/.hg/store/meta/b/bar/orange/00manifest.i
439 455 deepclone/.hg/store/meta/b/bar/orange/fly
440 456 deepclone/.hg/store/meta/b/bar/orange/fly/00manifest.i
441 457 deepclone/.hg/store/meta/b/foo
442 458 deepclone/.hg/store/meta/b/foo/00manifest.i
443 459 deepclone/.hg/store/meta/b/foo/apple
444 460 deepclone/.hg/store/meta/b/foo/apple/00manifest.i
445 461 deepclone/.hg/store/meta/b/foo/apple/bees
446 462 deepclone/.hg/store/meta/b/foo/apple/bees/00manifest.i
447 463 Verify passes.
448 464 $ cd deepclone
449 465 $ hg verify
450 466 checking changesets
451 467 checking manifests
452 468 crosschecking files in changesets and manifests
453 469 checking files
454 470 8 files, 3 changesets, 10 total revisions
455 471 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now