changegroup: replace changegroupsubset with makechangegroup...
Durham Goode
r34099:f7d41b85 default
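This change migrates shelve's bundle creation from the changegroupsubset
API to the outgoing-based makechangegroup API. A minimal before/after
sketch of the call site, as shown in the shelve.py hunk below:

    # before: roots and heads are passed straight to the builder
    cg = changegroup.changegroupsubset(repo, bases, [node], 'shelve',
                                       version=cgversion)

    # after: the node set is first described by a discovery.outgoing
    # object (missingroots/missingheads), then packed
    outgoing = discovery.outgoing(repo, missingroots=bases,
                                  missingheads=[node])
    cg = changegroup.makechangegroup(repo, outgoing, cgversion, 'shelve')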
@@ -1,1043 +1,1047
1 1 # shelve.py - save/restore working directory state
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """save and restore changes to the working directory
9 9
10 10 The "hg shelve" command saves changes made to the working directory
11 11 and reverts those changes, resetting the working directory to a clean
12 12 state.
13 13
14 14 Later on, the "hg unshelve" command restores the changes saved by "hg
15 15 shelve". Changes can be restored even after updating to a different
16 16 parent, in which case Mercurial's merge machinery will resolve any
17 17 conflicts if necessary.
18 18
19 19 You can have more than one shelved change outstanding at a time; each
20 20 shelved change has a distinct name. For details, see the help for "hg
21 21 shelve".
22 22 """
23 23 from __future__ import absolute_import
24 24
25 25 import collections
26 26 import errno
27 27 import itertools
28 28
29 29 from mercurial.i18n import _
30 30 from mercurial import (
31 31 bookmarks,
32 32 bundle2,
33 33 bundlerepo,
34 34 changegroup,
35 35 cmdutil,
36 discovery,
36 37 error,
37 38 exchange,
38 39 hg,
39 40 lock as lockmod,
40 41 mdiff,
41 42 merge,
42 43 node as nodemod,
43 44 patch,
44 45 phases,
45 46 registrar,
46 47 repair,
47 48 scmutil,
48 49 templatefilters,
49 50 util,
50 51 vfs as vfsmod,
51 52 )
52 53
53 54 from . import (
54 55 rebase,
55 56 )
56 57
57 58 cmdtable = {}
58 59 command = registrar.command(cmdtable)
59 60 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
60 61 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
61 62 # be specifying the version(s) of Mercurial they are tested with, or
62 63 # leave the attribute unspecified.
63 64 testedwith = 'ships-with-hg-core'
64 65
65 66 backupdir = 'shelve-backup'
66 67 shelvedir = 'shelved'
67 68 shelvefileextensions = ['hg', 'patch', 'oshelve']
68 69 # universal extension is present in all types of shelves
69 70 patchextension = 'patch'
70 71
71 72 # we never need the user, so we use a
72 73 # generic user for all shelve operations
73 74 shelveuser = 'shelve@localhost'
74 75
75 76 class shelvedfile(object):
76 77 """Helper for the file storing a single shelve
77 78
78 79 Handles common functions on shelve files (.hg/.patch) using
79 80 the vfs layer"""
80 81 def __init__(self, repo, name, filetype=None):
81 82 self.repo = repo
82 83 self.name = name
83 84 self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir))
84 85 self.backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
85 86 self.ui = self.repo.ui
86 87 if filetype:
87 88 self.fname = name + '.' + filetype
88 89 else:
89 90 self.fname = name
90 91
91 92 def exists(self):
92 93 return self.vfs.exists(self.fname)
93 94
94 95 def filename(self):
95 96 return self.vfs.join(self.fname)
96 97
97 98 def backupfilename(self):
98 99 def gennames(base):
99 100 yield base
100 101 base, ext = base.rsplit('.', 1)
101 102 for i in itertools.count(1):
102 103 yield '%s-%d.%s' % (base, i, ext)
103 104
104 105 name = self.backupvfs.join(self.fname)
105 106 for n in gennames(name):
106 107 if not self.backupvfs.exists(n):
107 108 return n
108 109
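# For illustration: with self.fname == 'default.patch', gennames() above
# yields 'default.patch', 'default-1.patch', 'default-2.patch', ... and
# backupfilename() returns the first candidate that does not already
# exist in .hg/shelve-backup.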
109 110 def movetobackup(self):
110 111 if not self.backupvfs.isdir():
111 112 self.backupvfs.makedir()
112 113 util.rename(self.filename(), self.backupfilename())
113 114
114 115 def stat(self):
115 116 return self.vfs.stat(self.fname)
116 117
117 118 def opener(self, mode='rb'):
118 119 try:
119 120 return self.vfs(self.fname, mode)
120 121 except IOError as err:
121 122 if err.errno != errno.ENOENT:
122 123 raise
123 124 raise error.Abort(_("shelved change '%s' not found") % self.name)
124 125
125 126 def applybundle(self):
126 127 fp = self.opener()
127 128 try:
128 129 gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
129 130 bundle2.applybundle(self.repo, gen, self.repo.currenttransaction(),
130 131 source='unshelve',
131 132 url='bundle:' + self.vfs.join(self.fname),
132 133 targetphase=phases.secret)
133 134 finally:
134 135 fp.close()
135 136
136 137 def bundlerepo(self):
137 138 return bundlerepo.bundlerepository(self.repo.baseui, self.repo.root,
138 139 self.vfs.join(self.fname))
139 140 def writebundle(self, bases, node):
140 141 cgversion = changegroup.safeversion(self.repo)
141 142 if cgversion == '01':
142 143 btype = 'HG10BZ'
143 144 compression = None
144 145 else:
145 146 btype = 'HG20'
146 147 compression = 'BZ'
147 148
148 cg = changegroup.changegroupsubset(self.repo, bases, [node], 'shelve',
149 version=cgversion)
149 outgoing = discovery.outgoing(self.repo, missingroots=bases,
150 missingheads=[node])
151 cg = changegroup.makechangegroup(self.repo, outgoing, cgversion,
152 'shelve')
153
150 154 bundle2.writebundle(self.ui, cg, self.fname, btype, self.vfs,
151 155 compression=compression)
152 156
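# Sketch of the writebundle() flow (cf. _shelvecreatedcommit below): the
# discovery.outgoing object names every node between the mutable bases
# and the shelved commit, makechangegroup packs those revisions, and the
# result is written as an HG10BZ bundle for cg version '01' or as an
# HG20 bundle with BZ compression otherwise.
#   bases = list(mutableancestors(repo[node]))
#   shelvedfile(repo, name, 'hg').writebundle(bases, node)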
153 157 def writeobsshelveinfo(self, info):
154 158 scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info)
155 159
156 160 def readobsshelveinfo(self):
157 161 return scmutil.simplekeyvaluefile(self.vfs, self.fname).read()
158 162
159 163 class shelvedstate(object):
160 164 """Handle persistence during unshelving operations.
161 165
162 166 Handles saving and restoring a shelved state. Ensures that different
163 167 versions of a shelved state are possible and handles them appropriately.
164 168 """
165 169 _version = 2
166 170 _filename = 'shelvedstate'
167 171 _keep = 'keep'
168 172 _nokeep = 'nokeep'
169 173 # colon is essential to differentiate from a real bookmark name
170 174 _noactivebook = ':no-active-bookmark'
171 175
172 176 @classmethod
173 177 def _verifyandtransform(cls, d):
174 178 """Some basic shelvestate syntactic verification and transformation"""
175 179 try:
176 180 d['originalwctx'] = nodemod.bin(d['originalwctx'])
177 181 d['pendingctx'] = nodemod.bin(d['pendingctx'])
178 182 d['parents'] = [nodemod.bin(h)
179 183 for h in d['parents'].split(' ')]
180 184 d['nodestoremove'] = [nodemod.bin(h)
181 185 for h in d['nodestoremove'].split(' ')]
182 186 except (ValueError, TypeError, KeyError) as err:
183 187 raise error.CorruptedState(str(err))
184 188
185 189 @classmethod
186 190 def _getversion(cls, repo):
187 191 """Read version information from shelvestate file"""
188 192 fp = repo.vfs(cls._filename)
189 193 try:
190 194 version = int(fp.readline().strip())
191 195 except ValueError as err:
192 196 raise error.CorruptedState(str(err))
193 197 finally:
194 198 fp.close()
195 199 return version
196 200
197 201 @classmethod
198 202 def _readold(cls, repo):
199 203 """Read the old position-based version of a shelvestate file"""
200 204 # Order is important, because old shelvestate file uses it
199 203 # to determine values of fields (e.g. name is on the second line,
202 206 # originalwctx is on the third and so forth). Please do not change.
203 207 keys = ['version', 'name', 'originalwctx', 'pendingctx', 'parents',
204 208 'nodestoremove', 'branchtorestore', 'keep', 'activebook']
205 209 # this is executed only rarely, so it is not a big deal
206 210 # that we open this file twice
207 211 fp = repo.vfs(cls._filename)
208 212 d = {}
209 213 try:
210 214 for key in keys:
211 215 d[key] = fp.readline().strip()
212 216 finally:
213 217 fp.close()
214 218 return d
215 219
216 220 @classmethod
217 221 def load(cls, repo):
218 222 version = cls._getversion(repo)
219 223 if version < cls._version:
220 224 d = cls._readold(repo)
221 225 elif version == cls._version:
222 226 d = scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\
223 227 .read(firstlinenonkeyval=True)
224 228 else:
225 229 raise error.Abort(_('this version of shelve is incompatible '
226 230 'with the version used in this repo'))
227 231
228 232 cls._verifyandtransform(d)
229 233 try:
230 234 obj = cls()
231 235 obj.name = d['name']
232 236 obj.wctx = repo[d['originalwctx']]
233 237 obj.pendingctx = repo[d['pendingctx']]
234 238 obj.parents = d['parents']
235 239 obj.nodestoremove = d['nodestoremove']
236 240 obj.branchtorestore = d.get('branchtorestore', '')
237 241 obj.keep = d.get('keep') == cls._keep
238 242 obj.activebookmark = ''
239 243 if d.get('activebook', '') != cls._noactivebook:
240 244 obj.activebookmark = d.get('activebook', '')
241 245 except (error.RepoLookupError, KeyError) as err:
242 246 raise error.CorruptedState(str(err))
243 247
244 248 return obj
245 249
246 250 @classmethod
247 251 def save(cls, repo, name, originalwctx, pendingctx, nodestoremove,
248 252 branchtorestore, keep=False, activebook=''):
249 253 info = {
250 254 "name": name,
251 255 "originalwctx": nodemod.hex(originalwctx.node()),
252 256 "pendingctx": nodemod.hex(pendingctx.node()),
253 257 "parents": ' '.join([nodemod.hex(p)
254 258 for p in repo.dirstate.parents()]),
255 259 "nodestoremove": ' '.join([nodemod.hex(n)
256 260 for n in nodestoremove]),
257 261 "branchtorestore": branchtorestore,
258 262 "keep": cls._keep if keep else cls._nokeep,
259 263 "activebook": activebook or cls._noactivebook
260 264 }
261 265 scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\
262 266 .write(info, firstline=str(cls._version))
263 267
264 268 @classmethod
265 269 def clear(cls, repo):
266 270 repo.vfs.unlinkpath(cls._filename, ignoremissing=True)
267 271
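# Illustrative .hg/shelvedstate layout at version 2 (a sketch; exact key
# order is up to simplekeyvaluefile, which stores the version on the
# first line and one key=value pair per line after it):
#   2
#   activebook=:no-active-bookmark
#   branchtorestore=
#   keep=nokeep
#   name=default
#   ...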
268 272 def cleanupoldbackups(repo):
269 273 vfs = vfsmod.vfs(repo.vfs.join(backupdir))
270 274 maxbackups = repo.ui.configint('shelve', 'maxbackups', 10)
271 275 hgfiles = [f for f in vfs.listdir()
272 276 if f.endswith('.' + patchextension)]
273 277 hgfiles = sorted([(vfs.stat(f).st_mtime, f) for f in hgfiles])
274 278 if 0 < maxbackups and maxbackups < len(hgfiles):
275 279 bordermtime = hgfiles[-maxbackups][0]
276 280 else:
277 281 bordermtime = None
278 282 for mtime, f in hgfiles[:len(hgfiles) - maxbackups]:
279 283 if mtime == bordermtime:
280 284 # keep it, because timestamp can't decide exact order of backups
281 285 continue
282 286 base = f[:-(1 + len(patchextension))]
283 287 for ext in shelvefileextensions:
284 288 vfs.tryunlink(base + '.' + ext)
285 289
286 290 def _backupactivebookmark(repo):
287 291 activebookmark = repo._activebookmark
288 292 if activebookmark:
289 293 bookmarks.deactivate(repo)
290 294 return activebookmark
291 295
292 296 def _restoreactivebookmark(repo, mark):
293 297 if mark:
294 298 bookmarks.activate(repo, mark)
295 299
296 300 def _aborttransaction(repo):
297 301 '''Abort current transaction for shelve/unshelve, but keep dirstate
298 302 '''
299 303 tr = repo.currenttransaction()
300 304 backupname = 'dirstate.shelve'
301 305 repo.dirstate.savebackup(tr, backupname)
302 306 tr.abort()
303 307 repo.dirstate.restorebackup(None, backupname)
304 308
305 309 def createcmd(ui, repo, pats, opts):
306 310 """subcommand that creates a new shelve"""
307 311 with repo.wlock():
308 312 cmdutil.checkunfinished(repo)
309 313 return _docreatecmd(ui, repo, pats, opts)
310 314
311 315 def getshelvename(repo, parent, opts):
312 316 """Decide on the name this shelve is going to have"""
313 317 def gennames():
314 318 yield label
315 319 for i in itertools.count(1):
316 320 yield '%s-%02d' % (label, i)
317 321 name = opts.get('name')
318 322 label = repo._activebookmark or parent.branch() or 'default'
319 323 # slashes aren't allowed in filenames, therefore we replace them
320 324 label = label.replace('/', '_')
321 325 label = label.replace('\\', '_')
322 326 # filenames must not start with '.', as that would make them hidden
323 327 if label.startswith('.'):
324 328 label = label.replace('.', '_', 1)
325 329
326 330 if name:
327 331 if shelvedfile(repo, name, patchextension).exists():
328 332 e = _("a shelved change named '%s' already exists") % name
329 333 raise error.Abort(e)
330 334
331 335 # ensure we are not creating a subdirectory or a hidden file
332 336 if '/' in name or '\\' in name:
333 337 raise error.Abort(_('shelved change names can not contain slashes'))
334 338 if name.startswith('.'):
335 339 raise error.Abort(_("shelved change names can not start with '.'"))
336 340
337 341 else:
338 342 for n in gennames():
339 343 if not shelvedfile(repo, n, patchextension).exists():
340 344 name = n
341 345 break
342 346
343 347 return name
344 348
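# Example: with active bookmark 'feature/x', the label becomes
# 'feature_x' and gennames() probes 'feature_x', 'feature_x-01',
# 'feature_x-02', ... until an unused shelve name is found.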
345 349 def mutableancestors(ctx):
346 350 """return all mutable ancestors for ctx (included)
347 351
348 352 Much faster than the revset ancestors(ctx) & draft()"""
349 353 seen = {nodemod.nullrev}
350 354 visit = collections.deque()
351 355 visit.append(ctx)
352 356 while visit:
353 357 ctx = visit.popleft()
354 358 yield ctx.node()
355 359 for parent in ctx.parents():
356 360 rev = parent.rev()
357 361 if rev not in seen:
358 362 seen.add(rev)
359 363 if parent.mutable():
360 364 visit.append(parent)
361 365
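# e.g. list(mutableancestors(repo['.'])) yields the node of '.' plus all
# of its draft/secret ancestors, stopping the walk at public commits.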
362 366 def getcommitfunc(extra, interactive, editor=False):
363 367 def commitfunc(ui, repo, message, match, opts):
364 368 hasmq = util.safehasattr(repo, 'mq')
365 369 if hasmq:
366 370 saved, repo.mq.checkapplied = repo.mq.checkapplied, False
367 371 overrides = {('phases', 'new-commit'): phases.secret}
368 372 try:
369 373 editor_ = False
370 374 if editor:
371 375 editor_ = cmdutil.getcommiteditor(editform='shelve.shelve',
372 376 **opts)
373 377 with repo.ui.configoverride(overrides):
374 378 return repo.commit(message, shelveuser, opts.get('date'),
375 379 match, editor=editor_, extra=extra)
376 380 finally:
377 381 if hasmq:
378 382 repo.mq.checkapplied = saved
379 383
380 384 def interactivecommitfunc(ui, repo, *pats, **opts):
381 385 match = scmutil.match(repo['.'], pats, {})
382 386 message = opts['message']
383 387 return commitfunc(ui, repo, message, match, opts)
384 388
385 389 return interactivecommitfunc if interactive else commitfunc
386 390
387 391 def _nothingtoshelvemessaging(ui, repo, pats, opts):
388 392 stat = repo.status(match=scmutil.match(repo[None], pats, opts))
389 393 if stat.deleted:
390 394 ui.status(_("nothing changed (%d missing files, see "
391 395 "'hg status')\n") % len(stat.deleted))
392 396 else:
393 397 ui.status(_("nothing changed\n"))
394 398
395 399 def _shelvecreatedcommit(repo, node, name):
396 400 bases = list(mutableancestors(repo[node]))
397 401 shelvedfile(repo, name, 'hg').writebundle(bases, node)
398 402 cmdutil.export(repo, [node],
399 403 fp=shelvedfile(repo, name, patchextension).opener('wb'),
400 404 opts=mdiff.diffopts(git=True))
401 405
402 406 def _includeunknownfiles(repo, pats, opts, extra):
403 407 s = repo.status(match=scmutil.match(repo[None], pats, opts),
404 408 unknown=True)
405 409 if s.unknown:
406 410 extra['shelve_unknown'] = '\0'.join(s.unknown)
407 411 repo[None].add(s.unknown)
408 412
409 413 def _finishshelve(repo):
410 414 _aborttransaction(repo)
411 415
412 416 def _docreatecmd(ui, repo, pats, opts):
413 417 wctx = repo[None]
414 418 parents = wctx.parents()
415 419 if len(parents) > 1:
416 420 raise error.Abort(_('cannot shelve while merging'))
417 421 parent = parents[0]
418 422 origbranch = wctx.branch()
419 423
420 424 if parent.node() != nodemod.nullid:
421 425 desc = "changes to: %s" % parent.description().split('\n', 1)[0]
422 426 else:
423 427 desc = '(changes in empty repository)'
424 428
425 429 if not opts.get('message'):
426 430 opts['message'] = desc
427 431
428 432 lock = tr = activebookmark = None
429 433 try:
430 434 lock = repo.lock()
431 435
432 436 # use an uncommitted transaction to generate the bundle to avoid
433 437 # pull races. ensure we don't print the abort message to stderr.
434 438 tr = repo.transaction('commit', report=lambda x: None)
435 439
436 440 interactive = opts.get('interactive', False)
437 441 includeunknown = (opts.get('unknown', False) and
438 442 not opts.get('addremove', False))
439 443
440 444 name = getshelvename(repo, parent, opts)
441 445 activebookmark = _backupactivebookmark(repo)
442 446 extra = {}
443 447 if includeunknown:
444 448 _includeunknownfiles(repo, pats, opts, extra)
445 449
446 450 if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
447 451 # In a non-bare shelve we don't store the newly created branch
448 452 # in the bundled commit
449 453 repo.dirstate.setbranch(repo['.'].branch())
450 454
451 455 commitfunc = getcommitfunc(extra, interactive, editor=True)
452 456 if not interactive:
453 457 node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
454 458 else:
455 459 node = cmdutil.dorecord(ui, repo, commitfunc, None,
456 460 False, cmdutil.recordfilter, *pats,
457 461 **opts)
458 462 if not node:
459 463 _nothingtoshelvemessaging(ui, repo, pats, opts)
460 464 return 1
461 465
462 466 _shelvecreatedcommit(repo, node, name)
463 467
464 468 if ui.formatted():
465 469 desc = util.ellipsis(desc, ui.termwidth())
466 470 ui.status(_('shelved as %s\n') % name)
467 471 hg.update(repo, parent.node())
468 472 if origbranch != repo['.'].branch() and not _isbareshelve(pats, opts):
469 473 repo.dirstate.setbranch(origbranch)
470 474
471 475 _finishshelve(repo)
472 476 finally:
473 477 _restoreactivebookmark(repo, activebookmark)
474 478 lockmod.release(tr, lock)
475 479
476 480 def _isbareshelve(pats, opts):
477 481 return (not pats
478 482 and not opts.get('interactive', False)
479 483 and not opts.get('include', False)
480 484 and not opts.get('exclude', False))
481 485
482 486 def _iswctxonnewbranch(repo):
483 487 return repo[None].branch() != repo['.'].branch()
484 488
485 489 def cleanupcmd(ui, repo):
486 490 """subcommand that deletes all shelves"""
487 491
488 492 with repo.wlock():
489 493 for (name, _type) in repo.vfs.readdir(shelvedir):
490 494 suffix = name.rsplit('.', 1)[-1]
491 495 if suffix in shelvefileextensions:
492 496 shelvedfile(repo, name).movetobackup()
493 497 cleanupoldbackups(repo)
494 498
495 499 def deletecmd(ui, repo, pats):
496 500 """subcommand that deletes a specific shelve"""
497 501 if not pats:
498 502 raise error.Abort(_('no shelved changes specified!'))
499 503 with repo.wlock():
500 504 try:
501 505 for name in pats:
502 506 for suffix in shelvefileextensions:
503 507 shfile = shelvedfile(repo, name, suffix)
504 508 # patch file is necessary, as it should
505 509 # be present for any kind of shelve,
506 510 # but the .hg file is optional as in future we
507 511 # will add an obsolete shelve which does not create a
508 512 # bundle
509 513 if shfile.exists() or suffix == patchextension:
510 514 shfile.movetobackup()
511 515 cleanupoldbackups(repo)
512 516 except OSError as err:
513 517 if err.errno != errno.ENOENT:
514 518 raise
515 519 raise error.Abort(_("shelved change '%s' not found") % name)
516 520
517 521 def listshelves(repo):
518 522 """return all shelves in repo as list of (time, filename)"""
519 523 try:
520 524 names = repo.vfs.readdir(shelvedir)
521 525 except OSError as err:
522 526 if err.errno != errno.ENOENT:
523 527 raise
524 528 return []
525 529 info = []
526 530 for (name, _type) in names:
527 531 pfx, sfx = name.rsplit('.', 1)
528 532 if not pfx or sfx != patchextension:
529 533 continue
530 534 st = shelvedfile(repo, name).stat()
531 535 info.append((st.st_mtime, shelvedfile(repo, pfx).filename()))
532 536 return sorted(info, reverse=True)
533 537
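# Shape of the result, newest first (illustrative values):
#   [(1503244800.0, '/repo/.hg/shelved/default-01'),
#    (1503158400.0, '/repo/.hg/shelved/default')]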
534 538 def listcmd(ui, repo, pats, opts):
535 539 """subcommand that displays the list of shelves"""
536 540 pats = set(pats)
537 541 width = 80
538 542 if not ui.plain():
539 543 width = ui.termwidth()
540 544 namelabel = 'shelve.newest'
541 545 ui.pager('shelve')
542 546 for mtime, name in listshelves(repo):
543 547 sname = util.split(name)[1]
544 548 if pats and sname not in pats:
545 549 continue
546 550 ui.write(sname, label=namelabel)
547 551 namelabel = 'shelve.name'
548 552 if ui.quiet:
549 553 ui.write('\n')
550 554 continue
551 555 ui.write(' ' * (16 - len(sname)))
552 556 used = 16
553 557 age = '(%s)' % templatefilters.age(util.makedate(mtime), abbrev=True)
554 558 ui.write(age, label='shelve.age')
555 559 ui.write(' ' * (12 - len(age)))
556 560 used += 12
557 561 with open(name + '.' + patchextension, 'rb') as fp:
558 562 while True:
559 563 line = fp.readline()
560 564 if not line:
561 565 break
562 566 if not line.startswith('#'):
563 567 desc = line.rstrip()
564 568 if ui.formatted():
565 569 desc = util.ellipsis(desc, width - used)
566 570 ui.write(desc)
567 571 break
568 572 ui.write('\n')
569 573 if not (opts['patch'] or opts['stat']):
570 574 continue
571 575 difflines = fp.readlines()
572 576 if opts['patch']:
573 577 for chunk, label in patch.difflabel(iter, difflines):
574 578 ui.write(chunk, label=label)
575 579 if opts['stat']:
576 580 for chunk, label in patch.diffstatui(difflines, width=width):
577 581 ui.write(chunk, label=label)
578 582
579 583 def patchcmds(ui, repo, pats, opts, subcommand):
580 584 """subcommand that displays shelves"""
581 585 if len(pats) == 0:
582 586 raise error.Abort(_("--%s expects at least one shelf") % subcommand)
583 587
584 588 for shelfname in pats:
585 589 if not shelvedfile(repo, shelfname, patchextension).exists():
586 590 raise error.Abort(_("cannot find shelf %s") % shelfname)
587 591
588 592 listcmd(ui, repo, pats, opts)
589 593
590 594 def checkparents(repo, state):
591 595 """check parents while resuming an unshelve"""
592 596 if state.parents != repo.dirstate.parents():
593 597 raise error.Abort(_('working directory parents do not match unshelve '
594 598 'state'))
595 599
596 600 def pathtofiles(repo, files):
597 601 cwd = repo.getcwd()
598 602 return [repo.pathto(f, cwd) for f in files]
599 603
600 604 def unshelveabort(ui, repo, state, opts):
601 605 """subcommand that aborts an in-progress unshelve"""
602 606 with repo.lock():
603 607 try:
604 608 checkparents(repo, state)
605 609
606 610 repo.vfs.rename('unshelverebasestate', 'rebasestate')
607 611 try:
608 612 rebase.rebase(ui, repo, **{
609 613 'abort' : True
610 614 })
611 615 except Exception:
612 616 repo.vfs.rename('rebasestate', 'unshelverebasestate')
613 617 raise
614 618
615 619 mergefiles(ui, repo, state.wctx, state.pendingctx)
616 620 repair.strip(ui, repo, state.nodestoremove, backup=False,
617 621 topic='shelve')
618 622 finally:
619 623 shelvedstate.clear(repo)
620 624 ui.warn(_("unshelve of '%s' aborted\n") % state.name)
621 625
622 626 def mergefiles(ui, repo, wctx, shelvectx):
623 627 """updates to wctx and merges the changes from shelvectx into the
624 628 dirstate."""
625 629 with ui.configoverride({('ui', 'quiet'): True}):
626 630 hg.update(repo, wctx.node())
627 631 files = []
628 632 files.extend(shelvectx.files())
629 633 files.extend(shelvectx.parents()[0].files())
630 634
631 635 # revert will overwrite unknown files, so move them out of the way
632 636 for file in repo.status(unknown=True).unknown:
633 637 if file in files:
634 638 util.rename(file, scmutil.origpath(ui, repo, file))
635 639 ui.pushbuffer(True)
636 640 cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
637 641 *pathtofiles(repo, files),
638 642 **{'no_backup': True})
639 643 ui.popbuffer()
640 644
641 645 def restorebranch(ui, repo, branchtorestore):
642 646 if branchtorestore and branchtorestore != repo.dirstate.branch():
643 647 repo.dirstate.setbranch(branchtorestore)
644 648 ui.status(_('marked working directory as branch %s\n')
645 649 % branchtorestore)
646 650
647 651 def unshelvecleanup(ui, repo, name, opts):
648 652 """remove related files after an unshelve"""
649 653 if not opts.get('keep'):
650 654 for filetype in shelvefileextensions:
651 655 shfile = shelvedfile(repo, name, filetype)
652 656 if shfile.exists():
653 657 shfile.movetobackup()
654 658 cleanupoldbackups(repo)
655 659
656 660 def unshelvecontinue(ui, repo, state, opts):
657 661 """subcommand to continue an in-progress unshelve"""
658 662 # We're finishing off a merge. First parent is our original
659 663 # parent, second is the temporary "fake" commit we're unshelving.
660 664 with repo.lock():
661 665 checkparents(repo, state)
662 666 ms = merge.mergestate.read(repo)
663 667 if list(ms.unresolved()):
664 668 raise error.Abort(
665 669 _("unresolved conflicts, can't continue"),
666 670 hint=_("see 'hg resolve', then 'hg unshelve --continue'"))
667 671
668 672 repo.vfs.rename('unshelverebasestate', 'rebasestate')
669 673 try:
670 674 rebase.rebase(ui, repo, **{
671 675 'continue' : True
672 676 })
673 677 except Exception:
674 678 repo.vfs.rename('rebasestate', 'unshelverebasestate')
675 679 raise
676 680
677 681 shelvectx = repo['tip']
678 682 if state.pendingctx not in shelvectx.parents():
679 683 # rebase was a no-op, so it produced no child commit
680 684 shelvectx = state.pendingctx
681 685 else:
682 686 # only strip the shelvectx if the rebase produced it
683 687 state.nodestoremove.append(shelvectx.node())
684 688
685 689 mergefiles(ui, repo, state.wctx, shelvectx)
686 690 restorebranch(ui, repo, state.branchtorestore)
687 691
688 692 repair.strip(ui, repo, state.nodestoremove, backup=False,
689 693 topic='shelve')
690 694 _restoreactivebookmark(repo, state.activebookmark)
691 695 shelvedstate.clear(repo)
692 696 unshelvecleanup(ui, repo, state.name, opts)
693 697 ui.status(_("unshelve of '%s' complete\n") % state.name)
694 698
695 699 def _commitworkingcopychanges(ui, repo, opts, tmpwctx):
696 700 """Temporarily commit working copy changes before moving unshelve commit"""
697 701 # Store pending changes in a commit and remember added files in case
698 702 # a shelve contains unknown files that are part of the pending change
699 703 s = repo.status()
700 704 addedbefore = frozenset(s.added)
701 705 if not (s.modified or s.added or s.removed):
702 706 return tmpwctx, addedbefore
703 707 ui.status(_("temporarily committing pending changes "
704 708 "(restore with 'hg unshelve --abort')\n"))
705 709 commitfunc = getcommitfunc(extra=None, interactive=False,
706 710 editor=False)
707 711 tempopts = {}
708 712 tempopts['message'] = "pending changes temporary commit"
709 713 tempopts['date'] = opts.get('date')
710 714 with ui.configoverride({('ui', 'quiet'): True}):
711 715 node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
712 716 tmpwctx = repo[node]
713 717 return tmpwctx, addedbefore
714 718
715 719 def _unshelverestorecommit(ui, repo, basename):
716 720 """Recreate commit in the repository during the unshelve"""
717 721 with ui.configoverride({('ui', 'quiet'): True}):
718 722 shelvedfile(repo, basename, 'hg').applybundle()
719 723 shelvectx = repo['tip']
720 724 return repo, shelvectx
721 725
722 726 def _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, basename, pctx,
723 727 tmpwctx, shelvectx, branchtorestore,
724 728 activebookmark):
725 729 """Rebase restored commit from its original location to a destination"""
726 730 # If the shelve is not immediately on top of the commit
727 731 # we'll be merging with, rebase it to be on top.
728 732 if tmpwctx.node() == shelvectx.parents()[0].node():
729 733 return shelvectx
730 734
731 735 ui.status(_('rebasing shelved changes\n'))
732 736 try:
733 737 rebase.rebase(ui, repo, **{
734 738 'rev': [shelvectx.rev()],
735 739 'dest': str(tmpwctx.rev()),
736 740 'keep': True,
737 741 'tool': opts.get('tool', ''),
738 742 })
739 743 except error.InterventionRequired:
740 744 tr.close()
741 745
742 746 nodestoremove = [repo.changelog.node(rev)
743 747 for rev in xrange(oldtiprev, len(repo))]
744 748 shelvedstate.save(repo, basename, pctx, tmpwctx, nodestoremove,
745 749 branchtorestore, opts.get('keep'), activebookmark)
746 750
747 751 repo.vfs.rename('rebasestate', 'unshelverebasestate')
748 752 raise error.InterventionRequired(
749 753 _("unresolved conflicts (see 'hg resolve', then "
750 754 "'hg unshelve --continue')"))
751 755
752 756 # refresh ctx after rebase completes
753 757 shelvectx = repo['tip']
754 758
755 759 if tmpwctx not in shelvectx.parents():
756 760 # rebase was a no-op, so it produced no child commit
757 761 shelvectx = tmpwctx
758 762 return shelvectx
759 763
760 764 def _forgetunknownfiles(repo, shelvectx, addedbefore):
761 765 # Forget any files that were unknown before the shelve, unknown before
762 766 # unshelve started, but are now added.
763 767 shelveunknown = shelvectx.extra().get('shelve_unknown')
764 768 if not shelveunknown:
765 769 return
766 770 shelveunknown = frozenset(shelveunknown.split('\0'))
767 771 addedafter = frozenset(repo.status().added)
768 772 toforget = (addedafter & shelveunknown) - addedbefore
769 773 repo[None].forget(toforget)
770 774
771 775 def _finishunshelve(repo, oldtiprev, tr, activebookmark):
772 776 _restoreactivebookmark(repo, activebookmark)
773 777 # Aborting the transaction will strip all the commits for us,
774 778 # but it doesn't update the inmemory structures, so addchangegroup
775 779 # hooks still fire and try to operate on the missing commits.
776 780 # Clean up manually to prevent this.
777 781 repo.unfiltered().changelog.strip(oldtiprev, tr)
778 782 _aborttransaction(repo)
779 783
780 784 def _checkunshelveuntrackedproblems(ui, repo, shelvectx):
781 785 """Check potential problems which may result from working
782 786 copy having untracked changes."""
783 787 wcdeleted = set(repo.status().deleted)
784 788 shelvetouched = set(shelvectx.files())
785 789 intersection = wcdeleted.intersection(shelvetouched)
786 790 if intersection:
787 791 m = _("shelved change touches missing files")
788 792 hint = _("run hg status to see which files are missing")
789 793 raise error.Abort(m, hint=hint)
790 794
791 795 @command('unshelve',
792 796 [('a', 'abort', None,
793 797 _('abort an incomplete unshelve operation')),
794 798 ('c', 'continue', None,
795 799 _('continue an incomplete unshelve operation')),
796 800 ('k', 'keep', None,
797 801 _('keep shelve after unshelving')),
798 802 ('n', 'name', '',
799 803 _('restore shelved change with given name'), _('NAME')),
800 804 ('t', 'tool', '', _('specify merge tool')),
801 805 ('', 'date', '',
802 806 _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
803 807 _('hg unshelve [[-n] SHELVED]'))
804 808 def unshelve(ui, repo, *shelved, **opts):
805 809 """restore a shelved change to the working directory
806 810
807 811 This command accepts an optional name of a shelved change to
808 812 restore. If none is given, the most recent shelved change is used.
809 813
810 814 If a shelved change is applied successfully, the bundle that
811 815 contains the shelved changes is moved to a backup location
812 816 (.hg/shelve-backup).
813 817
814 818 Since you can restore a shelved change on top of an arbitrary
815 819 commit, it is possible that unshelving will result in a conflict
816 820 between your changes and the commits you are unshelving onto. If
817 821 this occurs, you must resolve the conflict, then use
818 822 ``--continue`` to complete the unshelve operation. (The bundle
819 823 will not be moved until you successfully complete the unshelve.)
820 824
821 825 (Alternatively, you can use ``--abort`` to abandon an unshelve
822 826 that causes a conflict. This reverts the unshelved changes, and
823 827 leaves the bundle in place.)
824 828
825 829 If a bare shelved change (when no files are specified, without the
826 830 interactive, include and exclude options) was made on a newly created
827 831 branch, unshelving restores the branch information to the working directory.
828 832
829 833 After a successful unshelve, the shelved changes are stored in a
830 834 backup directory. Only the N most recent backups are kept. N
831 835 defaults to 10 but can be overridden using the ``shelve.maxbackups``
832 836 configuration option.
833 837
834 838 .. container:: verbose
835 839
836 840 The timestamp in seconds is used to decide the order of backups.
837 841 For safety, more than ``maxbackups`` backups may be kept when
838 842 identical timestamps make their exact order ambiguous.
839 843 """
840 844 with repo.wlock():
841 845 return _dounshelve(ui, repo, *shelved, **opts)
842 846
843 847 def _dounshelve(ui, repo, *shelved, **opts):
844 848 abortf = opts.get('abort')
845 849 continuef = opts.get('continue')
846 850 if not abortf and not continuef:
847 851 cmdutil.checkunfinished(repo)
848 852 shelved = list(shelved)
849 853 if opts.get("name"):
850 854 shelved.append(opts["name"])
851 855
852 856 if abortf or continuef:
853 857 if abortf and continuef:
854 858 raise error.Abort(_('cannot use both abort and continue'))
855 859 if shelved:
856 860 raise error.Abort(_('cannot combine abort/continue with '
857 861 'naming a shelved change'))
858 862 if abortf and opts.get('tool', False):
859 863 ui.warn(_('tool option will be ignored\n'))
860 864
861 865 try:
862 866 state = shelvedstate.load(repo)
863 867 if opts.get('keep') is None:
864 868 opts['keep'] = state.keep
865 869 except IOError as err:
866 870 if err.errno != errno.ENOENT:
867 871 raise
868 872 cmdutil.wrongtooltocontinue(repo, _('unshelve'))
869 873 except error.CorruptedState as err:
870 874 ui.debug(str(err) + '\n')
871 875 if continuef:
872 876 msg = _('corrupted shelved state file')
873 877 hint = _('please run hg unshelve --abort to abort unshelve '
874 878 'operation')
875 879 raise error.Abort(msg, hint=hint)
876 880 elif abortf:
877 881 msg = _('could not read shelved state file, your working copy '
878 882 'may be in an unexpected state\nplease update to some '
879 883 'commit\n')
880 884 ui.warn(msg)
881 885 shelvedstate.clear(repo)
882 886 return
883 887
884 888 if abortf:
885 889 return unshelveabort(ui, repo, state, opts)
886 890 elif continuef:
887 891 return unshelvecontinue(ui, repo, state, opts)
888 892 elif len(shelved) > 1:
889 893 raise error.Abort(_('can only unshelve one change at a time'))
890 894 elif not shelved:
891 895 shelved = listshelves(repo)
892 896 if not shelved:
893 897 raise error.Abort(_('no shelved changes to apply!'))
894 898 basename = util.split(shelved[0][1])[1]
895 899 ui.status(_("unshelving change '%s'\n") % basename)
896 900 else:
897 901 basename = shelved[0]
898 902
899 903 if not shelvedfile(repo, basename, patchextension).exists():
900 904 raise error.Abort(_("shelved change '%s' not found") % basename)
901 905
902 906 lock = tr = None
903 907 try:
904 908 lock = repo.lock()
905 909 tr = repo.transaction('unshelve', report=lambda x: None)
906 910 oldtiprev = len(repo)
907 911
908 912 pctx = repo['.']
909 913 tmpwctx = pctx
910 914 # The goal is to have a commit structure like so:
911 915 # ...-> pctx -> tmpwctx -> shelvectx
912 916 # where tmpwctx is an optional commit with the user's pending changes
913 917 # and shelvectx is the unshelved changes. Then we merge it all down
914 918 # to the original pctx.
915 919
916 920 activebookmark = _backupactivebookmark(repo)
917 921 overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
918 922 with ui.configoverride(overrides, 'unshelve'):
919 923 tmpwctx, addedbefore = _commitworkingcopychanges(ui, repo, opts,
920 924 tmpwctx)
921 925 repo, shelvectx = _unshelverestorecommit(ui, repo, basename)
922 926 _checkunshelveuntrackedproblems(ui, repo, shelvectx)
923 927 branchtorestore = ''
924 928 if shelvectx.branch() != shelvectx.p1().branch():
925 929 branchtorestore = shelvectx.branch()
926 930
927 931 shelvectx = _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev,
928 932 basename, pctx, tmpwctx,
929 933 shelvectx, branchtorestore,
930 934 activebookmark)
931 935 mergefiles(ui, repo, pctx, shelvectx)
932 936 restorebranch(ui, repo, branchtorestore)
933 937 _forgetunknownfiles(repo, shelvectx, addedbefore)
934 938
935 939 shelvedstate.clear(repo)
936 940 _finishunshelve(repo, oldtiprev, tr, activebookmark)
937 941 unshelvecleanup(ui, repo, basename, opts)
938 942 finally:
939 943 if tr:
940 944 tr.release()
941 945 lockmod.release(lock)
942 946
943 947 @command('shelve',
944 948 [('A', 'addremove', None,
945 949 _('mark new/missing files as added/removed before shelving')),
946 950 ('u', 'unknown', None,
947 951 _('store unknown files in the shelve')),
948 952 ('', 'cleanup', None,
949 953 _('delete all shelved changes')),
950 954 ('', 'date', '',
951 955 _('shelve with the specified commit date'), _('DATE')),
952 956 ('d', 'delete', None,
953 957 _('delete the named shelved change(s)')),
954 958 ('e', 'edit', False,
955 959 _('invoke editor on commit messages')),
956 960 ('l', 'list', None,
957 961 _('list current shelves')),
958 962 ('m', 'message', '',
959 963 _('use text as shelve message'), _('TEXT')),
960 964 ('n', 'name', '',
961 965 _('use the given name for the shelved commit'), _('NAME')),
962 966 ('p', 'patch', None,
963 967 _('show patch')),
964 968 ('i', 'interactive', None,
965 969 _('interactive mode, only works while creating a shelve')),
966 970 ('', 'stat', None,
967 971 _('output diffstat-style summary of changes'))] + cmdutil.walkopts,
968 972 _('hg shelve [OPTION]... [FILE]...'))
969 973 def shelvecmd(ui, repo, *pats, **opts):
970 974 '''save and set aside changes from the working directory
971 975
972 976 Shelving takes files that "hg status" reports as not clean, saves
973 977 the modifications to a bundle (a shelved change), and reverts the
974 978 files so that their state in the working directory becomes clean.
975 979
976 980 To restore these changes to the working directory, use "hg
977 981 unshelve"; this will work even if you switch to a different
978 982 commit.
979 983
980 984 When no files are specified, "hg shelve" saves all not-clean
981 985 files. If specific files or directories are named, only changes to
982 986 those files are shelved.
983 987
984 988 In a bare shelve (when no files are specified, without the
985 989 interactive, include and exclude options), shelving remembers whether
986 990 the working directory was on a newly created branch, in other words
987 991 on a different branch than its first parent. In this situation,
988 992 unshelving restores the branch information to the working directory.
989 993
990 994 Each shelved change has a name that makes it easier to find later.
991 995 The name of a shelved change defaults to being based on the active
992 996 bookmark, or if there is no active bookmark, the current named
993 997 branch. To specify a different name, use ``--name``.
994 998
995 999 To see a list of existing shelved changes, use the ``--list``
996 1000 option. For each shelved change, this will print its name, age,
997 1001 and description; use ``--patch`` or ``--stat`` for more details.
998 1002
999 1003 To delete specific shelved changes, use ``--delete``. To delete
1000 1004 all shelved changes, use ``--cleanup``.
1001 1005 '''
1002 1006 allowables = [
1003 1007 ('addremove', {'create'}), # 'create' is pseudo action
1004 1008 ('unknown', {'create'}),
1005 1009 ('cleanup', {'cleanup'}),
1006 1010 # ('date', {'create'}), # ignored for passing '--date "0 0"' in tests
1007 1011 ('delete', {'delete'}),
1008 1012 ('edit', {'create'}),
1009 1013 ('list', {'list'}),
1010 1014 ('message', {'create'}),
1011 1015 ('name', {'create'}),
1012 1016 ('patch', {'patch', 'list'}),
1013 1017 ('stat', {'stat', 'list'}),
1014 1018 ]
1015 1019 def checkopt(opt):
1016 1020 if opts.get(opt):
1017 1021 for i, allowable in allowables:
1018 1022 if opts[i] and opt not in allowable:
1019 1023 raise error.Abort(_("options '--%s' and '--%s' may not be "
1020 1024 "used together") % (opt, i))
1021 1025 return True
1022 1026 if checkopt('cleanup'):
1023 1027 if pats:
1024 1028 raise error.Abort(_("cannot specify names when using '--cleanup'"))
1025 1029 return cleanupcmd(ui, repo)
1026 1030 elif checkopt('delete'):
1027 1031 return deletecmd(ui, repo, pats)
1028 1032 elif checkopt('list'):
1029 1033 return listcmd(ui, repo, pats, opts)
1030 1034 elif checkopt('patch'):
1031 1035 return patchcmds(ui, repo, pats, opts, subcommand='patch')
1032 1036 elif checkopt('stat'):
1033 1037 return patchcmds(ui, repo, pats, opts, subcommand='stat')
1034 1038 else:
1035 1039 return createcmd(ui, repo, pats, opts)
1036 1040
1037 1041 def extsetup(ui):
1038 1042 cmdutil.unfinishedstates.append(
1039 1043 [shelvedstate._filename, False, False,
1040 1044 _('unshelve already in progress'),
1041 1045 _("use 'hg unshelve --continue' or 'hg unshelve --abort'")])
1042 1046 cmdutil.afterresolvedstates.append(
1043 1047 [shelvedstate._filename, _('hg unshelve --continue')])
@@ -1,1030 +1,1016
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11 import struct
12 12 import tempfile
13 13 import weakref
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullrev,
19 19 short,
20 20 )
21 21
22 22 from . import (
23 23 dagutil,
24 24 discovery,
25 25 error,
26 26 mdiff,
27 27 phases,
28 28 pycompat,
29 29 util,
30 30 )
31 31
32 32 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
33 33 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
34 34 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
35 35
36 36 def readexactly(stream, n):
37 37 '''read n bytes from stream.read and abort if less was available'''
38 38 s = stream.read(n)
39 39 if len(s) < n:
40 40 raise error.Abort(_("stream ended unexpectedly"
41 41 " (got %d bytes, expected %d)")
42 42 % (len(s), n))
43 43 return s
44 44
45 45 def getchunk(stream):
46 46 """return the next chunk from stream as a string"""
47 47 d = readexactly(stream, 4)
48 48 l = struct.unpack(">l", d)[0]
49 49 if l <= 4:
50 50 if l:
51 51 raise error.Abort(_("invalid chunk length %d") % l)
52 52 return ""
53 53 return readexactly(stream, l - 4)
54 54
55 55 def chunkheader(length):
56 56 """return a changegroup chunk header (string)"""
57 57 return struct.pack(">l", length + 4)
58 58
59 59 def closechunk():
60 60 """return a changegroup chunk header (string) for a zero-length chunk"""
61 61 return struct.pack(">l", 0)
62 62
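# Framing sketch: a chunk is a big-endian int32 length (which counts the
# 4 length bytes themselves) followed by the payload:
#   chunkheader(len(data)) + data     # one chunk on the wire
#   closechunk()                      # struct.pack(">l", 0) terminator
# getchunk() mirrors this: any length <= 4 denotes an empty chunk.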
63 63 def writechunks(ui, chunks, filename, vfs=None):
64 64 """Write chunks to a file and return its filename.
65 65
66 66 The stream is assumed to be a bundle file.
67 67 Existing files will not be overwritten.
68 68 If no filename is specified, a temporary file is created.
69 69 """
70 70 fh = None
71 71 cleanup = None
72 72 try:
73 73 if filename:
74 74 if vfs:
75 75 fh = vfs.open(filename, "wb")
76 76 else:
77 77 # Increase default buffer size because default is usually
78 78 # small (4k is common on Linux).
79 79 fh = open(filename, "wb", 131072)
80 80 else:
81 81 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
82 82 fh = os.fdopen(fd, pycompat.sysstr("wb"))
83 83 cleanup = filename
84 84 for c in chunks:
85 85 fh.write(c)
86 86 cleanup = None
87 87 return filename
88 88 finally:
89 89 if fh is not None:
90 90 fh.close()
91 91 if cleanup is not None:
92 92 if filename and vfs:
93 93 vfs.unlink(cleanup)
94 94 else:
95 95 os.unlink(cleanup)
96 96
97 97 class cg1unpacker(object):
98 98 """Unpacker for cg1 changegroup streams.
99 99
100 100 A changegroup unpacker handles the framing of the revision data in
101 101 the wire format. Most consumers will want to use the apply()
102 102 method to add the changes from the changegroup to a repository.
103 103
104 104 If you're forwarding a changegroup unmodified to another consumer,
105 105 use getchunks(), which returns an iterator of changegroup
106 106 chunks. This is mostly useful for cases where you need to know the
107 107 data stream has ended by observing the end of the changegroup.
108 108
109 109 deltachunk() is useful only if you're applying delta data. Most
110 110 consumers should prefer apply() instead.
111 111
112 112 A few other public methods exist. Those are used only for
113 113 bundlerepo and some debug commands - their use is discouraged.
114 114 """
115 115 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
116 116 deltaheadersize = struct.calcsize(deltaheader)
117 117 version = '01'
118 118 _grouplistcount = 1 # One list of files after the manifests
119 119
120 120 def __init__(self, fh, alg, extras=None):
121 121 if alg is None:
122 122 alg = 'UN'
123 123 if alg not in util.compengines.supportedbundletypes:
124 124 raise error.Abort(_('unknown stream compression type: %s')
125 125 % alg)
126 126 if alg == 'BZ':
127 127 alg = '_truncatedBZ'
128 128
129 129 compengine = util.compengines.forbundletype(alg)
130 130 self._stream = compengine.decompressorreader(fh)
131 131 self._type = alg
132 132 self.extras = extras or {}
133 133 self.callback = None
134 134
135 135 # These methods (compressed, read, seek, tell) all appear to only
136 136 # be used by bundlerepo, but it's a little hard to tell.
137 137 def compressed(self):
138 138 return self._type is not None and self._type != 'UN'
139 139 def read(self, l):
140 140 return self._stream.read(l)
141 141 def seek(self, pos):
142 142 return self._stream.seek(pos)
143 143 def tell(self):
144 144 return self._stream.tell()
145 145 def close(self):
146 146 return self._stream.close()
147 147
148 148 def _chunklength(self):
149 149 d = readexactly(self._stream, 4)
150 150 l = struct.unpack(">l", d)[0]
151 151 if l <= 4:
152 152 if l:
153 153 raise error.Abort(_("invalid chunk length %d") % l)
154 154 return 0
155 155 if self.callback:
156 156 self.callback()
157 157 return l - 4
158 158
159 159 def changelogheader(self):
160 160 """v10 does not have a changelog header chunk"""
161 161 return {}
162 162
163 163 def manifestheader(self):
164 164 """v10 does not have a manifest header chunk"""
165 165 return {}
166 166
167 167 def filelogheader(self):
168 168 """return the header of the filelogs chunk, v10 only has the filename"""
169 169 l = self._chunklength()
170 170 if not l:
171 171 return {}
172 172 fname = readexactly(self._stream, l)
173 173 return {'filename': fname}
174 174
175 175 def _deltaheader(self, headertuple, prevnode):
176 176 node, p1, p2, cs = headertuple
177 177 if prevnode is None:
178 178 deltabase = p1
179 179 else:
180 180 deltabase = prevnode
181 181 flags = 0
182 182 return node, p1, p2, deltabase, cs, flags
183 183
184 184 def deltachunk(self, prevnode):
185 185 l = self._chunklength()
186 186 if not l:
187 187 return {}
188 188 headerdata = readexactly(self._stream, self.deltaheadersize)
189 189 header = struct.unpack(self.deltaheader, headerdata)
190 190 delta = readexactly(self._stream, l - self.deltaheadersize)
191 191 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
192 192 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
193 193 'deltabase': deltabase, 'delta': delta, 'flags': flags}
194 194
195 195 def getchunks(self):
196 196 """returns all the chunks contained in the bundle
197 197
198 198 Used when you need to forward the binary stream to a file or another
199 199 network API. To do so, it parses the changegroup data; otherwise it
200 200 would block on an sshrepo because it doesn't know where the stream ends.
201 201 """
202 202 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
203 203 # and a list of filelogs. For changegroup 3, we expect 4 parts:
204 204 # changelog, manifestlog, a list of tree manifestlogs, and a list of
205 205 # filelogs.
206 206 #
207 207 # Changelog and manifestlog parts are terminated with empty chunks. The
208 208 # tree and file parts are a list of entry sections. Each entry section
209 209 # is a series of chunks terminating in an empty chunk. The list of these
210 210 # entry sections is terminated in yet another empty chunk, so we know
211 211 # we've reached the end of the tree/file list when we reach an empty
212 212 # chunk that was preceded by no non-empty chunks.
213 213
214 214 parts = 0
215 215 while parts < 2 + self._grouplistcount:
216 216 noentries = True
217 217 while True:
218 218 chunk = getchunk(self)
219 219 if not chunk:
220 220 # The first two empty chunks represent the end of the
221 221 # changelog and the manifestlog portions. The remaining
222 222 # empty chunks represent either A) the end of individual
223 223 # tree or file entries in the file list, or B) the end of
224 224 # the entire list. It's the end of the entire list if there
225 225 # were no entries (i.e. noentries is True).
226 226 if parts < 2:
227 227 parts += 1
228 228 elif noentries:
229 229 parts += 1
230 230 break
231 231 noentries = False
232 232 yield chunkheader(len(chunk))
233 233 pos = 0
234 234 while pos < len(chunk):
235 235 next = pos + 2**20
236 236 yield chunk[pos:next]
237 237 pos = next
238 238 yield closechunk()
239 239
240 240 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
241 241 # We know that we'll never have more manifests than we had
242 242 # changesets.
243 243 self.callback = prog(_('manifests'), numchanges)
244 244 # no need to check for empty manifest group here:
245 245 # if the result of the merge of 1 and 2 is the same in 3 and 4,
246 246 # no new manifest will be created and the manifest group will
247 247 # be empty during the pull
248 248 self.manifestheader()
249 249 repo.manifestlog._revlog.addgroup(self, revmap, trp)
250 250 repo.ui.progress(_('manifests'), None)
251 251 self.callback = None
252 252
253 253 def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
254 254 expectedtotal=None):
255 255 """Add the changegroup returned by source.read() to this repo.
256 256 srctype is a string like 'push', 'pull', or 'unbundle'. url is
257 257 the URL of the repo where this changegroup is coming from.
258 258
259 259 Return an integer summarizing the change to this repo:
260 260 - nothing changed or no source: 0
261 261 - more heads than before: 1+added heads (2..n)
262 262 - fewer heads than before: -1-removed heads (-2..-n)
263 263 - number of heads stays the same: 1
264 264 """
265 265 repo = repo.unfiltered()
266 266 def csmap(x):
267 267 repo.ui.debug("add changeset %s\n" % short(x))
268 268 return len(cl)
269 269
270 270 def revmap(x):
271 271 return cl.rev(x)
272 272
273 273 changesets = files = revisions = 0
274 274
275 275 try:
276 276 # The transaction may already carry source information. In this
277 277 # case we use the top level data. We overwrite the argument
278 278 # because we need to use the top level values (if they exist)
279 279 # in this function.
280 280 srctype = tr.hookargs.setdefault('source', srctype)
281 281 url = tr.hookargs.setdefault('url', url)
282 282 repo.hook('prechangegroup',
283 283 throw=True, **pycompat.strkwargs(tr.hookargs))
284 284
285 285 # write changelog data to temp files so concurrent readers
286 286 # will not see an inconsistent view
287 287 cl = repo.changelog
288 288 cl.delayupdate(tr)
289 289 oldheads = set(cl.heads())
290 290
291 291 trp = weakref.proxy(tr)
292 292 # pull off the changeset group
293 293 repo.ui.status(_("adding changesets\n"))
294 294 clstart = len(cl)
295 295 class prog(object):
296 296 def __init__(self, step, total):
297 297 self._step = step
298 298 self._total = total
299 299 self._count = 1
300 300 def __call__(self):
301 301 repo.ui.progress(self._step, self._count, unit=_('chunks'),
302 302 total=self._total)
303 303 self._count += 1
304 304 self.callback = prog(_('changesets'), expectedtotal)
305 305
306 306 efiles = set()
307 307 def onchangelog(cl, node):
308 308 efiles.update(cl.readfiles(node))
309 309
310 310 self.changelogheader()
311 311 cgnodes = cl.addgroup(self, csmap, trp, addrevisioncb=onchangelog)
312 312 efiles = len(efiles)
313 313
314 314 if not cgnodes:
315 315 repo.ui.develwarn('applied empty changegroup',
316 316 config='empty-changegroup')
317 317 clend = len(cl)
318 318 changesets = clend - clstart
319 319 repo.ui.progress(_('changesets'), None)
320 320 self.callback = None
321 321
322 322 # pull off the manifest group
323 323 repo.ui.status(_("adding manifests\n"))
324 324 self._unpackmanifests(repo, revmap, trp, prog, changesets)
325 325
326 326 needfiles = {}
327 327 if repo.ui.configbool('server', 'validate'):
328 328 cl = repo.changelog
329 329 ml = repo.manifestlog
330 330 # validate incoming csets have their manifests
331 331 for cset in xrange(clstart, clend):
332 332 mfnode = cl.changelogrevision(cset).manifest
333 333 mfest = ml[mfnode].readdelta()
334 334 # store file cgnodes we must see
335 335 for f, n in mfest.iteritems():
336 336 needfiles.setdefault(f, set()).add(n)
337 337
338 338 # process the files
339 339 repo.ui.status(_("adding file changes\n"))
340 340 newrevs, newfiles = _addchangegroupfiles(
341 341 repo, self, revmap, trp, efiles, needfiles)
342 342 revisions += newrevs
343 343 files += newfiles
344 344
345 345 deltaheads = 0
346 346 if oldheads:
347 347 heads = cl.heads()
348 348 deltaheads = len(heads) - len(oldheads)
349 349 for h in heads:
350 350 if h not in oldheads and repo[h].closesbranch():
351 351 deltaheads -= 1
352 352 htext = ""
353 353 if deltaheads:
354 354 htext = _(" (%+d heads)") % deltaheads
355 355
356 356 repo.ui.status(_("added %d changesets"
357 357 " with %d changes to %d files%s\n")
358 358 % (changesets, revisions, files, htext))
359 359 repo.invalidatevolatilesets()
360 360
361 361 if changesets > 0:
362 362 if 'node' not in tr.hookargs:
363 363 tr.hookargs['node'] = hex(cl.node(clstart))
364 364 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
365 365 hookargs = dict(tr.hookargs)
366 366 else:
367 367 hookargs = dict(tr.hookargs)
368 368 hookargs['node'] = hex(cl.node(clstart))
369 369 hookargs['node_last'] = hex(cl.node(clend - 1))
370 370 repo.hook('pretxnchangegroup',
371 371 throw=True, **pycompat.strkwargs(hookargs))
372 372
373 373 added = [cl.node(r) for r in xrange(clstart, clend)]
374 374 phaseall = None
375 375 if srctype in ('push', 'serve'):
376 376 # Old servers can not push the boundary themselves.
377 377 # New servers won't push the boundary if the changeset already
378 378 # exists locally as secret
379 379 #
380 380 # We should not use added here but the list of all changes in
381 381 # the bundle
382 382 if repo.publishing():
383 383 targetphase = phaseall = phases.public
384 384 else:
385 385 # closer target phase computation
386 386
387 387 # Those changesets have been pushed from the
388 388 # outside, their phases are going to be pushed
389 389 # alongside. Therefore `targetphase` is
390 390 # ignored.
391 391 targetphase = phaseall = phases.draft
392 392 if added:
393 393 phases.registernew(repo, tr, targetphase, added)
394 394 if phaseall is not None:
395 395 phases.advanceboundary(repo, tr, phaseall, cgnodes)
396 396
397 397 if changesets > 0:
398 398
399 399 def runhooks():
400 400 # These hooks run when the lock releases, not when the
401 401 # transaction closes. So it's possible for the changelog
402 402 # to have changed since we last saw it.
403 403 if clstart >= len(repo):
404 404 return
405 405
406 406 repo.hook("changegroup", **pycompat.strkwargs(hookargs))
407 407
408 408 for n in added:
409 409 args = hookargs.copy()
410 410 args['node'] = hex(n)
411 411 del args['node_last']
412 412 repo.hook("incoming", **pycompat.strkwargs(args))
413 413
414 414 newheads = [h for h in repo.heads()
415 415 if h not in oldheads]
416 416 repo.ui.log("incoming",
417 417 "%s incoming changes - new heads: %s\n",
418 418 len(added),
419 419 ', '.join([hex(c[:6]) for c in newheads]))
420 420
421 421 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
422 422 lambda tr: repo._afterlock(runhooks))
423 423 finally:
424 424 repo.ui.flush()
425 425 # never return 0 here:
426 426 if deltaheads < 0:
427 427 ret = deltaheads - 1
428 428 else:
429 429 ret = deltaheads + 1
430 430 return ret
431 431
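# Worked example of the return value: applying a changegroup that adds
# two new heads gives deltaheads=2 and ret=3; one that removes a head
# gives deltaheads=-1 and ret=-2; an unchanged head count returns 1, so
# a successful apply never returns 0.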
432 432 class cg2unpacker(cg1unpacker):
433 433 """Unpacker for cg2 streams.
434 434
435 435 cg2 streams add support for generaldelta, so the delta header
436 436 format is slightly different. All other features about the data
437 437 remain the same.
438 438 """
439 439 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
440 440 deltaheadersize = struct.calcsize(deltaheader)
441 441 version = '02'
442 442
443 443 def _deltaheader(self, headertuple, prevnode):
444 444 node, p1, p2, deltabase, cs = headertuple
445 445 flags = 0
446 446 return node, p1, p2, deltabase, cs, flags
447 447
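# cg1 vs cg2 delta header sketch (see the struct formats at the top of
# the file): cg1 carries (node, p1, p2, cs) and infers the delta base as
# p1 or the previously sent node, while cg2 names the base explicitly to
# support generaldelta:
#   node, p1, p2, cs = struct.unpack("20s20s20s20s", header)
#   node, p1, p2, deltabase, cs = struct.unpack("20s20s20s20s20s", header)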
448 448 class cg3unpacker(cg2unpacker):
449 449 """Unpacker for cg3 streams.
450 450
451 451 cg3 streams add support for exchanging treemanifests and revlog
452 452 flags. It adds the revlog flags to the delta header and an empty chunk
453 453 separating manifests and files.
454 454 """
455 455 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
456 456 deltaheadersize = struct.calcsize(deltaheader)
457 457 version = '03'
458 458 _grouplistcount = 2 # One list of manifests and one list of files
459 459
460 460 def _deltaheader(self, headertuple, prevnode):
461 461 node, p1, p2, deltabase, cs, flags = headertuple
462 462 return node, p1, p2, deltabase, cs, flags
463 463
464 464 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
465 465 super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
466 466 numchanges)
467 467 for chunkdata in iter(self.filelogheader, {}):
468 468 # If we get here, there are directory manifests in the changegroup
469 469 d = chunkdata["filename"]
470 470 repo.ui.debug("adding %s revisions\n" % d)
471 471 dirlog = repo.manifestlog._revlog.dirlog(d)
472 472 if not dirlog.addgroup(self, revmap, trp):
473 473 raise error.Abort(_("received dir revlog group is empty"))
474 474
475 475 class headerlessfixup(object):
476 476 def __init__(self, fh, h):
477 477 self._h = h
478 478 self._fh = fh
479 479 def read(self, n):
480 480 if self._h:
481 481 d, self._h = self._h[:n], self._h[n:]
482 482 if len(d) < n:
483 483 d += readexactly(self._fh, n - len(d))
484 484 return d
485 485 return readexactly(self._fh, n)
486 486
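# A standalone sketch (annotation, not part of this change; the stream
# content is hypothetical) of the peek-then-replay pattern headerlessfixup
# enables: consume the stream header to pick a decoder, then glue the
# consumed bytes back on so downstream readers see the full stream.
def _headerlessfixupexample():
    import io
    raw = io.BytesIO(b'HG10UN<payload>')
    header = raw.read(6)                  # peek at e.g. 'HG10UN'
    fixed = headerlessfixup(raw, header)  # re-attach the consumed bytes
    assert fixed.read(6) == b'HG10UN'     # reads replay the header first
    return fixed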
487 487 class cg1packer(object):
488 488 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
489 489 version = '01'
490 490 def __init__(self, repo, bundlecaps=None):
491 491 """Given a source repo, construct a bundler.
492 492
493 493 bundlecaps is optional and can be used to specify the set of
494 494 capabilities which can be used to build the bundle. While bundlecaps is
495 495 unused in core Mercurial, extensions rely on this feature to communicate
496 496 capabilities to customize the changegroup packer.
497 497 """
498 498 # Set of capabilities we can use to build the bundle.
499 499 if bundlecaps is None:
500 500 bundlecaps = set()
501 501 self._bundlecaps = bundlecaps
502 502 # experimental config: bundle.reorder
503 503 reorder = repo.ui.config('bundle', 'reorder')
504 504 if reorder == 'auto':
505 505 reorder = None
506 506 else:
507 507 reorder = util.parsebool(reorder)
508 508 self._repo = repo
509 509 self._reorder = reorder
510 510 self._progress = repo.ui.progress
511 511 if self._repo.ui.verbose and not self._repo.ui.debugflag:
512 512 self._verbosenote = self._repo.ui.note
513 513 else:
514 514 self._verbosenote = lambda s: None
515 515
516 516 def close(self):
517 517 return closechunk()
518 518
519 519 def fileheader(self, fname):
520 520 return chunkheader(len(fname)) + fname
521 521
522 522 # Extracted both for clarity and for overriding in extensions.
523 523 def _sortgroup(self, revlog, nodelist, lookup):
524 524 """Sort nodes for change group and turn them into revnums."""
525 525 # for generaldelta revlogs, we linearize the revs; this will both be
526 526 # much quicker and generate a much smaller bundle
527 527 if (revlog._generaldelta and self._reorder is None) or self._reorder:
528 528 dag = dagutil.revlogdag(revlog)
529 529 return dag.linearize(set(revlog.rev(n) for n in nodelist))
530 530 else:
531 531 return sorted([revlog.rev(n) for n in nodelist])
532 532
533 533 def group(self, nodelist, revlog, lookup, units=None):
534 534 """Calculate a delta group, yielding a sequence of changegroup chunks
535 535 (strings).
536 536
537 537 Given a list of changeset revs, return a set of deltas and
538 538 metadata corresponding to nodes. The first delta is
539 539 first parent(nodelist[0]) -> nodelist[0]; the receiver is
540 540 guaranteed to have this parent as it has all history before
541 541 these changesets. In the case the first parent is nullrev, the
542 542 changegroup starts with a full revision.
543 543
544 544 If units is not None, progress detail will be generated; units specifies
545 545 the type of revlog that is touched (changelog, manifest, etc.).
546 546 """
547 547 # if we don't have any revisions touched by these changesets, bail
548 548 if len(nodelist) == 0:
549 549 yield self.close()
550 550 return
551 551
552 552 revs = self._sortgroup(revlog, nodelist, lookup)
553 553
554 554 # add the parent of the first rev
555 555 p = revlog.parentrevs(revs[0])[0]
556 556 revs.insert(0, p)
557 557
558 558 # build deltas
559 559 total = len(revs) - 1
560 560 msgbundling = _('bundling')
561 561 for r in xrange(len(revs) - 1):
562 562 if units is not None:
563 563 self._progress(msgbundling, r + 1, unit=units, total=total)
564 564 prev, curr = revs[r], revs[r + 1]
565 565 linknode = lookup(revlog.node(curr))
566 566 for c in self.revchunk(revlog, curr, prev, linknode):
567 567 yield c
568 568
569 569 if units is not None:
570 570 self._progress(msgbundling, None)
571 571 yield self.close()
572 572
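# A minimal sketch (annotation, not part of this change) of the framing the
# generator above emits: each chunk is preceded by a 4-byte big-endian
# length that counts the length field itself, and a zero length (written by
# closechunk()) terminates the group.
def _iterchunks(fh):
    while True:
        header = readexactly(fh, 4)
        length = struct.unpack(">l", header)[0]
        if length <= 4:  # closechunk() wrote struct.pack(">l", 0)
            return
        yield readexactly(fh, length - 4)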
573 573 # filter any nodes that claim to be part of the known set
574 574 def prune(self, revlog, missing, commonrevs):
575 575 rr, rl = revlog.rev, revlog.linkrev
576 576 return [n for n in missing if rl(rr(n)) not in commonrevs]
577 577
578 578 def _packmanifests(self, dir, mfnodes, lookuplinknode):
579 579 """Pack flat manifests into a changegroup stream."""
580 580 assert not dir
581 581 for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
582 582 lookuplinknode, units=_('manifests')):
583 583 yield chunk
584 584
585 585 def _manifestsdone(self):
586 586 return ''
587 587
588 588 def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
589 589 '''yield a sequence of changegroup chunks (strings)'''
590 590 repo = self._repo
591 591 cl = repo.changelog
592 592
593 593 clrevorder = {}
594 594 mfs = {} # needed manifests
595 595 fnodes = {} # needed file nodes
596 596 changedfiles = set()
597 597
598 598 # Callback for the changelog, used to collect changed files and manifest
599 599 # nodes.
600 600 # Returns the linkrev node (identity in the changelog case).
601 601 def lookupcl(x):
602 602 c = cl.read(x)
603 603 clrevorder[x] = len(clrevorder)
604 604 n = c[0]
605 605 # record the first changeset introducing this manifest version
606 606 mfs.setdefault(n, x)
607 607 # Record a complete list of potentially-changed files in
608 608 # this manifest.
609 609 changedfiles.update(c[3])
610 610 return x
611 611
612 612 self._verbosenote(_('uncompressed size of bundle content:\n'))
613 613 size = 0
614 614 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
615 615 size += len(chunk)
616 616 yield chunk
617 617 self._verbosenote(_('%8.i (changelog)\n') % size)
618 618
619 619 # We need to make sure that the linkrev in the changegroup refers to
620 620 # the first changeset that introduced the manifest or file revision.
621 621 # The fastpath is usually safer than the slowpath, because the filelogs
622 622 # are walked in revlog order.
623 623 #
624 624 # When taking the slowpath with reorder=None and the manifest revlog
625 625 # uses generaldelta, the manifest may be walked in the "wrong" order.
626 626 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
627 627 # cc0ff93d0c0c).
628 628 #
629 629 # When taking the fastpath, we are only vulnerable to reordering
630 630 # of the changelog itself. The changelog never uses generaldelta, so
631 631 # it is only reordered when reorder=True. To handle this case, we
632 632 # simply take the slowpath, which already has the 'clrevorder' logic.
633 633 # This was also fixed in cc0ff93d0c0c.
634 634 fastpathlinkrev = fastpathlinkrev and not self._reorder
635 635 # Treemanifests don't work correctly with fastpathlinkrev
636 636 # either, because we don't discover which directory nodes to
637 637 # send along with files. This could probably be fixed.
638 638 fastpathlinkrev = fastpathlinkrev and (
639 639 'treemanifest' not in repo.requirements)
640 640
641 641 for chunk in self.generatemanifests(commonrevs, clrevorder,
642 642 fastpathlinkrev, mfs, fnodes):
643 643 yield chunk
644 644 mfs.clear()
645 645 clrevs = set(cl.rev(x) for x in clnodes)
646 646
647 647 if not fastpathlinkrev:
648 648 def linknodes(unused, fname):
649 649 return fnodes.get(fname, {})
650 650 else:
651 651 cln = cl.node
652 652 def linknodes(filerevlog, fname):
653 653 llr = filerevlog.linkrev
654 654 fln = filerevlog.node
655 655 revs = ((r, llr(r)) for r in filerevlog)
656 656 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
657 657
658 658 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
659 659 source):
660 660 yield chunk
661 661
662 662 yield self.close()
663 663
664 664 if clnodes:
665 665 repo.hook('outgoing', node=hex(clnodes[0]), source=source)
666 666
667 667 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
668 668 fnodes):
669 669 repo = self._repo
670 670 mfl = repo.manifestlog
671 671 dirlog = mfl._revlog.dirlog
672 672 tmfnodes = {'': mfs}
673 673
674 674 # Callback for the manifest, used to collect linkrevs for filelog
675 675 # revisions.
676 676 # Returns the linkrev node (collected in lookupcl).
677 677 def makelookupmflinknode(dir):
678 678 if fastpathlinkrev:
679 679 assert not dir
680 680 return mfs.__getitem__
681 681
682 682 def lookupmflinknode(x):
683 683 """Callback for looking up the linknode for manifests.
684 684
685 685 Returns the linkrev node for the specified manifest.
686 686
687 687 SIDE EFFECT:
688 688
689 689 1) fclnodes gets populated with the list of relevant
690 690 file nodes if we're not using fastpathlinkrev
691 691 2) When treemanifests are in use, collects treemanifest nodes
692 692 to send
693 693
694 694 Note that this means manifests must be completely sent to
695 695 the client before you can trust the list of files and
696 696 treemanifests to send.
697 697 """
698 698 clnode = tmfnodes[dir][x]
699 699 mdata = mfl.get(dir, x).readfast(shallow=True)
700 700 for p, n, fl in mdata.iterentries():
701 701 if fl == 't': # subdirectory manifest
702 702 subdir = dir + p + '/'
703 703 tmfclnodes = tmfnodes.setdefault(subdir, {})
704 704 tmfclnode = tmfclnodes.setdefault(n, clnode)
705 705 if clrevorder[clnode] < clrevorder[tmfclnode]:
706 706 tmfclnodes[n] = clnode
707 707 else:
708 708 f = dir + p
709 709 fclnodes = fnodes.setdefault(f, {})
710 710 fclnode = fclnodes.setdefault(n, clnode)
711 711 if clrevorder[clnode] < clrevorder[fclnode]:
712 712 fclnodes[n] = clnode
713 713 return clnode
714 714 return lookupmflinknode
715 715
716 716 size = 0
717 717 while tmfnodes:
718 718 dir = min(tmfnodes)
719 719 nodes = tmfnodes[dir]
720 720 prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
721 721 if not dir or prunednodes:
722 722 for x in self._packmanifests(dir, prunednodes,
723 723 makelookupmflinknode(dir)):
724 724 size += len(x)
725 725 yield x
726 726 del tmfnodes[dir]
727 727 self._verbosenote(_('%8.i (manifests)\n') % size)
728 728 yield self._manifestsdone()
729 729
730 730 # The 'source' parameter is useful for extensions
731 731 def generatefiles(self, changedfiles, linknodes, commonrevs, source):
732 732 repo = self._repo
733 733 progress = self._progress
734 734 msgbundling = _('bundling')
735 735
736 736 total = len(changedfiles)
737 737 # for progress output
738 738 msgfiles = _('files')
739 739 for i, fname in enumerate(sorted(changedfiles)):
740 740 filerevlog = repo.file(fname)
741 741 if not filerevlog:
742 742 raise error.Abort(_("empty or missing revlog for %s") % fname)
743 743
744 744 linkrevnodes = linknodes(filerevlog, fname)
745 745 # Lookup for filenodes; we collected the linkrev nodes above in the
746 746 # fastpath case and with lookupmflinknode in the slowpath case.
747 747 def lookupfilelog(x):
748 748 return linkrevnodes[x]
749 749
750 750 filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
751 751 if filenodes:
752 752 progress(msgbundling, i + 1, item=fname, unit=msgfiles,
753 753 total=total)
754 754 h = self.fileheader(fname)
755 755 size = len(h)
756 756 yield h
757 757 for chunk in self.group(filenodes, filerevlog, lookupfilelog):
758 758 size += len(chunk)
759 759 yield chunk
760 760 self._verbosenote(_('%8.i %s\n') % (size, fname))
761 761 progress(msgbundling, None)
762 762
763 763 def deltaparent(self, revlog, rev, p1, p2, prev):
764 764 return prev
765 765
766 766 def revchunk(self, revlog, rev, prev, linknode):
767 767 node = revlog.node(rev)
768 768 p1, p2 = revlog.parentrevs(rev)
769 769 base = self.deltaparent(revlog, rev, p1, p2, prev)
770 770
771 771 prefix = ''
772 772 if revlog.iscensored(base) or revlog.iscensored(rev):
773 773 try:
774 774 delta = revlog.revision(node, raw=True)
775 775 except error.CensoredNodeError as e:
776 776 delta = e.tombstone
777 777 if base == nullrev:
778 778 prefix = mdiff.trivialdiffheader(len(delta))
779 779 else:
780 780 baselen = revlog.rawsize(base)
781 781 prefix = mdiff.replacediffheader(baselen, len(delta))
782 782 elif base == nullrev:
783 783 delta = revlog.revision(node, raw=True)
784 784 prefix = mdiff.trivialdiffheader(len(delta))
785 785 else:
786 786 delta = revlog.revdiff(base, rev)
787 787 p1n, p2n = revlog.parents(node)
788 788 basenode = revlog.node(base)
789 789 flags = revlog.flags(rev)
790 790 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
791 791 meta += prefix
792 792 l = len(meta) + len(delta)
793 793 yield chunkheader(l)
794 794 yield meta
795 795 yield delta
796 796 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
797 797 # do nothing with basenode, it is implicitly the previous one in HG10
798 798 # do nothing with flags, it is implicitly 0 for cg1 and cg2
799 799 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
800 800
801 801 class cg2packer(cg1packer):
802 802 version = '02'
803 803 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
804 804
805 805 def __init__(self, repo, bundlecaps=None):
806 806 super(cg2packer, self).__init__(repo, bundlecaps)
807 807 if self._reorder is None:
808 808 # Since generaldelta is directly supported by cg2, reordering
809 809 # generally doesn't help, so we disable it by default (treating
810 810 # bundle.reorder=auto just like bundle.reorder=False).
811 811 self._reorder = False
812 812
813 813 def deltaparent(self, revlog, rev, p1, p2, prev):
814 814 dp = revlog.deltaparent(rev)
815 815 if dp == nullrev and revlog.storedeltachains:
816 816 # Avoid sending full revisions when delta parent is null. Pick prev
817 817 # in that case. It's tempting to pick p1 in this case, as p1 will
818 818 # be smaller in the common case. However, computing a delta against
819 819 # p1 may require resolving the raw text of p1, which could be
820 820 # expensive. The revlog caches should have prev cached, meaning
821 821 # less CPU for changegroup generation. There is likely room to add
822 822 # a flag and/or config option to control this behavior.
823 823 return prev
824 824 elif dp == nullrev:
825 825 # revlog is configured to use full snapshot for a reason,
826 826 # stick to full snapshot.
827 827 return nullrev
828 828 elif dp not in (p1, p2, prev):
829 829 # Pick prev when we can't be sure remote has the base revision.
830 830 return prev
831 831 else:
832 832 return dp
833 833
834 834 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
835 835 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
836 836 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
837 837
838 838 class cg3packer(cg2packer):
839 839 version = '03'
840 840 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
841 841
842 842 def _packmanifests(self, dir, mfnodes, lookuplinknode):
843 843 if dir:
844 844 yield self.fileheader(dir)
845 845
846 846 dirlog = self._repo.manifestlog._revlog.dirlog(dir)
847 847 for chunk in self.group(mfnodes, dirlog, lookuplinknode,
848 848 units=_('manifests')):
849 849 yield chunk
850 850
851 851 def _manifestsdone(self):
852 852 return self.close()
853 853
854 854 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
855 855 return struct.pack(
856 856 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
857 857
858 858 _packermap = {'01': (cg1packer, cg1unpacker),
859 859 # cg2 adds support for exchanging generaldelta
860 860 '02': (cg2packer, cg2unpacker),
861 861 # cg3 adds support for exchanging revlog flags and treemanifests
862 862 '03': (cg3packer, cg3unpacker),
863 863 }
864 864
865 865 def allsupportedversions(repo):
866 866 versions = set(_packermap.keys())
867 867 if not (repo.ui.configbool('experimental', 'changegroup3') or
868 868 repo.ui.configbool('experimental', 'treemanifest') or
869 869 'treemanifest' in repo.requirements):
870 870 versions.discard('03')
871 871 return versions
872 872
873 873 # Changegroup versions that can be applied to the repo
874 874 def supportedincomingversions(repo):
875 875 return allsupportedversions(repo)
876 876
877 877 # Changegroup versions that can be created from the repo
878 878 def supportedoutgoingversions(repo):
879 879 versions = allsupportedversions(repo)
880 880 if 'treemanifest' in repo.requirements:
881 881 # Versions 01 and 02 support only flat manifests and it's just too
882 882 # expensive to convert between the flat manifest and tree manifest on
883 883 # the fly. Since tree manifests are hashed differently, all of history
884 884 # would have to be converted. Instead, we simply don't even pretend to
885 885 # support versions 01 and 02.
886 886 versions.discard('01')
887 887 versions.discard('02')
888 888 return versions
889 889
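# A hedged sketch (hypothetical helper, not part of this change): because a
# 'treemanifest' repo drops '01' and '02' above, any version negotiation
# must intersect with supportedoutgoingversions() before picking a format.
def _negotiateversion(repo, remoteversions):
    common = set(remoteversions) & supportedoutgoingversions(repo)
    if not common:
        raise error.Abort(_('no common changegroup version'))
    return max(common)  # '01' < '02' < '03' as strings, so max is newest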
890 890 def safeversion(repo):
891 891 # Finds the smallest version that it's safe to assume clients of the repo
892 892 # will support. For example, all hg versions that support generaldelta also
893 893 # support changegroup 02.
894 894 versions = supportedoutgoingversions(repo)
895 895 if 'generaldelta' in repo.requirements:
896 896 versions.discard('01')
897 897 assert versions
898 898 return min(versions)
899 899
900 900 def getbundler(version, repo, bundlecaps=None):
901 901 assert version in supportedoutgoingversions(repo)
902 902 return _packermap[version][0](repo, bundlecaps)
903 903
904 904 def getunbundler(version, fh, alg, extras=None):
905 905 return _packermap[version][1](fh, alg, extras=extras)
906 906
907 907 def _changegroupinfo(repo, nodes, source):
908 908 if repo.ui.verbose or source == 'bundle':
909 909 repo.ui.status(_("%d changesets found\n") % len(nodes))
910 910 if repo.ui.debugflag:
911 911 repo.ui.debug("list of changesets:\n")
912 912 for node in nodes:
913 913 repo.ui.debug("%s\n" % hex(node))
914 914
915 915 def makestream(repo, outgoing, version, source, fastpath=False,
916 916 bundlecaps=None):
917 917 bundler = getbundler(version, repo, bundlecaps=bundlecaps)
918 918 return getsubsetraw(repo, outgoing, bundler, source, fastpath=fastpath)
919 919
920 920 def makechangegroup(repo, outgoing, version, source, fastpath=False,
921 921 bundlecaps=None):
922 922 cgstream = makestream(repo, outgoing, version, source,
923 923 fastpath=fastpath, bundlecaps=bundlecaps)
924 924 return getunbundler(version, util.chunkbuffer(cgstream), None,
925 925 {'clcount': len(outgoing.missing) })
926 926
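# For example (hypothetical caller, mirroring how changegroupsubset is
# replaced throughout this series): compute the outgoing set explicitly,
# then hand it to makechangegroup to get a ready-to-apply unbundler.
def _makechangegroupexample(repo, roots, heads, source):
    outgoing = discovery.outgoing(repo, missingroots=roots,
                                  missingheads=heads)
    return makechangegroup(repo, outgoing, '01', source)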
927 927 def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
928 928 repo = repo.unfiltered()
929 929 commonrevs = outgoing.common
930 930 csets = outgoing.missing
931 931 heads = outgoing.missingheads
932 932 # We go through the fast path if we get told to, or if all (unfiltered)
933 933 # heads have been requested (since we then know all linkrevs will
934 934 # be pulled by the client).
935 935 heads.sort()
936 936 fastpathlinkrev = fastpath or (
937 937 repo.filtername is None and heads == sorted(repo.heads()))
938 938
939 939 repo.hook('preoutgoing', throw=True, source=source)
940 940 _changegroupinfo(repo, csets, source)
941 941 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
942 942
943 def changegroupsubset(repo, roots, heads, source, version='01'):
944 """Compute a changegroup consisting of all the nodes that are
945 descendants of any of the roots and ancestors of any of the heads.
946 Return a chunkbuffer object whose read() method will return
947 successive changegroup chunks.
948
949 It is fairly complex as determining which filenodes and which
950 manifest nodes need to be included for the changeset to be complete
951 is non-trivial.
952
953 Another wrinkle is doing the reverse, figuring out which changeset in
954 the changegroup a particular filenode or manifestnode belongs to.
955 """
956 outgoing = discovery.outgoing(repo, missingroots=roots, missingheads=heads)
957 return makechangegroup(repo, outgoing, version, source)
958
959 943 def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
960 944 version='01'):
961 945 """Like getbundle, but taking a discovery.outgoing as an argument.
962 946
963 947 This is only implemented for local repos and reuses potentially
964 948 precomputed sets in outgoing. Returns a raw changegroup generator."""
965 949 if not outgoing.missing:
966 950 return None
967 951 bundler = getbundler(version, repo, bundlecaps)
968 952 return getsubsetraw(repo, outgoing, bundler, source)
969 953
970 954 def getchangegroup(repo, source, outgoing, bundlecaps=None,
971 955 version='01'):
972 956 """Like getbundle, but taking a discovery.outgoing as an argument.
973 957
974 958 This is only implemented for local repos and reuses potentially
975 959 precomputed sets in outgoing."""
976 960 if not outgoing.missing:
977 961 return None
978 962 return makechangegroup(repo, outgoing, version, source,
979 963 bundlecaps=bundlecaps)
980 964
981 965 def getlocalchangegroup(repo, *args, **kwargs):
982 966 repo.ui.deprecwarn('getlocalchangegroup is deprecated, use getchangegroup',
983 967 '4.3')
984 968 return getchangegroup(repo, *args, **kwargs)
985 969
986 970 def changegroup(repo, basenodes, source):
987 971 # to avoid a race we use changegroupsubset() (issue1320)
988 return changegroupsubset(repo, basenodes, repo.heads(), source)
972 outgoing = discovery.outgoing(repo, missingroots=basenodes,
973 missingheads=repo.heads())
974 return makechangegroup(repo, outgoing, '01', source)
989 975
990 976 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
991 977 revisions = 0
992 978 files = 0
993 979 for chunkdata in iter(source.filelogheader, {}):
994 980 files += 1
995 981 f = chunkdata["filename"]
996 982 repo.ui.debug("adding %s revisions\n" % f)
997 983 repo.ui.progress(_('files'), files, unit=_('files'),
998 984 total=expectedfiles)
999 985 fl = repo.file(f)
1000 986 o = len(fl)
1001 987 try:
1002 988 if not fl.addgroup(source, revmap, trp):
1003 989 raise error.Abort(_("received file revlog group is empty"))
1004 990 except error.CensoredBaseError as e:
1005 991 raise error.Abort(_("received delta base is censored: %s") % e)
1006 992 revisions += len(fl) - o
1007 993 if f in needfiles:
1008 994 needs = needfiles[f]
1009 995 for new in xrange(o, len(fl)):
1010 996 n = fl.node(new)
1011 997 if n in needs:
1012 998 needs.remove(n)
1013 999 else:
1014 1000 raise error.Abort(
1015 1001 _("received spurious file revlog entry"))
1016 1002 if not needs:
1017 1003 del needfiles[f]
1018 1004 repo.ui.progress(_('files'), None)
1019 1005
1020 1006 for f, needs in needfiles.iteritems():
1021 1007 fl = repo.file(f)
1022 1008 for n in needs:
1023 1009 try:
1024 1010 fl.rev(n)
1025 1011 except error.LookupError:
1026 1012 raise error.Abort(
1027 1013 _('missing file data for %s:%s - run hg verify') %
1028 1014 (f, hex(n)))
1029 1015
1030 1016 return revisions, files
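# A minimal sketch (annotation, with hypothetical data shapes) of the
# needfiles bookkeeping above: every received node must have been expected,
# and every expected node must eventually arrive.
def _checkneedfiles(received, needfiles):
    # received: {filename: [node, ...]}; needfiles: {filename: set(nodes)}
    for f, nodes in received.iteritems():
        needs = needfiles.get(f, set())
        for n in nodes:
            if n not in needs:
                raise error.Abort(_("received spurious file revlog entry"))
            needs.discard(n)
    for f, needs in needfiles.iteritems():
        if needs:
            raise error.Abort(_('missing file data for %s') % f)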
@@ -1,2294 +1,2297
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 discovery,
34 35 encoding,
35 36 error,
36 37 exchange,
37 38 extensions,
38 39 filelog,
39 40 hook,
40 41 lock as lockmod,
41 42 manifest,
42 43 match as matchmod,
43 44 merge as mergemod,
44 45 mergeutil,
45 46 namespaces,
46 47 obsolete,
47 48 pathutil,
48 49 peer,
49 50 phases,
50 51 pushkey,
51 52 pycompat,
52 53 repository,
53 54 repoview,
54 55 revset,
55 56 revsetlang,
56 57 scmutil,
57 58 sparse,
58 59 store,
59 60 subrepo,
60 61 tags as tagsmod,
61 62 transaction,
62 63 txnutil,
63 64 util,
64 65 vfs as vfsmod,
65 66 )
66 67
67 68 release = lockmod.release
68 69 urlerr = util.urlerr
69 70 urlreq = util.urlreq
70 71
71 72 # set of (path, vfs-location) tuples. vfs-location is:
72 73 # - 'plain' for vfs relative paths
73 74 # - '' for svfs relative paths
74 75 _cachedfiles = set()
75 76
76 77 class _basefilecache(scmutil.filecache):
77 78 """All filecache usage on repo are done for logic that should be unfiltered
78 79 """
79 80 def __get__(self, repo, type=None):
80 81 if repo is None:
81 82 return self
82 83 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
83 84 def __set__(self, repo, value):
84 85 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
85 86 def __delete__(self, repo):
86 87 return super(_basefilecache, self).__delete__(repo.unfiltered())
87 88
88 89 class repofilecache(_basefilecache):
89 90 """filecache for files in .hg but outside of .hg/store"""
90 91 def __init__(self, *paths):
91 92 super(repofilecache, self).__init__(*paths)
92 93 for path in paths:
93 94 _cachedfiles.add((path, 'plain'))
94 95
95 96 def join(self, obj, fname):
96 97 return obj.vfs.join(fname)
97 98
98 99 class storecache(_basefilecache):
99 100 """filecache for files in the store"""
100 101 def __init__(self, *paths):
101 102 super(storecache, self).__init__(*paths)
102 103 for path in paths:
103 104 _cachedfiles.add((path, ''))
104 105
105 106 def join(self, obj, fname):
106 107 return obj.sjoin(fname)
107 108
108 109 def isfilecached(repo, name):
109 110 """check if a repo has already cached "name" filecache-ed property
110 111
111 112 This returns (cachedobj-or-None, iscached) tuple.
112 113 """
113 114 cacheentry = repo.unfiltered()._filecache.get(name, None)
114 115 if not cacheentry:
115 116 return None, False
116 117 return cacheentry.obj, True
117 118
118 119 class unfilteredpropertycache(util.propertycache):
119 120 """propertycache that apply to unfiltered repo only"""
120 121
121 122 def __get__(self, repo, type=None):
122 123 unfi = repo.unfiltered()
123 124 if unfi is repo:
124 125 return super(unfilteredpropertycache, self).__get__(unfi)
125 126 return getattr(unfi, self.name)
126 127
127 128 class filteredpropertycache(util.propertycache):
128 129 """propertycache that must take filtering in account"""
129 130
130 131 def cachevalue(self, obj, value):
131 132 object.__setattr__(obj, self.name, value)
132 133
133 134
134 135 def hasunfilteredcache(repo, name):
135 136 """check if a repo has an unfilteredpropertycache value for <name>"""
136 137 return name in vars(repo.unfiltered())
137 138
138 139 def unfilteredmethod(orig):
139 140 """decorate method that always need to be run on unfiltered version"""
140 141 def wrapper(repo, *args, **kwargs):
141 142 return orig(repo.unfiltered(), *args, **kwargs)
142 143 return wrapper
143 144
144 145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
145 146 'unbundle'}
146 147 legacycaps = moderncaps.union({'changegroupsubset'})
147 148
148 149 class localpeer(repository.peer):
149 150 '''peer for a local repo; reflects only the most recent API'''
150 151
151 152 def __init__(self, repo, caps=None):
152 153 super(localpeer, self).__init__()
153 154
154 155 if caps is None:
155 156 caps = moderncaps.copy()
156 157 self._repo = repo.filtered('served')
157 158 self._ui = repo.ui
158 159 self._caps = repo._restrictcapabilities(caps)
159 160
160 161 # Begin of _basepeer interface.
161 162
162 163 @util.propertycache
163 164 def ui(self):
164 165 return self._ui
165 166
166 167 def url(self):
167 168 return self._repo.url()
168 169
169 170 def local(self):
170 171 return self._repo
171 172
172 173 def peer(self):
173 174 return self
174 175
175 176 def canpush(self):
176 177 return True
177 178
178 179 def close(self):
179 180 self._repo.close()
180 181
181 182 # End of _basepeer interface.
182 183
183 184 # Begin of _basewirecommands interface.
184 185
185 186 def branchmap(self):
186 187 return self._repo.branchmap()
187 188
188 189 def capabilities(self):
189 190 return self._caps
190 191
191 192 def debugwireargs(self, one, two, three=None, four=None, five=None):
192 193 """Used to test argument passing over the wire"""
193 194 return "%s %s %s %s %s" % (one, two, three, four, five)
194 195
195 196 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
196 197 **kwargs):
197 198 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
198 199 common=common, bundlecaps=bundlecaps,
199 200 **kwargs)
200 201 cb = util.chunkbuffer(chunks)
201 202
202 203 if exchange.bundle2requested(bundlecaps):
203 204 # When requesting a bundle2, getbundle returns a stream to make the
204 205 # wire level function happier. We need to build a proper object
205 206 # from it in local peer.
206 207 return bundle2.getunbundler(self.ui, cb)
207 208 else:
208 209 return changegroup.getunbundler('01', cb, None)
209 210
210 211 def heads(self):
211 212 return self._repo.heads()
212 213
213 214 def known(self, nodes):
214 215 return self._repo.known(nodes)
215 216
216 217 def listkeys(self, namespace):
217 218 return self._repo.listkeys(namespace)
218 219
219 220 def lookup(self, key):
220 221 return self._repo.lookup(key)
221 222
222 223 def pushkey(self, namespace, key, old, new):
223 224 return self._repo.pushkey(namespace, key, old, new)
224 225
225 226 def stream_out(self):
226 227 raise error.Abort(_('cannot perform stream clone against local '
227 228 'peer'))
228 229
229 230 def unbundle(self, cg, heads, url):
230 231 """apply a bundle on a repo
231 232
232 233 This function handles the repo locking itself."""
233 234 try:
234 235 try:
235 236 cg = exchange.readbundle(self.ui, cg, None)
236 237 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
237 238 if util.safehasattr(ret, 'getchunks'):
238 239 # This is a bundle20 object, turn it into an unbundler.
239 240 # This little dance should be dropped eventually when the
240 241 # API is finally improved.
241 242 stream = util.chunkbuffer(ret.getchunks())
242 243 ret = bundle2.getunbundler(self.ui, stream)
243 244 return ret
244 245 except Exception as exc:
245 246 # If the exception contains output salvaged from a bundle2
246 247 # reply, we need to make sure it is printed before continuing
247 248 # to fail. So we build a bundle2 with such output and consume
248 249 # it directly.
249 250 #
250 251 # This is not very elegant but allows a "simple" solution for
251 252 # issue4594
252 253 output = getattr(exc, '_bundle2salvagedoutput', ())
253 254 if output:
254 255 bundler = bundle2.bundle20(self._repo.ui)
255 256 for out in output:
256 257 bundler.addpart(out)
257 258 stream = util.chunkbuffer(bundler.getchunks())
258 259 b = bundle2.getunbundler(self.ui, stream)
259 260 bundle2.processbundle(self._repo, b)
260 261 raise
261 262 except error.PushRaced as exc:
262 263 raise error.ResponseError(_('push failed:'), str(exc))
263 264
264 265 # End of _basewirecommands interface.
265 266
266 267 # Begin of peer interface.
267 268
268 269 def iterbatch(self):
269 270 return peer.localiterbatcher(self)
270 271
271 272 # End of peer interface.
272 273
273 274 class locallegacypeer(repository.legacypeer, localpeer):
274 275 '''peer extension which implements legacy methods too; used for tests with
275 276 restricted capabilities'''
276 277
277 278 def __init__(self, repo):
278 279 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
279 280
280 281 # Begin of baselegacywirecommands interface.
281 282
282 283 def between(self, pairs):
283 284 return self._repo.between(pairs)
284 285
285 286 def branches(self, nodes):
286 287 return self._repo.branches(nodes)
287 288
288 289 def changegroup(self, basenodes, source):
289 290 return changegroup.changegroup(self._repo, basenodes, source)
290 291
291 292 def changegroupsubset(self, bases, heads, source):
292 return changegroup.changegroupsubset(self._repo, bases, heads, source)
293 outgoing = discovery.outgoing(self._repo, missingroots=bases,
294 missingheads=heads)
295 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
293 296
294 297 # End of baselegacywirecommands interface.
295 298
296 299 # Increment the sub-version when the revlog v2 format changes to lock out old
297 300 # clients.
298 301 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
299 302
300 303 class localrepository(object):
301 304
302 305 supportedformats = {
303 306 'revlogv1',
304 307 'generaldelta',
305 308 'treemanifest',
306 309 'manifestv2',
307 310 REVLOGV2_REQUIREMENT,
308 311 }
309 312 _basesupported = supportedformats | {
310 313 'store',
311 314 'fncache',
312 315 'shared',
313 316 'relshared',
314 317 'dotencode',
315 318 'exp-sparse',
316 319 }
317 320 openerreqs = {
318 321 'revlogv1',
319 322 'generaldelta',
320 323 'treemanifest',
321 324 'manifestv2',
322 325 }
323 326
324 327 # a list of (ui, featureset) functions.
325 328 # only functions defined in module of enabled extensions are invoked
326 329 featuresetupfuncs = set()
327 330
328 331 # list of prefixes for files which can be written without 'wlock'
329 332 # Extensions should extend this list when needed
330 333 _wlockfreeprefix = {
331 334 # We might consider requiring 'wlock' for the next
332 335 # two, but pretty much all the existing code assumes
333 336 # wlock is not needed, so we keep them excluded for
334 337 # now.
335 338 'hgrc',
336 339 'requires',
337 340 # XXX cache is a complicated business; someone
338 341 # should investigate this in depth at some point
339 342 'cache/',
340 343 # XXX shouldn't be dirstate covered by the wlock?
341 344 'dirstate',
342 345 # XXX bisect was still a bit too messy at the time
343 346 # this changeset was introduced. Someone should fix
344 347 # the remaining bit and drop this line
345 348 'bisect.state',
346 349 }
347 350
348 351 def __init__(self, baseui, path, create=False):
349 352 self.requirements = set()
350 353 self.filtername = None
351 354 # wvfs: rooted at the repository root, used to access the working copy
352 355 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
353 356 # vfs: rooted at .hg, used to access repo files outside of .hg/store
354 357 self.vfs = None
355 358 # svfs: usually rooted at .hg/store, used to access repository history
356 359 # If this is a shared repository, this vfs may point to another
357 360 # repository's .hg/store directory.
358 361 self.svfs = None
359 362 self.root = self.wvfs.base
360 363 self.path = self.wvfs.join(".hg")
361 364 self.origroot = path
362 365 # These auditors are not used by the vfs,
363 366 # only used when writing this comment: basectx.match
364 367 self.auditor = pathutil.pathauditor(self.root, self._checknested)
365 368 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
366 369 realfs=False, cached=True)
367 370 self.baseui = baseui
368 371 self.ui = baseui.copy()
369 372 self.ui.copy = baseui.copy # prevent copying repo configuration
370 373 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
371 374 if (self.ui.configbool('devel', 'all-warnings') or
372 375 self.ui.configbool('devel', 'check-locks')):
373 376 self.vfs.audit = self._getvfsward(self.vfs.audit)
374 377 # A list of callbacks to shape the phase if no data were found.
375 378 # Callbacks are in the form: func(repo, roots) --> processed root.
376 379 # This list is to be filled by extensions during repo setup.
377 380 self._phasedefaults = []
378 381 try:
379 382 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
380 383 self._loadextensions()
381 384 except IOError:
382 385 pass
383 386
384 387 if self.featuresetupfuncs:
385 388 self.supported = set(self._basesupported) # use private copy
386 389 extmods = set(m.__name__ for n, m
387 390 in extensions.extensions(self.ui))
388 391 for setupfunc in self.featuresetupfuncs:
389 392 if setupfunc.__module__ in extmods:
390 393 setupfunc(self.ui, self.supported)
391 394 else:
392 395 self.supported = self._basesupported
393 396 color.setup(self.ui)
394 397
395 398 # Add compression engines.
396 399 for name in util.compengines:
397 400 engine = util.compengines[name]
398 401 if engine.revlogheader():
399 402 self.supported.add('exp-compression-%s' % name)
400 403
401 404 if not self.vfs.isdir():
402 405 if create:
403 406 self.requirements = newreporequirements(self)
404 407
405 408 if not self.wvfs.exists():
406 409 self.wvfs.makedirs()
407 410 self.vfs.makedir(notindexed=True)
408 411
409 412 if 'store' in self.requirements:
410 413 self.vfs.mkdir("store")
411 414
412 415 # create an invalid changelog
413 416 self.vfs.append(
414 417 "00changelog.i",
415 418 '\0\0\0\2' # represents revlogv2
416 419 ' dummy changelog to prevent using the old repo layout'
417 420 )
418 421 else:
419 422 raise error.RepoError(_("repository %s not found") % path)
420 423 elif create:
421 424 raise error.RepoError(_("repository %s already exists") % path)
422 425 else:
423 426 try:
424 427 self.requirements = scmutil.readrequires(
425 428 self.vfs, self.supported)
426 429 except IOError as inst:
427 430 if inst.errno != errno.ENOENT:
428 431 raise
429 432
430 433 cachepath = self.vfs.join('cache')
431 434 self.sharedpath = self.path
432 435 try:
433 436 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
434 437 if 'relshared' in self.requirements:
435 438 sharedpath = self.vfs.join(sharedpath)
436 439 vfs = vfsmod.vfs(sharedpath, realpath=True)
437 440 cachepath = vfs.join('cache')
438 441 s = vfs.base
439 442 if not vfs.exists():
440 443 raise error.RepoError(
441 444 _('.hg/sharedpath points to nonexistent directory %s') % s)
442 445 self.sharedpath = s
443 446 except IOError as inst:
444 447 if inst.errno != errno.ENOENT:
445 448 raise
446 449
447 450 if 'exp-sparse' in self.requirements and not sparse.enabled:
448 451 raise error.RepoError(_('repository is using sparse feature but '
449 452 'sparse is not enabled; enable the '
450 453 '"sparse" extensions to access'))
451 454
452 455 self.store = store.store(
453 456 self.requirements, self.sharedpath,
454 457 lambda base: vfsmod.vfs(base, cacheaudited=True))
455 458 self.spath = self.store.path
456 459 self.svfs = self.store.vfs
457 460 self.sjoin = self.store.join
458 461 self.vfs.createmode = self.store.createmode
459 462 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
460 463 self.cachevfs.createmode = self.store.createmode
461 464 if (self.ui.configbool('devel', 'all-warnings') or
462 465 self.ui.configbool('devel', 'check-locks')):
463 466 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
464 467 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
465 468 else: # standard vfs
466 469 self.svfs.audit = self._getsvfsward(self.svfs.audit)
467 470 self._applyopenerreqs()
468 471 if create:
469 472 self._writerequirements()
470 473
471 474 self._dirstatevalidatewarned = False
472 475
473 476 self._branchcaches = {}
474 477 self._revbranchcache = None
475 478 self.filterpats = {}
476 479 self._datafilters = {}
477 480 self._transref = self._lockref = self._wlockref = None
478 481
479 482 # A cache for various files under .hg/ that tracks file changes,
480 483 # (used by the filecache decorator)
481 484 #
482 485 # Maps a property name to its util.filecacheentry
483 486 self._filecache = {}
484 487
485 488 # hold sets of revisions to be filtered
486 489 # should be cleared when something might have changed the filter value:
487 490 # - new changesets,
488 491 # - phase change,
489 492 # - new obsolescence marker,
490 493 # - working directory parent change,
491 494 # - bookmark changes
492 495 self.filteredrevcache = {}
493 496
494 497 # post-dirstate-status hooks
495 498 self._postdsstatus = []
496 499
497 500 # Cache of types representing filtered repos.
498 501 self._filteredrepotypes = weakref.WeakKeyDictionary()
499 502
500 503 # generic mapping between names and nodes
501 504 self.names = namespaces.namespaces()
502 505
503 506 # Key to signature value.
504 507 self._sparsesignaturecache = {}
505 508 # Signature to cached matcher instance.
506 509 self._sparsematchercache = {}
507 510
508 511 def _getvfsward(self, origfunc):
509 512 """build a ward for self.vfs"""
510 513 rref = weakref.ref(self)
511 514 def checkvfs(path, mode=None):
512 515 ret = origfunc(path, mode=mode)
513 516 repo = rref()
514 517 if (repo is None
515 518 or not util.safehasattr(repo, '_wlockref')
516 519 or not util.safehasattr(repo, '_lockref')):
517 520 return
518 521 if mode in (None, 'r', 'rb'):
519 522 return
520 523 if path.startswith(repo.path):
521 524 # truncate name relative to the repository (.hg)
522 525 path = path[len(repo.path) + 1:]
523 526 if path.startswith('cache/'):
524 527 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
525 528 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
526 529 if path.startswith('journal.'):
527 530 # journal is covered by 'lock'
528 531 if repo._currentlock(repo._lockref) is None:
529 532 repo.ui.develwarn('write with no lock: "%s"' % path,
530 533 stacklevel=2, config='check-locks')
531 534 elif repo._currentlock(repo._wlockref) is None:
532 535 # rest of vfs files are covered by 'wlock'
533 536 #
534 537 # exclude special files
535 538 for prefix in self._wlockfreeprefix:
536 539 if path.startswith(prefix):
537 540 return
538 541 repo.ui.develwarn('write with no wlock: "%s"' % path,
539 542 stacklevel=2, config='check-locks')
540 543 return ret
541 544 return checkvfs
542 545
543 546 def _getsvfsward(self, origfunc):
544 547 """build a ward for self.svfs"""
545 548 rref = weakref.ref(self)
546 549 def checksvfs(path, mode=None):
547 550 ret = origfunc(path, mode=mode)
548 551 repo = rref()
549 552 if repo is None or not util.safehasattr(repo, '_lockref'):
550 553 return
551 554 if mode in (None, 'r', 'rb'):
552 555 return
553 556 if path.startswith(repo.sharedpath):
554 557 # truncate name relative to the repository (.hg)
555 558 path = path[len(repo.sharedpath) + 1:]
556 559 if repo._currentlock(repo._lockref) is None:
557 560 repo.ui.develwarn('write with no lock: "%s"' % path,
558 561 stacklevel=3)
559 562 return ret
560 563 return checksvfs
561 564
562 565 def close(self):
563 566 self._writecaches()
564 567
565 568 def _loadextensions(self):
566 569 extensions.loadall(self.ui)
567 570
568 571 def _writecaches(self):
569 572 if self._revbranchcache:
570 573 self._revbranchcache.write()
571 574
572 575 def _restrictcapabilities(self, caps):
573 576 if self.ui.configbool('experimental', 'bundle2-advertise'):
574 577 caps = set(caps)
575 578 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
576 579 caps.add('bundle2=' + urlreq.quote(capsblob))
577 580 return caps
578 581
579 582 def _applyopenerreqs(self):
580 583 self.svfs.options = dict((r, 1) for r in self.requirements
581 584 if r in self.openerreqs)
582 585 # experimental config: format.chunkcachesize
583 586 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
584 587 if chunkcachesize is not None:
585 588 self.svfs.options['chunkcachesize'] = chunkcachesize
586 589 # experimental config: format.maxchainlen
587 590 maxchainlen = self.ui.configint('format', 'maxchainlen')
588 591 if maxchainlen is not None:
589 592 self.svfs.options['maxchainlen'] = maxchainlen
590 593 # experimental config: format.manifestcachesize
591 594 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
592 595 if manifestcachesize is not None:
593 596 self.svfs.options['manifestcachesize'] = manifestcachesize
594 597 # experimental config: format.aggressivemergedeltas
595 598 aggressivemergedeltas = self.ui.configbool('format',
596 599 'aggressivemergedeltas')
597 600 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
598 601 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
599 602 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
600 603 if 0 <= chainspan:
601 604 self.svfs.options['maxdeltachainspan'] = chainspan
602 605
603 606 for r in self.requirements:
604 607 if r.startswith('exp-compression-'):
605 608 self.svfs.options['compengine'] = r[len('exp-compression-'):]
606 609
607 610 # TODO move "revlogv2" to openerreqs once finalized.
608 611 if REVLOGV2_REQUIREMENT in self.requirements:
609 612 self.svfs.options['revlogv2'] = True
610 613
611 614 def _writerequirements(self):
612 615 scmutil.writerequires(self.vfs, self.requirements)
613 616
614 617 def _checknested(self, path):
615 618 """Determine if path is a legal nested repository."""
616 619 if not path.startswith(self.root):
617 620 return False
618 621 subpath = path[len(self.root) + 1:]
619 622 normsubpath = util.pconvert(subpath)
620 623
621 624 # XXX: Checking against the current working copy is wrong in
622 625 # the sense that it can reject things like
623 626 #
624 627 # $ hg cat -r 10 sub/x.txt
625 628 #
626 629 # if sub/ is no longer a subrepository in the working copy
627 630 # parent revision.
628 631 #
629 632 # However, it can of course also allow things that would have
630 633 # been rejected before, such as the above cat command if sub/
631 634 # is a subrepository now, but was a normal directory before.
632 635 # The old path auditor would have rejected by mistake since it
633 636 # panics when it sees sub/.hg/.
634 637 #
635 638 # All in all, checking against the working copy seems sensible
636 639 # since we want to prevent access to nested repositories on
637 640 # the filesystem *now*.
638 641 ctx = self[None]
639 642 parts = util.splitpath(subpath)
640 643 while parts:
641 644 prefix = '/'.join(parts)
642 645 if prefix in ctx.substate:
643 646 if prefix == normsubpath:
644 647 return True
645 648 else:
646 649 sub = ctx.sub(prefix)
647 650 return sub.checknested(subpath[len(prefix) + 1:])
648 651 else:
649 652 parts.pop()
650 653 return False
651 654
652 655 def peer(self):
653 656 return localpeer(self) # not cached to avoid reference cycle
654 657
655 658 def unfiltered(self):
656 659 """Return unfiltered version of the repository
657 660
658 661 Intended to be overwritten by filtered repo."""
659 662 return self
660 663
661 664 def filtered(self, name):
662 665 """Return a filtered version of a repository"""
663 666 # Python <3.4 easily leaks types via __mro__. See
664 667 # https://bugs.python.org/issue17950. We cache dynamically
665 668 # created types so this method doesn't leak on every
666 669 # invocation.
667 670
668 671 key = self.unfiltered().__class__
669 672 if key not in self._filteredrepotypes:
670 673 # Build a new type with the repoview mixin and the base
671 674 # class of this repo. Give it a name containing the
672 675 # filter name to aid debugging.
673 676 bases = (repoview.repoview, key)
674 677 cls = type(r'%sfilteredrepo' % name, bases, {})
675 678 self._filteredrepotypes[key] = cls
676 679
677 680 return self._filteredrepotypes[key](self, name)
678 681
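# A comment-only sketch (annotation, not part of this change) showing the
# caching trick above in isolation: build the mixed type once per base
# class, and key the cache weakly so base classes can still be collected.
#
#   _cache = weakref.WeakKeyDictionary()
#
#   def mixedtype(base):
#       cls = _cache.get(base)
#       if cls is None:
#           cls = type(r'%sfilteredrepo' % base.__name__,
#                      (repoview.repoview, base), {})
#           _cache[base] = cls
#       return cls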
679 682 @repofilecache('bookmarks', 'bookmarks.current')
680 683 def _bookmarks(self):
681 684 return bookmarks.bmstore(self)
682 685
683 686 @property
684 687 def _activebookmark(self):
685 688 return self._bookmarks.active
686 689
687 690 # _phaserevs and _phasesets depend on changelog. What we need is to
688 691 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
689 692 # can't be easily expressed in the filecache mechanism.
690 693 @storecache('phaseroots', '00changelog.i')
691 694 def _phasecache(self):
692 695 return phases.phasecache(self, self._phasedefaults)
693 696
694 697 @storecache('obsstore')
695 698 def obsstore(self):
696 699 return obsolete.makestore(self.ui, self)
697 700
698 701 @storecache('00changelog.i')
699 702 def changelog(self):
700 703 return changelog.changelog(self.svfs,
701 704 trypending=txnutil.mayhavepending(self.root))
702 705
703 706 def _constructmanifest(self):
704 707 # This is a temporary function while we migrate from manifest to
705 708 # manifestlog. It allows bundlerepo and unionrepo to intercept the
706 709 # manifest creation.
707 710 return manifest.manifestrevlog(self.svfs)
708 711
709 712 @storecache('00manifest.i')
710 713 def manifestlog(self):
711 714 return manifest.manifestlog(self.svfs, self)
712 715
713 716 @repofilecache('dirstate')
714 717 def dirstate(self):
715 718 sparsematchfn = lambda: sparse.matcher(self)
716 719
717 720 return dirstate.dirstate(self.vfs, self.ui, self.root,
718 721 self._dirstatevalidate, sparsematchfn)
719 722
720 723 def _dirstatevalidate(self, node):
721 724 try:
722 725 self.changelog.rev(node)
723 726 return node
724 727 except error.LookupError:
725 728 if not self._dirstatevalidatewarned:
726 729 self._dirstatevalidatewarned = True
727 730 self.ui.warn(_("warning: ignoring unknown"
728 731 " working parent %s!\n") % short(node))
729 732 return nullid
730 733
731 734 def __getitem__(self, changeid):
732 735 if changeid is None:
733 736 return context.workingctx(self)
734 737 if isinstance(changeid, slice):
735 738 # wdirrev isn't contiguous so the slice shouldn't include it
736 739 return [context.changectx(self, i)
737 740 for i in xrange(*changeid.indices(len(self)))
738 741 if i not in self.changelog.filteredrevs]
739 742 try:
740 743 return context.changectx(self, changeid)
741 744 except error.WdirUnsupported:
742 745 return context.workingctx(self)
743 746
744 747 def __contains__(self, changeid):
745 748 """True if the given changeid exists
746 749
747 750 error.LookupError is raised if an ambiguous node is specified.
748 751 """
749 752 try:
750 753 self[changeid]
751 754 return True
752 755 except error.RepoLookupError:
753 756 return False
754 757
755 758 def __nonzero__(self):
756 759 return True
757 760
758 761 __bool__ = __nonzero__
759 762
760 763 def __len__(self):
761 764 return len(self.changelog)
762 765
763 766 def __iter__(self):
764 767 return iter(self.changelog)
765 768
766 769 def revs(self, expr, *args):
767 770 '''Find revisions matching a revset.
768 771
769 772 The revset is specified as a string ``expr`` that may contain
770 773 %-formatting to escape certain types. See ``revsetlang.formatspec``.
771 774
772 775 Revset aliases from the configuration are not expanded. To expand
773 776 user aliases, consider calling ``scmutil.revrange()`` or
774 777 ``repo.anyrevs([expr], user=True)``.
775 778
776 779 Returns a revset.abstractsmartset, which is a list-like interface
777 780 that contains integer revisions.
778 781 '''
779 782 expr = revsetlang.formatspec(expr, *args)
780 783 m = revset.match(None, expr)
781 784 return m(self)
782 785
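# For example (hypothetical queries; escape types are those documented for
# revsetlang.formatspec):
#
#   repo.revs('branch(%s) and not public()', 'default')  # %s: a string
#   repo.revs('heads(%ld)', [10, 12, 21])                # %ld: list of ints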
783 786 def set(self, expr, *args):
784 787 '''Find revisions matching a revset and emit changectx instances.
785 788
786 789 This is a convenience wrapper around ``revs()`` that iterates the
787 790 result and is a generator of changectx instances.
788 791
789 792 Revset aliases from the configuration are not expanded. To expand
790 793 user aliases, consider calling ``scmutil.revrange()``.
791 794 '''
792 795 for r in self.revs(expr, *args):
793 796 yield self[r]
794 797
795 798 def anyrevs(self, specs, user=False, localalias=None):
796 799 '''Find revisions matching one of the given revsets.
797 800
798 801 Revset aliases from the configuration are not expanded by default. To
799 802 expand user aliases, specify ``user=True``. To provide some local
800 803 definitions overriding user aliases, set ``localalias`` to
801 804 ``{name: definitionstring}``.
802 805 '''
803 806 if user:
804 807 m = revset.matchany(self.ui, specs, repo=self,
805 808 localalias=localalias)
806 809 else:
807 810 m = revset.matchany(None, specs, localalias=localalias)
808 811 return m(self)
809 812
810 813 def url(self):
811 814 return 'file:' + self.root
812 815
813 816 def hook(self, name, throw=False, **args):
814 817 """Call a hook, passing this repo instance.
815 818
816 819 This a convenience method to aid invoking hooks. Extensions likely
817 820 won't call this unless they have registered a custom hook or are
818 821 replacing code that is expected to call a hook.
819 822 """
820 823 return hook.hook(self.ui, self, name, throw, **args)
821 824
822 825 @filteredpropertycache
823 826 def _tagscache(self):
824 827 '''Returns a tagscache object that contains various tag-related
825 828 caches.'''
826 829
827 830 # This simplifies its cache management by having one decorated
828 831 # function (this one) and the rest simply fetch things from it.
829 832 class tagscache(object):
830 833 def __init__(self):
831 834 # These two define the set of tags for this repository. tags
832 835 # maps tag name to node; tagtypes maps tag name to 'global' or
833 836 # 'local'. (Global tags are defined by .hgtags across all
834 837 # heads, and local tags are defined in .hg/localtags.)
835 838 # They constitute the in-memory cache of tags.
836 839 self.tags = self.tagtypes = None
837 840
838 841 self.nodetagscache = self.tagslist = None
839 842
840 843 cache = tagscache()
841 844 cache.tags, cache.tagtypes = self._findtags()
842 845
843 846 return cache
844 847
845 848 def tags(self):
846 849 '''return a mapping of tag to node'''
847 850 t = {}
848 851 if self.changelog.filteredrevs:
849 852 tags, tt = self._findtags()
850 853 else:
851 854 tags = self._tagscache.tags
852 855 for k, v in tags.iteritems():
853 856 try:
854 857 # ignore tags to unknown nodes
855 858 self.changelog.rev(v)
856 859 t[k] = v
857 860 except (error.LookupError, ValueError):
858 861 pass
859 862 return t
860 863
861 864 def _findtags(self):
862 865 '''Do the hard work of finding tags. Return a pair of dicts
863 866 (tags, tagtypes) where tags maps tag name to node, and tagtypes
864 867 maps tag name to a string like \'global\' or \'local\'.
865 868 Subclasses or extensions are free to add their own tags, but
866 869 should be aware that the returned dicts will be retained for the
867 870 duration of the localrepo object.'''
868 871
869 872 # XXX what tagtype should subclasses/extensions use? Currently
870 873 # mq and bookmarks add tags, but do not set the tagtype at all.
871 874 # Should each extension invent its own tag type? Should there
872 875 # be one tagtype for all such "virtual" tags? Or is the status
873 876 # quo fine?
874 877
875 878
876 879 # map tag name to (node, hist)
877 880 alltags = tagsmod.findglobaltags(self.ui, self)
878 881 # map tag name to tag type
879 882 tagtypes = dict((tag, 'global') for tag in alltags)
880 883
881 884 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
882 885
883 886 # Build the return dicts. Have to re-encode tag names because
884 887 # the tags module always uses UTF-8 (in order not to lose info
885 888 # writing to the cache), but the rest of Mercurial wants them in
886 889 # local encoding.
887 890 tags = {}
888 891 for (name, (node, hist)) in alltags.iteritems():
889 892 if node != nullid:
890 893 tags[encoding.tolocal(name)] = node
891 894 tags['tip'] = self.changelog.tip()
892 895 tagtypes = dict([(encoding.tolocal(name), value)
893 896 for (name, value) in tagtypes.iteritems()])
894 897 return (tags, tagtypes)
895 898
896 899 def tagtype(self, tagname):
897 900 '''
898 901 return the type of the given tag. result can be:
899 902
900 903 'local' : a local tag
901 904 'global' : a global tag
902 905 None : tag does not exist
903 906 '''
904 907
905 908 return self._tagscache.tagtypes.get(tagname)
906 909
907 910 def tagslist(self):
908 911 '''return a list of tags ordered by revision'''
909 912 if not self._tagscache.tagslist:
910 913 l = []
911 914 for t, n in self.tags().iteritems():
912 915 l.append((self.changelog.rev(n), t, n))
913 916 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
914 917
915 918 return self._tagscache.tagslist
916 919
917 920 def nodetags(self, node):
918 921 '''return the tags associated with a node'''
919 922 if not self._tagscache.nodetagscache:
920 923 nodetagscache = {}
921 924 for t, n in self._tagscache.tags.iteritems():
922 925 nodetagscache.setdefault(n, []).append(t)
923 926 for tags in nodetagscache.itervalues():
924 927 tags.sort()
925 928 self._tagscache.nodetagscache = nodetagscache
926 929 return self._tagscache.nodetagscache.get(node, [])
927 930
928 931 def nodebookmarks(self, node):
929 932 """return the list of bookmarks pointing to the specified node"""
930 933 marks = []
931 934 for bookmark, n in self._bookmarks.iteritems():
932 935 if n == node:
933 936 marks.append(bookmark)
934 937 return sorted(marks)
935 938
936 939 def branchmap(self):
937 940 '''returns a dictionary {branch: [branchheads]} with branchheads
938 941 ordered by increasing revision number'''
939 942 branchmap.updatecache(self)
940 943 return self._branchcaches[self.filtername]
941 944
942 945 @unfilteredmethod
943 946 def revbranchcache(self):
944 947 if not self._revbranchcache:
945 948 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
946 949 return self._revbranchcache
947 950
948 951 def branchtip(self, branch, ignoremissing=False):
949 952 '''return the tip node for a given branch
950 953
951 954 If ignoremissing is True, then this method will not raise an error.
952 955 This is helpful for callers that only expect None for a missing branch
953 956 (e.g. namespace).
954 957
955 958 '''
956 959 try:
957 960 return self.branchmap().branchtip(branch)
958 961 except KeyError:
959 962 if not ignoremissing:
960 963 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
961 964 else:
962 965 pass
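# Editor's sketch of the contract: branchtip('default') returns the tip
# node of the 'default' branch; for a missing branch this raises
# RepoLookupError, or, with ignoremissing=True, falls through and
# returns None.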
963 966
964 967 def lookup(self, key):
965 968 return self[key].node()
966 969
967 970 def lookupbranch(self, key, remote=None):
968 971 repo = remote or self
969 972 if key in repo.branchmap():
970 973 return key
971 974
972 975 repo = (remote and remote.local()) and remote or self
973 976 return repo[key].branch()
974 977
975 978 def known(self, nodes):
976 979 cl = self.changelog
977 980 nm = cl.nodemap
978 981 filtered = cl.filteredrevs
979 982 result = []
980 983 for n in nodes:
981 984 r = nm.get(n)
982 985 resp = not (r is None or r in filtered)
983 986 result.append(resp)
984 987 return result
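# Editor's sketch of the contract (knownnode present in the changelog,
# missingnode absent or filtered; both names hypothetical):
#
# repo.known([knownnode, missingnode]) -> [True, False]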
985 988
986 989 def local(self):
987 990 return self
988 991
989 992 def publishing(self):
990 993 # it's safe (and desirable) to trust the publish flag unconditionally
991 994 # so that we don't finalize changes shared between users via ssh or nfs
992 995 return self.ui.configbool('phases', 'publish', untrusted=True)
993 996
994 997 def cancopy(self):
995 998 # so statichttprepo's override of local() works
996 999 if not self.local():
997 1000 return False
998 1001 if not self.publishing():
999 1002 return True
1000 1003 # if publishing we can't copy if there is filtered content
1001 1004 return not self.filtered('visible').changelog.filteredrevs
1002 1005
1003 1006 def shared(self):
1004 1007 '''the type of shared repository (None if not shared)'''
1005 1008 if self.sharedpath != self.path:
1006 1009 return 'store'
1007 1010 return None
1008 1011
1009 1012 def wjoin(self, f, *insidef):
1010 1013 return self.vfs.reljoin(self.root, f, *insidef)
1011 1014
1012 1015 def file(self, f):
1013 1016 if f[0] == '/':
1014 1017 f = f[1:]
1015 1018 return filelog.filelog(self.svfs, f)
1016 1019
1017 1020 def changectx(self, changeid):
1018 1021 return self[changeid]
1019 1022
1020 1023 def setparents(self, p1, p2=nullid):
1021 1024 with self.dirstate.parentchange():
1022 1025 copies = self.dirstate.setparents(p1, p2)
1023 1026 pctx = self[p1]
1024 1027 if copies:
1025 1028 # Adjust copy records: the dirstate cannot do it, as it
1026 1029 # requires access to the parents' manifests. Preserve them
1027 1030 # only for entries added to the first parent.
1028 1031 for f in copies:
1029 1032 if f not in pctx and copies[f] in pctx:
1030 1033 self.dirstate.copy(copies[f], f)
1031 1034 if p2 == nullid:
1032 1035 for f, s in sorted(self.dirstate.copies().items()):
1033 1036 if f not in pctx and s not in pctx:
1034 1037 self.dirstate.copy(None, f)
1035 1038
1036 1039 def filectx(self, path, changeid=None, fileid=None):
1037 1040 """changeid can be a changeset revision, node, or tag.
1038 1041 fileid can be a file revision or node."""
1039 1042 return context.filectx(self, path, changeid, fileid)
1040 1043
1041 1044 def getcwd(self):
1042 1045 return self.dirstate.getcwd()
1043 1046
1044 1047 def pathto(self, f, cwd=None):
1045 1048 return self.dirstate.pathto(f, cwd)
1046 1049
1047 1050 def _loadfilter(self, filter):
1048 1051 if filter not in self.filterpats:
1049 1052 l = []
1050 1053 for pat, cmd in self.ui.configitems(filter):
1051 1054 if cmd == '!':
1052 1055 continue
1053 1056 mf = matchmod.match(self.root, '', [pat])
1054 1057 fn = None
1055 1058 params = cmd
1056 1059 for name, filterfn in self._datafilters.iteritems():
1057 1060 if cmd.startswith(name):
1058 1061 fn = filterfn
1059 1062 params = cmd[len(name):].lstrip()
1060 1063 break
1061 1064 if not fn:
1062 1065 fn = lambda s, c, **kwargs: util.filter(s, c)
1063 1066 # Wrap old filters not supporting keyword arguments
1064 1067 if not inspect.getargspec(fn)[2]:
1065 1068 oldfn = fn
1066 1069 fn = lambda s, c, **kwargs: oldfn(s, c)
1067 1070 l.append((mf, fn, params))
1068 1071 self.filterpats[filter] = l
1069 1072 return self.filterpats[filter]
1070 1073
1071 1074 def _filter(self, filterpats, filename, data):
1072 1075 for mf, fn, cmd in filterpats:
1073 1076 if mf(filename):
1074 1077 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1075 1078 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1076 1079 break
1077 1080
1078 1081 return data
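# Editor's note: these filters are driven by the [encode] and [decode]
# hgrc sections; a sketch of a matching configuration (pattern and
# command are illustrative):
#
# [decode]
# **.txt = tempfile: unix2dos -n INFILE OUTFILE
#
# The command part is either a "pipe:"/"tempfile:" shell filter or the
# name of a filter registered via adddatafilter() below.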
1079 1082
1080 1083 @unfilteredpropertycache
1081 1084 def _encodefilterpats(self):
1082 1085 return self._loadfilter('encode')
1083 1086
1084 1087 @unfilteredpropertycache
1085 1088 def _decodefilterpats(self):
1086 1089 return self._loadfilter('decode')
1087 1090
1088 1091 def adddatafilter(self, name, filter):
1089 1092 self._datafilters[name] = filter
1090 1093
1091 1094 def wread(self, filename):
1092 1095 if self.wvfs.islink(filename):
1093 1096 data = self.wvfs.readlink(filename)
1094 1097 else:
1095 1098 data = self.wvfs.read(filename)
1096 1099 return self._filter(self._encodefilterpats, filename, data)
1097 1100
1098 1101 def wwrite(self, filename, data, flags, backgroundclose=False):
1099 1102 """write ``data`` into ``filename`` in the working directory
1100 1103
1101 1104 This returns the length of the written (possibly decoded) data.
1102 1105 """
1103 1106 data = self._filter(self._decodefilterpats, filename, data)
1104 1107 if 'l' in flags:
1105 1108 self.wvfs.symlink(data, filename)
1106 1109 else:
1107 1110 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1108 1111 if 'x' in flags:
1109 1112 self.wvfs.setflags(filename, False, True)
1110 1113 return len(data)
1111 1114
1112 1115 def wwritedata(self, filename, data):
1113 1116 return self._filter(self._decodefilterpats, filename, data)
1114 1117
1115 1118 def currenttransaction(self):
1116 1119 """return the current transaction or None if non exists"""
1117 1120 if self._transref:
1118 1121 tr = self._transref()
1119 1122 else:
1120 1123 tr = None
1121 1124
1122 1125 if tr and tr.running():
1123 1126 return tr
1124 1127 return None
1125 1128
1126 1129 def transaction(self, desc, report=None):
1127 1130 if (self.ui.configbool('devel', 'all-warnings')
1128 1131 or self.ui.configbool('devel', 'check-locks')):
1129 1132 if self._currentlock(self._lockref) is None:
1130 1133 raise error.ProgrammingError('transaction requires locking')
1131 1134 tr = self.currenttransaction()
1132 1135 if tr is not None:
1133 1136 scmutil.registersummarycallback(self, tr, desc)
1134 1137 return tr.nest()
1135 1138
1136 1139 # abort here if the journal already exists
1137 1140 if self.svfs.exists("journal"):
1138 1141 raise error.RepoError(
1139 1142 _("abandoned transaction found"),
1140 1143 hint=_("run 'hg recover' to clean up transaction"))
1141 1144
1142 1145 idbase = "%.40f#%f" % (random.random(), time.time())
1143 1146 ha = hex(hashlib.sha1(idbase).digest())
1144 1147 txnid = 'TXN:' + ha
1145 1148 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1146 1149
1147 1150 self._writejournal(desc)
1148 1151 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1149 1152 if report:
1150 1153 rp = report
1151 1154 else:
1152 1155 rp = self.ui.warn
1153 1156 vfsmap = {'plain': self.vfs} # root of .hg/
1154 1157 # we must avoid cyclic reference between repo and transaction.
1155 1158 reporef = weakref.ref(self)
1156 1159 # Code to track tag movement
1157 1160 #
1158 1161 # Since tags are all handled as file content, it is actually quite hard
1159 1162 # to track these movements from a code perspective. So we fall back to
1160 1163 # tracking at the repository level. One could envision tracking changes
1161 1164 # to the '.hgtags' file through changegroup application, but that fails
1162 1165 # to cope with cases where a transaction exposes new heads without a
1163 1166 # changegroup being involved (eg: phase movement).
1164 1167 #
1165 1168 # For now, we gate the feature behind a flag since this likely comes
1166 1169 # with performance impacts. The current code runs more often than needed
1167 1170 # and does not use caches as much as it could. The current focus is on
1168 1171 # the behavior of the feature so we disable it by default. The flag
1169 1172 # will be removed when we are happy with the performance impact.
1170 1173 #
1171 1174 # Once this feature is no longer experimental move the following
1172 1175 # documentation to the appropriate help section:
1173 1176 #
1174 1177 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1175 1178 # tags (new or changed or deleted tags). In addition the details of
1176 1179 # these changes are made available in a file at:
1177 1180 # ``REPOROOT/.hg/changes/tags.changes``.
1178 1181 # Make sure you check for HG_TAG_MOVED before reading that file as it
1179 1182 # might exist from a previous transaction even if no tags were touched
1180 1183 # in this one. Changes are recorded in a line-based format::
1181 1184 #
1182 1185 # <action> <hex-node> <tag-name>\n
1183 1186 #
1184 1187 # Actions are defined as follows:
1185 1188 # "-R": tag is removed,
1186 1189 # "+A": tag is added,
1187 1190 # "-M": tag is moved (old value),
1188 1191 # "+M": tag is moved (new value),
1189 1192 tracktags = lambda x: None
1190 1193 # experimental config: experimental.hook-track-tags
1191 1194 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1192 1195 if desc != 'strip' and shouldtracktags:
1193 1196 oldheads = self.changelog.headrevs()
1194 1197 def tracktags(tr2):
1195 1198 repo = reporef()
1196 1199 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1197 1200 newheads = repo.changelog.headrevs()
1198 1201 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1199 1202 # note: we compare lists here.
1200 1203 # As we do it only once, building a set would not be cheaper
1201 1204 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1202 1205 if changes:
1203 1206 tr2.hookargs['tag_moved'] = '1'
1204 1207 with repo.vfs('changes/tags.changes', 'w',
1205 1208 atomictemp=True) as changesfile:
1206 1209 # note: we do not register the file with the transaction
1207 1210 # because we need it to still exist when the transaction
1208 1211 # is closed (for txnclose hooks)
1209 1212 tagsmod.writediff(changesfile, changes)
1210 1213 def validate(tr2):
1211 1214 """will run pre-closing hooks"""
1212 1215 # XXX the transaction API is a bit lacking here so we take a hacky
1213 1216 # path for now
1214 1217 #
1215 1218 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1216 1219 # dict is copied before these run. In addition we need the data
1217 1220 # available to in-memory hooks too.
1218 1221 #
1219 1222 # Moreover, we also need to make sure this runs before txnclose
1220 1223 # hooks and there is no "pending" mechanism that would execute
1221 1224 # logic only if hooks are about to run.
1222 1225 #
1223 1226 # Fixing this limitation of the transaction is also needed to track
1224 1227 # other families of changes (bookmarks, phases, obsolescence).
1225 1228 #
1226 1229 # This will have to be fixed before we remove the experimental
1227 1230 # gating.
1228 1231 tracktags(tr2)
1229 1232 reporef().hook('pretxnclose', throw=True,
1230 1233 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1231 1234 def releasefn(tr, success):
1232 1235 repo = reporef()
1233 1236 if success:
1234 1237 # this should be explicitly invoked here, because
1235 1238 # in-memory changes aren't written out at closing
1236 1239 # transaction, if tr.addfilegenerator (via
1237 1240 # dirstate.write or so) isn't invoked while
1238 1241 # transaction running
1239 1242 repo.dirstate.write(None)
1240 1243 else:
1241 1244 # discard all changes (including ones already written
1242 1245 # out) in this transaction
1243 1246 repo.dirstate.restorebackup(None, 'journal.dirstate')
1244 1247
1245 1248 repo.invalidate(clearfilecache=True)
1246 1249
1247 1250 tr = transaction.transaction(rp, self.svfs, vfsmap,
1248 1251 "journal",
1249 1252 "undo",
1250 1253 aftertrans(renames),
1251 1254 self.store.createmode,
1252 1255 validator=validate,
1253 1256 releasefn=releasefn,
1254 1257 checkambigfiles=_cachedfiles)
1255 1258 tr.changes['revs'] = set()
1256 1259 tr.changes['obsmarkers'] = set()
1257 1260 tr.changes['phases'] = {}
1258 1261 tr.changes['bookmarks'] = {}
1259 1262
1260 1263 tr.hookargs['txnid'] = txnid
1261 1264 # note: writing the fncache only during finalize means that the file is
1262 1265 # outdated when running hooks. As fncache is used for streaming clones,
1263 1266 # this is not expected to break anything that happens during the hooks.
1264 1267 tr.addfinalize('flush-fncache', self.store.write)
1265 1268 def txnclosehook(tr2):
1266 1269 """To be run if transaction is successful, will schedule a hook run
1267 1270 """
1268 1271 # Don't reference tr2 in hook() so we don't hold a reference.
1269 1272 # This reduces memory consumption when there are multiple
1270 1273 # transactions per lock. This can likely go away if issue5045
1271 1274 # fixes the function accumulation.
1272 1275 hookargs = tr2.hookargs
1273 1276
1274 1277 def hook():
1275 1278 reporef().hook('txnclose', throw=False, txnname=desc,
1276 1279 **pycompat.strkwargs(hookargs))
1277 1280 reporef()._afterlock(hook)
1278 1281 tr.addfinalize('txnclose-hook', txnclosehook)
1279 1282 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1280 1283 def txnaborthook(tr2):
1281 1284 """To be run if transaction is aborted
1282 1285 """
1283 1286 reporef().hook('txnabort', throw=False, txnname=desc,
1284 1287 **tr2.hookargs)
1285 1288 tr.addabort('txnabort-hook', txnaborthook)
1286 1289 # avoid eager cache invalidation. in-memory data should be identical
1287 1290 # to stored data if transaction has no error.
1288 1291 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1289 1292 self._transref = weakref.ref(tr)
1290 1293 scmutil.registersummarycallback(self, tr, desc)
1291 1294 return tr
1292 1295
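# Canonical usage of the transaction API above (editor's sketch,
# mirroring the commitctx pattern later in this file; the description
# string is free-form):
#
# with repo.lock():
#     tr = repo.transaction('my-operation')
#     try:
#         ...  # mutate the store
#         tr.close()
#     finally:
#         tr.release()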
1293 1296 def _journalfiles(self):
1294 1297 return ((self.svfs, 'journal'),
1295 1298 (self.vfs, 'journal.dirstate'),
1296 1299 (self.vfs, 'journal.branch'),
1297 1300 (self.vfs, 'journal.desc'),
1298 1301 (self.vfs, 'journal.bookmarks'),
1299 1302 (self.svfs, 'journal.phaseroots'))
1300 1303
1301 1304 def undofiles(self):
1302 1305 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1303 1306
1304 1307 @unfilteredmethod
1305 1308 def _writejournal(self, desc):
1306 1309 self.dirstate.savebackup(None, 'journal.dirstate')
1307 1310 self.vfs.write("journal.branch",
1308 1311 encoding.fromlocal(self.dirstate.branch()))
1309 1312 self.vfs.write("journal.desc",
1310 1313 "%d\n%s\n" % (len(self), desc))
1311 1314 self.vfs.write("journal.bookmarks",
1312 1315 self.vfs.tryread("bookmarks"))
1313 1316 self.svfs.write("journal.phaseroots",
1314 1317 self.svfs.tryread("phaseroots"))
1315 1318
1316 1319 def recover(self):
1317 1320 with self.lock():
1318 1321 if self.svfs.exists("journal"):
1319 1322 self.ui.status(_("rolling back interrupted transaction\n"))
1320 1323 vfsmap = {'': self.svfs,
1321 1324 'plain': self.vfs,}
1322 1325 transaction.rollback(self.svfs, vfsmap, "journal",
1323 1326 self.ui.warn,
1324 1327 checkambigfiles=_cachedfiles)
1325 1328 self.invalidate()
1326 1329 return True
1327 1330 else:
1328 1331 self.ui.warn(_("no interrupted transaction available\n"))
1329 1332 return False
1330 1333
1331 1334 def rollback(self, dryrun=False, force=False):
1332 1335 wlock = lock = dsguard = None
1333 1336 try:
1334 1337 wlock = self.wlock()
1335 1338 lock = self.lock()
1336 1339 if self.svfs.exists("undo"):
1337 1340 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1338 1341
1339 1342 return self._rollback(dryrun, force, dsguard)
1340 1343 else:
1341 1344 self.ui.warn(_("no rollback information available\n"))
1342 1345 return 1
1343 1346 finally:
1344 1347 release(dsguard, lock, wlock)
1345 1348
1346 1349 @unfilteredmethod # Until we get smarter cache management
1347 1350 def _rollback(self, dryrun, force, dsguard):
1348 1351 ui = self.ui
1349 1352 try:
1350 1353 args = self.vfs.read('undo.desc').splitlines()
1351 1354 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1352 1355 if len(args) >= 3:
1353 1356 detail = args[2]
1354 1357 oldtip = oldlen - 1
1355 1358
1356 1359 if detail and ui.verbose:
1357 1360 msg = (_('repository tip rolled back to revision %d'
1358 1361 ' (undo %s: %s)\n')
1359 1362 % (oldtip, desc, detail))
1360 1363 else:
1361 1364 msg = (_('repository tip rolled back to revision %d'
1362 1365 ' (undo %s)\n')
1363 1366 % (oldtip, desc))
1364 1367 except IOError:
1365 1368 msg = _('rolling back unknown transaction\n')
1366 1369 desc = None
1367 1370
1368 1371 if not force and self['.'] != self['tip'] and desc == 'commit':
1369 1372 raise error.Abort(
1370 1373 _('rollback of last commit while not checked out '
1371 1374 'may lose data'), hint=_('use -f to force'))
1372 1375
1373 1376 ui.status(msg)
1374 1377 if dryrun:
1375 1378 return 0
1376 1379
1377 1380 parents = self.dirstate.parents()
1378 1381 self.destroying()
1379 1382 vfsmap = {'plain': self.vfs, '': self.svfs}
1380 1383 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1381 1384 checkambigfiles=_cachedfiles)
1382 1385 if self.vfs.exists('undo.bookmarks'):
1383 1386 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1384 1387 if self.svfs.exists('undo.phaseroots'):
1385 1388 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1386 1389 self.invalidate()
1387 1390
1388 1391 parentgone = (parents[0] not in self.changelog.nodemap or
1389 1392 parents[1] not in self.changelog.nodemap)
1390 1393 if parentgone:
1391 1394 # prevent dirstateguard from overwriting already restored one
1392 1395 dsguard.close()
1393 1396
1394 1397 self.dirstate.restorebackup(None, 'undo.dirstate')
1395 1398 try:
1396 1399 branch = self.vfs.read('undo.branch')
1397 1400 self.dirstate.setbranch(encoding.tolocal(branch))
1398 1401 except IOError:
1399 1402 ui.warn(_('named branch could not be reset: '
1400 1403 'current branch is still \'%s\'\n')
1401 1404 % self.dirstate.branch())
1402 1405
1403 1406 parents = tuple([p.rev() for p in self[None].parents()])
1404 1407 if len(parents) > 1:
1405 1408 ui.status(_('working directory now based on '
1406 1409 'revisions %d and %d\n') % parents)
1407 1410 else:
1408 1411 ui.status(_('working directory now based on '
1409 1412 'revision %d\n') % parents)
1410 1413 mergemod.mergestate.clean(self, self['.'].node())
1411 1414
1412 1415 # TODO: if we know which new heads may result from this rollback, pass
1413 1416 # them to destroy(), which will prevent the branchhead cache from being
1414 1417 # invalidated.
1415 1418 self.destroyed()
1416 1419 return 0
1417 1420
1418 1421 def _buildcacheupdater(self, newtransaction):
1419 1422 """called during transaction to build the callback updating cache
1420 1423
1421 1424 Lives on the repository to help extensions that might want to augment
1422 1425 this logic. For this purpose, the created transaction is passed to the
1423 1426 method.
1424 1427 """
1425 1428 # we must avoid cyclic reference between repo and transaction.
1426 1429 reporef = weakref.ref(self)
1427 1430 def updater(tr):
1428 1431 repo = reporef()
1429 1432 repo.updatecaches(tr)
1430 1433 return updater
1431 1434
1432 1435 @unfilteredmethod
1433 1436 def updatecaches(self, tr=None):
1434 1437 """warm appropriate caches
1435 1438
1436 1439 If this function is called after a transaction closed, the transaction
1437 1440 will be available in the 'tr' argument. This can be used to selectively
1438 1441 update caches relevant to the changes in that transaction.
1439 1442 """
1440 1443 if tr is not None and tr.hookargs.get('source') == 'strip':
1441 1444 # During strip, many caches are invalid but
1442 1445 # later call to `destroyed` will refresh them.
1443 1446 return
1444 1447
1445 1448 if tr is None or tr.changes['revs']:
1446 1449 # updating the unfiltered branchmap should refresh all the others,
1447 1450 self.ui.debug('updating the branch cache\n')
1448 1451 branchmap.updatecache(self.filtered('served'))
1449 1452
1450 1453 def invalidatecaches(self):
1451 1454
1452 1455 if '_tagscache' in vars(self):
1453 1456 # can't use delattr on proxy
1454 1457 del self.__dict__['_tagscache']
1455 1458
1456 1459 self.unfiltered()._branchcaches.clear()
1457 1460 self.invalidatevolatilesets()
1458 1461 self._sparsesignaturecache.clear()
1459 1462
1460 1463 def invalidatevolatilesets(self):
1461 1464 self.filteredrevcache.clear()
1462 1465 obsolete.clearobscaches(self)
1463 1466
1464 1467 def invalidatedirstate(self):
1465 1468 '''Invalidates the dirstate, causing the next call to dirstate
1466 1469 to check if it was modified since the last time it was read,
1467 1470 rereading it if it has.
1468 1471
1469 1472 This is different from dirstate.invalidate() in that it doesn't always
1470 1473 reread the dirstate. Use dirstate.invalidate() if you want to
1471 1474 explicitly read the dirstate again (i.e. restoring it to a previous
1472 1475 known good state).'''
1473 1476 if hasunfilteredcache(self, 'dirstate'):
1474 1477 for k in self.dirstate._filecache:
1475 1478 try:
1476 1479 delattr(self.dirstate, k)
1477 1480 except AttributeError:
1478 1481 pass
1479 1482 delattr(self.unfiltered(), 'dirstate')
1480 1483
1481 1484 def invalidate(self, clearfilecache=False):
1482 1485 '''Invalidates both store and non-store parts other than dirstate
1483 1486
1484 1487 If a transaction is running, invalidation of store is omitted,
1485 1488 because discarding in-memory changes might cause inconsistency
1486 1489 (e.g. incomplete fncache causes unintentional failure, but
1487 1490 redundant one doesn't).
1488 1491 '''
1489 1492 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1490 1493 for k in list(self._filecache.keys()):
1491 1494 # dirstate is invalidated separately in invalidatedirstate()
1492 1495 if k == 'dirstate':
1493 1496 continue
1494 1497 if (k == 'changelog' and
1495 1498 self.currenttransaction() and
1496 1499 self.changelog._delayed):
1497 1500 # The changelog object may store unwritten revisions. We don't
1498 1501 # want to lose them.
1499 1502 # TODO: Solve the problem instead of working around it.
1500 1503 continue
1501 1504
1502 1505 if clearfilecache:
1503 1506 del self._filecache[k]
1504 1507 try:
1505 1508 delattr(unfiltered, k)
1506 1509 except AttributeError:
1507 1510 pass
1508 1511 self.invalidatecaches()
1509 1512 if not self.currenttransaction():
1510 1513 # TODO: Changing contents of store outside transaction
1511 1514 # causes inconsistency. We should make in-memory store
1512 1515 # changes detectable, and abort if changed.
1513 1516 self.store.invalidatecaches()
1514 1517
1515 1518 def invalidateall(self):
1516 1519 '''Fully invalidates both store and non-store parts, causing the
1517 1520 subsequent operation to reread any outside changes.'''
1518 1521 # extension should hook this to invalidate its caches
1519 1522 self.invalidate()
1520 1523 self.invalidatedirstate()
1521 1524
1522 1525 @unfilteredmethod
1523 1526 def _refreshfilecachestats(self, tr):
1524 1527 """Reload stats of cached files so that they are flagged as valid"""
1525 1528 for k, ce in self._filecache.items():
1526 1529 if k == 'dirstate' or k not in self.__dict__:
1527 1530 continue
1528 1531 ce.refresh()
1529 1532
1530 1533 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1531 1534 inheritchecker=None, parentenvvar=None):
1532 1535 parentlock = None
1533 1536 # the contents of parentenvvar are used by the underlying lock to
1534 1537 # determine whether it can be inherited
1535 1538 if parentenvvar is not None:
1536 1539 parentlock = encoding.environ.get(parentenvvar)
1537 1540 try:
1538 1541 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1539 1542 acquirefn=acquirefn, desc=desc,
1540 1543 inheritchecker=inheritchecker,
1541 1544 parentlock=parentlock)
1542 1545 except error.LockHeld as inst:
1543 1546 if not wait:
1544 1547 raise
1545 1548 # show more details for new-style locks
1546 1549 if ':' in inst.locker:
1547 1550 host, pid = inst.locker.split(":", 1)
1548 1551 self.ui.warn(
1549 1552 _("waiting for lock on %s held by process %r "
1550 1553 "on host %r\n") % (desc, pid, host))
1551 1554 else:
1552 1555 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1553 1556 (desc, inst.locker))
1554 1557 # default to 600 seconds timeout
1555 1558 l = lockmod.lock(vfs, lockname,
1556 1559 int(self.ui.config("ui", "timeout")),
1557 1560 releasefn=releasefn, acquirefn=acquirefn,
1558 1561 desc=desc)
1559 1562 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1560 1563 return l
1561 1564
1562 1565 def _afterlock(self, callback):
1563 1566 """add a callback to be run when the repository is fully unlocked
1564 1567
1565 1568 The callback will be executed when the outermost lock is released
1566 1569 (with wlock being higher level than 'lock')."""
1567 1570 for ref in (self._wlockref, self._lockref):
1568 1571 l = ref and ref()
1569 1572 if l and l.held:
1570 1573 l.postrelease.append(callback)
1571 1574 break
1572 1575 else: # no lock has been found.
1573 1576 callback()
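# Editor's sketch: this is the mechanism behind the deferred 'txnclose'
# and 'commit' hooks elsewhere in this file, e.g.
#
# repo._afterlock(lambda: repo.hook('myhook'))  # 'myhook' hypothetical
#
# runs the callback immediately when no lock is held, otherwise once
# the outermost lock is released.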
1574 1577
1575 1578 def lock(self, wait=True):
1576 1579 '''Lock the repository store (.hg/store) and return a weak reference
1577 1580 to the lock. Use this before modifying the store (e.g. committing or
1578 1581 stripping). If you are opening a transaction, get a lock as well.
1579 1582
1580 1583 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1581 1584 'wlock' first to avoid a dead-lock hazard.'''
1582 1585 l = self._currentlock(self._lockref)
1583 1586 if l is not None:
1584 1587 l.lock()
1585 1588 return l
1586 1589
1587 1590 l = self._lock(self.svfs, "lock", wait, None,
1588 1591 self.invalidate, _('repository %s') % self.origroot)
1589 1592 self._lockref = weakref.ref(l)
1590 1593 return l
1591 1594
1592 1595 def _wlockchecktransaction(self):
1593 1596 if self.currenttransaction() is not None:
1594 1597 raise error.LockInheritanceContractViolation(
1595 1598 'wlock cannot be inherited in the middle of a transaction')
1596 1599
1597 1600 def wlock(self, wait=True):
1598 1601 '''Lock the non-store parts of the repository (everything under
1599 1602 .hg except .hg/store) and return a weak reference to the lock.
1600 1603
1601 1604 Use this before modifying files in .hg.
1602 1605
1603 1606 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1604 1607 'wlock' first to avoid a dead-lock hazard.'''
1605 1608 l = self._wlockref and self._wlockref()
1606 1609 if l is not None and l.held:
1607 1610 l.lock()
1608 1611 return l
1609 1612
1610 1613 # We do not need to check for non-waiting lock acquisition. Such
1611 1614 # acquisition would not cause dead-lock as it would just fail.
1612 1615 if wait and (self.ui.configbool('devel', 'all-warnings')
1613 1616 or self.ui.configbool('devel', 'check-locks')):
1614 1617 if self._currentlock(self._lockref) is not None:
1615 1618 self.ui.develwarn('"wlock" acquired after "lock"')
1616 1619
1617 1620 def unlock():
1618 1621 if self.dirstate.pendingparentchange():
1619 1622 self.dirstate.invalidate()
1620 1623 else:
1621 1624 self.dirstate.write(None)
1622 1625
1623 1626 self._filecache['dirstate'].refresh()
1624 1627
1625 1628 l = self._lock(self.vfs, "wlock", wait, unlock,
1626 1629 self.invalidatedirstate, _('working directory of %s') %
1627 1630 self.origroot,
1628 1631 inheritchecker=self._wlockchecktransaction,
1629 1632 parentenvvar='HG_WLOCK_LOCKER')
1630 1633 self._wlockref = weakref.ref(l)
1631 1634 return l
1632 1635
1633 1636 def _currentlock(self, lockref):
1634 1637 """Returns the lock if it's held, or None if it's not."""
1635 1638 if lockref is None:
1636 1639 return None
1637 1640 l = lockref()
1638 1641 if l is None or not l.held:
1639 1642 return None
1640 1643 return l
1641 1644
1642 1645 def currentwlock(self):
1643 1646 """Returns the wlock if it's held, or None if it's not."""
1644 1647 return self._currentlock(self._wlockref)
1645 1648
1646 1649 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1647 1650 """
1648 1651 commit an individual file as part of a larger transaction
1649 1652 """
1650 1653
1651 1654 fname = fctx.path()
1652 1655 fparent1 = manifest1.get(fname, nullid)
1653 1656 fparent2 = manifest2.get(fname, nullid)
1654 1657 if isinstance(fctx, context.filectx):
1655 1658 node = fctx.filenode()
1656 1659 if node in [fparent1, fparent2]:
1657 1660 self.ui.debug('reusing %s filelog entry\n' % fname)
1658 1661 if manifest1.flags(fname) != fctx.flags():
1659 1662 changelist.append(fname)
1660 1663 return node
1661 1664
1662 1665 flog = self.file(fname)
1663 1666 meta = {}
1664 1667 copy = fctx.renamed()
1665 1668 if copy and copy[0] != fname:
1666 1669 # Mark the new revision of this file as a copy of another
1667 1670 # file. This copy data will effectively act as a parent
1668 1671 # of this new revision. If this is a merge, the first
1669 1672 # parent will be the nullid (meaning "look up the copy data")
1670 1673 # and the second one will be the other parent. For example:
1671 1674 #
1672 1675 # 0 --- 1 --- 3 rev1 changes file foo
1673 1676 # \ / rev2 renames foo to bar and changes it
1674 1677 # \- 2 -/ rev3 should have bar with all changes and
1675 1678 # should record that bar descends from
1676 1679 # bar in rev2 and foo in rev1
1677 1680 #
1678 1681 # this allows this merge to succeed:
1679 1682 #
1680 1683 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1681 1684 # \ / merging rev3 and rev4 should use bar@rev2
1682 1685 # \- 2 --- 4 as the merge base
1683 1686 #
1684 1687
1685 1688 cfname = copy[0]
1686 1689 crev = manifest1.get(cfname)
1687 1690 newfparent = fparent2
1688 1691
1689 1692 if manifest2: # branch merge
1690 1693 if fparent2 == nullid or crev is None: # copied on remote side
1691 1694 if cfname in manifest2:
1692 1695 crev = manifest2[cfname]
1693 1696 newfparent = fparent1
1694 1697
1695 1698 # Here, we used to search backwards through history to try to find
1696 1699 # where the file copy came from if the source of a copy was not in
1697 1700 # the parent directory. However, this doesn't actually make sense to
1698 1701 # do (what does a copy from something not in your working copy even
1699 1702 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1700 1703 # the user that copy information was dropped, so if they didn't
1701 1704 # expect this outcome it can be fixed, but this is the correct
1702 1705 # behavior in this circumstance.
1703 1706
1704 1707 if crev:
1705 1708 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1706 1709 meta["copy"] = cfname
1707 1710 meta["copyrev"] = hex(crev)
1708 1711 fparent1, fparent2 = nullid, newfparent
1709 1712 else:
1710 1713 self.ui.warn(_("warning: can't find ancestor for '%s' "
1711 1714 "copied from '%s'!\n") % (fname, cfname))
1712 1715
1713 1716 elif fparent1 == nullid:
1714 1717 fparent1, fparent2 = fparent2, nullid
1715 1718 elif fparent2 != nullid:
1716 1719 # is one parent an ancestor of the other?
1717 1720 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1718 1721 if fparent1 in fparentancestors:
1719 1722 fparent1, fparent2 = fparent2, nullid
1720 1723 elif fparent2 in fparentancestors:
1721 1724 fparent2 = nullid
1722 1725
1723 1726 # is the file changed?
1724 1727 text = fctx.data()
1725 1728 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1726 1729 changelist.append(fname)
1727 1730 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1728 1731 # are just the flags changed during merge?
1729 1732 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1730 1733 changelist.append(fname)
1731 1734
1732 1735 return fparent1
1733 1736
1734 1737 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1735 1738 """check for commit arguments that aren't committable"""
1736 1739 if match.isexact() or match.prefix():
1737 1740 matched = set(status.modified + status.added + status.removed)
1738 1741
1739 1742 for f in match.files():
1740 1743 f = self.dirstate.normalize(f)
1741 1744 if f == '.' or f in matched or f in wctx.substate:
1742 1745 continue
1743 1746 if f in status.deleted:
1744 1747 fail(f, _('file not found!'))
1745 1748 if f in vdirs: # visited directory
1746 1749 d = f + '/'
1747 1750 for mf in matched:
1748 1751 if mf.startswith(d):
1749 1752 break
1750 1753 else:
1751 1754 fail(f, _("no match under directory!"))
1752 1755 elif f not in self.dirstate:
1753 1756 fail(f, _("file not tracked!"))
1754 1757
1755 1758 @unfilteredmethod
1756 1759 def commit(self, text="", user=None, date=None, match=None, force=False,
1757 1760 editor=False, extra=None):
1758 1761 """Add a new revision to current repository.
1759 1762
1760 1763 Revision information is gathered from the working directory,
1761 1764 match can be used to filter the committed files. If editor is
1762 1765 supplied, it is called to get a commit message.
1763 1766 """
1764 1767 if extra is None:
1765 1768 extra = {}
1766 1769
1767 1770 def fail(f, msg):
1768 1771 raise error.Abort('%s: %s' % (f, msg))
1769 1772
1770 1773 if not match:
1771 1774 match = matchmod.always(self.root, '')
1772 1775
1773 1776 if not force:
1774 1777 vdirs = []
1775 1778 match.explicitdir = vdirs.append
1776 1779 match.bad = fail
1777 1780
1778 1781 wlock = lock = tr = None
1779 1782 try:
1780 1783 wlock = self.wlock()
1781 1784 lock = self.lock() # for recent changelog (see issue4368)
1782 1785
1783 1786 wctx = self[None]
1784 1787 merge = len(wctx.parents()) > 1
1785 1788
1786 1789 if not force and merge and not match.always():
1787 1790 raise error.Abort(_('cannot partially commit a merge '
1788 1791 '(do not specify files or patterns)'))
1789 1792
1790 1793 status = self.status(match=match, clean=force)
1791 1794 if force:
1792 1795 status.modified.extend(status.clean) # mq may commit clean files
1793 1796
1794 1797 # check subrepos
1795 1798 subs = []
1796 1799 commitsubs = set()
1797 1800 newstate = wctx.substate.copy()
1798 1801 # only manage subrepos and .hgsubstate if .hgsub is present
1799 1802 if '.hgsub' in wctx:
1800 1803 # we'll decide whether to track this ourselves, thanks
1801 1804 for c in status.modified, status.added, status.removed:
1802 1805 if '.hgsubstate' in c:
1803 1806 c.remove('.hgsubstate')
1804 1807
1805 1808 # compare current state to last committed state
1806 1809 # build new substate based on last committed state
1807 1810 oldstate = wctx.p1().substate
1808 1811 for s in sorted(newstate.keys()):
1809 1812 if not match(s):
1810 1813 # ignore working copy, use old state if present
1811 1814 if s in oldstate:
1812 1815 newstate[s] = oldstate[s]
1813 1816 continue
1814 1817 if not force:
1815 1818 raise error.Abort(
1816 1819 _("commit with new subrepo %s excluded") % s)
1817 1820 dirtyreason = wctx.sub(s).dirtyreason(True)
1818 1821 if dirtyreason:
1819 1822 if not self.ui.configbool('ui', 'commitsubrepos'):
1820 1823 raise error.Abort(dirtyreason,
1821 1824 hint=_("use --subrepos for recursive commit"))
1822 1825 subs.append(s)
1823 1826 commitsubs.add(s)
1824 1827 else:
1825 1828 bs = wctx.sub(s).basestate()
1826 1829 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1827 1830 if oldstate.get(s, (None, None, None))[1] != bs:
1828 1831 subs.append(s)
1829 1832
1830 1833 # check for removed subrepos
1831 1834 for p in wctx.parents():
1832 1835 r = [s for s in p.substate if s not in newstate]
1833 1836 subs += [s for s in r if match(s)]
1834 1837 if subs:
1835 1838 if (not match('.hgsub') and
1836 1839 '.hgsub' in (wctx.modified() + wctx.added())):
1837 1840 raise error.Abort(
1838 1841 _("can't commit subrepos without .hgsub"))
1839 1842 status.modified.insert(0, '.hgsubstate')
1840 1843
1841 1844 elif '.hgsub' in status.removed:
1842 1845 # clean up .hgsubstate when .hgsub is removed
1843 1846 if ('.hgsubstate' in wctx and
1844 1847 '.hgsubstate' not in (status.modified + status.added +
1845 1848 status.removed)):
1846 1849 status.removed.insert(0, '.hgsubstate')
1847 1850
1848 1851 # make sure all explicit patterns are matched
1849 1852 if not force:
1850 1853 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1851 1854
1852 1855 cctx = context.workingcommitctx(self, status,
1853 1856 text, user, date, extra)
1854 1857
1855 1858 # internal config: ui.allowemptycommit
1856 1859 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1857 1860 or extra.get('close') or merge or cctx.files()
1858 1861 or self.ui.configbool('ui', 'allowemptycommit'))
1859 1862 if not allowemptycommit:
1860 1863 return None
1861 1864
1862 1865 if merge and cctx.deleted():
1863 1866 raise error.Abort(_("cannot commit merge with missing files"))
1864 1867
1865 1868 ms = mergemod.mergestate.read(self)
1866 1869 mergeutil.checkunresolved(ms)
1867 1870
1868 1871 if editor:
1869 1872 cctx._text = editor(self, cctx, subs)
1870 1873 edited = (text != cctx._text)
1871 1874
1872 1875 # Save commit message in case this transaction gets rolled back
1873 1876 # (e.g. by a pretxncommit hook). Leave the content alone on
1874 1877 # the assumption that the user will use the same editor again.
1875 1878 msgfn = self.savecommitmessage(cctx._text)
1876 1879
1877 1880 # commit subs and write new state
1878 1881 if subs:
1879 1882 for s in sorted(commitsubs):
1880 1883 sub = wctx.sub(s)
1881 1884 self.ui.status(_('committing subrepository %s\n') %
1882 1885 subrepo.subrelpath(sub))
1883 1886 sr = sub.commit(cctx._text, user, date)
1884 1887 newstate[s] = (newstate[s][0], sr)
1885 1888 subrepo.writestate(self, newstate)
1886 1889
1887 1890 p1, p2 = self.dirstate.parents()
1888 1891 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1889 1892 try:
1890 1893 self.hook("precommit", throw=True, parent1=hookp1,
1891 1894 parent2=hookp2)
1892 1895 tr = self.transaction('commit')
1893 1896 ret = self.commitctx(cctx, True)
1894 1897 except: # re-raises
1895 1898 if edited:
1896 1899 self.ui.write(
1897 1900 _('note: commit message saved in %s\n') % msgfn)
1898 1901 raise
1899 1902 # update bookmarks, dirstate and mergestate
1900 1903 bookmarks.update(self, [p1, p2], ret)
1901 1904 cctx.markcommitted(ret)
1902 1905 ms.reset()
1903 1906 tr.close()
1904 1907
1905 1908 finally:
1906 1909 lockmod.release(tr, lock, wlock)
1907 1910
1908 1911 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1909 1912 # hack for commands that use a temporary commit (eg: histedit):
1910 1913 # the temporary commit may have been stripped before the hook runs
1911 1914 if self.changelog.hasnode(ret):
1912 1915 self.hook("commit", node=node, parent1=parent1,
1913 1916 parent2=parent2)
1914 1917 self._afterlock(commithook)
1915 1918 return ret
1916 1919
1917 1920 @unfilteredmethod
1918 1921 def commitctx(self, ctx, error=False):
1919 1922 """Add a new revision to current repository.
1920 1923 Revision information is passed via the context argument.
1921 1924 """
1922 1925
1923 1926 tr = None
1924 1927 p1, p2 = ctx.p1(), ctx.p2()
1925 1928 user = ctx.user()
1926 1929
1927 1930 lock = self.lock()
1928 1931 try:
1929 1932 tr = self.transaction("commit")
1930 1933 trp = weakref.proxy(tr)
1931 1934
1932 1935 if ctx.manifestnode():
1933 1936 # reuse an existing manifest revision
1934 1937 mn = ctx.manifestnode()
1935 1938 files = ctx.files()
1936 1939 elif ctx.files():
1937 1940 m1ctx = p1.manifestctx()
1938 1941 m2ctx = p2.manifestctx()
1939 1942 mctx = m1ctx.copy()
1940 1943
1941 1944 m = mctx.read()
1942 1945 m1 = m1ctx.read()
1943 1946 m2 = m2ctx.read()
1944 1947
1945 1948 # check in files
1946 1949 added = []
1947 1950 changed = []
1948 1951 removed = list(ctx.removed())
1949 1952 linkrev = len(self)
1950 1953 self.ui.note(_("committing files:\n"))
1951 1954 for f in sorted(ctx.modified() + ctx.added()):
1952 1955 self.ui.note(f + "\n")
1953 1956 try:
1954 1957 fctx = ctx[f]
1955 1958 if fctx is None:
1956 1959 removed.append(f)
1957 1960 else:
1958 1961 added.append(f)
1959 1962 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1960 1963 trp, changed)
1961 1964 m.setflag(f, fctx.flags())
1962 1965 except OSError as inst:
1963 1966 self.ui.warn(_("trouble committing %s!\n") % f)
1964 1967 raise
1965 1968 except IOError as inst:
1966 1969 errcode = getattr(inst, 'errno', errno.ENOENT)
1967 1970 if error or errcode and errcode != errno.ENOENT:
1968 1971 self.ui.warn(_("trouble committing %s!\n") % f)
1969 1972 raise
1970 1973
1971 1974 # update manifest
1972 1975 self.ui.note(_("committing manifest\n"))
1973 1976 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1974 1977 drop = [f for f in removed if f in m]
1975 1978 for f in drop:
1976 1979 del m[f]
1977 1980 mn = mctx.write(trp, linkrev,
1978 1981 p1.manifestnode(), p2.manifestnode(),
1979 1982 added, drop)
1980 1983 files = changed + removed
1981 1984 else:
1982 1985 mn = p1.manifestnode()
1983 1986 files = []
1984 1987
1985 1988 # update changelog
1986 1989 self.ui.note(_("committing changelog\n"))
1987 1990 self.changelog.delayupdate(tr)
1988 1991 n = self.changelog.add(mn, files, ctx.description(),
1989 1992 trp, p1.node(), p2.node(),
1990 1993 user, ctx.date(), ctx.extra().copy())
1991 1994 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1992 1995 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1993 1996 parent2=xp2)
1994 1997 # set the new commit in its proper phase
1995 1998 targetphase = subrepo.newcommitphase(self.ui, ctx)
1996 1999 if targetphase:
1997 2000 # retract boundary does not alter parent changesets.
1998 2001 # if a parent has a higher phase, the resulting phase will
1999 2002 # be compliant anyway
2000 2003 #
2001 2004 # if minimal phase was 0 we don't need to retract anything
2002 2005 phases.registernew(self, tr, targetphase, [n])
2003 2006 tr.close()
2004 2007 return n
2005 2008 finally:
2006 2009 if tr:
2007 2010 tr.release()
2008 2011 lock.release()
2009 2012
2010 2013 @unfilteredmethod
2011 2014 def destroying(self):
2012 2015 '''Inform the repository that nodes are about to be destroyed.
2013 2016 Intended for use by strip and rollback, so there's a common
2014 2017 place for anything that has to be done before destroying history.
2015 2018
2016 2019 This is mostly useful for saving state that is in memory and waiting
2017 2020 to be flushed when the current lock is released. Because a call to
2018 2021 destroyed is imminent, the repo will be invalidated causing those
2019 2022 changes to stay in memory (waiting for the next unlock), or vanish
2020 2023 completely.
2021 2024 '''
2022 2025 # When using the same lock to commit and strip, the phasecache is left
2023 2026 # dirty after committing. Then when we strip, the repo is invalidated,
2024 2027 # causing those changes to disappear.
2025 2028 if '_phasecache' in vars(self):
2026 2029 self._phasecache.write()
2027 2030
2028 2031 @unfilteredmethod
2029 2032 def destroyed(self):
2030 2033 '''Inform the repository that nodes have been destroyed.
2031 2034 Intended for use by strip and rollback, so there's a common
2032 2035 place for anything that has to be done after destroying history.
2033 2036 '''
2034 2037 # When one tries to:
2035 2038 # 1) destroy nodes thus calling this method (e.g. strip)
2036 2039 # 2) use phasecache somewhere (e.g. commit)
2037 2040 #
2038 2041 # then 2) will fail because the phasecache contains nodes that were
2039 2042 # removed. We can either remove phasecache from the filecache,
2040 2043 # causing it to reload next time it is accessed, or simply filter
2041 2044 # the removed nodes now and write the updated cache.
2042 2045 self._phasecache.filterunknown(self)
2043 2046 self._phasecache.write()
2044 2047
2045 2048 # refresh all repository caches
2046 2049 self.updatecaches()
2047 2050
2048 2051 # Ensure the persistent tag cache is updated. Doing it now
2049 2052 # means that the tag cache only has to worry about destroyed
2050 2053 # heads immediately after a strip/rollback. That in turn
2051 2054 # guarantees that "cachetip == currenttip" (comparing both rev
2052 2055 # and node) always means no nodes have been added or destroyed.
2053 2056
2054 2057 # XXX this is suboptimal when qrefresh'ing: we strip the current
2055 2058 # head, refresh the tag cache, then immediately add a new head.
2056 2059 # But I think doing it this way is necessary for the "instant
2057 2060 # tag cache retrieval" case to work.
2058 2061 self.invalidate()
2059 2062
2060 2063 def walk(self, match, node=None):
2061 2064 '''
2062 2065 walk recursively through the directory tree or a given
2063 2066 changeset, finding all files matched by the match
2064 2067 function
2065 2068 '''
2066 2069 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
2067 2070 return self[node].walk(match)
2068 2071
2069 2072 def status(self, node1='.', node2=None, match=None,
2070 2073 ignored=False, clean=False, unknown=False,
2071 2074 listsubrepos=False):
2072 2075 '''a convenience method that calls node1.status(node2)'''
2073 2076 return self[node1].status(node2, match, ignored, clean, unknown,
2074 2077 listsubrepos)
2075 2078
2076 2079 def addpostdsstatus(self, ps):
2077 2080 """Add a callback to run within the wlock, at the point at which status
2078 2081 fixups happen.
2079 2082
2080 2083 On status completion, callback(wctx, status) will be called with the
2081 2084 wlock held, unless the dirstate has changed from underneath or the wlock
2082 2085 couldn't be grabbed.
2083 2086
2084 2087 Callbacks should not capture and use a cached copy of the dirstate --
2085 2088 it might change in the meanwhile. Instead, they should access the
2086 2089 dirstate via wctx.repo().dirstate.
2087 2090
2088 2091 This list is emptied out after each status run -- extensions should
2089 2092 make sure they add to this list each time dirstate.status is called.
2090 2093 Extensions should also make sure they don't call this for statuses
2091 2094 that don't involve the dirstate.
2092 2095 """
2093 2096
2094 2097 # The list is located here for uniqueness reasons -- it is actually
2095 2098 # managed by the workingctx, but that isn't unique per-repo.
2096 2099 self._postdsstatus.append(ps)
2097 2100
2098 2101 def postdsstatus(self):
2099 2102 """Used by workingctx to get the list of post-dirstate-status hooks."""
2100 2103 return self._postdsstatus
2101 2104
2102 2105 def clearpostdsstatus(self):
2103 2106 """Used by workingctx to clear post-dirstate-status hooks."""
2104 2107 del self._postdsstatus[:]
2105 2108
2106 2109 def heads(self, start=None):
2107 2110 if start is None:
2108 2111 cl = self.changelog
2109 2112 headrevs = reversed(cl.headrevs())
2110 2113 return [cl.node(rev) for rev in headrevs]
2111 2114
2112 2115 heads = self.changelog.heads(start)
2113 2116 # sort the output in rev descending order
2114 2117 return sorted(heads, key=self.changelog.rev, reverse=True)
2115 2118
2116 2119 def branchheads(self, branch=None, start=None, closed=False):
2117 2120 '''return a (possibly filtered) list of heads for the given branch
2118 2121
2119 2122 Heads are returned in topological order, from newest to oldest.
2120 2123 If branch is None, use the dirstate branch.
2121 2124 If start is not None, return only heads reachable from start.
2122 2125 If closed is True, return heads that are marked as closed as well.
2123 2126 '''
2124 2127 if branch is None:
2125 2128 branch = self[None].branch()
2126 2129 branches = self.branchmap()
2127 2130 if branch not in branches:
2128 2131 return []
2129 2132 # the cache returns heads ordered lowest to highest
2130 2133 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2131 2134 if start is not None:
2132 2135 # filter out the heads that cannot be reached from startrev
2133 2136 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2134 2137 bheads = [h for h in bheads if h in fbheads]
2135 2138 return bheads
2136 2139
2137 2140 def branches(self, nodes):
2138 2141 if not nodes:
2139 2142 nodes = [self.changelog.tip()]
2140 2143 b = []
2141 2144 for n in nodes:
2142 2145 t = n
2143 2146 while True:
2144 2147 p = self.changelog.parents(n)
2145 2148 if p[1] != nullid or p[0] == nullid:
2146 2149 b.append((t, n, p[0], p[1]))
2147 2150 break
2148 2151 n = p[0]
2149 2152 return b
2150 2153
2151 2154 def between(self, pairs):
2152 2155 r = []
2153 2156
2154 2157 for top, bottom in pairs:
2155 2158 n, l, i = top, [], 0
2156 2159 f = 1
2157 2160
2158 2161 while n != bottom and n != nullid:
2159 2162 p = self.changelog.parents(n)[0]
2160 2163 if i == f:
2161 2164 l.append(n)
2162 2165 f = f * 2
2163 2166 n = p
2164 2167 i += 1
2165 2168
2166 2169 r.append(l)
2167 2170
2168 2171 return r
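# Editor's note on the loop above: for each (top, bottom) pair it walks
# first parents from top towards bottom and samples nodes at
# exponentially growing distances 1, 2, 4, 8, ... from top, so a chain
# of n changesets yields about log2(n) sample points for the old
# discovery protocol.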
2169 2172
2170 2173 def checkpush(self, pushop):
2171 2174 """Extensions can override this function if additional checks have
2172 2175 to be performed before pushing, or call it if they override push
2173 2176 command.
2174 2177 """
2175 2178 pass
2176 2179
2177 2180 @unfilteredpropertycache
2178 2181 def prepushoutgoinghooks(self):
2179 2182 """Return util.hooks consists of a pushop with repo, remote, outgoing
2180 2183 methods, which are called before pushing changesets.
2181 2184 """
2182 2185 return util.hooks()
2183 2186
2184 2187 def pushkey(self, namespace, key, old, new):
2185 2188 try:
2186 2189 tr = self.currenttransaction()
2187 2190 hookargs = {}
2188 2191 if tr is not None:
2189 2192 hookargs.update(tr.hookargs)
2190 2193 hookargs['namespace'] = namespace
2191 2194 hookargs['key'] = key
2192 2195 hookargs['old'] = old
2193 2196 hookargs['new'] = new
2194 2197 self.hook('prepushkey', throw=True, **hookargs)
2195 2198 except error.HookAbort as exc:
2196 2199 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2197 2200 if exc.hint:
2198 2201 self.ui.write_err(_("(%s)\n") % exc.hint)
2199 2202 return False
2200 2203 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2201 2204 ret = pushkey.push(self, namespace, key, old, new)
2202 2205 def runhook():
2203 2206 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2204 2207 ret=ret)
2205 2208 self._afterlock(runhook)
2206 2209 return ret
2207 2210
2208 2211 def listkeys(self, namespace):
2209 2212 self.hook('prelistkeys', throw=True, namespace=namespace)
2210 2213 self.ui.debug('listing keys for "%s"\n' % namespace)
2211 2214 values = pushkey.list(self, namespace)
2212 2215 self.hook('listkeys', namespace=namespace, values=values)
2213 2216 return values
2214 2217
2215 2218 def debugwireargs(self, one, two, three=None, four=None, five=None):
2216 2219 '''used to test argument passing over the wire'''
2217 2220 return "%s %s %s %s %s" % (one, two, three, four, five)
2218 2221
2219 2222 def savecommitmessage(self, text):
2220 2223 fp = self.vfs('last-message.txt', 'wb')
2221 2224 try:
2222 2225 fp.write(text)
2223 2226 finally:
2224 2227 fp.close()
2225 2228 return self.pathto(fp.name[len(self.root) + 1:])
2226 2229
2227 2230 # used to avoid circular references so destructors work
2228 2231 def aftertrans(files):
2229 2232 renamefiles = [tuple(t) for t in files]
2230 2233 def a():
2231 2234 for vfs, src, dest in renamefiles:
2232 2235 # if src and dest refer to the same file, vfs.rename is a no-op,
2233 2236 # leaving both src and dest on disk. delete dest to make sure
2234 2237 # the rename couldn't be such a no-op.
2235 2238 vfs.tryunlink(dest)
2236 2239 try:
2237 2240 vfs.rename(src, dest)
2238 2241 except OSError: # journal file does not yet exist
2239 2242 pass
2240 2243 return a
2241 2244
2242 2245 def undoname(fn):
2243 2246 base, name = os.path.split(fn)
2244 2247 assert name.startswith('journal')
2245 2248 return os.path.join(base, name.replace('journal', 'undo', 1))
2246 2249
2247 2250 def instance(ui, path, create):
2248 2251 return localrepository(ui, util.urllocalpath(path), create)
2249 2252
2250 2253 def islocal(path):
2251 2254 return True
2252 2255
2253 2256 def newreporequirements(repo):
2254 2257 """Determine the set of requirements for a new local repository.
2255 2258
2256 2259 Extensions can wrap this function to specify custom requirements for
2257 2260 new repositories.
2258 2261 """
2259 2262 ui = repo.ui
2260 2263 requirements = {'revlogv1'}
2261 2264 if ui.configbool('format', 'usestore'):
2262 2265 requirements.add('store')
2263 2266 if ui.configbool('format', 'usefncache'):
2264 2267 requirements.add('fncache')
2265 2268 if ui.configbool('format', 'dotencode'):
2266 2269 requirements.add('dotencode')
2267 2270
2268 2271 compengine = ui.config('experimental', 'format.compression')
2269 2272 if compengine not in util.compengines:
2270 2273 raise error.Abort(_('compression engine %s defined by '
2271 2274 'experimental.format.compression not available') %
2272 2275 compengine,
2273 2276 hint=_('run "hg debuginstall" to list available '
2274 2277 'compression engines'))
2275 2278
2276 2279 # zlib is the historical default and doesn't need an explicit requirement.
2277 2280 if compengine != 'zlib':
2278 2281 requirements.add('exp-compression-%s' % compengine)
2279 2282
2280 2283 if scmutil.gdinitconfig(ui):
2281 2284 requirements.add('generaldelta')
2282 2285 if ui.configbool('experimental', 'treemanifest'):
2283 2286 requirements.add('treemanifest')
2284 2287 if ui.configbool('experimental', 'manifestv2'):
2285 2288 requirements.add('manifestv2')
2286 2289
2287 2290 revlogv2 = ui.config('experimental', 'revlogv2')
2288 2291 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2289 2292 requirements.remove('revlogv1')
2290 2293 # generaldelta is implied by revlogv2.
2291 2294 requirements.discard('generaldelta')
2292 2295 requirements.add(REVLOGV2_REQUIREMENT)
2293 2296
2294 2297 return requirements
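# Editor's note: with stock defaults this typically yields
# {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}.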
@@ -1,1060 +1,1063
1 1 # wireproto.py - generic wire protocol support functions
2 2 #
3 3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import hashlib
11 11 import os
12 12 import tempfile
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 bin,
17 17 hex,
18 18 nullid,
19 19 )
20 20
21 21 from . import (
22 22 bundle2,
23 23 changegroup as changegroupmod,
24 discovery,
24 25 encoding,
25 26 error,
26 27 exchange,
27 28 peer,
28 29 pushkey as pushkeymod,
29 30 pycompat,
30 31 repository,
31 32 streamclone,
32 33 util,
33 34 )
34 35
35 36 urlerr = util.urlerr
36 37 urlreq = util.urlreq
37 38
38 39 bundle2requiredmain = _('incompatible Mercurial client; bundle2 required')
39 40 bundle2requiredhint = _('see https://www.mercurial-scm.org/wiki/'
40 41 'IncompatibleClient')
41 42 bundle2required = '%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)
42 43
43 44 class abstractserverproto(object):
44 45 """abstract class that summarizes the protocol API
45 46
46 47 Used as reference and documentation.
47 48 """
48 49
49 50 def getargs(self, args):
50 51 """return the value for arguments in <args>
51 52
52 53 returns a list of values (same order as <args>)"""
53 54 raise NotImplementedError()
54 55
55 56 def getfile(self, fp):
56 57 """write the whole content of a file into a file like object
57 58
58 59 The file is in the form::
59 60
60 61 (<chunk-size>\n<chunk>)+0\n
61 62
62 63 chunk size is the ascii representation of the int.
63 64 """
64 65 raise NotImplementedError()
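# A minimal reader for that framing (editor's sketch; the helper is
# hypothetical, not part of the protocol class):
#
# def readchunkedfile(fh, out):
#     while True:
#         size = int(fh.readline())  # ascii chunk size; 0 terminates
#         if not size:
#             break
#         out.write(fh.read(size))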
65 66
66 67 def redirect(self):
67 68 """may setup interception for stdout and stderr
68 69
69 70 See also the `restore` method."""
70 71 raise NotImplementedError()
71 72
72 73 # If the `redirect` function does install interception, the `restore`
73 74 # function MUST be defined. If interception is not used, this function
74 75 # MUST NOT be defined.
75 76 #
76 77 # left commented here on purpose
77 78 #
78 79 #def restore(self):
79 80 # """reinstall previous stdout and stderr and return intercepted stdout
80 81 # """
81 82 # raise NotImplementedError()
82 83
83 84 class remoteiterbatcher(peer.iterbatcher):
84 85 def __init__(self, remote):
85 86 super(remoteiterbatcher, self).__init__()
86 87 self._remote = remote
87 88
88 89 def __getattr__(self, name):
89 90 # Validate this method is batchable, since submit() only supports
90 91 # batchable methods.
91 92 fn = getattr(self._remote, name)
92 93 if not getattr(fn, 'batchable', None):
93 94 raise error.ProgrammingError('Attempted to batch a non-batchable '
94 95 'call to %r' % name)
95 96
96 97 return super(remoteiterbatcher, self).__getattr__(name)
97 98
98 99 def submit(self):
99 100 """Break the batch request into many patch calls and pipeline them.
100 101
101 102 This is mostly valuable over http where request sizes can be
102 103 limited, but can be used in other places as well.
103 104 """
104 105 # 2-tuple of (command, arguments) that represents what will be
105 106 # sent over the wire.
106 107 requests = []
107 108
108 109 # 4-tuple of (command, final future, @batchable generator, remote
109 110 # future).
110 111 results = []
111 112
112 113 for command, args, opts, finalfuture in self.calls:
113 114 mtd = getattr(self._remote, command)
114 115 batchable = mtd.batchable(mtd.im_self, *args, **opts)
115 116
116 117 commandargs, fremote = next(batchable)
117 118 assert fremote
118 119 requests.append((command, commandargs))
119 120 results.append((command, finalfuture, batchable, fremote))
120 121
121 122 if requests:
122 123 self._resultiter = self._remote._submitbatch(requests)
123 124
124 125 self._results = results
125 126
126 127 def results(self):
127 128 for command, finalfuture, batchable, remotefuture in self._results:
128 129 # Get the raw result, set it in the remote future, feed it
129 130 # back into the @batchable generator so it can be decoded, and
130 131 # set the result on the final future to this value.
131 132 remoteresult = next(self._resultiter)
132 133 remotefuture.set(remoteresult)
133 134 finalfuture.set(next(batchable))
134 135
135 136 # Verify our @batchable generators only emit 2 values.
136 137 try:
137 138 next(batchable)
138 139 except StopIteration:
139 140 pass
140 141 else:
141 142 raise error.ProgrammingError('%s @batchable generator emitted '
142 143 'unexpected value count' % command)
143 144
144 145 yield finalfuture.value
145 146
146 147 # Forward a couple of names from peer to make wireproto interactions
147 148 # slightly more sensible.
148 149 batchable = peer.batchable
149 150 future = peer.future
150 151
151 152 # list of nodes encoding / decoding
152 153
153 154 def decodelist(l, sep=' '):
154 155 if l:
155 156 return map(bin, l.split(sep))
156 157 return []
157 158
158 159 def encodelist(l, sep=' '):
159 160 try:
160 161 return sep.join(map(hex, l))
161 162 except TypeError:
162 163 raise
163 164
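A quick standalone illustration of this node-list encoding (using binascii in place of mercurial.node so the snippet runs on its own): nodes travel as space-separated 40-digit hex strings and are turned back into 20-byte binary nodes on receipt.

    from binascii import hexlify, unhexlify

    nodes = [b'\x00' * 20, b'\xff' * 20]
    wire = b' '.join(hexlify(n) for n in nodes)            # like encodelist()
    back = [unhexlify(h) for h in wire.split(b' ')]        # like decodelist()
    assert back == nodes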
164 165 # batched call argument encoding
165 166
166 167 def escapearg(plain):
167 168 return (plain
168 169 .replace(':', ':c')
169 170 .replace(',', ':o')
170 171 .replace(';', ':s')
171 172 .replace('=', ':e'))
172 173
173 174 def unescapearg(escaped):
174 175 return (escaped
175 176 .replace(':e', '=')
176 177 .replace(':s', ';')
177 178 .replace(':o', ',')
178 179 .replace(':c', ':'))
179 180
180 181 def encodebatchcmds(req):
181 182 """Return a ``cmds`` argument value for the ``batch`` command."""
182 183 cmds = []
183 184 for op, argsdict in req:
184 185 # Old servers didn't properly unescape argument names. So prevent
185 186 # the sending of argument names that may not be decoded properly by
186 187 # servers.
187 188 assert all(escapearg(k) == k for k in argsdict)
188 189
189 190 args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
190 191 for k, v in argsdict.iteritems())
191 192 cmds.append('%s %s' % (op, args))
192 193
193 194 return ';'.join(cmds)
194 195
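The escaping above exists so argument values cannot collide with the batch framing characters (';', ',', '=', ':'). A self-contained round-trip, duplicating the two helpers so the snippet runs on its own; note the ordering matters: ':' must be escaped first and unescaped last, or ':c'-style sequences would be corrupted.

    def escapearg(plain):
        return (plain.replace(':', ':c').replace(',', ':o')
                     .replace(';', ':s').replace('=', ':e'))

    def unescapearg(escaped):
        return (escaped.replace(':e', '=').replace(':s', ';')
                       .replace(':o', ',').replace(':c', ':'))

    value = 'key=a:b;c,d'
    assert escapearg(value) == 'key:ea:cb:sc:od'
    assert unescapearg(escapearg(value)) == value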
195 196 # mapping of options accepted by getbundle and their types
196 197 #
 197 198 # Meant to be extended by extensions. It is an extension's responsibility to
198 199 # such options are properly processed in exchange.getbundle.
199 200 #
200 201 # supported types are:
201 202 #
202 203 # :nodes: list of binary nodes
203 204 # :csv: list of comma-separated values
 204 205 # :scsv: list of comma-separated values returned as a set
205 206 # :plain: string with no transformation needed.
206 207 gboptsmap = {'heads': 'nodes',
207 208 'common': 'nodes',
208 209 'obsmarkers': 'boolean',
209 210 'bundlecaps': 'scsv',
210 211 'listkeys': 'csv',
211 212 'cg': 'boolean',
212 213 'cbattempted': 'boolean'}
213 214
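One thing the legend above omits: the map also uses a 'boolean' type (obsmarkers, cg, cbattempted), serialized as '0'/'1'. A hedged sketch of how the client-side getbundle code further down serializes each type; the function name is invented for illustration.

    from binascii import hexlify

    def encodegetbundleopt(keytype, value):
        # mirrors wirepeer.getbundle(): one wire form per option type
        if keytype == 'nodes':
            return b' '.join(hexlify(n) for n in value)
        if keytype in ('csv', 'scsv'):
            return ','.join(value)
        if keytype == 'boolean':
            return '%i' % bool(value)
        if keytype == 'plain':
            return value
        raise KeyError('unknown getbundle option type %s' % keytype)

    assert encodegetbundleopt('boolean', True) == '1'
    assert encodegetbundleopt('csv', ['bookmarks', 'phases']) == 'bookmarks,phases'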
214 215 # client side
215 216
216 217 class wirepeer(repository.legacypeer):
217 218 """Client-side interface for communicating with a peer repository.
218 219
219 220 Methods commonly call wire protocol commands of the same name.
220 221
221 222 See also httppeer.py and sshpeer.py for protocol-specific
222 223 implementations of this interface.
223 224 """
224 225 # Begin of basewirepeer interface.
225 226
226 227 def iterbatch(self):
227 228 return remoteiterbatcher(self)
228 229
229 230 @batchable
230 231 def lookup(self, key):
231 232 self.requirecap('lookup', _('look up remote revision'))
232 233 f = future()
233 234 yield {'key': encoding.fromlocal(key)}, f
234 235 d = f.value
235 236 success, data = d[:-1].split(" ", 1)
236 237 if int(success):
237 238 yield bin(data)
238 239 else:
239 240 self._abort(error.RepoError(data))
240 241
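The @batchable decorator relies on a two-step generator protocol, visible in lookup() above: first yield the wire arguments plus a future, get resumed once the raw response has been stored in the future, then yield the decoded result (the batcher verifies exactly two values are emitted). A standalone toy of that contract, with an invented slot class standing in for future:

    class slot(object):
        value = None
        def set(self, v):
            self.value = v

    def lookup_batchable(key):
        f = slot()
        yield {'key': key}, f        # step 1: what goes on the wire
        yield f.value.strip()        # step 2: decode the raw reply

    gen = lookup_batchable('tip')
    args, f = next(gen)              # batcher sends args over the wire...
    f.set('abc123\n')                # ...and feeds back the raw response
    assert next(gen) == 'abc123'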
241 242 @batchable
242 243 def heads(self):
243 244 f = future()
244 245 yield {}, f
245 246 d = f.value
246 247 try:
247 248 yield decodelist(d[:-1])
248 249 except ValueError:
249 250 self._abort(error.ResponseError(_("unexpected response:"), d))
250 251
251 252 @batchable
252 253 def known(self, nodes):
253 254 f = future()
254 255 yield {'nodes': encodelist(nodes)}, f
255 256 d = f.value
256 257 try:
257 258 yield [bool(int(b)) for b in d]
258 259 except ValueError:
259 260 self._abort(error.ResponseError(_("unexpected response:"), d))
260 261
261 262 @batchable
262 263 def branchmap(self):
263 264 f = future()
264 265 yield {}, f
265 266 d = f.value
266 267 try:
267 268 branchmap = {}
268 269 for branchpart in d.splitlines():
269 270 branchname, branchheads = branchpart.split(' ', 1)
270 271 branchname = encoding.tolocal(urlreq.unquote(branchname))
271 272 branchheads = decodelist(branchheads)
272 273 branchmap[branchname] = branchheads
273 274 yield branchmap
274 275 except TypeError:
275 276 self._abort(error.ResponseError(_("unexpected response:"), d))
276 277
277 278 @batchable
278 279 def listkeys(self, namespace):
279 280 if not self.capable('pushkey'):
280 281 yield {}, None
281 282 f = future()
282 283 self.ui.debug('preparing listkeys for "%s"\n' % namespace)
283 284 yield {'namespace': encoding.fromlocal(namespace)}, f
284 285 d = f.value
285 286 self.ui.debug('received listkey for "%s": %i bytes\n'
286 287 % (namespace, len(d)))
287 288 yield pushkeymod.decodekeys(d)
288 289
289 290 @batchable
290 291 def pushkey(self, namespace, key, old, new):
291 292 if not self.capable('pushkey'):
292 293 yield False, None
293 294 f = future()
294 295 self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
295 296 yield {'namespace': encoding.fromlocal(namespace),
296 297 'key': encoding.fromlocal(key),
297 298 'old': encoding.fromlocal(old),
298 299 'new': encoding.fromlocal(new)}, f
299 300 d = f.value
300 301 d, output = d.split('\n', 1)
301 302 try:
302 303 d = bool(int(d))
303 304 except ValueError:
304 305 raise error.ResponseError(
305 306 _('push failed (unexpected response):'), d)
306 307 for l in output.splitlines(True):
307 308 self.ui.status(_('remote: '), l)
308 309 yield d
309 310
310 311 def stream_out(self):
311 312 return self._callstream('stream_out')
312 313
313 314 def getbundle(self, source, **kwargs):
314 315 self.requirecap('getbundle', _('look up remote changes'))
315 316 opts = {}
316 317 bundlecaps = kwargs.get('bundlecaps')
317 318 if bundlecaps is not None:
318 319 kwargs['bundlecaps'] = sorted(bundlecaps)
319 320 else:
320 321 bundlecaps = () # kwargs could have it to None
321 322 for key, value in kwargs.iteritems():
322 323 if value is None:
323 324 continue
324 325 keytype = gboptsmap.get(key)
325 326 if keytype is None:
326 327 assert False, 'unexpected'
327 328 elif keytype == 'nodes':
328 329 value = encodelist(value)
329 330 elif keytype in ('csv', 'scsv'):
330 331 value = ','.join(value)
331 332 elif keytype == 'boolean':
332 333 value = '%i' % bool(value)
333 334 elif keytype != 'plain':
334 335 raise KeyError('unknown getbundle option type %s'
335 336 % keytype)
336 337 opts[key] = value
337 338 f = self._callcompressable("getbundle", **opts)
338 339 if any((cap.startswith('HG2') for cap in bundlecaps)):
339 340 return bundle2.getunbundler(self.ui, f)
340 341 else:
341 342 return changegroupmod.cg1unpacker(f, 'UN')
342 343
343 344 def unbundle(self, cg, heads, url):
344 345 '''Send cg (a readable file-like object representing the
345 346 changegroup to push, typically a chunkbuffer object) to the
346 347 remote server as a bundle.
347 348
348 349 When pushing a bundle10 stream, return an integer indicating the
349 350 result of the push (see changegroup.apply()).
350 351
351 352 When pushing a bundle20 stream, return a bundle20 stream.
352 353
353 354 `url` is the url the client thinks it's pushing to, which is
354 355 visible to hooks.
355 356 '''
356 357
357 358 if heads != ['force'] and self.capable('unbundlehash'):
358 359 heads = encodelist(['hashed',
359 360 hashlib.sha1(''.join(sorted(heads))).digest()])
360 361 else:
361 362 heads = encodelist(heads)
362 363
363 364 if util.safehasattr(cg, 'deltaheader'):
 363 364             # this is a bundle10; do the old-style call sequence
365 366 ret, output = self._callpush("unbundle", cg, heads=heads)
366 367 if ret == "":
367 368 raise error.ResponseError(
368 369 _('push failed:'), output)
369 370 try:
370 371 ret = int(ret)
371 372 except ValueError:
372 373 raise error.ResponseError(
373 374 _('push failed (unexpected response):'), ret)
374 375
375 376 for l in output.splitlines(True):
376 377 self.ui.status(_('remote: '), l)
377 378 else:
378 379 # bundle2 push. Send a stream, fetch a stream.
379 380 stream = self._calltwowaystream('unbundle', cg, heads=heads)
380 381 ret = bundle2.getunbundler(self.ui, stream)
381 382 return ret
382 383
383 384 # End of basewirepeer interface.
384 385
385 386 # Begin of baselegacywirepeer interface.
386 387
387 388 def branches(self, nodes):
388 389 n = encodelist(nodes)
389 390 d = self._call("branches", nodes=n)
390 391 try:
391 392 br = [tuple(decodelist(b)) for b in d.splitlines()]
392 393 return br
393 394 except ValueError:
394 395 self._abort(error.ResponseError(_("unexpected response:"), d))
395 396
396 397 def between(self, pairs):
397 398 batch = 8 # avoid giant requests
398 399 r = []
399 400 for i in xrange(0, len(pairs), batch):
400 401 n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
401 402 d = self._call("between", pairs=n)
402 403 try:
403 404 r.extend(l and decodelist(l) or [] for l in d.splitlines())
404 405 except ValueError:
405 406 self._abort(error.ResponseError(_("unexpected response:"), d))
406 407 return r
407 408
408 409 def changegroup(self, nodes, kind):
409 410 n = encodelist(nodes)
410 411 f = self._callcompressable("changegroup", roots=n)
411 412 return changegroupmod.cg1unpacker(f, 'UN')
412 413
413 414 def changegroupsubset(self, bases, heads, kind):
414 415 self.requirecap('changegroupsubset', _('look up remote changes'))
415 416 bases = encodelist(bases)
416 417 heads = encodelist(heads)
417 418 f = self._callcompressable("changegroupsubset",
418 419 bases=bases, heads=heads)
419 420 return changegroupmod.cg1unpacker(f, 'UN')
420 421
421 422 # End of baselegacywirepeer interface.
422 423
423 424 def _submitbatch(self, req):
424 425 """run batch request <req> on the server
425 426
426 427 Returns an iterator of the raw responses from the server.
427 428 """
428 429 rsp = self._callstream("batch", cmds=encodebatchcmds(req))
429 430 chunk = rsp.read(1024)
430 431 work = [chunk]
431 432 while chunk:
432 433 while ';' not in chunk and chunk:
433 434 chunk = rsp.read(1024)
434 435 work.append(chunk)
435 436 merged = ''.join(work)
436 437 while ';' in merged:
437 438 one, merged = merged.split(';', 1)
438 439 yield unescapearg(one)
439 440 chunk = rsp.read(1024)
440 441 work = [merged, chunk]
441 442 yield unescapearg(''.join(work))
442 443
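The loop above reassembles a ';'-separated stream of escaped per-command results, reading in 1024-byte chunks so a large response never has to be buffered whole before the first result is yielded. In terms of the module's unescapearg(), a complete response decodes like this (the values are made up):

    raw = 'cafebabe ;1;key:ca'       # three results, in request order
    assert [unescapearg(p) for p in raw.split(';')] == \
        ['cafebabe ', '1', 'key:a']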
443 444 def _submitone(self, op, args):
444 445 return self._call(op, **args)
445 446
446 447 def debugwireargs(self, one, two, three=None, four=None, five=None):
447 448 # don't pass optional arguments left at their default value
448 449 opts = {}
449 450 if three is not None:
450 451 opts['three'] = three
451 452 if four is not None:
452 453 opts['four'] = four
453 454 return self._call('debugwireargs', one=one, two=two, **opts)
454 455
455 456 def _call(self, cmd, **args):
456 457 """execute <cmd> on the server
457 458
458 459 The command is expected to return a simple string.
459 460
460 461 returns the server reply as a string."""
461 462 raise NotImplementedError()
462 463
463 464 def _callstream(self, cmd, **args):
464 465 """execute <cmd> on the server
465 466
466 467 The command is expected to return a stream. Note that if the
467 468 command doesn't return a stream, _callstream behaves
468 469 differently for ssh and http peers.
469 470
470 471 returns the server reply as a file like object.
471 472 """
472 473 raise NotImplementedError()
473 474
474 475 def _callcompressable(self, cmd, **args):
475 476 """execute <cmd> on the server
476 477
477 478 The command is expected to return a stream.
478 479
479 480 The stream may have been compressed in some implementations. This
480 481 function takes care of the decompression. This is the only difference
481 482 with _callstream.
482 483
483 484 returns the server reply as a file like object.
484 485 """
485 486 raise NotImplementedError()
486 487
487 488 def _callpush(self, cmd, fp, **args):
488 489 """execute a <cmd> on server
489 490
490 491 The command is expected to be related to a push. Push has a special
491 492 return method.
492 493
493 494 returns the server reply as a (ret, output) tuple. ret is either
494 495 empty (error) or a stringified int.
495 496 """
496 497 raise NotImplementedError()
497 498
498 499 def _calltwowaystream(self, cmd, fp, **args):
499 500 """execute <cmd> on server
500 501
501 502 The command will send a stream to the server and get a stream in reply.
502 503 """
503 504 raise NotImplementedError()
504 505
505 506 def _abort(self, exception):
506 507 """clearly abort the wire protocol connection and raise the exception
507 508 """
508 509 raise NotImplementedError()
509 510
510 511 # server side
511 512
512 513 # wire protocol command can either return a string or one of these classes.
513 514 class streamres(object):
514 515 """wireproto reply: binary stream
515 516
516 517 The call was successful and the result is a stream.
517 518
518 519 Accepts either a generator or an object with a ``read(size)`` method.
519 520
520 521 ``v1compressible`` indicates whether this data can be compressed to
521 522 "version 1" clients (technically: HTTP peers using
522 523 application/mercurial-0.1 media type). This flag should NOT be used on
523 524 new commands because new clients should support a more modern compression
524 525 mechanism.
525 526 """
526 527 def __init__(self, gen=None, reader=None, v1compressible=False):
527 528 self.gen = gen
528 529 self.reader = reader
529 530 self.v1compressible = v1compressible
530 531
531 532 class pushres(object):
532 533 """wireproto reply: success with simple integer return
533 534
534 535 The call was successful and returned an integer contained in `self.res`.
535 536 """
536 537 def __init__(self, res):
537 538 self.res = res
538 539
539 540 class pusherr(object):
540 541 """wireproto reply: failure
541 542
542 543 The call failed. The `self.res` attribute contains the error message.
543 544 """
544 545 def __init__(self, res):
545 546 self.res = res
546 547
547 548 class ooberror(object):
548 549 """wireproto reply: failure of a batch of operation
549 550
550 551 Something failed during a batch call. The error message is stored in
551 552 `self.message`.
552 553 """
553 554 def __init__(self, message):
554 555 self.message = message
555 556
556 557 def getdispatchrepo(repo, proto, command):
557 558 """Obtain the repo used for processing wire protocol commands.
558 559
559 560 The intent of this function is to serve as a monkeypatch point for
560 561 extensions that need commands to operate on different repo views under
561 562 specialized circumstances.
562 563 """
563 564 return repo.filtered('served')
564 565
565 566 def dispatch(repo, proto, command):
566 567 repo = getdispatchrepo(repo, proto, command)
567 568 func, spec = commands[command]
568 569 args = proto.getargs(spec)
569 570 return func(repo, proto, *args)
570 571
571 572 def options(cmd, keys, others):
572 573 opts = {}
573 574 for k in keys:
574 575 if k in others:
575 576 opts[k] = others[k]
576 577 del others[k]
577 578 if others:
578 579 util.stderr.write("warning: %s ignored unexpected arguments %s\n"
579 580 % (cmd, ",".join(others)))
580 581 return opts
581 582
582 583 def bundle1allowed(repo, action):
583 584 """Whether a bundle1 operation is allowed from the server.
584 585
585 586 Priority is:
586 587
587 588 1. server.bundle1gd.<action> (if generaldelta active)
588 589 2. server.bundle1.<action>
589 590 3. server.bundle1gd (if generaldelta active)
590 591 4. server.bundle1
591 592 """
592 593 ui = repo.ui
593 594 gd = 'generaldelta' in repo.requirements
594 595
595 596 if gd:
596 597 v = ui.configbool('server', 'bundle1gd.%s' % action, None)
597 598 if v is not None:
598 599 return v
599 600
600 601 v = ui.configbool('server', 'bundle1.%s' % action, None)
601 602 if v is not None:
602 603 return v
603 604
604 605 if gd:
605 606 v = ui.configbool('server', 'bundle1gd')
606 607 if v is not None:
607 608 return v
608 609
609 610 return ui.configbool('server', 'bundle1')
610 611
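A toy model of that precedence, useful for convincing yourself of the lookup order (a plain dict stands in for the ui config; the default of True mirrors server.bundle1's default at the time of writing, and the helper name is invented):

    def bundle1allowed_toy(cfg, action, gd):
        keys = []
        if gd:
            keys.append('bundle1gd.%s' % action)
        keys.append('bundle1.%s' % action)
        if gd:
            keys.append('bundle1gd')
        for key in keys:
            if key in cfg:
                return cfg[key]
        return cfg.get('bundle1', True)

    cfg = {'bundle1': True, 'bundle1gd.push': False}
    assert bundle1allowed_toy(cfg, 'push', gd=True) is False   # rule 1 wins
    assert bundle1allowed_toy(cfg, 'push', gd=False) is True   # falls to rule 4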
611 612 def supportedcompengines(ui, proto, role):
612 613 """Obtain the list of supported compression engines for a request."""
613 614 assert role in (util.CLIENTROLE, util.SERVERROLE)
614 615
615 616 compengines = util.compengines.supportedwireengines(role)
616 617
617 618 # Allow config to override default list and ordering.
618 619 if role == util.SERVERROLE:
619 620 configengines = ui.configlist('server', 'compressionengines')
620 621 config = 'server.compressionengines'
621 622 else:
622 623 # This is currently implemented mainly to facilitate testing. In most
623 624 # cases, the server should be in charge of choosing a compression engine
 624 625         # because a server has the most to lose from a sub-optimal choice (e.g.
 625 626         # CPU DoS due to an expensive engine, or a network DoS due to poor
626 627 # compression ratio).
627 628 configengines = ui.configlist('experimental',
628 629 'clientcompressionengines')
629 630 config = 'experimental.clientcompressionengines'
630 631
631 632 # No explicit config. Filter out the ones that aren't supposed to be
632 633 # advertised and return default ordering.
633 634 if not configengines:
634 635 attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
635 636 return [e for e in compengines
636 637 if getattr(e.wireprotosupport(), attr) > 0]
637 638
638 639 # If compression engines are listed in the config, assume there is a good
639 640 # reason for it (like server operators wanting to achieve specific
640 641 # performance characteristics). So fail fast if the config references
641 642 # unusable compression engines.
642 643 validnames = set(e.name() for e in compengines)
643 644 invalidnames = set(e for e in configengines if e not in validnames)
644 645 if invalidnames:
645 646 raise error.Abort(_('invalid compression engine defined in %s: %s') %
646 647 (config, ', '.join(sorted(invalidnames))))
647 648
648 649 compengines = [e for e in compengines if e.name() in configengines]
649 650 compengines = sorted(compengines,
650 651 key=lambda e: configengines.index(e.name()))
651 652
652 653 if not compengines:
653 654 raise error.Abort(_('%s config option does not specify any known '
654 655 'compression engines') % config,
655 656 hint=_('usable compression engines: %s') %
 656 657                           ', '.join(sorted(validnames)))
657 658
658 659 return compengines
659 660
660 661 # list of commands
661 662 commands = {}
662 663
663 664 def wireprotocommand(name, args=''):
664 665 """decorator for wire protocol command"""
665 666 def register(func):
666 667 commands[name] = (func, args)
667 668 return func
668 669 return register
669 670
670 671 @wireprotocommand('batch', 'cmds *')
671 672 def batch(repo, proto, cmds, others):
672 673 repo = repo.filtered("served")
673 674 res = []
674 675 for pair in cmds.split(';'):
675 676 op, args = pair.split(' ', 1)
676 677 vals = {}
677 678 for a in args.split(','):
678 679 if a:
679 680 n, v = a.split('=')
680 681 vals[unescapearg(n)] = unescapearg(v)
681 682 func, spec = commands[op]
682 683 if spec:
683 684 keys = spec.split()
684 685 data = {}
685 686 for k in keys:
686 687 if k == '*':
687 688 star = {}
688 689 for key in vals.keys():
689 690 if key not in keys:
690 691 star[key] = vals[key]
691 692 data['*'] = star
692 693 else:
693 694 data[k] = vals[k]
694 695 result = func(repo, proto, *[data[k] for k in keys])
695 696 else:
696 697 result = func(repo, proto)
697 698 if isinstance(result, ooberror):
698 699 return result
699 700 res.append(escapearg(result))
700 701 return ';'.join(res)
701 702
702 703 @wireprotocommand('between', 'pairs')
703 704 def between(repo, proto, pairs):
704 705 pairs = [decodelist(p, '-') for p in pairs.split(" ")]
705 706 r = []
706 707 for b in repo.between(pairs):
707 708 r.append(encodelist(b) + "\n")
708 709 return "".join(r)
709 710
710 711 @wireprotocommand('branchmap')
711 712 def branchmap(repo, proto):
712 713 branchmap = repo.branchmap()
713 714 heads = []
714 715 for branch, nodes in branchmap.iteritems():
715 716 branchname = urlreq.quote(encoding.fromlocal(branch))
716 717 branchnodes = encodelist(nodes)
717 718 heads.append('%s %s' % (branchname, branchnodes))
718 719 return '\n'.join(heads)
719 720
720 721 @wireprotocommand('branches', 'nodes')
721 722 def branches(repo, proto, nodes):
722 723 nodes = decodelist(nodes)
723 724 r = []
724 725 for b in repo.branches(nodes):
725 726 r.append(encodelist(b) + "\n")
726 727 return "".join(r)
727 728
728 729 @wireprotocommand('clonebundles', '')
729 730 def clonebundles(repo, proto):
730 731 """Server command for returning info for available bundles to seed clones.
731 732
732 733 Clients will parse this response and determine what bundle to fetch.
733 734
734 735 Extensions may wrap this command to filter or dynamically emit data
735 736 depending on the request. e.g. you could advertise URLs for the closest
736 737 data center given the client's IP address.
737 738 """
738 739 return repo.vfs.tryread('clonebundles.manifest')
739 740
740 741 wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
741 742 'known', 'getbundle', 'unbundlehash', 'batch']
742 743
743 744 def _capabilities(repo, proto):
744 745 """return a list of capabilities for a repo
745 746
746 747 This function exists to allow extensions to easily wrap capabilities
747 748 computation
748 749
 749 750     - returns a list: easy to alter
 750 751     - changes made here are propagated to both the `capabilities` and `hello`
 751 752       commands without any further action needed.
752 753 """
753 754 # copy to prevent modification of the global list
754 755 caps = list(wireprotocaps)
755 756 if streamclone.allowservergeneration(repo):
756 757 if repo.ui.configbool('server', 'preferuncompressed'):
757 758 caps.append('stream-preferred')
758 759 requiredformats = repo.requirements & repo.supportedformats
759 760 # if our local revlogs are just revlogv1, add 'stream' cap
760 761 if not requiredformats - {'revlogv1'}:
761 762 caps.append('stream')
762 763 # otherwise, add 'streamreqs' detailing our local revlog format
763 764 else:
764 765 caps.append('streamreqs=%s' % ','.join(sorted(requiredformats)))
765 766 if repo.ui.configbool('experimental', 'bundle2-advertise'):
766 767 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
767 768 caps.append('bundle2=' + urlreq.quote(capsblob))
768 769 caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))
769 770
770 771 if proto.name == 'http':
771 772 caps.append('httpheader=%d' %
772 773 repo.ui.configint('server', 'maxhttpheaderlen'))
773 774 if repo.ui.configbool('experimental', 'httppostargs'):
774 775 caps.append('httppostargs')
775 776
776 777 # FUTURE advertise 0.2rx once support is implemented
777 778 # FUTURE advertise minrx and mintx after consulting config option
778 779 caps.append('httpmediatype=0.1rx,0.1tx,0.2tx')
779 780
780 781 compengines = supportedcompengines(repo.ui, proto, util.SERVERROLE)
781 782 if compengines:
782 783 comptypes = ','.join(urlreq.quote(e.wireprotosupport().name)
783 784 for e in compengines)
784 785 caps.append('compression=%s' % comptypes)
785 786
786 787 return caps
787 788
788 789 # If you are writing an extension and consider wrapping this function. Wrap
789 790 # `_capabilities` instead.
790 791 @wireprotocommand('capabilities')
791 792 def capabilities(repo, proto):
792 793 return ' '.join(_capabilities(repo, proto))
793 794
794 795 @wireprotocommand('changegroup', 'roots')
795 796 def changegroup(repo, proto, roots):
796 797 nodes = decodelist(roots)
797 798 cg = changegroupmod.changegroup(repo, nodes, 'serve')
798 799 return streamres(reader=cg, v1compressible=True)
799 800
800 801 @wireprotocommand('changegroupsubset', 'bases heads')
801 802 def changegroupsubset(repo, proto, bases, heads):
802 803 bases = decodelist(bases)
803 804 heads = decodelist(heads)
804 cg = changegroupmod.changegroupsubset(repo, bases, heads, 'serve')
805 outgoing = discovery.outgoing(repo, missingroots=bases,
806 missingheads=heads)
807 cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')
805 808 return streamres(reader=cg, v1compressible=True)
806 809
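This hunk is the point of the change: instead of the removed changegroupsubset() helper, the server now states the desired revision set explicitly as a discovery.outgoing object (missingroots/missingheads) and hands it to makechangegroup, keeping version '01' so the wire format seen by legacy clients is unchanged. For comparison, the legacy changegroup command above asks for everything between the given roots and all current heads, which in the new style would presumably read:

    outgoing = discovery.outgoing(repo, missingroots=nodes,
                                  missingheads=repo.heads())
    cg = changegroupmod.makechangegroup(repo, outgoing, '01', 'serve')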
807 810 @wireprotocommand('debugwireargs', 'one two *')
808 811 def debugwireargs(repo, proto, one, two, others):
809 812 # only accept optional args from the known set
810 813 opts = options('debugwireargs', ['three', 'four'], others)
811 814 return repo.debugwireargs(one, two, **opts)
812 815
813 816 @wireprotocommand('getbundle', '*')
814 817 def getbundle(repo, proto, others):
815 818 opts = options('getbundle', gboptsmap.keys(), others)
816 819 for k, v in opts.iteritems():
817 820 keytype = gboptsmap[k]
818 821 if keytype == 'nodes':
819 822 opts[k] = decodelist(v)
820 823 elif keytype == 'csv':
821 824 opts[k] = list(v.split(','))
822 825 elif keytype == 'scsv':
823 826 opts[k] = set(v.split(','))
824 827 elif keytype == 'boolean':
825 828 # Client should serialize False as '0', which is a non-empty string
826 829 # so it evaluates as a True bool.
827 830 if v == '0':
828 831 opts[k] = False
829 832 else:
830 833 opts[k] = bool(v)
831 834 elif keytype != 'plain':
832 835 raise KeyError('unknown getbundle option type %s'
833 836 % keytype)
834 837
835 838 if not bundle1allowed(repo, 'pull'):
836 839 if not exchange.bundle2requested(opts.get('bundlecaps')):
837 840 if proto.name == 'http':
838 841 return ooberror(bundle2required)
839 842 raise error.Abort(bundle2requiredmain,
840 843 hint=bundle2requiredhint)
841 844
842 845 try:
843 846 if repo.ui.configbool('server', 'disablefullbundle'):
844 847 # Check to see if this is a full clone.
845 848 clheads = set(repo.changelog.heads())
846 849 heads = set(opts.get('heads', set()))
847 850 common = set(opts.get('common', set()))
848 851 common.discard(nullid)
849 852 if not common and clheads == heads:
850 853 raise error.Abort(
851 854 _('server has pull-based clones disabled'),
852 855 hint=_('remove --pull if specified or upgrade Mercurial'))
853 856
854 857 chunks = exchange.getbundlechunks(repo, 'serve', **opts)
855 858 except error.Abort as exc:
856 859 # cleanly forward Abort error to the client
857 860 if not exchange.bundle2requested(opts.get('bundlecaps')):
858 861 if proto.name == 'http':
859 862 return ooberror(str(exc) + '\n')
860 863 raise # cannot do better for bundle1 + ssh
 861 864         # a bundle2 request expects a bundle2 reply
862 865 bundler = bundle2.bundle20(repo.ui)
863 866 manargs = [('message', str(exc))]
864 867 advargs = []
865 868 if exc.hint is not None:
866 869 advargs.append(('hint', exc.hint))
867 870 bundler.addpart(bundle2.bundlepart('error:abort',
868 871 manargs, advargs))
869 872 return streamres(gen=bundler.getchunks(), v1compressible=True)
870 873 return streamres(gen=chunks, v1compressible=True)
871 874
872 875 @wireprotocommand('heads')
873 876 def heads(repo, proto):
874 877 h = repo.heads()
875 878 return encodelist(h) + "\n"
876 879
877 880 @wireprotocommand('hello')
878 881 def hello(repo, proto):
879 882 '''the hello command returns a set of lines describing various
880 883 interesting things about the server, in an RFC822-like format.
881 884 Currently the only one defined is "capabilities", which
882 885 consists of a line in the form:
883 886
884 887 capabilities: space separated list of tokens
885 888 '''
886 889 return "capabilities: %s\n" % (capabilities(repo, proto))
887 890
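A representative (abridged, hypothetical) response; the value of the capabilities field is exactly what the ``capabilities`` command returns:

    capabilities: lookup branchmap pushkey known getbundle unbundlehash batch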
888 891 @wireprotocommand('listkeys', 'namespace')
889 892 def listkeys(repo, proto, namespace):
890 893 d = repo.listkeys(encoding.tolocal(namespace)).items()
891 894 return pushkeymod.encodekeys(d)
892 895
893 896 @wireprotocommand('lookup', 'key')
894 897 def lookup(repo, proto, key):
895 898 try:
896 899 k = encoding.tolocal(key)
897 900 c = repo[k]
898 901 r = c.hex()
899 902 success = 1
900 903 except Exception as inst:
901 904 r = str(inst)
902 905 success = 0
903 906 return "%s %s\n" % (success, r)
904 907
905 908 @wireprotocommand('known', 'nodes *')
906 909 def known(repo, proto, nodes, others):
907 910 return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
908 911
909 912 @wireprotocommand('pushkey', 'namespace key old new')
910 913 def pushkey(repo, proto, namespace, key, old, new):
911 914 # compatibility with pre-1.8 clients which were accidentally
912 915 # sending raw binary nodes rather than utf-8-encoded hex
913 916 if len(new) == 20 and util.escapestr(new) != new:
914 917 # looks like it could be a binary node
915 918 try:
916 919 new.decode('utf-8')
917 920 new = encoding.tolocal(new) # but cleanly decodes as UTF-8
918 921 except UnicodeDecodeError:
919 922 pass # binary, leave unmodified
920 923 else:
921 924 new = encoding.tolocal(new) # normal path
922 925
923 926 if util.safehasattr(proto, 'restore'):
924 927
925 928 proto.redirect()
926 929
927 930 try:
928 931 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
929 932 encoding.tolocal(old), new) or False
930 933 except error.Abort:
931 934 r = False
932 935
933 936 output = proto.restore()
934 937
935 938 return '%s\n%s' % (int(r), output)
936 939
937 940 r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
938 941 encoding.tolocal(old), new)
939 942 return '%s\n' % int(r)
940 943
941 944 @wireprotocommand('stream_out')
942 945 def stream(repo, proto):
943 946 '''If the server supports streaming clone, it advertises the "stream"
944 947 capability with a value representing the version and flags of the repo
945 948 it is serving. Client checks to see if it understands the format.
946 949 '''
947 950 if not streamclone.allowservergeneration(repo):
948 951 return '1\n'
949 952
950 953 def getstream(it):
951 954 yield '0\n'
952 955 for chunk in it:
953 956 yield chunk
954 957
955 958 try:
956 959 # LockError may be raised before the first result is yielded. Don't
957 960 # emit output until we're sure we got the lock successfully.
958 961 it = streamclone.generatev1wireproto(repo)
959 962 return streamres(gen=getstream(it))
960 963 except error.LockError:
961 964 return '2\n'
962 965
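The status byte emitted here is a tiny protocol of its own: '1' means streaming clone is not supported (or disabled), '2' means the server failed to take the lock, and '0' means success with the stream body following. A hedged sketch of client-side handling; the function name is invented:

    def checkstreamstatus(code):
        if code == '1':
            raise IOError('server does not support streaming clone')
        if code == '2':
            raise IOError('server could not lock repository')
        assert code == '0'   # stream data follows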
963 966 @wireprotocommand('unbundle', 'heads')
964 967 def unbundle(repo, proto, heads):
965 968 their_heads = decodelist(heads)
966 969
967 970 try:
968 971 proto.redirect()
969 972
970 973 exchange.check_heads(repo, their_heads, 'preparing changes')
971 974
972 975 # write bundle data to temporary file because it can be big
973 976 fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
974 977 fp = os.fdopen(fd, pycompat.sysstr('wb+'))
975 978 r = 0
976 979 try:
977 980 proto.getfile(fp)
978 981 fp.seek(0)
979 982 gen = exchange.readbundle(repo.ui, fp, None)
980 983 if (isinstance(gen, changegroupmod.cg1unpacker)
981 984 and not bundle1allowed(repo, 'push')):
982 985 if proto.name == 'http':
 983 986                 # need to special-case http because stderr does not get to
 984 987                 # the http client on a failed push, so we need to abuse some
 985 988                 # other error type to make sure the message gets to the
 986 989                 # user.
987 990 return ooberror(bundle2required)
988 991 raise error.Abort(bundle2requiredmain,
989 992 hint=bundle2requiredhint)
990 993
991 994 r = exchange.unbundle(repo, gen, their_heads, 'serve',
992 995 proto._client())
993 996 if util.safehasattr(r, 'addpart'):
 994 997                 # The return looks streamable; we are in the bundle2 case and
995 998 # should return a stream.
996 999 return streamres(gen=r.getchunks())
997 1000 return pushres(r)
998 1001
999 1002 finally:
1000 1003 fp.close()
1001 1004 os.unlink(tempname)
1002 1005
1003 1006 except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
1004 1007 # handle non-bundle2 case first
1005 1008 if not getattr(exc, 'duringunbundle2', False):
1006 1009 try:
1007 1010 raise
1008 1011 except error.Abort:
1009 1012 # The old code we moved used util.stderr directly.
 1010 1013                 # We did not change it, to minimise code churn.
 1011 1014                 # This needs to be moved to something proper.
1012 1015 # Feel free to do it.
1013 1016 util.stderr.write("abort: %s\n" % exc)
1014 1017 if exc.hint is not None:
1015 1018 util.stderr.write("(%s)\n" % exc.hint)
1016 1019 return pushres(0)
1017 1020 except error.PushRaced:
1018 1021 return pusherr(str(exc))
1019 1022
1020 1023 bundler = bundle2.bundle20(repo.ui)
1021 1024 for out in getattr(exc, '_bundle2salvagedoutput', ()):
1022 1025 bundler.addpart(out)
1023 1026 try:
1024 1027 try:
1025 1028 raise
1026 1029 except error.PushkeyFailed as exc:
1027 1030 # check client caps
1028 1031 remotecaps = getattr(exc, '_replycaps', None)
1029 1032 if (remotecaps is not None
1030 1033 and 'pushkey' not in remotecaps.get('error', ())):
 1031 1034                 # no support on the remote side; fall back to Abort handler.
1032 1035 raise
1033 1036 part = bundler.newpart('error:pushkey')
1034 1037 part.addparam('in-reply-to', exc.partid)
1035 1038 if exc.namespace is not None:
1036 1039 part.addparam('namespace', exc.namespace, mandatory=False)
1037 1040 if exc.key is not None:
1038 1041 part.addparam('key', exc.key, mandatory=False)
1039 1042 if exc.new is not None:
1040 1043 part.addparam('new', exc.new, mandatory=False)
1041 1044 if exc.old is not None:
1042 1045 part.addparam('old', exc.old, mandatory=False)
1043 1046 if exc.ret is not None:
1044 1047 part.addparam('ret', exc.ret, mandatory=False)
1045 1048 except error.BundleValueError as exc:
1046 1049 errpart = bundler.newpart('error:unsupportedcontent')
1047 1050 if exc.parttype is not None:
1048 1051 errpart.addparam('parttype', exc.parttype)
1049 1052 if exc.params:
1050 1053 errpart.addparam('params', '\0'.join(exc.params))
1051 1054 except error.Abort as exc:
1052 1055 manargs = [('message', str(exc))]
1053 1056 advargs = []
1054 1057 if exc.hint is not None:
1055 1058 advargs.append(('hint', exc.hint))
1056 1059 bundler.addpart(bundle2.bundlepart('error:abort',
1057 1060 manargs, advargs))
1058 1061 except error.PushRaced as exc:
1059 1062 bundler.newpart('error:pushraced', [('message', str(exc))])
1060 1063 return streamres(gen=bundler.getchunks())