localrepo: lowercase "unexpected response" message
Martin Geisler
r16941:a1eb17be default
@@ -1,2370 +1,2370 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

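    # Editor's note: an illustrative sketch, not part of localrepo.py.
    # The container methods above let a repository act like a sequence of
    # revisions; assuming 'repo' is an open localrepository instance:
    #
    #   ctx = repo['tip']        # changectx from a tag, rev number, or node
    #   wctx = repo[None]        # workingctx for the working directory
    #   nrevs = len(repo)        # number of revisions in the changelog
    #   exists = node in repo    # membership check via lookup()
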
    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

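    # Editor's note: an illustrative sketch, not part of localrepo.py.
    # revs() returns revision numbers while set() yields contexts; the '%s'
    # below is escaped by revset.formatspec rather than plain interpolation:
    #
    #   for ctx in repo.set('heads(branch(%s))', 'default'):
    #       print ctx.rev(), ctx.branch()
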
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

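    # Editor's note: an illustrative call, not part of localrepo.py. Creating
    # a global tag commits a changeset touching .hgtags; 'node' and the user
    # string here are hypothetical placeholders:
    #
    #   repo.tag(['v1.0'], node, 'Added tag v1.0', local=False,
    #            user='Jane Doe <jane@example.com>', date=None)
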
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

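    # Editor's note: an illustrative sketch, not part of localrepo.py.
    # branchmap() maps each branch name to all of its head nodes (tipmost
    # last), while branchtags() reduces that to one tipmost head per branch:
    #
    #   heads = repo.branchmap().get('default', [])
    #   tipmost = repo.branchtags().get('default')
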
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhnode = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhnode)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

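    # Editor's note: an illustrative sketch, not part of localrepo.py.
    # known() backs the 'known' wire-protocol capability used during
    # discovery; secret changesets are deliberately reported as unknown:
    #
    #   repo.known([goodnode, badnode])    # -> e.g. [True, False]
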
    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

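    # Editor's note: an illustrative sketch, not part of localrepo.py. A data
    # filter registered via adddatafilter() can then be named in [encode] or
    # [decode] hgrc sections; the 'upper:' filter here is hypothetical:
    #
    #   repo.adddatafilter('upper:', lambda s, cmd, **kwargs: s.upper())
    #   # hgrc:
    #   #   [encode]
    #   #   **.txt = upper:
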
    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

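    # Editor's note: an illustrative usage pattern, not part of localrepo.py.
    # Callers close() a transaction on success and always release() it,
    # mirroring what commitctx() does further down in this file:
    #
    #   tr = repo.transaction('my-operation')     # 'my-operation' is made up
    #   try:
    #       ...                                   # write store data via tr
    #       tr.close()                            # commit the transaction
    #   finally:
    #       tr.release()                          # rollback if never closed
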
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate.  Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

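    # Editor's note: an illustrative usage pattern, not part of localrepo.py.
    # The two locks nest and are released in reverse order, as rollback()
    # above demonstrates:
    #
    #   wlock = repo.wlock()    # protects .hg/ outside the store
    #   lock = repo.lock()      # protects .hg/store
    #   try:
    #       ...
    #   finally:
    #       release(lock, wlock)
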
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

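    # Editor's note: an illustrative call, not part of localrepo.py. A
    # minimal commit of all modified tracked files; commit() returns the new
    # changelog node, or None when there was nothing to commit:
    #
    #   node = repo.commit(text='fix typo', user='Jane Doe <jane@example.com>')
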
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

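    # Editor's note: an illustrative sketch, not part of localrepo.py.
    # status() returns seven sorted lists; the ignored, clean and unknown
    # lists stay empty unless those flags are passed explicitly:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(clean=True)
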
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
1584 1584 # sync on everything common
1585 1585 subset = common + added
1586 1586 else:
1587 1587 # We pulled a specific subset
1588 1588 # sync on this subset
1589 1589 subset = heads
1590 1590
1591 1591 # Get remote phases data from remote
1592 1592 remotephases = remote.listkeys('phases')
1593 1593 publishing = bool(remotephases.get('publishing', False))
1594 1594 if remotephases and not publishing:
1595 1595 # remote is new and unpublishing
1596 1596 pheads, _dr = phases.analyzeremotephases(self, subset,
1597 1597 remotephases)
1598 1598 phases.advanceboundary(self, phases.public, pheads)
1599 1599 phases.advanceboundary(self, phases.draft, subset)
1600 1600 else:
1601 1601 # Remote is old or publishing; all common changesets
1602 1602 # should be seen as public
1603 1603 phases.advanceboundary(self, phases.public, subset)
1604 1604 finally:
1605 1605 lock.release()
1606 1606
1607 1607 return result
1608 1608
1609 1609 def checkpush(self, force, revs):
1610 1610 """Extensions can override this function if additional checks have
1611 1611 to be performed before pushing, or call it if they override push
1612 1612 command.
1613 1613 """
1614 1614 pass
1615 1615
1616 1616 def push(self, remote, force=False, revs=None, newbranch=False):
1617 1617 '''Push outgoing changesets (limited by revs) from the current
1618 1618 repository to remote. Return an integer:
1619 1619 - None means nothing to push
1620 1620 - 0 means HTTP error
1621 1621 - 1 means we pushed and remote head count is unchanged *or*
1622 1622 we have outgoing changesets but refused to push
1623 1623 - other values as described by addchangegroup()
1624 1624 '''
1625 1625 # there are two ways to push to remote repo:
1626 1626 #
1627 1627 # addchangegroup assumes local user can lock remote
1628 1628 # repo (local filesystem, old ssh servers).
1629 1629 #
1630 1630 # unbundle assumes local user cannot lock remote repo (new ssh
1631 1631 # servers, http servers).
1632 1632
1633 1633 # get local lock as we might write phase data
1634 1634 locallock = self.lock()
1635 1635 try:
1636 1636 self.checkpush(force, revs)
1637 1637 lock = None
1638 1638 unbundle = remote.capable('unbundle')
1639 1639 if not unbundle:
1640 1640 lock = remote.lock()
1641 1641 try:
1642 1642 # discovery
1643 1643 fci = discovery.findcommonincoming
1644 1644 commoninc = fci(self, remote, force=force)
1645 1645 common, inc, remoteheads = commoninc
1646 1646 fco = discovery.findcommonoutgoing
1647 1647 outgoing = fco(self, remote, onlyheads=revs,
1648 1648 commoninc=commoninc, force=force)
1649 1649
1650 1650
1651 1651 if not outgoing.missing:
1652 1652 # nothing to push
1653 1653 scmutil.nochangesfound(self.ui, outgoing.excluded)
1654 1654 ret = None
1655 1655 else:
1656 1656 # something to push
1657 1657 if not force:
1658 1658 discovery.checkheads(self, remote, outgoing,
1659 1659 remoteheads, newbranch,
1660 1660 bool(inc))
1661 1661
1662 1662 # create a changegroup from local
1663 1663 if revs is None and not outgoing.excluded:
1664 1664 # push everything,
1665 1665 # use the fast path, no race possible on push
1666 1666 cg = self._changegroup(outgoing.missing, 'push')
1667 1667 else:
1668 1668 cg = self.getlocalbundle('push', outgoing)
1669 1669
1670 1670 # apply changegroup to remote
1671 1671 if unbundle:
1672 1672 # local repo finds heads on server, finds out what
1673 1673 # revs it must push. once revs transferred, if server
1674 1674 # finds it has different heads (someone else won
1675 1675 # commit/push race), server aborts.
1676 1676 if force:
1677 1677 remoteheads = ['force']
1678 1678 # ssh: return remote's addchangegroup()
1679 1679 # http: return remote's addchangegroup() or 0 for error
1680 1680 ret = remote.unbundle(cg, remoteheads, 'push')
1681 1681 else:
1682 1682 # we return an integer indicating remote head count
1683 1683 # change
1684 1684 ret = remote.addchangegroup(cg, 'push', self.url())
1685 1685
1686 1686 if ret:
1687 1687 # push succeeded, synchronize the target of the push
1688 1688 cheads = outgoing.missingheads
1689 1689 elif revs is None:
1690 1690 # All-out push failed; synchronize all common
1691 1691 cheads = outgoing.commonheads
1692 1692 else:
1693 1693 # I want cheads = heads(::missingheads and ::commonheads)
1694 1694 # (missingheads is revs with secret changeset filtered out)
1695 1695 #
1696 1696 # This can be expressed as:
1697 1697 # cheads = ( (missingheads and ::commonheads)
1698 1698 #              + (commonheads and ::missingheads)
1699 1699 #              )
1700 1700 #
1701 1701 # while trying to push we already computed the following:
1702 1702 # common = (::commonheads)
1703 1703 # missing = ((commonheads::missingheads) - commonheads)
1704 1704 #
1705 1705 # We can pick:
1706 1706 # * the part of missingheads already common (::commonheads)
1707 1707 common = set(outgoing.common)
1708 1708 cheads = [node for node in revs if node in common]
1709 1709 # and
1710 1710 # * commonheads that are parents of roots of missing
1711 1711 revset = self.set('%ln and parents(roots(%ln))',
1712 1712 outgoing.commonheads,
1713 1713 outgoing.missing)
1714 1714 cheads.extend(c.node() for c in revset)
1715 1715 # even when we don't push, exchanging phase data is useful
1716 1716 remotephases = remote.listkeys('phases')
1717 1717 if not remotephases: # old server or public only repo
1718 1718 phases.advanceboundary(self, phases.public, cheads)
1719 1719 # don't push any phase data as there is nothing to push
1720 1720 else:
1721 1721 ana = phases.analyzeremotephases(self, cheads, remotephases)
1722 1722 pheads, droots = ana
1723 1723 ### Apply remote phase on local
1724 1724 if remotephases.get('publishing', False):
1725 1725 phases.advanceboundary(self, phases.public, cheads)
1726 1726 else: # publish = False
1727 1727 phases.advanceboundary(self, phases.public, pheads)
1728 1728 phases.advanceboundary(self, phases.draft, cheads)
1729 1729 ### Apply local phase on remote
1730 1730
1731 1731 # Get the list of all revs draft on remote but public here.
1732 1732 # XXX Beware that the revset breaks if droots is not strictly
1733 1733 # XXX roots; we may want to ensure it is, but that is costly
1734 1734 outdated = self.set('heads((%ln::%ln) and public())',
1735 1735 droots, cheads)
1736 1736 for newremotehead in outdated:
1737 1737 r = remote.pushkey('phases',
1738 1738 newremotehead.hex(),
1739 1739 str(phases.draft),
1740 1740 str(phases.public))
1741 1741 if not r:
1742 1742 self.ui.warn(_('updating %s to public failed!\n')
1743 1743 % newremotehead)
1744 1744 finally:
1745 1745 if lock is not None:
1746 1746 lock.release()
1747 1747 finally:
1748 1748 locallock.release()
1749 1749
1750 1750 self.ui.debug("checking for updated bookmarks\n")
1751 1751 rb = remote.listkeys('bookmarks')
1752 1752 for k in rb.keys():
1753 1753 if k in self._bookmarks:
1754 1754 nr, nl = rb[k], hex(self._bookmarks[k])
1755 1755 if nr in self:
1756 1756 cr = self[nr]
1757 1757 cl = self[nl]
1758 1758 if cl in cr.descendants():
1759 1759 r = remote.pushkey('bookmarks', k, nr, nl)
1760 1760 if r:
1761 1761 self.ui.status(_("updating bookmark %s\n") % k)
1762 1762 else:
1763 1763 self.ui.warn(_('updating bookmark %s'
1764 1764 ' failed!\n') % k)
1765 1765
1766 1766 return ret
1767 1767
1768 1768 def changegroupinfo(self, nodes, source):
1769 1769 if self.ui.verbose or source == 'bundle':
1770 1770 self.ui.status(_("%d changesets found\n") % len(nodes))
1771 1771 if self.ui.debugflag:
1772 1772 self.ui.debug("list of changesets:\n")
1773 1773 for node in nodes:
1774 1774 self.ui.debug("%s\n" % hex(node))
1775 1775
1776 1776 def changegroupsubset(self, bases, heads, source):
1777 1777 """Compute a changegroup consisting of all the nodes that are
1778 1778 descendants of any of the bases and ancestors of any of the heads.
1779 1779 Return a chunkbuffer object whose read() method will return
1780 1780 successive changegroup chunks.
1781 1781
1782 1782 It is fairly complex as determining which filenodes and which
1783 1783 manifest nodes need to be included for the changeset to be complete
1784 1784 is non-trivial.
1785 1785
1786 1786 Another wrinkle is doing the reverse, figuring out which changeset in
1787 1787 the changegroup a particular filenode or manifestnode belongs to.
1788 1788 """
1789 1789 cl = self.changelog
1790 1790 if not bases:
1791 1791 bases = [nullid]
1792 1792 csets, bases, heads = cl.nodesbetween(bases, heads)
1793 1793 # We assume that all ancestors of bases are known
1794 1794 common = set(cl.ancestors([cl.rev(n) for n in bases]))
1795 1795 return self._changegroupsubset(common, csets, heads, source)
1796 1796
1797 1797 def getlocalbundle(self, source, outgoing):
1798 1798 """Like getbundle, but taking a discovery.outgoing as an argument.
1799 1799
1800 1800 This is only implemented for local repos and reuses potentially
1801 1801 precomputed sets in outgoing."""
1802 1802 if not outgoing.missing:
1803 1803 return None
1804 1804 return self._changegroupsubset(outgoing.common,
1805 1805 outgoing.missing,
1806 1806 outgoing.missingheads,
1807 1807 source)
1808 1808
1809 1809 def getbundle(self, source, heads=None, common=None):
1810 1810 """Like changegroupsubset, but returns the set difference between the
1811 1811 ancestors of heads and the ancestors common.
1812 1812
1813 1813 If heads is None, use the local heads. If common is None, use [nullid].
1814 1814
1815 1815 The nodes in common might not all be known locally due to the way the
1816 1816 current discovery protocol works.
1817 1817 """
1818 1818 cl = self.changelog
1819 1819 if common:
1820 1820 nm = cl.nodemap
1821 1821 common = [n for n in common if n in nm]
1822 1822 else:
1823 1823 common = [nullid]
1824 1824 if not heads:
1825 1825 heads = cl.heads()
1826 1826 return self.getlocalbundle(source,
1827 1827 discovery.outgoing(cl, common, heads))
1828 1828
1829 1829 def _changegroupsubset(self, commonrevs, csets, heads, source):
1830 1830
1831 1831 cl = self.changelog
1832 1832 mf = self.manifest
1833 1833 mfs = {} # needed manifests
1834 1834 fnodes = {} # needed file nodes
1835 1835 changedfiles = set()
1836 1836 fstate = ['', {}]
1837 1837 count = [0, 0]
1838 1838
1839 1839 # can we go through the fast path?
1840 1840 heads.sort()
1841 1841 if heads == sorted(self.heads()):
1842 1842 return self._changegroup(csets, source)
1843 1843
1844 1844 # slow path
1845 1845 self.hook('preoutgoing', throw=True, source=source)
1846 1846 self.changegroupinfo(csets, source)
1847 1847
1848 1848 # filter any nodes that claim to be part of the known set
1849 1849 def prune(revlog, missing):
1850 1850 rr, rl = revlog.rev, revlog.linkrev
1851 1851 return [n for n in missing
1852 1852 if rl(rr(n)) not in commonrevs]
1853 1853
1854 1854 progress = self.ui.progress
1855 1855 _bundling = _('bundling')
1856 1856 _changesets = _('changesets')
1857 1857 _manifests = _('manifests')
1858 1858 _files = _('files')
1859 1859
1860 1860 def lookup(revlog, x):
1861 1861 if revlog == cl:
1862 1862 c = cl.read(x)
1863 1863 changedfiles.update(c[3])
1864 1864 mfs.setdefault(c[0], x)
1865 1865 count[0] += 1
1866 1866 progress(_bundling, count[0],
1867 1867 unit=_changesets, total=count[1])
1868 1868 return x
1869 1869 elif revlog == mf:
1870 1870 clnode = mfs[x]
1871 1871 mdata = mf.readfast(x)
1872 1872 for f, n in mdata.iteritems():
1873 1873 if f in changedfiles:
1874 1874 fnodes[f].setdefault(n, clnode)
1875 1875 count[0] += 1
1876 1876 progress(_bundling, count[0],
1877 1877 unit=_manifests, total=count[1])
1878 1878 return clnode
1879 1879 else:
1880 1880 progress(_bundling, count[0], item=fstate[0],
1881 1881 unit=_files, total=count[1])
1882 1882 return fstate[1][x]
1883 1883
1884 1884 bundler = changegroup.bundle10(lookup)
1885 1885 reorder = self.ui.config('bundle', 'reorder', 'auto')
1886 1886 if reorder == 'auto':
1887 1887 reorder = None
1888 1888 else:
1889 1889 reorder = util.parsebool(reorder)
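# Illustrative hgrc knob read above:
#   [bundle]
#   reorder = auto
# 'auto' leaves reordering to each revlog (reorder = None); any other
# value is interpreted as a boolean by util.parsebool.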
1890 1890
1891 1891 def gengroup():
1892 1892 # Create a changenode group generator that will call our functions
1893 1893 # back to look up the owning changenode and collect information.
1894 1894 count[:] = [0, len(csets)]
1895 1895 for chunk in cl.group(csets, bundler, reorder=reorder):
1896 1896 yield chunk
1897 1897 progress(_bundling, None)
1898 1898
1899 1899 # Create a generator for the manifestnodes that calls our lookup
1900 1900 # and data collection functions back.
1901 1901 for f in changedfiles:
1902 1902 fnodes[f] = {}
1903 1903 count[:] = [0, len(mfs)]
1904 1904 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1905 1905 yield chunk
1906 1906 progress(_bundling, None)
1907 1907
1908 1908 mfs.clear()
1909 1909
1910 1910 # Go through all our files in order sorted by name.
1911 1911 count[:] = [0, len(changedfiles)]
1912 1912 for fname in sorted(changedfiles):
1913 1913 filerevlog = self.file(fname)
1914 1914 if not len(filerevlog):
1915 1915 raise util.Abort(_("empty or missing revlog for %s")
1916 1916 % fname)
1917 1917 fstate[0] = fname
1918 1918 fstate[1] = fnodes.pop(fname, {})
1919 1919
1920 1920 nodelist = prune(filerevlog, fstate[1])
1921 1921 if nodelist:
1922 1922 count[0] += 1
1923 1923 yield bundler.fileheader(fname)
1924 1924 for chunk in filerevlog.group(nodelist, bundler, reorder):
1925 1925 yield chunk
1926 1926
1927 1927 # Signal that no more groups are left.
1928 1928 yield bundler.close()
1929 1929 progress(_bundling, None)
1930 1930
1931 1931 if csets:
1932 1932 self.hook('outgoing', node=hex(csets[0]), source=source)
1933 1933
1934 1934 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
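# 'UN' tags the changegroup stream as uncompressed (the HG10UN bundle
# type).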
1935 1935
1936 1936 def changegroup(self, basenodes, source):
1937 1937 # to avoid a race we use changegroupsubset() (issue1320)
1938 1938 return self.changegroupsubset(basenodes, self.heads(), source)
1939 1939
1940 1940 def _changegroup(self, nodes, source):
1941 1941 """Compute the changegroup of all nodes that we have that a recipient
1942 1942 doesn't. Return a chunkbuffer object whose read() method will return
1943 1943 successive changegroup chunks.
1944 1944
1945 1945 This is much easier than the previous function as we can assume that
1946 1946 the recipient has any changenode we aren't sending them.
1947 1947
1948 1948 nodes is the set of nodes to send"""
1949 1949
1950 1950 cl = self.changelog
1951 1951 mf = self.manifest
1952 1952 mfs = {}
1953 1953 changedfiles = set()
1954 1954 fstate = ['']
1955 1955 count = [0, 0]
1956 1956
1957 1957 self.hook('preoutgoing', throw=True, source=source)
1958 1958 self.changegroupinfo(nodes, source)
1959 1959
1960 1960 revset = set([cl.rev(n) for n in nodes])
1961 1961
1962 1962 def gennodelst(log):
1963 1963 ln, llr = log.node, log.linkrev
1964 1964 return [ln(r) for r in log if llr(r) in revset]
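# i.e. gennodelst picks, from any revlog, exactly the revisions whose
# linkrev points at one of the changesets being bundled.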
1965 1965
1966 1966 progress = self.ui.progress
1967 1967 _bundling = _('bundling')
1968 1968 _changesets = _('changesets')
1969 1969 _manifests = _('manifests')
1970 1970 _files = _('files')
1971 1971
1972 1972 def lookup(revlog, x):
1973 1973 if revlog == cl:
1974 1974 c = cl.read(x)
1975 1975 changedfiles.update(c[3])
1976 1976 mfs.setdefault(c[0], x)
1977 1977 count[0] += 1
1978 1978 progress(_bundling, count[0],
1979 1979 unit=_changesets, total=count[1])
1980 1980 return x
1981 1981 elif revlog == mf:
1982 1982 count[0] += 1
1983 1983 progress(_bundling, count[0],
1984 1984 unit=_manifests, total=count[1])
1985 1985 return cl.node(revlog.linkrev(revlog.rev(x)))
1986 1986 else:
1987 1987 progress(_bundling, count[0], item=fstate[0],
1988 1988 total=count[1], unit=_files)
1989 1989 return cl.node(revlog.linkrev(revlog.rev(x)))
1990 1990
1991 1991 bundler = changegroup.bundle10(lookup)
1992 1992 reorder = self.ui.config('bundle', 'reorder', 'auto')
1993 1993 if reorder == 'auto':
1994 1994 reorder = None
1995 1995 else:
1996 1996 reorder = util.parsebool(reorder)
1997 1997
1998 1998 def gengroup():
1999 1999 '''yield a sequence of changegroup chunks (strings)'''
2000 2000 # construct a list of all changed files
2001 2001
2002 2002 count[:] = [0, len(nodes)]
2003 2003 for chunk in cl.group(nodes, bundler, reorder=reorder):
2004 2004 yield chunk
2005 2005 progress(_bundling, None)
2006 2006
2007 2007 count[:] = [0, len(mfs)]
2008 2008 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2009 2009 yield chunk
2010 2010 progress(_bundling, None)
2011 2011
2012 2012 count[:] = [0, len(changedfiles)]
2013 2013 for fname in sorted(changedfiles):
2014 2014 filerevlog = self.file(fname)
2015 2015 if not len(filerevlog):
2016 2016 raise util.Abort(_("empty or missing revlog for %s")
2017 2017 % fname)
2018 2018 fstate[0] = fname
2019 2019 nodelist = gennodelst(filerevlog)
2020 2020 if nodelist:
2021 2021 count[0] += 1
2022 2022 yield bundler.fileheader(fname)
2023 2023 for chunk in filerevlog.group(nodelist, bundler, reorder):
2024 2024 yield chunk
2025 2025 yield bundler.close()
2026 2026 progress(_bundling, None)
2027 2027
2028 2028 if nodes:
2029 2029 self.hook('outgoing', node=hex(nodes[0]), source=source)
2030 2030
2031 2031 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2032 2032
2033 2033 def addchangegroup(self, source, srctype, url, emptyok=False):
2034 2034 """Add the changegroup returned by source.read() to this repo.
2035 2035 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2036 2036 the URL of the repo where this changegroup is coming from.
2037 2037
2038 2038 Return an integer summarizing the change to this repo:
2039 2039 - nothing changed or no source: 0
2040 2040 - more heads than before: 1+added heads (2..n)
2041 2041 - fewer heads than before: -1-removed heads (-2..-n)
2042 2042 - number of heads stays the same: 1
2043 2043 """
2044 2044 def csmap(x):
2045 2045 self.ui.debug("add changeset %s\n" % short(x))
2046 2046 return len(cl)
2047 2047
2048 2048 def revmap(x):
2049 2049 return cl.rev(x)
2050 2050
2051 2051 if not source:
2052 2052 return 0
2053 2053
2054 2054 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2055 2055
2056 2056 changesets = files = revisions = 0
2057 2057 efiles = set()
2058 2058
2059 2059 # write changelog data to temp files so concurrent readers will not see
2060 2060 # an inconsistent view
2061 2061 cl = self.changelog
2062 2062 cl.delayupdate()
2063 2063 oldheads = cl.heads()
2064 2064
2065 2065 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2066 2066 try:
2067 2067 trp = weakref.proxy(tr)
2068 2068 # pull off the changeset group
2069 2069 self.ui.status(_("adding changesets\n"))
2070 2070 clstart = len(cl)
2071 2071 class prog(object):
2072 2072 step = _('changesets')
2073 2073 count = 1
2074 2074 ui = self.ui
2075 2075 total = None
2076 2076 def __call__(self):
2077 2077 self.ui.progress(self.step, self.count, unit=_('chunks'),
2078 2078 total=self.total)
2079 2079 self.count += 1
2080 2080 pr = prog()
2081 2081 source.callback = pr
2082 2082
2083 2083 source.changelogheader()
2084 2084 srccontent = cl.addgroup(source, csmap, trp)
2085 2085 if not (srccontent or emptyok):
2086 2086 raise util.Abort(_("received changelog group is empty"))
2087 2087 clend = len(cl)
2088 2088 changesets = clend - clstart
2089 2089 for c in xrange(clstart, clend):
2090 2090 efiles.update(self[c].files())
2091 2091 efiles = len(efiles)
2092 2092 self.ui.progress(_('changesets'), None)
2093 2093
2094 2094 # pull off the manifest group
2095 2095 self.ui.status(_("adding manifests\n"))
2096 2096 pr.step = _('manifests')
2097 2097 pr.count = 1
2098 2098 pr.total = changesets # manifests <= changesets
2099 2099 # no need to check for empty manifest group here:
2100 2100 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2101 2101 # no new manifest will be created and the manifest group will
2102 2102 # be empty during the pull
2103 2103 source.manifestheader()
2104 2104 self.manifest.addgroup(source, revmap, trp)
2105 2105 self.ui.progress(_('manifests'), None)
2106 2106
2107 2107 needfiles = {}
2108 2108 if self.ui.configbool('server', 'validate', default=False):
2109 2109 # validate incoming csets have their manifests
2110 2110 for cset in xrange(clstart, clend):
2111 2111 mfest = self.changelog.read(self.changelog.node(cset))[0]
2112 2112 mfest = self.manifest.readdelta(mfest)
2113 2113 # store file nodes we must see
2114 2114 for f, n in mfest.iteritems():
2115 2115 needfiles.setdefault(f, set()).add(n)
2116 2116
2117 2117 # process the files
2118 2118 self.ui.status(_("adding file changes\n"))
2119 2119 pr.step = _('files')
2120 2120 pr.count = 1
2121 2121 pr.total = efiles
2122 2122 source.callback = None
2123 2123
2124 2124 while True:
2125 2125 chunkdata = source.filelogheader()
2126 2126 if not chunkdata:
2127 2127 break
2128 2128 f = chunkdata["filename"]
2129 2129 self.ui.debug("adding %s revisions\n" % f)
2130 2130 pr()
2131 2131 fl = self.file(f)
2132 2132 o = len(fl)
2133 2133 if not fl.addgroup(source, revmap, trp):
2134 2134 raise util.Abort(_("received file revlog group is empty"))
2135 2135 revisions += len(fl) - o
2136 2136 files += 1
2137 2137 if f in needfiles:
2138 2138 needs = needfiles[f]
2139 2139 for new in xrange(o, len(fl)):
2140 2140 n = fl.node(new)
2141 2141 if n in needs:
2142 2142 needs.remove(n)
2143 2143 if not needs:
2144 2144 del needfiles[f]
2145 2145 self.ui.progress(_('files'), None)
2146 2146
2147 2147 for f, needs in needfiles.iteritems():
2148 2148 fl = self.file(f)
2149 2149 for n in needs:
2150 2150 try:
2151 2151 fl.rev(n)
2152 2152 except error.LookupError:
2153 2153 raise util.Abort(
2154 2154 _('missing file data for %s:%s - run hg verify') %
2155 2155 (f, hex(n)))
2156 2156
2157 2157 dh = 0
2158 2158 if oldheads:
2159 2159 heads = cl.heads()
2160 2160 dh = len(heads) - len(oldheads)
2161 2161 for h in heads:
2162 2162 if h not in oldheads and self[h].closesbranch():
2163 2163 dh -= 1
2164 2164 htext = ""
2165 2165 if dh:
2166 2166 htext = _(" (%+d heads)") % dh
2167 2167
2168 2168 self.ui.status(_("added %d changesets"
2169 2169 " with %d changes to %d files%s\n")
2170 2170 % (changesets, revisions, files, htext))
2171 2171
2172 2172 if changesets > 0:
2173 2173 p = lambda: cl.writepending() and self.root or ""
2174 2174 self.hook('pretxnchangegroup', throw=True,
2175 2175 node=hex(cl.node(clstart)), source=srctype,
2176 2176 url=url, pending=p)
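# writepending flushes the delayed changelog to a pending file so that
# pretxnchangegroup hooks, run with HG_PENDING pointing at the repo
# root, can already see the incoming changesets.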
2177 2177
2178 2178 added = [cl.node(r) for r in xrange(clstart, clend)]
2179 2179 publishing = self.ui.configbool('phases', 'publish', True)
2180 2180 if srctype == 'push':
2181 2181 # Old servers can not push the boundary themselves.
2182 2182 # New servers won't push the boundary if the changeset already
2183 2183 # existed locally as secret
2184 2184 #
2185 2185 # We should not use added here but the list of all changes in
2186 2186 # the bundle
2187 2187 if publishing:
2188 2188 phases.advanceboundary(self, phases.public, srccontent)
2189 2189 else:
2190 2190 phases.advanceboundary(self, phases.draft, srccontent)
2191 2191 phases.retractboundary(self, phases.draft, added)
2192 2192 elif srctype != 'strip':
2193 2193 # publishing only alters behavior during push
2194 2194 #
2195 2195 # strip should not touch boundary at all
2196 2196 phases.retractboundary(self, phases.draft, added)
2197 2197
2198 2198 # make changelog see real files again
2199 2199 cl.finalize(trp)
2200 2200
2201 2201 tr.close()
2202 2202
2203 2203 if changesets > 0:
2204 2204 def runhooks():
2205 2205 # forcefully update the on-disk branch cache
2206 2206 self.ui.debug("updating the branch cache\n")
2207 2207 self.updatebranchcache()
2208 2208 self.hook("changegroup", node=hex(cl.node(clstart)),
2209 2209 source=srctype, url=url)
2210 2210
2211 2211 for n in added:
2212 2212 self.hook("incoming", node=hex(n), source=srctype,
2213 2213 url=url)
2214 2214 self._afterlock(runhooks)
2215 2215
2216 2216 finally:
2217 2217 tr.release()
2218 2218 # never return 0 here:
2219 2219 if dh < 0:
2220 2220 return dh - 1
2221 2221 else:
2222 2222 return dh + 1
2223 2223
2224 2224 def stream_in(self, remote, requirements):
2225 2225 lock = self.lock()
2226 2226 try:
2227 2227 fp = remote.stream_out()
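# Wire format consumed below: a status line (0 = ok, 1 = operation
# forbidden, 2 = locking failed), then "<total files> <total bytes>",
# then for each file "<name>\0<size>" followed by <size> bytes of raw
# store data.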
2228 2228 l = fp.readline()
2229 2229 try:
2230 2230 resp = int(l)
2231 2231 except ValueError:
2232 2232 raise error.ResponseError(
2233 _('Unexpected response from remote server:'), l)
2233 _('unexpected response from remote server:'), l)
2234 2234 if resp == 1:
2235 2235 raise util.Abort(_('operation forbidden by server'))
2236 2236 elif resp == 2:
2237 2237 raise util.Abort(_('locking the remote repository failed'))
2238 2238 elif resp != 0:
2239 2239 raise util.Abort(_('the server sent an unknown error code'))
2240 2240 self.ui.status(_('streaming all changes\n'))
2241 2241 l = fp.readline()
2242 2242 try:
2243 2243 total_files, total_bytes = map(int, l.split(' ', 1))
2244 2244 except (ValueError, TypeError):
2245 2245 raise error.ResponseError(
2246 _('Unexpected response from remote server:'), l)
2246 _('unexpected response from remote server:'), l)
2247 2247 self.ui.status(_('%d files to transfer, %s of data\n') %
2248 2248 (total_files, util.bytecount(total_bytes)))
2249 2249 handled_bytes = 0
2250 2250 self.ui.progress(_('clone'), 0, total=total_bytes)
2251 2251 start = time.time()
2252 2252 for i in xrange(total_files):
2253 2253 # XXX doesn't support '\n' or '\r' in filenames
2254 2254 l = fp.readline()
2255 2255 try:
2256 2256 name, size = l.split('\0', 1)
2257 2257 size = int(size)
2258 2258 except (ValueError, TypeError):
2259 2259 raise error.ResponseError(
2260 _('Unexpected response from remote server:'), l)
2260 _('unexpected response from remote server:'), l)
2261 2261 if self.ui.debugflag:
2262 2262 self.ui.debug('adding %s (%s)\n' %
2263 2263 (name, util.bytecount(size)))
2264 2264 # for backwards compat, name was partially encoded
2265 2265 ofp = self.sopener(store.decodedir(name), 'w')
2266 2266 for chunk in util.filechunkiter(fp, limit=size):
2267 2267 handled_bytes += len(chunk)
2268 2268 self.ui.progress(_('clone'), handled_bytes,
2269 2269 total=total_bytes)
2270 2270 ofp.write(chunk)
2271 2271 ofp.close()
2272 2272 elapsed = time.time() - start
2273 2273 if elapsed <= 0:
2274 2274 elapsed = 0.001
2275 2275 self.ui.progress(_('clone'), None)
2276 2276 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2277 2277 (util.bytecount(total_bytes), elapsed,
2278 2278 util.bytecount(total_bytes / elapsed)))
2279 2279
2280 2280 # new requirements = old non-format requirements +
2281 2281 # new format-related
2282 2282 # requirements from the streamed-in repository
2283 2283 requirements.update(set(self.requirements) - self.supportedformats)
2284 2284 self._applyrequirements(requirements)
2285 2285 self._writerequirements()
2286 2286
2287 2287 self.invalidate()
2288 2288 return len(self.heads()) + 1
2289 2289 finally:
2290 2290 lock.release()
2291 2291
2292 2292 def clone(self, remote, heads=[], stream=False):
2293 2293 '''clone remote repository.
2294 2294
2295 2295 keyword arguments:
2296 2296 heads: list of revs to clone (forces use of pull)
2297 2297 stream: use streaming clone if possible'''
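# Decision sketch: stream when no specific heads were requested and the
# server advertises 'stream' (plain revlogv1) or a compatible
# 'streamreqs' set; otherwise fall back to a regular pull.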
2298 2298
2299 2299 # now, all clients that can request uncompressed clones can
2300 2300 # read repo formats supported by all servers that can serve
2301 2301 # them.
2302 2302
2303 2303 # if revlog format changes, client will have to check version
2304 2304 # and format flags on "stream" capability, and use
2305 2305 # uncompressed only if compatible.
2306 2306
2307 2307 if not stream:
2308 2308 # if the server explicitly prefers to stream (for fast LANs)
2309 2309 stream = remote.capable('stream-preferred')
2310 2310
2311 2311 if stream and not heads:
2312 2312 # 'stream' means remote revlog format is revlogv1 only
2313 2313 if remote.capable('stream'):
2314 2314 return self.stream_in(remote, set(('revlogv1',)))
2315 2315 # otherwise, 'streamreqs' contains the remote revlog format
2316 2316 streamreqs = remote.capable('streamreqs')
2317 2317 if streamreqs:
2318 2318 streamreqs = set(streamreqs.split(','))
2319 2319 # if we support it, stream in and adjust our requirements
2320 2320 if not streamreqs - self.supportedformats:
2321 2321 return self.stream_in(remote, streamreqs)
2322 2322 return self.pull(remote, heads)
2323 2323
2324 2324 def pushkey(self, namespace, key, old, new):
2325 2325 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2326 2326 old=old, new=new)
2327 2327 ret = pushkey.push(self, namespace, key, old, new)
2328 2328 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2329 2329 ret=ret)
2330 2330 return ret
2331 2331
2332 2332 def listkeys(self, namespace):
2333 2333 self.hook('prelistkeys', throw=True, namespace=namespace)
2334 2334 values = pushkey.list(self, namespace)
2335 2335 self.hook('listkeys', namespace=namespace, values=values)
2336 2336 return values
2337 2337
2338 2338 def debugwireargs(self, one, two, three=None, four=None, five=None):
2339 2339 '''used to test argument passing over the wire'''
2340 2340 return "%s %s %s %s %s" % (one, two, three, four, five)
2341 2341
2342 2342 def savecommitmessage(self, text):
2343 2343 fp = self.opener('last-message.txt', 'wb')
2344 2344 try:
2345 2345 fp.write(text)
2346 2346 finally:
2347 2347 fp.close()
2348 2348 return self.pathto(fp.name[len(self.root)+1:])
2349 2349
2350 2350 # used to avoid circular references so destructors work
2351 2351 def aftertrans(files):
2352 2352 renamefiles = [tuple(t) for t in files]
2353 2353 def a():
2354 2354 for src, dest in renamefiles:
2355 2355 try:
2356 2356 util.rename(src, dest)
2357 2357 except OSError: # journal file does not yet exist
2358 2358 pass
2359 2359 return a
2360 2360
2361 2361 def undoname(fn):
2362 2362 base, name = os.path.split(fn)
2363 2363 assert name.startswith('journal')
2364 2364 return os.path.join(base, name.replace('journal', 'undo', 1))
2365 2365
2366 2366 def instance(ui, path, create):
2367 2367 return localrepository(ui, util.urllocalpath(path), create)
2368 2368
2369 2369 def islocal(path):
2370 2370 return True