addchangegroup: use a postrelease callback to call changegroup hook...
Pierre-Yves David
r15584:9df9444e default
@@ -1,2157 +1,2160 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=False):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 35 self.opener = scmutil.opener(self.path)
36 36 self.wopener = scmutil.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39 self._dirtyphases = False
40 40
41 41 try:
42 42 self.ui.readconfig(self.join("hgrc"), self.root)
43 43 extensions.loadall(self.ui)
44 44 except IOError:
45 45 pass
46 46
47 47 if not os.path.isdir(self.path):
48 48 if create:
49 49 if not os.path.exists(path):
50 50 util.makedirs(path)
51 51 util.makedir(self.path, notindexed=True)
52 52 requirements = ["revlogv1"]
53 53 if self.ui.configbool('format', 'usestore', True):
54 54 os.mkdir(os.path.join(self.path, "store"))
55 55 requirements.append("store")
56 56 if self.ui.configbool('format', 'usefncache', True):
57 57 requirements.append("fncache")
58 58 if self.ui.configbool('format', 'dotencode', True):
59 59 requirements.append('dotencode')
60 60 # create an invalid changelog
61 61 self.opener.append(
62 62 "00changelog.i",
63 63 '\0\0\0\2' # represents revlogv2
64 64 ' dummy changelog to prevent using the old repo layout'
65 65 )
66 66 if self.ui.configbool('format', 'generaldelta', False):
67 67 requirements.append("generaldelta")
68 68 requirements = set(requirements)
69 69 else:
70 70 raise error.RepoError(_("repository %s not found") % path)
71 71 elif create:
72 72 raise error.RepoError(_("repository %s already exists") % path)
73 73 else:
74 74 try:
75 75 requirements = scmutil.readrequires(self.opener, self.supported)
76 76 except IOError, inst:
77 77 if inst.errno != errno.ENOENT:
78 78 raise
79 79 requirements = set()
80 80
81 81 self.sharedpath = self.path
82 82 try:
83 83 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
84 84 if not os.path.exists(s):
85 85 raise error.RepoError(
86 86 _('.hg/sharedpath points to nonexistent directory %s') % s)
87 87 self.sharedpath = s
88 88 except IOError, inst:
89 89 if inst.errno != errno.ENOENT:
90 90 raise
91 91
92 92 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
93 93 self.spath = self.store.path
94 94 self.sopener = self.store.opener
95 95 self.sjoin = self.store.join
96 96 self.opener.createmode = self.store.createmode
97 97 self._applyrequirements(requirements)
98 98 if create:
99 99 self._writerequirements()
100 100
101 101
102 102 self._branchcache = None
103 103 self._branchcachetip = None
104 104 self.filterpats = {}
105 105 self._datafilters = {}
106 106 self._transref = self._lockref = self._wlockref = None
107 107
108 108 # A cache for various files under .hg/ that tracks file changes,
109 109 # (used by the filecache decorator)
110 110 #
111 111 # Maps a property name to its util.filecacheentry
112 112 self._filecache = {}
113 113
114 114 def _applyrequirements(self, requirements):
115 115 self.requirements = requirements
116 116 openerreqs = set(('revlogv1', 'generaldelta'))
117 117 self.sopener.options = dict((r, 1) for r in requirements
118 118 if r in openerreqs)
119 119
120 120 def _writerequirements(self):
121 121 reqfile = self.opener("requires", "w")
122 122 for r in self.requirements:
123 123 reqfile.write("%s\n" % r)
124 124 reqfile.close()
125 125
126 126 def _checknested(self, path):
127 127 """Determine if path is a legal nested repository."""
128 128 if not path.startswith(self.root):
129 129 return False
130 130 subpath = path[len(self.root) + 1:]
131 131
132 132 # XXX: Checking against the current working copy is wrong in
133 133 # the sense that it can reject things like
134 134 #
135 135 # $ hg cat -r 10 sub/x.txt
136 136 #
137 137 # if sub/ is no longer a subrepository in the working copy
138 138 # parent revision.
139 139 #
140 140 # However, it can of course also allow things that would have
141 141 # been rejected before, such as the above cat command if sub/
142 142 # is a subrepository now, but was a normal directory before.
143 143 # The old path auditor would have rejected by mistake since it
144 144 # panics when it sees sub/.hg/.
145 145 #
146 146 # All in all, checking against the working copy seems sensible
147 147 # since we want to prevent access to nested repositories on
148 148 # the filesystem *now*.
149 149 ctx = self[None]
150 150 parts = util.splitpath(subpath)
151 151 while parts:
152 152 prefix = os.sep.join(parts)
153 153 if prefix in ctx.substate:
154 154 if prefix == subpath:
155 155 return True
156 156 else:
157 157 sub = ctx.sub(prefix)
158 158 return sub.checknested(subpath[len(prefix) + 1:])
159 159 else:
160 160 parts.pop()
161 161 return False
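# --- illustrative sketch (not part of this diff): _checknested walks path
# prefixes from longest to shortest against ctx.substate, so a path inside
# a known subrepo is delegated to that subrepo's own checknested().
# Hypothetical call, assuming a subrepo at 'sub':
#     repo._checknested(os.path.join(repo.root, 'sub', 'nested'))
# --- end sketch ---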
162 162
163 163 @filecache('bookmarks')
164 164 def _bookmarks(self):
165 165 return bookmarks.read(self)
166 166
167 167 @filecache('bookmarks.current')
168 168 def _bookmarkcurrent(self):
169 169 return bookmarks.readcurrent(self)
170 170
171 171 def _writebookmarks(self, marks):
172 172 bookmarks.write(self)
173 173
174 174 @filecache('phaseroots')
175 175 def _phaseroots(self):
176 176 self._dirtyphases = False
177 177 phaseroots = phases.readroots(self)
178 178 phases.filterunknown(self, phaseroots)
179 179 return phaseroots
180 180
181 181 @propertycache
182 182 def _phaserev(self):
183 183 cache = [0] * len(self)
184 184 for phase in phases.trackedphases:
185 185 roots = map(self.changelog.rev, self._phaseroots[phase])
186 186 if roots:
187 187 for rev in roots:
188 188 cache[rev] = phase
189 189 for rev in self.changelog.descendants(*roots):
190 190 cache[rev] = phase
191 191 return cache
192 192
193 193 @filecache('00changelog.i', True)
194 194 def changelog(self):
195 195 c = changelog.changelog(self.sopener)
196 196 if 'HG_PENDING' in os.environ:
197 197 p = os.environ['HG_PENDING']
198 198 if p.startswith(self.root):
199 199 c.readpending('00changelog.i.a')
200 200 return c
201 201
202 202 @filecache('00manifest.i', True)
203 203 def manifest(self):
204 204 return manifest.manifest(self.sopener)
205 205
206 206 @filecache('dirstate')
207 207 def dirstate(self):
208 208 warned = [0]
209 209 def validate(node):
210 210 try:
211 211 self.changelog.rev(node)
212 212 return node
213 213 except error.LookupError:
214 214 if not warned[0]:
215 215 warned[0] = True
216 216 self.ui.warn(_("warning: ignoring unknown"
217 217 " working parent %s!\n") % short(node))
218 218 return nullid
219 219
220 220 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
221 221
222 222 def __getitem__(self, changeid):
223 223 if changeid is None:
224 224 return context.workingctx(self)
225 225 return context.changectx(self, changeid)
226 226
227 227 def __contains__(self, changeid):
228 228 try:
229 229 return bool(self.lookup(changeid))
230 230 except error.RepoLookupError:
231 231 return False
232 232
233 233 def __nonzero__(self):
234 234 return True
235 235
236 236 def __len__(self):
237 237 return len(self.changelog)
238 238
239 239 def __iter__(self):
240 240 for i in xrange(len(self)):
241 241 yield i
242 242
243 243 def revs(self, expr, *args):
244 244 '''Return a list of revisions matching the given revset'''
245 245 expr = revset.formatspec(expr, *args)
246 246 m = revset.match(None, expr)
247 247 return [r for r in m(self, range(len(self)))]
248 248
249 249 def set(self, expr, *args):
250 250 '''
251 251 Yield a context for each matching revision, after doing arg
252 252 replacement via revset.formatspec
253 253 '''
254 254 for r in self.revs(expr, *args):
255 255 yield self[r]
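# --- illustrative sketch (not part of this diff): revs() and set() expand
# their arguments through revset.formatspec before matching; '%s' and '%d'
# below are formatspec placeholders (assumed here):
#     revnums = repo.revs('branch(%s) and not merge()', 'default')
#     for ctx in repo.set('%d::tip', 0):
#         pass  # each ctx is a changectx for one matching revision
# --- end sketch ---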
256 256
257 257 def url(self):
258 258 return 'file:' + self.root
259 259
260 260 def hook(self, name, throw=False, **args):
261 261 return hook.hook(self.ui, self, name, throw, **args)
262 262
263 263 tag_disallowed = ':\r\n'
264 264
265 265 def _tag(self, names, node, message, local, user, date, extra={}):
266 266 if isinstance(names, str):
267 267 allchars = names
268 268 names = (names,)
269 269 else:
270 270 allchars = ''.join(names)
271 271 for c in self.tag_disallowed:
272 272 if c in allchars:
273 273 raise util.Abort(_('%r cannot be used in a tag name') % c)
274 274
275 275 branches = self.branchmap()
276 276 for name in names:
277 277 self.hook('pretag', throw=True, node=hex(node), tag=name,
278 278 local=local)
279 279 if name in branches:
280 280 self.ui.warn(_("warning: tag %s conflicts with existing"
281 281 " branch name\n") % name)
282 282
283 283 def writetags(fp, names, munge, prevtags):
284 284 fp.seek(0, 2)
285 285 if prevtags and prevtags[-1] != '\n':
286 286 fp.write('\n')
287 287 for name in names:
288 288 m = munge and munge(name) or name
289 289 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
290 290 old = self.tags().get(name, nullid)
291 291 fp.write('%s %s\n' % (hex(old), m))
292 292 fp.write('%s %s\n' % (hex(node), m))
293 293 fp.close()
294 294
295 295 prevtags = ''
296 296 if local:
297 297 try:
298 298 fp = self.opener('localtags', 'r+')
299 299 except IOError:
300 300 fp = self.opener('localtags', 'a')
301 301 else:
302 302 prevtags = fp.read()
303 303
304 304 # local tags are stored in the current charset
305 305 writetags(fp, names, None, prevtags)
306 306 for name in names:
307 307 self.hook('tag', node=hex(node), tag=name, local=local)
308 308 return
309 309
310 310 try:
311 311 fp = self.wfile('.hgtags', 'rb+')
312 312 except IOError, e:
313 313 if e.errno != errno.ENOENT:
314 314 raise
315 315 fp = self.wfile('.hgtags', 'ab')
316 316 else:
317 317 prevtags = fp.read()
318 318
319 319 # committed tags are stored in UTF-8
320 320 writetags(fp, names, encoding.fromlocal, prevtags)
321 321
322 322 fp.close()
323 323
324 324 if '.hgtags' not in self.dirstate:
325 325 self[None].add(['.hgtags'])
326 326
327 327 m = matchmod.exact(self.root, '', ['.hgtags'])
328 328 tagnode = self.commit(message, user, date, extra=extra, match=m)
329 329
330 330 for name in names:
331 331 self.hook('tag', node=hex(node), tag=name, local=local)
332 332
333 333 return tagnode
334 334
335 335 def tag(self, names, node, message, local, user, date):
336 336 '''tag a revision with one or more symbolic names.
337 337
338 338 names is a list of strings or, when adding a single tag, names may be a
339 339 string.
340 340
341 341 if local is True, the tags are stored in a per-repository file.
342 342 otherwise, they are stored in the .hgtags file, and a new
343 343 changeset is committed with the change.
344 344
345 345 keyword arguments:
346 346
347 347 local: whether to store tags in non-version-controlled file
348 348 (default False)
349 349
350 350 message: commit message to use if committing
351 351
352 352 user: name of user to use if committing
353 353
354 354 date: date tuple to use if committing'''
355 355
356 356 if not local:
357 357 for x in self.status()[:5]:
358 358 if '.hgtags' in x:
359 359 raise util.Abort(_('working copy of .hgtags is changed '
360 360 '(please commit .hgtags manually)'))
361 361
362 362 self.tags() # instantiate the cache
363 363 self._tag(names, node, message, local, user, date)
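# --- illustrative sketch (not part of this diff): adding a global
# (committed) tag to the working parent; arguments follow the docstring
# above, with user/date of None deferring to the usual commit defaults:
#     node = repo['.'].node()
#     repo.tag(['v1.0'], node, 'Added tag v1.0', False, None, None)
# --- end sketch ---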
364 364
365 365 @propertycache
366 366 def _tagscache(self):
367 367 '''Returns a tagscache object that contains various tag-related caches.'''
368 368
369 369 # This simplifies its cache management by having one decorated
370 370 # function (this one) and the rest simply fetch things from it.
371 371 class tagscache(object):
372 372 def __init__(self):
373 373 # These two define the set of tags for this repository. tags
374 374 # maps tag name to node; tagtypes maps tag name to 'global' or
375 375 # 'local'. (Global tags are defined by .hgtags across all
376 376 # heads, and local tags are defined in .hg/localtags.)
377 377 # They constitute the in-memory cache of tags.
378 378 self.tags = self.tagtypes = None
379 379
380 380 self.nodetagscache = self.tagslist = None
381 381
382 382 cache = tagscache()
383 383 cache.tags, cache.tagtypes = self._findtags()
384 384
385 385 return cache
386 386
387 387 def tags(self):
388 388 '''return a mapping of tag to node'''
389 389 return self._tagscache.tags
390 390
391 391 def _findtags(self):
392 392 '''Do the hard work of finding tags. Return a pair of dicts
393 393 (tags, tagtypes) where tags maps tag name to node, and tagtypes
394 394 maps tag name to a string like \'global\' or \'local\'.
395 395 Subclasses or extensions are free to add their own tags, but
396 396 should be aware that the returned dicts will be retained for the
397 397 duration of the localrepo object.'''
398 398
399 399 # XXX what tagtype should subclasses/extensions use? Currently
400 400 # mq and bookmarks add tags, but do not set the tagtype at all.
401 401 # Should each extension invent its own tag type? Should there
402 402 # be one tagtype for all such "virtual" tags? Or is the status
403 403 # quo fine?
404 404
405 405 alltags = {} # map tag name to (node, hist)
406 406 tagtypes = {}
407 407
408 408 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
409 409 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
410 410
411 411 # Build the return dicts. Have to re-encode tag names because
412 412 # the tags module always uses UTF-8 (in order not to lose info
413 413 # writing to the cache), but the rest of Mercurial wants them in
414 414 # local encoding.
415 415 tags = {}
416 416 for (name, (node, hist)) in alltags.iteritems():
417 417 if node != nullid:
418 418 try:
419 419 # ignore tags to unknown nodes
420 420 self.changelog.lookup(node)
421 421 tags[encoding.tolocal(name)] = node
422 422 except error.LookupError:
423 423 pass
424 424 tags['tip'] = self.changelog.tip()
425 425 tagtypes = dict([(encoding.tolocal(name), value)
426 426 for (name, value) in tagtypes.iteritems()])
427 427 return (tags, tagtypes)
428 428
429 429 def tagtype(self, tagname):
430 430 '''
431 431 return the type of the given tag. result can be:
432 432
433 433 'local' : a local tag
434 434 'global' : a global tag
435 435 None : tag does not exist
436 436 '''
437 437
438 438 return self._tagscache.tagtypes.get(tagname)
439 439
440 440 def tagslist(self):
441 441 '''return a list of tags ordered by revision'''
442 442 if not self._tagscache.tagslist:
443 443 l = []
444 444 for t, n in self.tags().iteritems():
445 445 r = self.changelog.rev(n)
446 446 l.append((r, t, n))
447 447 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
448 448
449 449 return self._tagscache.tagslist
450 450
451 451 def nodetags(self, node):
452 452 '''return the tags associated with a node'''
453 453 if not self._tagscache.nodetagscache:
454 454 nodetagscache = {}
455 455 for t, n in self.tags().iteritems():
456 456 nodetagscache.setdefault(n, []).append(t)
457 457 for tags in nodetagscache.itervalues():
458 458 tags.sort()
459 459 self._tagscache.nodetagscache = nodetagscache
460 460 return self._tagscache.nodetagscache.get(node, [])
461 461
462 462 def nodebookmarks(self, node):
463 463 marks = []
464 464 for bookmark, n in self._bookmarks.iteritems():
465 465 if n == node:
466 466 marks.append(bookmark)
467 467 return sorted(marks)
468 468
469 469 def _branchtags(self, partial, lrev):
470 470 # TODO: rename this function?
471 471 tiprev = len(self) - 1
472 472 if lrev != tiprev:
473 473 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
474 474 self._updatebranchcache(partial, ctxgen)
475 475 self._writebranchcache(partial, self.changelog.tip(), tiprev)
476 476
477 477 return partial
478 478
479 479 def updatebranchcache(self):
480 480 tip = self.changelog.tip()
481 481 if self._branchcache is not None and self._branchcachetip == tip:
482 482 return self._branchcache
483 483
484 484 oldtip = self._branchcachetip
485 485 self._branchcachetip = tip
486 486 if oldtip is None or oldtip not in self.changelog.nodemap:
487 487 partial, last, lrev = self._readbranchcache()
488 488 else:
489 489 lrev = self.changelog.rev(oldtip)
490 490 partial = self._branchcache
491 491
492 492 self._branchtags(partial, lrev)
493 493 # this private cache holds all heads (not just tips)
494 494 self._branchcache = partial
495 495
496 496 def branchmap(self):
497 497 '''returns a dictionary {branch: [branchheads]}'''
498 498 self.updatebranchcache()
499 499 return self._branchcache
500 500
501 501 def branchtags(self):
502 502 '''return a dict where branch names map to the tipmost head of
503 503 the branch; an open head is preferred over a closed one'''
504 504 bt = {}
505 505 for bn, heads in self.branchmap().iteritems():
506 506 tip = heads[-1]
507 507 for h in reversed(heads):
508 508 if 'close' not in self.changelog.read(h)[5]:
509 509 tip = h
510 510 break
511 511 bt[bn] = tip
512 512 return bt
513 513
514 514 def _readbranchcache(self):
515 515 partial = {}
516 516 try:
517 517 f = self.opener("cache/branchheads")
518 518 lines = f.read().split('\n')
519 519 f.close()
520 520 except (IOError, OSError):
521 521 return {}, nullid, nullrev
522 522
523 523 try:
524 524 last, lrev = lines.pop(0).split(" ", 1)
525 525 last, lrev = bin(last), int(lrev)
526 526 if lrev >= len(self) or self[lrev].node() != last:
527 527 # invalidate the cache
528 528 raise ValueError('invalidating branch cache (tip differs)')
529 529 for l in lines:
530 530 if not l:
531 531 continue
532 532 node, label = l.split(" ", 1)
533 533 label = encoding.tolocal(label.strip())
534 534 partial.setdefault(label, []).append(bin(node))
535 535 except KeyboardInterrupt:
536 536 raise
537 537 except Exception, inst:
538 538 if self.ui.debugflag:
539 539 self.ui.warn(str(inst), '\n')
540 540 partial, last, lrev = {}, nullid, nullrev
541 541 return partial, last, lrev
542 542
543 543 def _writebranchcache(self, branches, tip, tiprev):
544 544 try:
545 545 f = self.opener("cache/branchheads", "w", atomictemp=True)
546 546 f.write("%s %s\n" % (hex(tip), tiprev))
547 547 for label, nodes in branches.iteritems():
548 548 for node in nodes:
549 549 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
550 550 f.close()
551 551 except (IOError, OSError):
552 552 pass
553 553
554 554 def _updatebranchcache(self, partial, ctxgen):
555 555 # collect new branch entries
556 556 newbranches = {}
557 557 for c in ctxgen:
558 558 newbranches.setdefault(c.branch(), []).append(c.node())
559 559 # if older branchheads are reachable from new ones, they aren't
560 560 # really branchheads. Note checking parents is insufficient:
561 561 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
562 562 for branch, newnodes in newbranches.iteritems():
563 563 bheads = partial.setdefault(branch, [])
564 564 bheads.extend(newnodes)
565 565 if len(bheads) <= 1:
566 566 continue
567 567 bheads = sorted(bheads, key=lambda x: self[x].rev())
568 568 # starting from tip means fewer passes over reachable
569 569 while newnodes:
570 570 latest = newnodes.pop()
571 571 if latest not in bheads:
572 572 continue
573 573 minbhrev = self[bheads[0]].node()
574 574 reachable = self.changelog.reachable(latest, minbhrev)
575 575 reachable.remove(latest)
576 576 if reachable:
577 577 bheads = [b for b in bheads if b not in reachable]
578 578 partial[branch] = bheads
579 579
580 580 def lookup(self, key):
581 581 if isinstance(key, int):
582 582 return self.changelog.node(key)
583 583 elif key == '.':
584 584 return self.dirstate.p1()
585 585 elif key == 'null':
586 586 return nullid
587 587 elif key == 'tip':
588 588 return self.changelog.tip()
589 589 n = self.changelog._match(key)
590 590 if n:
591 591 return n
592 592 if key in self._bookmarks:
593 593 return self._bookmarks[key]
594 594 if key in self.tags():
595 595 return self.tags()[key]
596 596 if key in self.branchtags():
597 597 return self.branchtags()[key]
598 598 n = self.changelog._partialmatch(key)
599 599 if n:
600 600 return n
601 601
602 602 # can't find key, check if it might have come from damaged dirstate
603 603 if key in self.dirstate.parents():
604 604 raise error.Abort(_("working directory has unknown parent '%s'!")
605 605 % short(key))
606 606 try:
607 607 if len(key) == 20:
608 608 key = hex(key)
609 609 except TypeError:
610 610 pass
611 611 raise error.RepoLookupError(_("unknown revision '%s'") % key)
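# --- illustrative sketch (not part of this diff): lookup() tries, in the
# order coded above, integer revs, '.', 'null', 'tip', exact node matches,
# bookmarks, tags, branch names, then unambiguous hex prefixes:
#     repo.lookup('tip')      # binary node of the tip changeset
#     repo.lookup('default')  # tipmost head of branch 'default'
# --- end sketch ---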
612 612
613 613 def lookupbranch(self, key, remote=None):
614 614 repo = remote or self
615 615 if key in repo.branchmap():
616 616 return key
617 617
618 618 repo = (remote and remote.local()) and remote or self
619 619 return repo[key].branch()
620 620
621 621 def known(self, nodes):
622 622 nm = self.changelog.nodemap
623 623 return [(n in nm) for n in nodes]
624 624
625 625 def local(self):
626 626 return self
627 627
628 628 def join(self, f):
629 629 return os.path.join(self.path, f)
630 630
631 631 def wjoin(self, f):
632 632 return os.path.join(self.root, f)
633 633
634 634 def file(self, f):
635 635 if f[0] == '/':
636 636 f = f[1:]
637 637 return filelog.filelog(self.sopener, f)
638 638
639 639 def changectx(self, changeid):
640 640 return self[changeid]
641 641
642 642 def parents(self, changeid=None):
643 643 '''get list of changectxs for parents of changeid'''
644 644 return self[changeid].parents()
645 645
646 646 def filectx(self, path, changeid=None, fileid=None):
647 647 """changeid can be a changeset revision, node, or tag.
648 648 fileid can be a file revision or node."""
649 649 return context.filectx(self, path, changeid, fileid)
650 650
651 651 def getcwd(self):
652 652 return self.dirstate.getcwd()
653 653
654 654 def pathto(self, f, cwd=None):
655 655 return self.dirstate.pathto(f, cwd)
656 656
657 657 def wfile(self, f, mode='r'):
658 658 return self.wopener(f, mode)
659 659
660 660 def _link(self, f):
661 661 return os.path.islink(self.wjoin(f))
662 662
663 663 def _loadfilter(self, filter):
664 664 if filter not in self.filterpats:
665 665 l = []
666 666 for pat, cmd in self.ui.configitems(filter):
667 667 if cmd == '!':
668 668 continue
669 669 mf = matchmod.match(self.root, '', [pat])
670 670 fn = None
671 671 params = cmd
672 672 for name, filterfn in self._datafilters.iteritems():
673 673 if cmd.startswith(name):
674 674 fn = filterfn
675 675 params = cmd[len(name):].lstrip()
676 676 break
677 677 if not fn:
678 678 fn = lambda s, c, **kwargs: util.filter(s, c)
679 679 # Wrap old filters not supporting keyword arguments
680 680 if not inspect.getargspec(fn)[2]:
681 681 oldfn = fn
682 682 fn = lambda s, c, **kwargs: oldfn(s, c)
683 683 l.append((mf, fn, params))
684 684 self.filterpats[filter] = l
685 685 return self.filterpats[filter]
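# --- illustrative sketch (not part of this diff): filter patterns come
# from hgrc sections named after the filter ('encode'/'decode'); each
# pattern maps to a command, and '!' disables a pattern. Hypothetical
# configuration (command names assumed):
#     [encode]
#     **.txt = mycleanup      # data is piped through util.filter()
#     [decode]
#     **.txt = myrestore
# --- end sketch ---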
686 686
687 687 def _filter(self, filterpats, filename, data):
688 688 for mf, fn, cmd in filterpats:
689 689 if mf(filename):
690 690 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
691 691 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
692 692 break
693 693
694 694 return data
695 695
696 696 @propertycache
697 697 def _encodefilterpats(self):
698 698 return self._loadfilter('encode')
699 699
700 700 @propertycache
701 701 def _decodefilterpats(self):
702 702 return self._loadfilter('decode')
703 703
704 704 def adddatafilter(self, name, filter):
705 705 self._datafilters[name] = filter
706 706
707 707 def wread(self, filename):
708 708 if self._link(filename):
709 709 data = os.readlink(self.wjoin(filename))
710 710 else:
711 711 data = self.wopener.read(filename)
712 712 return self._filter(self._encodefilterpats, filename, data)
713 713
714 714 def wwrite(self, filename, data, flags):
715 715 data = self._filter(self._decodefilterpats, filename, data)
716 716 if 'l' in flags:
717 717 self.wopener.symlink(data, filename)
718 718 else:
719 719 self.wopener.write(filename, data)
720 720 if 'x' in flags:
721 721 util.setflags(self.wjoin(filename), False, True)
722 722
723 723 def wwritedata(self, filename, data):
724 724 return self._filter(self._decodefilterpats, filename, data)
725 725
726 726 def transaction(self, desc):
727 727 tr = self._transref and self._transref() or None
728 728 if tr and tr.running():
729 729 return tr.nest()
730 730
731 731 # abort here if the journal already exists
732 732 if os.path.exists(self.sjoin("journal")):
733 733 raise error.RepoError(
734 734 _("abandoned transaction found - run hg recover"))
735 735
736 736 journalfiles = self._writejournal(desc)
737 737 renames = [(x, undoname(x)) for x in journalfiles]
738 738
739 739 tr = transaction.transaction(self.ui.warn, self.sopener,
740 740 self.sjoin("journal"),
741 741 aftertrans(renames),
742 742 self.store.createmode)
743 743 self._transref = weakref.ref(tr)
744 744 return tr
745 745
746 746 def _writejournal(self, desc):
747 747 # save dirstate for rollback
748 748 try:
749 749 ds = self.opener.read("dirstate")
750 750 except IOError:
751 751 ds = ""
752 752 self.opener.write("journal.dirstate", ds)
753 753 self.opener.write("journal.branch",
754 754 encoding.fromlocal(self.dirstate.branch()))
755 755 self.opener.write("journal.desc",
756 756 "%d\n%s\n" % (len(self), desc))
757 757
758 758 bkname = self.join('bookmarks')
759 759 if os.path.exists(bkname):
760 760 util.copyfile(bkname, self.join('journal.bookmarks'))
761 761 else:
762 762 self.opener.write('journal.bookmarks', '')
763 763 phasesname = self.sjoin('phaseroots')
764 764 if os.path.exists(phasesname):
765 765 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
766 766 else:
767 767 self.sopener.write('journal.phaseroots', '')
768 768
769 769 return (self.sjoin('journal'), self.join('journal.dirstate'),
770 770 self.join('journal.branch'), self.join('journal.desc'),
771 771 self.join('journal.bookmarks'),
772 772 self.sjoin('journal.phaseroots'))
773 773
774 774 def recover(self):
775 775 lock = self.lock()
776 776 try:
777 777 if os.path.exists(self.sjoin("journal")):
778 778 self.ui.status(_("rolling back interrupted transaction\n"))
779 779 transaction.rollback(self.sopener, self.sjoin("journal"),
780 780 self.ui.warn)
781 781 self.invalidate()
782 782 return True
783 783 else:
784 784 self.ui.warn(_("no interrupted transaction available\n"))
785 785 return False
786 786 finally:
787 787 lock.release()
788 788
789 789 def rollback(self, dryrun=False, force=False):
790 790 wlock = lock = None
791 791 try:
792 792 wlock = self.wlock()
793 793 lock = self.lock()
794 794 if os.path.exists(self.sjoin("undo")):
795 795 return self._rollback(dryrun, force)
796 796 else:
797 797 self.ui.warn(_("no rollback information available\n"))
798 798 return 1
799 799 finally:
800 800 release(lock, wlock)
801 801
802 802 def _rollback(self, dryrun, force):
803 803 ui = self.ui
804 804 try:
805 805 args = self.opener.read('undo.desc').splitlines()
806 806 (oldlen, desc, detail) = (int(args[0]), args[1], None)
807 807 if len(args) >= 3:
808 808 detail = args[2]
809 809 oldtip = oldlen - 1
810 810
811 811 if detail and ui.verbose:
812 812 msg = (_('repository tip rolled back to revision %s'
813 813 ' (undo %s: %s)\n')
814 814 % (oldtip, desc, detail))
815 815 else:
816 816 msg = (_('repository tip rolled back to revision %s'
817 817 ' (undo %s)\n')
818 818 % (oldtip, desc))
819 819 except IOError:
820 820 msg = _('rolling back unknown transaction\n')
821 821 desc = None
822 822
823 823 if not force and self['.'] != self['tip'] and desc == 'commit':
824 824 raise util.Abort(
825 825 _('rollback of last commit while not checked out '
826 826 'may lose data'), hint=_('use -f to force'))
827 827
828 828 ui.status(msg)
829 829 if dryrun:
830 830 return 0
831 831
832 832 parents = self.dirstate.parents()
833 833 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
834 834 if os.path.exists(self.join('undo.bookmarks')):
835 835 util.rename(self.join('undo.bookmarks'),
836 836 self.join('bookmarks'))
837 837 if os.path.exists(self.sjoin('undo.phaseroots')):
838 838 util.rename(self.sjoin('undo.phaseroots'),
839 839 self.sjoin('phaseroots'))
840 840 self.invalidate()
841 841
842 842 parentgone = (parents[0] not in self.changelog.nodemap or
843 843 parents[1] not in self.changelog.nodemap)
844 844 if parentgone:
845 845 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
846 846 try:
847 847 branch = self.opener.read('undo.branch')
848 848 self.dirstate.setbranch(branch)
849 849 except IOError:
850 850 ui.warn(_('named branch could not be reset: '
851 851 'current branch is still \'%s\'\n')
852 852 % self.dirstate.branch())
853 853
854 854 self.dirstate.invalidate()
855 855 self.destroyed()
856 856 parents = tuple([p.rev() for p in self.parents()])
857 857 if len(parents) > 1:
858 858 ui.status(_('working directory now based on '
859 859 'revisions %d and %d\n') % parents)
860 860 else:
861 861 ui.status(_('working directory now based on '
862 862 'revision %d\n') % parents)
863 863 return 0
864 864
865 865 def invalidatecaches(self):
866 866 try:
867 867 delattr(self, '_tagscache')
868 868 except AttributeError:
869 869 pass
870 870
871 871 self._branchcache = None # in UTF-8
872 872 self._branchcachetip = None
873 873
874 874 def invalidatedirstate(self):
875 875 '''Invalidates the dirstate, causing the next call to dirstate
876 876 to check whether it was modified since the last time it was read,
877 877 rereading it if it has been.
878 878
879 879 This is different from dirstate.invalidate() in that it doesn't always
880 880 reread the dirstate. Use dirstate.invalidate() if you want to
881 881 explicitly read the dirstate again (i.e. restoring it to a previous
882 882 known good state).'''
883 883 try:
884 884 delattr(self, 'dirstate')
885 885 except AttributeError:
886 886 pass
887 887
888 888 def invalidate(self):
889 889 for k in self._filecache:
890 890 # dirstate is invalidated separately in invalidatedirstate()
891 891 if k == 'dirstate':
892 892 continue
893 893
894 894 try:
895 895 delattr(self, k)
896 896 except AttributeError:
897 897 pass
898 898 self.invalidatecaches()
899 899
900 900 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
901 901 try:
902 902 l = lock.lock(lockname, 0, releasefn, desc=desc)
903 903 except error.LockHeld, inst:
904 904 if not wait:
905 905 raise
906 906 self.ui.warn(_("waiting for lock on %s held by %r\n") %
907 907 (desc, inst.locker))
908 908 # default to 600 seconds timeout
909 909 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
910 910 releasefn, desc=desc)
911 911 if acquirefn:
912 912 acquirefn()
913 913 return l
914 914
915 915 def _postrelease(self, callback):
916 916 """add a callback to the current repository lock.
917 917
918 918 The callback will be executed on lock release."""
919 919 l = self._lockref and self._lockref()
920 920 assert l is not None
921 921 assert l.held
922 922 l.postreleasehooks.append(callback)
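# --- illustrative sketch (not part of this diff): _postrelease is the API
# this commit uses so addchangegroup can fire the changegroup hook only
# after the store lock is released. Hypothetical registration ('node' is
# assumed to be bound by the caller):
#     def runhooks():
#         repo.hook('changegroup', node=hex(node))
#     repo._postrelease(runhooks)
# --- end sketch ---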
923 923
924 924 def lock(self, wait=True):
925 925 '''Lock the repository store (.hg/store) and return a weak reference
926 926 to the lock. Use this before modifying the store (e.g. committing or
927 927 stripping). If you are opening a transaction, get a lock as well.'''
928 928 l = self._lockref and self._lockref()
929 929 if l is not None and l.held:
930 930 l.lock()
931 931 return l
932 932
933 933 def unlock():
934 934 self.store.write()
935 935 if self._dirtyphases:
936 936 phases.writeroots(self)
937 937 for k, ce in self._filecache.items():
938 938 if k == 'dirstate':
939 939 continue
940 940 ce.refresh()
941 941
942 942 l = self._lock(self.sjoin("lock"), wait, unlock,
943 943 self.invalidate, _('repository %s') % self.origroot)
944 944 self._lockref = weakref.ref(l)
945 945 return l
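# --- illustrative sketch (not part of this diff): callers that touch both
# the working directory and the store take wlock before lock, and release
# in reverse order, as rollback() above does:
#     wlock = repo.wlock()
#     lock = repo.lock()
#     try:
#         pass  # modify store and working directory
#     finally:
#         release(lock, wlock)
# --- end sketch ---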
946 946
947 947 def wlock(self, wait=True):
948 948 '''Lock the non-store parts of the repository (everything under
949 949 .hg except .hg/store) and return a weak reference to the lock.
950 950 Use this before modifying files in .hg.'''
951 951 l = self._wlockref and self._wlockref()
952 952 if l is not None and l.held:
953 953 l.lock()
954 954 return l
955 955
956 956 def unlock():
957 957 self.dirstate.write()
958 958 ce = self._filecache.get('dirstate')
959 959 if ce:
960 960 ce.refresh()
961 961
962 962 l = self._lock(self.join("wlock"), wait, unlock,
963 963 self.invalidatedirstate, _('working directory of %s') %
964 964 self.origroot)
965 965 self._wlockref = weakref.ref(l)
966 966 return l
967 967
968 968 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
969 969 """
970 970 commit an individual file as part of a larger transaction
971 971 """
972 972
973 973 fname = fctx.path()
974 974 text = fctx.data()
975 975 flog = self.file(fname)
976 976 fparent1 = manifest1.get(fname, nullid)
977 977 fparent2 = fparent2o = manifest2.get(fname, nullid)
978 978
979 979 meta = {}
980 980 copy = fctx.renamed()
981 981 if copy and copy[0] != fname:
982 982 # Mark the new revision of this file as a copy of another
983 983 # file. This copy data will effectively act as a parent
984 984 # of this new revision. If this is a merge, the first
985 985 # parent will be the nullid (meaning "look up the copy data")
986 986 # and the second one will be the other parent. For example:
987 987 #
988 988 # 0 --- 1 --- 3 rev1 changes file foo
989 989 # \ / rev2 renames foo to bar and changes it
990 990 # \- 2 -/ rev3 should have bar with all changes and
991 991 # should record that bar descends from
992 992 # bar in rev2 and foo in rev1
993 993 #
994 994 # this allows this merge to succeed:
995 995 #
996 996 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
997 997 # \ / merging rev3 and rev4 should use bar@rev2
998 998 # \- 2 --- 4 as the merge base
999 999 #
1000 1000
1001 1001 cfname = copy[0]
1002 1002 crev = manifest1.get(cfname)
1003 1003 newfparent = fparent2
1004 1004
1005 1005 if manifest2: # branch merge
1006 1006 if fparent2 == nullid or crev is None: # copied on remote side
1007 1007 if cfname in manifest2:
1008 1008 crev = manifest2[cfname]
1009 1009 newfparent = fparent1
1010 1010
1011 1011 # find source in nearest ancestor if we've lost track
1012 1012 if not crev:
1013 1013 self.ui.debug(" %s: searching for copy revision for %s\n" %
1014 1014 (fname, cfname))
1015 1015 for ancestor in self[None].ancestors():
1016 1016 if cfname in ancestor:
1017 1017 crev = ancestor[cfname].filenode()
1018 1018 break
1019 1019
1020 1020 if crev:
1021 1021 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1022 1022 meta["copy"] = cfname
1023 1023 meta["copyrev"] = hex(crev)
1024 1024 fparent1, fparent2 = nullid, newfparent
1025 1025 else:
1026 1026 self.ui.warn(_("warning: can't find ancestor for '%s' "
1027 1027 "copied from '%s'!\n") % (fname, cfname))
1028 1028
1029 1029 elif fparent2 != nullid:
1030 1030 # is one parent an ancestor of the other?
1031 1031 fparentancestor = flog.ancestor(fparent1, fparent2)
1032 1032 if fparentancestor == fparent1:
1033 1033 fparent1, fparent2 = fparent2, nullid
1034 1034 elif fparentancestor == fparent2:
1035 1035 fparent2 = nullid
1036 1036
1037 1037 # is the file changed?
1038 1038 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1039 1039 changelist.append(fname)
1040 1040 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1041 1041
1042 1042 # are just the flags changed during merge?
1043 1043 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1044 1044 changelist.append(fname)
1045 1045
1046 1046 return fparent1
1047 1047
1048 1048 def commit(self, text="", user=None, date=None, match=None, force=False,
1049 1049 editor=False, extra={}):
1050 1050 """Add a new revision to current repository.
1051 1051
1052 1052 Revision information is gathered from the working directory,
1053 1053 match can be used to filter the committed files. If editor is
1054 1054 supplied, it is called to get a commit message.
1055 1055 """
1056 1056
1057 1057 def fail(f, msg):
1058 1058 raise util.Abort('%s: %s' % (f, msg))
1059 1059
1060 1060 if not match:
1061 1061 match = matchmod.always(self.root, '')
1062 1062
1063 1063 if not force:
1064 1064 vdirs = []
1065 1065 match.dir = vdirs.append
1066 1066 match.bad = fail
1067 1067
1068 1068 wlock = self.wlock()
1069 1069 try:
1070 1070 wctx = self[None]
1071 1071 merge = len(wctx.parents()) > 1
1072 1072
1073 1073 if (not force and merge and match and
1074 1074 (match.files() or match.anypats())):
1075 1075 raise util.Abort(_('cannot partially commit a merge '
1076 1076 '(do not specify files or patterns)'))
1077 1077
1078 1078 changes = self.status(match=match, clean=force)
1079 1079 if force:
1080 1080 changes[0].extend(changes[6]) # mq may commit unchanged files
1081 1081
1082 1082 # check subrepos
1083 1083 subs = []
1084 1084 removedsubs = set()
1085 1085 if '.hgsub' in wctx:
1086 1086 # only manage subrepos and .hgsubstate if .hgsub is present
1087 1087 for p in wctx.parents():
1088 1088 removedsubs.update(s for s in p.substate if match(s))
1089 1089 for s in wctx.substate:
1090 1090 removedsubs.discard(s)
1091 1091 if match(s) and wctx.sub(s).dirty():
1092 1092 subs.append(s)
1093 1093 if (subs or removedsubs):
1094 1094 if (not match('.hgsub') and
1095 1095 '.hgsub' in (wctx.modified() + wctx.added())):
1096 1096 raise util.Abort(
1097 1097 _("can't commit subrepos without .hgsub"))
1098 1098 if '.hgsubstate' not in changes[0]:
1099 1099 changes[0].insert(0, '.hgsubstate')
1100 1100 if '.hgsubstate' in changes[2]:
1101 1101 changes[2].remove('.hgsubstate')
1102 1102 elif '.hgsub' in changes[2]:
1103 1103 # clean up .hgsubstate when .hgsub is removed
1104 1104 if ('.hgsubstate' in wctx and
1105 1105 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1106 1106 changes[2].insert(0, '.hgsubstate')
1107 1107
1108 1108 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1109 1109 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1110 1110 if changedsubs:
1111 1111 raise util.Abort(_("uncommitted changes in subrepo %s")
1112 1112 % changedsubs[0],
1113 1113 hint=_("use --subrepos for recursive commit"))
1114 1114
1115 1115 # make sure all explicit patterns are matched
1116 1116 if not force and match.files():
1117 1117 matched = set(changes[0] + changes[1] + changes[2])
1118 1118
1119 1119 for f in match.files():
1120 1120 if f == '.' or f in matched or f in wctx.substate:
1121 1121 continue
1122 1122 if f in changes[3]: # missing
1123 1123 fail(f, _('file not found!'))
1124 1124 if f in vdirs: # visited directory
1125 1125 d = f + '/'
1126 1126 for mf in matched:
1127 1127 if mf.startswith(d):
1128 1128 break
1129 1129 else:
1130 1130 fail(f, _("no match under directory!"))
1131 1131 elif f not in self.dirstate:
1132 1132 fail(f, _("file not tracked!"))
1133 1133
1134 1134 if (not force and not extra.get("close") and not merge
1135 1135 and not (changes[0] or changes[1] or changes[2])
1136 1136 and wctx.branch() == wctx.p1().branch()):
1137 1137 return None
1138 1138
1139 1139 ms = mergemod.mergestate(self)
1140 1140 for f in changes[0]:
1141 1141 if f in ms and ms[f] == 'u':
1142 1142 raise util.Abort(_("unresolved merge conflicts "
1143 1143 "(see hg help resolve)"))
1144 1144
1145 1145 cctx = context.workingctx(self, text, user, date, extra, changes)
1146 1146 if editor:
1147 1147 cctx._text = editor(self, cctx, subs)
1148 1148 edited = (text != cctx._text)
1149 1149
1150 1150 # commit subs
1151 1151 if subs or removedsubs:
1152 1152 state = wctx.substate.copy()
1153 1153 for s in sorted(subs):
1154 1154 sub = wctx.sub(s)
1155 1155 self.ui.status(_('committing subrepository %s\n') %
1156 1156 subrepo.subrelpath(sub))
1157 1157 sr = sub.commit(cctx._text, user, date)
1158 1158 state[s] = (state[s][0], sr)
1159 1159 subrepo.writestate(self, state)
1160 1160
1161 1161 # Save commit message in case this transaction gets rolled back
1162 1162 # (e.g. by a pretxncommit hook). Leave the content alone on
1163 1163 # the assumption that the user will use the same editor again.
1164 1164 msgfn = self.savecommitmessage(cctx._text)
1165 1165
1166 1166 p1, p2 = self.dirstate.parents()
1167 1167 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1168 1168 try:
1169 1169 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1170 1170 ret = self.commitctx(cctx, True)
1171 1171 except:
1172 1172 if edited:
1173 1173 self.ui.write(
1174 1174 _('note: commit message saved in %s\n') % msgfn)
1175 1175 raise
1176 1176
1177 1177 # update bookmarks, dirstate and mergestate
1178 1178 bookmarks.update(self, p1, ret)
1179 1179 for f in changes[0] + changes[1]:
1180 1180 self.dirstate.normal(f)
1181 1181 for f in changes[2]:
1182 1182 self.dirstate.drop(f)
1183 1183 self.dirstate.setparents(ret)
1184 1184 ms.reset()
1185 1185 finally:
1186 1186 wlock.release()
1187 1187
1188 1188 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1189 1189 return ret
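# --- illustrative sketch (not part of this diff): committing a single
# file with an exact matcher, mirroring what _tag() above does for
# .hgtags:
#     m = matchmod.exact(repo.root, '', ['.hgtags'])
#     node = repo.commit('added a tag', user=None, date=None, match=m)
# --- end sketch ---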
1190 1190
1191 1191 def commitctx(self, ctx, error=False):
1192 1192 """Add a new revision to current repository.
1193 1193 Revision information is passed via the context argument.
1194 1194 """
1195 1195
1196 1196 tr = lock = None
1197 1197 removed = list(ctx.removed())
1198 1198 p1, p2 = ctx.p1(), ctx.p2()
1199 1199 user = ctx.user()
1200 1200
1201 1201 lock = self.lock()
1202 1202 try:
1203 1203 tr = self.transaction("commit")
1204 1204 trp = weakref.proxy(tr)
1205 1205
1206 1206 if ctx.files():
1207 1207 m1 = p1.manifest().copy()
1208 1208 m2 = p2.manifest()
1209 1209
1210 1210 # check in files
1211 1211 new = {}
1212 1212 changed = []
1213 1213 linkrev = len(self)
1214 1214 for f in sorted(ctx.modified() + ctx.added()):
1215 1215 self.ui.note(f + "\n")
1216 1216 try:
1217 1217 fctx = ctx[f]
1218 1218 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1219 1219 changed)
1220 1220 m1.set(f, fctx.flags())
1221 1221 except OSError, inst:
1222 1222 self.ui.warn(_("trouble committing %s!\n") % f)
1223 1223 raise
1224 1224 except IOError, inst:
1225 1225 errcode = getattr(inst, 'errno', errno.ENOENT)
1226 1226 if error or errcode and errcode != errno.ENOENT:
1227 1227 self.ui.warn(_("trouble committing %s!\n") % f)
1228 1228 raise
1229 1229 else:
1230 1230 removed.append(f)
1231 1231
1232 1232 # update manifest
1233 1233 m1.update(new)
1234 1234 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1235 1235 drop = [f for f in removed if f in m1]
1236 1236 for f in drop:
1237 1237 del m1[f]
1238 1238 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1239 1239 p2.manifestnode(), (new, drop))
1240 1240 files = changed + removed
1241 1241 else:
1242 1242 mn = p1.manifestnode()
1243 1243 files = []
1244 1244
1245 1245 # update changelog
1246 1246 self.changelog.delayupdate()
1247 1247 n = self.changelog.add(mn, files, ctx.description(),
1248 1248 trp, p1.node(), p2.node(),
1249 1249 user, ctx.date(), ctx.extra().copy())
1250 1250 p = lambda: self.changelog.writepending() and self.root or ""
1251 1251 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1252 1252 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1253 1253 parent2=xp2, pending=p)
1254 1254 self.changelog.finalize(trp)
1255 1255 # ensure the new commit is 1-phase
1256 1256 phases.retractboundary(self, 1, [n])
1257 1257 tr.close()
1258 1258
1259 1259 if self._branchcache:
1260 1260 self.updatebranchcache()
1261 1261 return n
1262 1262 finally:
1263 1263 if tr:
1264 1264 tr.release()
1265 1265 lock.release()
1266 1266
1267 1267 def destroyed(self):
1268 1268 '''Inform the repository that nodes have been destroyed.
1269 1269 Intended for use by strip and rollback, so there's a common
1270 1270 place for anything that has to be done after destroying history.'''
1271 1271 # XXX it might be nice if we could take the list of destroyed
1272 1272 # nodes, but I don't see an easy way for rollback() to do that
1273 1273
1274 1274 # Ensure the persistent tag cache is updated. Doing it now
1275 1275 # means that the tag cache only has to worry about destroyed
1276 1276 # heads immediately after a strip/rollback. That in turn
1277 1277 # guarantees that "cachetip == currenttip" (comparing both rev
1278 1278 # and node) always means no nodes have been added or destroyed.
1279 1279
1280 1280 # XXX this is suboptimal when qrefresh'ing: we strip the current
1281 1281 # head, refresh the tag cache, then immediately add a new head.
1282 1282 # But I think doing it this way is necessary for the "instant
1283 1283 # tag cache retrieval" case to work.
1284 1284 self.invalidatecaches()
1285 1285
1286 1286 def walk(self, match, node=None):
1287 1287 '''
1288 1288 walk recursively through the directory tree or a given
1289 1289 changeset, finding all files matched by the match
1290 1290 function
1291 1291 '''
1292 1292 return self[node].walk(match)
1293 1293
1294 1294 def status(self, node1='.', node2=None, match=None,
1295 1295 ignored=False, clean=False, unknown=False,
1296 1296 listsubrepos=False):
1297 1297 """return status of files between two nodes or a node and the working directory
1298 1298
1299 1299 If node1 is None, use the first dirstate parent instead.
1300 1300 If node2 is None, compare node1 with working directory.
1301 1301 """
1302 1302
1303 1303 def mfmatches(ctx):
1304 1304 mf = ctx.manifest().copy()
1305 1305 for fn in mf.keys():
1306 1306 if not match(fn):
1307 1307 del mf[fn]
1308 1308 return mf
1309 1309
1310 1310 if isinstance(node1, context.changectx):
1311 1311 ctx1 = node1
1312 1312 else:
1313 1313 ctx1 = self[node1]
1314 1314 if isinstance(node2, context.changectx):
1315 1315 ctx2 = node2
1316 1316 else:
1317 1317 ctx2 = self[node2]
1318 1318
1319 1319 working = ctx2.rev() is None
1320 1320 parentworking = working and ctx1 == self['.']
1321 1321 match = match or matchmod.always(self.root, self.getcwd())
1322 1322 listignored, listclean, listunknown = ignored, clean, unknown
1323 1323
1324 1324 # load earliest manifest first for caching reasons
1325 1325 if not working and ctx2.rev() < ctx1.rev():
1326 1326 ctx2.manifest()
1327 1327
1328 1328 if not parentworking:
1329 1329 def bad(f, msg):
1330 1330 if f not in ctx1:
1331 1331 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1332 1332 match.bad = bad
1333 1333
1334 1334 if working: # we need to scan the working dir
1335 1335 subrepos = []
1336 1336 if '.hgsub' in self.dirstate:
1337 1337 subrepos = ctx2.substate.keys()
1338 1338 s = self.dirstate.status(match, subrepos, listignored,
1339 1339 listclean, listunknown)
1340 1340 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1341 1341
1342 1342 # check for any possibly clean files
1343 1343 if parentworking and cmp:
1344 1344 fixup = []
1345 1345 # do a full compare of any files that might have changed
1346 1346 for f in sorted(cmp):
1347 1347 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1348 1348 or ctx1[f].cmp(ctx2[f])):
1349 1349 modified.append(f)
1350 1350 else:
1351 1351 fixup.append(f)
1352 1352
1353 1353 # update dirstate for files that are actually clean
1354 1354 if fixup:
1355 1355 if listclean:
1356 1356 clean += fixup
1357 1357
1358 1358 try:
1359 1359 # updating the dirstate is optional
1360 1360 # so we don't wait on the lock
1361 1361 wlock = self.wlock(False)
1362 1362 try:
1363 1363 for f in fixup:
1364 1364 self.dirstate.normal(f)
1365 1365 finally:
1366 1366 wlock.release()
1367 1367 except error.LockError:
1368 1368 pass
1369 1369
1370 1370 if not parentworking:
1371 1371 mf1 = mfmatches(ctx1)
1372 1372 if working:
1373 1373 # we are comparing working dir against non-parent
1374 1374 # generate a pseudo-manifest for the working dir
1375 1375 mf2 = mfmatches(self['.'])
1376 1376 for f in cmp + modified + added:
1377 1377 mf2[f] = None
1378 1378 mf2.set(f, ctx2.flags(f))
1379 1379 for f in removed:
1380 1380 if f in mf2:
1381 1381 del mf2[f]
1382 1382 else:
1383 1383 # we are comparing two revisions
1384 1384 deleted, unknown, ignored = [], [], []
1385 1385 mf2 = mfmatches(ctx2)
1386 1386
1387 1387 modified, added, clean = [], [], []
1388 1388 for fn in mf2:
1389 1389 if fn in mf1:
1390 1390 if (fn not in deleted and
1391 1391 (mf1.flags(fn) != mf2.flags(fn) or
1392 1392 (mf1[fn] != mf2[fn] and
1393 1393 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1394 1394 modified.append(fn)
1395 1395 elif listclean:
1396 1396 clean.append(fn)
1397 1397 del mf1[fn]
1398 1398 elif fn not in deleted:
1399 1399 added.append(fn)
1400 1400 removed = mf1.keys()
1401 1401
1402 1402 if working and modified and not self.dirstate._checklink:
1403 1403 # Symlink placeholders may get non-symlink-like contents
1404 1404 # via user error or dereferencing by NFS or Samba servers,
1405 1405 # so we filter out any placeholders that don't look like a
1406 1406 # symlink
1407 1407 sane = []
1408 1408 for f in modified:
1409 1409 if ctx2.flags(f) == 'l':
1410 1410 d = ctx2[f].data()
1411 1411 if len(d) >= 1024 or '\n' in d or util.binary(d):
1412 1412 self.ui.debug('ignoring suspect symlink placeholder'
1413 1413 ' "%s"\n' % f)
1414 1414 continue
1415 1415 sane.append(f)
1416 1416 modified = sane
1417 1417
1418 1418 r = modified, added, removed, deleted, unknown, ignored, clean
1419 1419
1420 1420 if listsubrepos:
1421 1421 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1422 1422 if working:
1423 1423 rev2 = None
1424 1424 else:
1425 1425 rev2 = ctx2.substate[subpath][1]
1426 1426 try:
1427 1427 submatch = matchmod.narrowmatcher(subpath, match)
1428 1428 s = sub.status(rev2, match=submatch, ignored=listignored,
1429 1429 clean=listclean, unknown=listunknown,
1430 1430 listsubrepos=True)
1431 1431 for rfiles, sfiles in zip(r, s):
1432 1432 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1433 1433 except error.LookupError:
1434 1434 self.ui.status(_("skipping missing subrepository: %s\n")
1435 1435 % subpath)
1436 1436
1437 1437 for l in r:
1438 1438 l.sort()
1439 1439 return r
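# --- illustrative sketch (not part of this diff): status() returns seven
# sorted lists in the order assembled above:
#     (modified, added, removed, deleted,
#      unknown, ignored, clean) = repo.status(ignored=True, clean=True,
#                                             unknown=True)
# --- end sketch ---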
1440 1440
1441 1441 def heads(self, start=None):
1442 1442 heads = self.changelog.heads(start)
1443 1443 # sort the output in rev descending order
1444 1444 return sorted(heads, key=self.changelog.rev, reverse=True)
1445 1445
1446 1446 def branchheads(self, branch=None, start=None, closed=False):
1447 1447 '''return a (possibly filtered) list of heads for the given branch
1448 1448
1449 1449 Heads are returned in topological order, from newest to oldest.
1450 1450 If branch is None, use the dirstate branch.
1451 1451 If start is not None, return only heads reachable from start.
1452 1452 If closed is True, return heads that are marked as closed as well.
1453 1453 '''
1454 1454 if branch is None:
1455 1455 branch = self[None].branch()
1456 1456 branches = self.branchmap()
1457 1457 if branch not in branches:
1458 1458 return []
1459 1459 # the cache returns heads ordered lowest to highest
1460 1460 bheads = list(reversed(branches[branch]))
1461 1461 if start is not None:
1462 1462 # filter out the heads that cannot be reached from startrev
1463 1463 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1464 1464 bheads = [h for h in bheads if h in fbheads]
1465 1465 if not closed:
1466 1466 bheads = [h for h in bheads if
1467 1467 ('close' not in self.changelog.read(h)[5])]
1468 1468 return bheads
1469 1469
1470 1470 def branches(self, nodes):
1471 1471 if not nodes:
1472 1472 nodes = [self.changelog.tip()]
1473 1473 b = []
1474 1474 for n in nodes:
1475 1475 t = n
1476 1476 while True:
1477 1477 p = self.changelog.parents(n)
1478 1478 if p[1] != nullid or p[0] == nullid:
1479 1479 b.append((t, n, p[0], p[1]))
1480 1480 break
1481 1481 n = p[0]
1482 1482 return b
1483 1483
1484 1484 def between(self, pairs):
1485 1485 r = []
1486 1486
1487 1487 for top, bottom in pairs:
1488 1488 n, l, i = top, [], 0
1489 1489 f = 1
1490 1490
1491 1491 while n != bottom and n != nullid:
1492 1492 p = self.changelog.parents(n)[0]
1493 1493 if i == f:
1494 1494 l.append(n)
1495 1495 f = f * 2
1496 1496 n = p
1497 1497 i += 1
1498 1498
1499 1499 r.append(l)
1500 1500
1501 1501 return r
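# --- illustrative sketch (not part of this diff): for each (top, bottom)
# pair, between() collects first-parent ancestors of top at exponentially
# growing distances (1, 2, 4, ...), which is how the wire protocol narrows
# down common history:
#     repo.between([(repo.changelog.tip(), nullid)])
# --- end sketch ---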
1502 1502
1503 1503 def pull(self, remote, heads=None, force=False):
1504 1504 lock = self.lock()
1505 1505 try:
1506 1506 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1507 1507 force=force)
1508 1508 common, fetch, rheads = tmp
1509 1509 if not fetch:
1510 1510 self.ui.status(_("no changes found\n"))
1511 1511 result = 0
1512 1512 else:
1513 1513 if heads is None and list(common) == [nullid]:
1514 1514 self.ui.status(_("requesting all changes\n"))
1515 1515 elif heads is None and remote.capable('changegroupsubset'):
1516 1516 # issue1320, avoid a race if remote changed after discovery
1517 1517 heads = rheads
1518 1518
1519 1519 if remote.capable('getbundle'):
1520 1520 cg = remote.getbundle('pull', common=common,
1521 1521 heads=heads or rheads)
1522 1522 elif heads is None:
1523 1523 cg = remote.changegroup(fetch, 'pull')
1524 1524 elif not remote.capable('changegroupsubset'):
1525 1525 raise util.Abort(_("partial pull cannot be done because "
1526 1526 "other repository doesn't support "
1527 1527 "changegroupsubset."))
1528 1528 else:
1529 1529 cg = remote.changegroupsubset(fetch, heads, 'pull')
1530 1530 result = self.addchangegroup(cg, 'pull', remote.url(),
1531 1531 lock=lock)
1532 1532 phases.advanceboundary(self, 0, common)
1533 1533 finally:
1534 1534 lock.release()
1535 1535
1536 1536 return result
1537 1537
1538 1538 def checkpush(self, force, revs):
1539 1539 """Extensions can override this function if additional checks have
1540 1540 to be performed before pushing, or call it if they override push
1541 1541 command.
1542 1542 """
1543 1543 pass
1544 1544
1545 1545 def push(self, remote, force=False, revs=None, newbranch=False):
1546 1546 '''Push outgoing changesets (limited by revs) from the current
1547 1547 repository to remote. Return an integer:
1548 1548 - 0 means HTTP error *or* nothing to push
1549 1549 - 1 means we pushed and remote head count is unchanged *or*
1550 1550 we have outgoing changesets but refused to push
1551 1551 - other values as described by addchangegroup()
1552 1552 '''
1553 1553 # there are two ways to push to remote repo:
1554 1554 #
1555 1555 # addchangegroup assumes local user can lock remote
1556 1556 # repo (local filesystem, old ssh servers).
1557 1557 #
1558 1558 # unbundle assumes local user cannot lock remote repo (new ssh
1559 1559 # servers, http servers).
1560 1560
1561 1561 self.checkpush(force, revs)
1562 1562 lock = None
1563 1563 unbundle = remote.capable('unbundle')
1564 1564 if not unbundle:
1565 1565 lock = remote.lock()
1566 1566 try:
1567 1567 # get local lock as we might write phase data
1568 1568 locallock = self.lock()
1569 1569 try:
1570 1570 cg, remote_heads, fut = discovery.prepush(self, remote, force,
1571 1571 revs, newbranch)
1572 1572 ret = remote_heads
1573 1573 if cg is not None:
1574 1574 if unbundle:
1575 1575 # local repo finds heads on server, finds out what
1576 1576 # revs it must push. once revs transferred, if server
1577 1577 # finds it has different heads (someone else won
1578 1578 # commit/push race), server aborts.
1579 1579 if force:
1580 1580 remote_heads = ['force']
1581 1581 # ssh: return remote's addchangegroup()
1582 1582 # http: return remote's addchangegroup() or 0 for error
1583 1583 ret = remote.unbundle(cg, remote_heads, 'push')
1584 1584 else:
1585 1585 # we return an integer indicating remote head count change
1586 1586 ret = remote.addchangegroup(cg, 'push', self.url(),
1587 1587 lock=lock)
1588 1588 # if we don't push, the common data is already useful
1589 1589 # everything exchange is public for now
1590 1590 phases.advanceboundary(self, 0, fut)
1591 1591 finally:
1592 1592 locallock.release()
1593 1593 finally:
1594 1594 if lock is not None:
1595 1595 lock.release()
1596 1596
1597 1597 self.ui.debug("checking for updated bookmarks\n")
1598 1598 rb = remote.listkeys('bookmarks')
1599 1599 for k in rb.keys():
1600 1600 if k in self._bookmarks:
1601 1601 nr, nl = rb[k], hex(self._bookmarks[k])
1602 1602 if nr in self:
1603 1603 cr = self[nr]
1604 1604 cl = self[nl]
1605 1605 if cl in cr.descendants():
1606 1606 r = remote.pushkey('bookmarks', k, nr, nl)
1607 1607 if r:
1608 1608 self.ui.status(_("updating bookmark %s\n") % k)
1609 1609 else:
1610 1610 self.ui.warn(_('updating bookmark %s'
1611 1611 ' failed!\n') % k)
1612 1612
1613 1613 return ret
1614 1614
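A small sketch of how a caller might interpret this return value (`repo`, `other`, and `myui` as in the pull example above):

    ret = repo.push(other)
    if ret == 0:
        myui.warn('push failed (HTTP error) or nothing to push\n')
    elif ret == 1:
        myui.status('pushed; remote head count unchanged\n')
    elif ret > 1:
        # addchangegroup() encodes dh added heads as dh + 1
        myui.status('pushed; %d new remote heads\n' % (ret - 1))
    else:
        # negative values encode removed heads as dh - 1
        myui.status('pushed; %d remote heads removed\n' % (-ret - 1))
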
1615 1615 def changegroupinfo(self, nodes, source):
1616 1616 if self.ui.verbose or source == 'bundle':
1617 1617 self.ui.status(_("%d changesets found\n") % len(nodes))
1618 1618 if self.ui.debugflag:
1619 1619 self.ui.debug("list of changesets:\n")
1620 1620 for node in nodes:
1621 1621 self.ui.debug("%s\n" % hex(node))
1622 1622
1623 1623 def changegroupsubset(self, bases, heads, source):
1624 1624 """Compute a changegroup consisting of all the nodes that are
1625 1625 descendants of any of the bases and ancestors of any of the heads.
1626 1626 Return a chunkbuffer object whose read() method will return
1627 1627 successive changegroup chunks.
1628 1628
1629 1629         This is fairly complex: determining which filenodes and which
1630 1630         manifest nodes need to be included for the changesets to be
1631 1631         complete is non-trivial.
1632 1632
1633 1633 Another wrinkle is doing the reverse, figuring out which changeset in
1634 1634 the changegroup a particular filenode or manifestnode belongs to.
1635 1635 """
1636 1636 cl = self.changelog
1637 1637 if not bases:
1638 1638 bases = [nullid]
1639 1639 csets, bases, heads = cl.nodesbetween(bases, heads)
1640 1640 # We assume that all ancestors of bases are known
1641 1641 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1642 1642 return self._changegroupsubset(common, csets, heads, source)
1643 1643
1644 1644 def getbundle(self, source, heads=None, common=None):
1645 1645 """Like changegroupsubset, but returns the set difference between the
1646 1646         ancestors of heads and the ancestors of common.
1647 1647
1648 1648 If heads is None, use the local heads. If common is None, use [nullid].
1649 1649
1650 1650 The nodes in common might not all be known locally due to the way the
1651 1651 current discovery protocol works.
1652 1652 """
1653 1653 cl = self.changelog
1654 1654 if common:
1655 1655 nm = cl.nodemap
1656 1656 common = [n for n in common if n in nm]
1657 1657 else:
1658 1658 common = [nullid]
1659 1659 if not heads:
1660 1660 heads = cl.heads()
1661 1661 common, missing = cl.findcommonmissing(common, heads)
1662 1662 if not missing:
1663 1663 return None
1664 1664 return self._changegroupsubset(common, missing, heads, source)
1665 1665
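The set-difference semantics can be illustrated with a toy DAG, using plain sets in place of the changelog's ancestor computations:

    # toy DAG: node -> parents
    parents = {'a': [], 'b': ['a'], 'c': ['b'], 'd': ['b']}

    def ancestors(nodes):
        seen, stack = set(), list(nodes)
        while stack:
            n = stack.pop()
            if n not in seen:
                seen.add(n)
                stack.extend(parents[n])
        return seen

    # a bundle for heads=['d'] against common=['c'] ships only 'd'
    missing = ancestors(['d']) - ancestors(['c'])
    assert missing == set(['d'])
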
1666 1666 def _changegroupsubset(self, commonrevs, csets, heads, source):
1667 1667
1668 1668 cl = self.changelog
1669 1669 mf = self.manifest
1670 1670 mfs = {} # needed manifests
1671 1671 fnodes = {} # needed file nodes
1672 1672 changedfiles = set()
1673 1673 fstate = ['', {}]
1674 1674 count = [0]
1675 1675
1676 1676         # can we go through the fast path?
1677 1677 heads.sort()
1678 1678 if heads == sorted(self.heads()):
1679 1679 return self._changegroup(csets, source)
1680 1680
1681 1681 # slow path
1682 1682 self.hook('preoutgoing', throw=True, source=source)
1683 1683 self.changegroupinfo(csets, source)
1684 1684
1685 1685 # filter any nodes that claim to be part of the known set
1686 1686 def prune(revlog, missing):
1687 1687 return [n for n in missing
1688 1688 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1689 1689
1690 1690 def lookup(revlog, x):
1691 1691 if revlog == cl:
1692 1692 c = cl.read(x)
1693 1693 changedfiles.update(c[3])
1694 1694 mfs.setdefault(c[0], x)
1695 1695 count[0] += 1
1696 1696 self.ui.progress(_('bundling'), count[0],
1697 1697 unit=_('changesets'), total=len(csets))
1698 1698 return x
1699 1699 elif revlog == mf:
1700 1700 clnode = mfs[x]
1701 1701 mdata = mf.readfast(x)
1702 1702 for f in changedfiles:
1703 1703 if f in mdata:
1704 1704 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1705 1705 count[0] += 1
1706 1706 self.ui.progress(_('bundling'), count[0],
1707 1707 unit=_('manifests'), total=len(mfs))
1708 1708 return mfs[x]
1709 1709 else:
1710 1710 self.ui.progress(
1711 1711 _('bundling'), count[0], item=fstate[0],
1712 1712 unit=_('files'), total=len(changedfiles))
1713 1713 return fstate[1][x]
1714 1714
1715 1715 bundler = changegroup.bundle10(lookup)
1716 1716 reorder = self.ui.config('bundle', 'reorder', 'auto')
1717 1717 if reorder == 'auto':
1718 1718 reorder = None
1719 1719 else:
1720 1720 reorder = util.parsebool(reorder)
1721 1721
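For reference, a sketch of how the bundle.reorder values parse; `util.parsebool` yields True, False, or None for unrecognized strings, so after the branch above reorder is None ('auto'), True, or False:

    from mercurial import util

    assert util.parsebool('yes') is True
    assert util.parsebool('0') is False
    assert util.parsebool('bogus') is None   # unrecognized -> None
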
1722 1722 def gengroup():
1723 1723 # Create a changenode group generator that will call our functions
1724 1724             # back to look up the owning changenode and collect information.
1725 1725 for chunk in cl.group(csets, bundler, reorder=reorder):
1726 1726 yield chunk
1727 1727 self.ui.progress(_('bundling'), None)
1728 1728
1729 1729 # Create a generator for the manifestnodes that calls our lookup
1730 1730 # and data collection functions back.
1731 1731 count[0] = 0
1732 1732 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1733 1733 yield chunk
1734 1734 self.ui.progress(_('bundling'), None)
1735 1735
1736 1736 mfs.clear()
1737 1737
1738 1738             # Go through all our files, sorted by name.
1739 1739 count[0] = 0
1740 1740 for fname in sorted(changedfiles):
1741 1741 filerevlog = self.file(fname)
1742 1742 if not len(filerevlog):
1743 1743 raise util.Abort(_("empty or missing revlog for %s") % fname)
1744 1744 fstate[0] = fname
1745 1745 fstate[1] = fnodes.pop(fname, {})
1746 1746
1747 1747 nodelist = prune(filerevlog, fstate[1])
1748 1748 if nodelist:
1749 1749 count[0] += 1
1750 1750 yield bundler.fileheader(fname)
1751 1751 for chunk in filerevlog.group(nodelist, bundler, reorder):
1752 1752 yield chunk
1753 1753
1754 1754 # Signal that no more groups are left.
1755 1755 yield bundler.close()
1756 1756 self.ui.progress(_('bundling'), None)
1757 1757
1758 1758 if csets:
1759 1759 self.hook('outgoing', node=hex(csets[0]), source=source)
1760 1760
1761 1761 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1762 1762
1763 1763 def changegroup(self, basenodes, source):
1764 1764 # to avoid a race we use changegroupsubset() (issue1320)
1765 1765 return self.changegroupsubset(basenodes, self.heads(), source)
1766 1766
1767 1767 def _changegroup(self, nodes, source):
1768 1768 """Compute the changegroup of all nodes that we have that a recipient
1769 1769 doesn't. Return a chunkbuffer object whose read() method will return
1770 1770 successive changegroup chunks.
1771 1771
1772 1772 This is much easier than the previous function as we can assume that
1773 1773 the recipient has any changenode we aren't sending them.
1774 1774
1775 1775 nodes is the set of nodes to send"""
1776 1776
1777 1777 cl = self.changelog
1778 1778 mf = self.manifest
1779 1779 mfs = {}
1780 1780 changedfiles = set()
1781 1781 fstate = ['']
1782 1782 count = [0]
1783 1783
1784 1784 self.hook('preoutgoing', throw=True, source=source)
1785 1785 self.changegroupinfo(nodes, source)
1786 1786
1787 1787 revset = set([cl.rev(n) for n in nodes])
1788 1788
1789 1789 def gennodelst(log):
1790 1790 return [log.node(r) for r in log if log.linkrev(r) in revset]
1791 1791
1792 1792 def lookup(revlog, x):
1793 1793 if revlog == cl:
1794 1794 c = cl.read(x)
1795 1795 changedfiles.update(c[3])
1796 1796 mfs.setdefault(c[0], x)
1797 1797 count[0] += 1
1798 1798 self.ui.progress(_('bundling'), count[0],
1799 1799 unit=_('changesets'), total=len(nodes))
1800 1800 return x
1801 1801 elif revlog == mf:
1802 1802 count[0] += 1
1803 1803 self.ui.progress(_('bundling'), count[0],
1804 1804 unit=_('manifests'), total=len(mfs))
1805 1805 return cl.node(revlog.linkrev(revlog.rev(x)))
1806 1806 else:
1807 1807 self.ui.progress(
1808 1808 _('bundling'), count[0], item=fstate[0],
1809 1809 total=len(changedfiles), unit=_('files'))
1810 1810 return cl.node(revlog.linkrev(revlog.rev(x)))
1811 1811
1812 1812 bundler = changegroup.bundle10(lookup)
1813 1813 reorder = self.ui.config('bundle', 'reorder', 'auto')
1814 1814 if reorder == 'auto':
1815 1815 reorder = None
1816 1816 else:
1817 1817 reorder = util.parsebool(reorder)
1818 1818
1819 1819 def gengroup():
1820 1820 '''yield a sequence of changegroup chunks (strings)'''
1821 1821 # construct a list of all changed files
1822 1822
1823 1823 for chunk in cl.group(nodes, bundler, reorder=reorder):
1824 1824 yield chunk
1825 1825 self.ui.progress(_('bundling'), None)
1826 1826
1827 1827 count[0] = 0
1828 1828 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1829 1829 yield chunk
1830 1830 self.ui.progress(_('bundling'), None)
1831 1831
1832 1832 count[0] = 0
1833 1833 for fname in sorted(changedfiles):
1834 1834 filerevlog = self.file(fname)
1835 1835 if not len(filerevlog):
1836 1836 raise util.Abort(_("empty or missing revlog for %s") % fname)
1837 1837 fstate[0] = fname
1838 1838 nodelist = gennodelst(filerevlog)
1839 1839 if nodelist:
1840 1840 count[0] += 1
1841 1841 yield bundler.fileheader(fname)
1842 1842 for chunk in filerevlog.group(nodelist, bundler, reorder):
1843 1843 yield chunk
1844 1844 yield bundler.close()
1845 1845 self.ui.progress(_('bundling'), None)
1846 1846
1847 1847 if nodes:
1848 1848 self.hook('outgoing', node=hex(nodes[0]), source=source)
1849 1849
1850 1850 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1851 1851
1852 1852 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1853 1853 """Add the changegroup returned by source.read() to this repo.
1854 1854 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1855 1855 the URL of the repo where this changegroup is coming from.
1856 1856 If lock is not None, the function takes ownership of the lock
1857 1857 and releases it after the changegroup is added.
1858 1858
1859 1859 Return an integer summarizing the change to this repo:
1860 1860 - nothing changed or no source: 0
1861 1861 - more heads than before: 1+added heads (2..n)
1862 1862 - fewer heads than before: -1-removed heads (-2..-n)
1863 1863 - number of heads stays the same: 1
1864 1864 """
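A quick worked example of this encoding; dh is the signed change in head count computed near the end of this method:

    def encode(dh):
        # mirrors the "never return 0" mapping at the end of addchangegroup()
        return dh - 1 if dh < 0 else dh + 1

    assert encode(0) == 1     # head count unchanged
    assert encode(2) == 3     # two heads added
    assert encode(-1) == -2   # one head removed
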
1865 1865 def csmap(x):
1866 1866 self.ui.debug("add changeset %s\n" % short(x))
1867 1867 return len(cl)
1868 1868
1869 1869 def revmap(x):
1870 1870 return cl.rev(x)
1871 1871
1872 1872 if not source:
1873 1873 return 0
1874 1874
1875 1875 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1876 1876
1877 1877 changesets = files = revisions = 0
1878 1878 efiles = set()
1879 1879
1880 1880 # write changelog data to temp files so concurrent readers will not see
1881 1881         # an inconsistent view
1882 1882 cl = self.changelog
1883 1883 cl.delayupdate()
1884 1884 oldheads = cl.heads()
1885 1885
1886 1886 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1887 1887 try:
1888 1888 trp = weakref.proxy(tr)
1889 1889 # pull off the changeset group
1890 1890 self.ui.status(_("adding changesets\n"))
1891 1891 clstart = len(cl)
1892 1892 class prog(object):
1893 1893 step = _('changesets')
1894 1894 count = 1
1895 1895 ui = self.ui
1896 1896 total = None
1897 1897 def __call__(self):
1898 1898 self.ui.progress(self.step, self.count, unit=_('chunks'),
1899 1899 total=self.total)
1900 1900 self.count += 1
1901 1901 pr = prog()
1902 1902 source.callback = pr
1903 1903
1904 1904 source.changelogheader()
1905 1905 if (cl.addgroup(source, csmap, trp) is None
1906 1906 and not emptyok):
1907 1907 raise util.Abort(_("received changelog group is empty"))
1908 1908 clend = len(cl)
1909 1909 changesets = clend - clstart
1910 1910 for c in xrange(clstart, clend):
1911 1911 efiles.update(self[c].files())
1912 1912 efiles = len(efiles)
1913 1913 self.ui.progress(_('changesets'), None)
1914 1914
1915 1915 # pull off the manifest group
1916 1916 self.ui.status(_("adding manifests\n"))
1917 1917 pr.step = _('manifests')
1918 1918 pr.count = 1
1919 1919 pr.total = changesets # manifests <= changesets
1920 1920 # no need to check for empty manifest group here:
1921 1921 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1922 1922 # no new manifest will be created and the manifest group will
1923 1923 # be empty during the pull
1924 1924 source.manifestheader()
1925 1925 self.manifest.addgroup(source, revmap, trp)
1926 1926 self.ui.progress(_('manifests'), None)
1927 1927
1928 1928 needfiles = {}
1929 1929 if self.ui.configbool('server', 'validate', default=False):
1930 1930 # validate incoming csets have their manifests
1931 1931 for cset in xrange(clstart, clend):
1932 1932 mfest = self.changelog.read(self.changelog.node(cset))[0]
1933 1933 mfest = self.manifest.readdelta(mfest)
1934 1934 # store file nodes we must see
1935 1935 for f, n in mfest.iteritems():
1936 1936 needfiles.setdefault(f, set()).add(n)
1937 1937
1938 1938 # process the files
1939 1939 self.ui.status(_("adding file changes\n"))
1940 1940 pr.step = _('files')
1941 1941 pr.count = 1
1942 1942 pr.total = efiles
1943 1943 source.callback = None
1944 1944
1945 1945 while True:
1946 1946 chunkdata = source.filelogheader()
1947 1947 if not chunkdata:
1948 1948 break
1949 1949 f = chunkdata["filename"]
1950 1950 self.ui.debug("adding %s revisions\n" % f)
1951 1951 pr()
1952 1952 fl = self.file(f)
1953 1953 o = len(fl)
1954 1954 if fl.addgroup(source, revmap, trp) is None:
1955 1955 raise util.Abort(_("received file revlog group is empty"))
1956 1956 revisions += len(fl) - o
1957 1957 files += 1
1958 1958 if f in needfiles:
1959 1959 needs = needfiles[f]
1960 1960 for new in xrange(o, len(fl)):
1961 1961 n = fl.node(new)
1962 1962 if n in needs:
1963 1963 needs.remove(n)
1964 1964 if not needs:
1965 1965 del needfiles[f]
1966 1966 self.ui.progress(_('files'), None)
1967 1967
1968 1968 for f, needs in needfiles.iteritems():
1969 1969 fl = self.file(f)
1970 1970 for n in needs:
1971 1971 try:
1972 1972 fl.rev(n)
1973 1973 except error.LookupError:
1974 1974 raise util.Abort(
1975 1975 _('missing file data for %s:%s - run hg verify') %
1976 1976 (f, hex(n)))
1977 1977
1978 1978 dh = 0
1979 1979 if oldheads:
1980 1980 heads = cl.heads()
1981 1981 dh = len(heads) - len(oldheads)
1982 1982 for h in heads:
1983 1983 if h not in oldheads and 'close' in self[h].extra():
1984 1984 dh -= 1
1985 1985 htext = ""
1986 1986 if dh:
1987 1987 htext = _(" (%+d heads)") % dh
1988 1988
1989 1989 self.ui.status(_("added %d changesets"
1990 1990 " with %d changes to %d files%s\n")
1991 1991 % (changesets, revisions, files, htext))
1992 1992
1993 1993 if changesets > 0:
1994 1994 p = lambda: cl.writepending() and self.root or ""
1995 1995 self.hook('pretxnchangegroup', throw=True,
1996 1996 node=hex(cl.node(clstart)), source=srctype,
1997 1997 url=url, pending=p)
1998 1998
1999 1999 added = [cl.node(r) for r in xrange(clstart, clend)]
2000 2000 if srctype != 'strip':
2001 2001 phases.advanceboundary(self, 0, added)
2002 2002 # make changelog see real files again
2003 2003 cl.finalize(trp)
2004 2004
2005 2005 tr.close()
2006 finally:
2007 tr.release()
2008 if lock:
2009 lock.release()
2010 2006
2007 def postaddchangegroup():
2011 2008 if changesets > 0:
2012 2009 # forcefully update the on-disk branch cache
2013 2010 self.ui.debug("updating the branch cache\n")
2014 2011 self.updatebranchcache()
2015 2012 self.hook("changegroup", node=hex(cl.node(clstart)),
2016 2013 source=srctype, url=url)
2017 2014
2018 2015 for n in added:
2019 self.hook("incoming", node=hex(n), source=srctype, url=url)
2016 self.hook("incoming", node=hex(n), source=srctype,
2017 url=url)
2018 self._postrelease(postaddchangegroup)
2020 2019
2020 finally:
2021 tr.release()
2022 if lock:
2023 lock.release()
2021 2024 # never return 0 here:
2022 2025 if dh < 0:
2023 2026 return dh - 1
2024 2027 else:
2025 2028 return dh + 1
2026 2029
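The hooks fired above can also be consumed in-process; a minimal sketch of a Python changegroup hook (module and function names hypothetical, wired up via the hooks section of hgrc):

    # myhooks.py
    def onchangegroup(ui, repo, hooktype, node=None, source=None, url=None,
                      **kwargs):
        # node is the first added changeset; node..tip is the new range
        start = repo[node].rev()
        ui.status('changegroup via %s added %d changesets\n'
                  % (source, len(repo) - start))
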
2027 2030 def stream_in(self, remote, requirements):
2028 2031 lock = self.lock()
2029 2032 try:
2030 2033 fp = remote.stream_out()
2031 2034 l = fp.readline()
2032 2035 try:
2033 2036 resp = int(l)
2034 2037 except ValueError:
2035 2038 raise error.ResponseError(
2036 2039 _('Unexpected response from remote server:'), l)
2037 2040 if resp == 1:
2038 2041 raise util.Abort(_('operation forbidden by server'))
2039 2042 elif resp == 2:
2040 2043 raise util.Abort(_('locking the remote repository failed'))
2041 2044 elif resp != 0:
2042 2045 raise util.Abort(_('the server sent an unknown error code'))
2043 2046 self.ui.status(_('streaming all changes\n'))
2044 2047 l = fp.readline()
2045 2048 try:
2046 2049 total_files, total_bytes = map(int, l.split(' ', 1))
2047 2050 except (ValueError, TypeError):
2048 2051 raise error.ResponseError(
2049 2052 _('Unexpected response from remote server:'), l)
2050 2053 self.ui.status(_('%d files to transfer, %s of data\n') %
2051 2054 (total_files, util.bytecount(total_bytes)))
2052 2055 start = time.time()
2053 2056 for i in xrange(total_files):
2054 2057 # XXX doesn't support '\n' or '\r' in filenames
2055 2058 l = fp.readline()
2056 2059 try:
2057 2060 name, size = l.split('\0', 1)
2058 2061 size = int(size)
2059 2062 except (ValueError, TypeError):
2060 2063 raise error.ResponseError(
2061 2064 _('Unexpected response from remote server:'), l)
2062 2065 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2063 2066 # for backwards compat, name was partially encoded
2064 2067 ofp = self.sopener(store.decodedir(name), 'w')
2065 2068 for chunk in util.filechunkiter(fp, limit=size):
2066 2069 ofp.write(chunk)
2067 2070 ofp.close()
2068 2071 elapsed = time.time() - start
2069 2072 if elapsed <= 0:
2070 2073 elapsed = 0.001
2071 2074 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2072 2075 (util.bytecount(total_bytes), elapsed,
2073 2076 util.bytecount(total_bytes / elapsed)))
2074 2077
2075 2078 # new requirements = old non-format requirements + new format-related
2076 2079 # requirements from the streamed-in repository
2077 2080 requirements.update(set(self.requirements) - self.supportedformats)
2078 2081 self._applyrequirements(requirements)
2079 2082 self._writerequirements()
2080 2083
2081 2084 self.invalidate()
2082 2085 return len(self.heads()) + 1
2083 2086 finally:
2084 2087 lock.release()
2085 2088
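For clarity, the stream_out wire format parsed above, exercised on hypothetical literal lines:

    header = '2 4096\n'                  # "<filecount> <bytecount>"
    total_files, total_bytes = map(int, header.split(' ', 1))
    assert (total_files, total_bytes) == (2, 4096)

    entry = 'data/foo.i\x00123\n'        # "<name>\0<size>" per file
    name, size = entry.split('\x00', 1)
    assert name == 'data/foo.i' and int(size) == 123
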
2086 2089 def clone(self, remote, heads=[], stream=False):
2087 2090 '''clone remote repository.
2088 2091
2089 2092 keyword arguments:
2090 2093 heads: list of revs to clone (forces use of pull)
2091 2094 stream: use streaming clone if possible'''
2092 2095
2093 2096 # now, all clients that can request uncompressed clones can
2094 2097 # read repo formats supported by all servers that can serve
2095 2098 # them.
2096 2099
2097 2100 # if revlog format changes, client will have to check version
2098 2101 # and format flags on "stream" capability, and use
2099 2102 # uncompressed only if compatible.
2100 2103
2101 2104 if stream and not heads:
2102 2105 # 'stream' means remote revlog format is revlogv1 only
2103 2106 if remote.capable('stream'):
2104 2107 return self.stream_in(remote, set(('revlogv1',)))
2105 2108 # otherwise, 'streamreqs' contains the remote revlog format
2106 2109 streamreqs = remote.capable('streamreqs')
2107 2110 if streamreqs:
2108 2111 streamreqs = set(streamreqs.split(','))
2109 2112 # if we support it, stream in and adjust our requirements
2110 2113 if not streamreqs - self.supportedformats:
2111 2114 return self.stream_in(remote, streamreqs)
2112 2115 return self.pull(remote, heads)
2113 2116
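The requirements negotiation above boils down to a set comparison; a sketch with a hypothetical capability string:

    # value a server might advertise via the 'streamreqs' capability
    streamreqs = set('revlogv1,generaldelta'.split(','))
    supportedformats = set(('revlogv1', 'generaldelta'))
    # stream only if the remote needs nothing we do not support
    assert not (streamreqs - supportedformats)
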
2114 2117 def pushkey(self, namespace, key, old, new):
2115 2118 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2116 2119 old=old, new=new)
2117 2120 ret = pushkey.push(self, namespace, key, old, new)
2118 2121 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2119 2122 ret=ret)
2120 2123 return ret
2121 2124
2122 2125 def listkeys(self, namespace):
2123 2126 self.hook('prelistkeys', throw=True, namespace=namespace)
2124 2127 values = pushkey.list(self, namespace)
2125 2128 self.hook('listkeys', namespace=namespace, values=values)
2126 2129 return values
2127 2130
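A usage sketch for the pushkey/listkeys pair, moving a bookmark on a peer (`repo` and `other` as in the earlier examples; the bookmark name is hypothetical):

    new = repo['tip'].hex()
    old = ''   # empty old value is used when creating a bookmark
    ok = other.pushkey('bookmarks', 'feature-x', old, new)
    marks = other.listkeys('bookmarks')   # dict: name -> hex node
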
2128 2131 def debugwireargs(self, one, two, three=None, four=None, five=None):
2129 2132 '''used to test argument passing over the wire'''
2130 2133 return "%s %s %s %s %s" % (one, two, three, four, five)
2131 2134
2132 2135 def savecommitmessage(self, text):
2133 2136 fp = self.opener('last-message.txt', 'wb')
2134 2137 try:
2135 2138 fp.write(text)
2136 2139 finally:
2137 2140 fp.close()
2138 2141 return self.pathto(fp.name[len(self.root)+1:])
2139 2142
2140 2143 # used to avoid circular references so destructors work
2141 2144 def aftertrans(files):
2142 2145 renamefiles = [tuple(t) for t in files]
2143 2146 def a():
2144 2147 for src, dest in renamefiles:
2145 2148 util.rename(src, dest)
2146 2149 return a
2147 2150
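A sketch of the closure in isolation (file names hypothetical):

    renames = [('journal', 'undo'), ('journal.dirstate', 'undo.dirstate')]
    cb = aftertrans(renames)   # cb is a plain callable, safe to store
    # invoking cb() after the transaction performs util.rename(src, dest)
    # for each recorded pair, without holding a reference to the repo
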
2148 2151 def undoname(fn):
2149 2152 base, name = os.path.split(fn)
2150 2153 assert name.startswith('journal')
2151 2154 return os.path.join(base, name.replace('journal', 'undo', 1))
2152 2155
2153 2156 def instance(ui, path, create):
2154 2157 return localrepository(ui, util.urllocalpath(path), create)
2155 2158
2156 2159 def islocal(path):
2157 2160 return True