rollback: always call destroyed() (regression from 1.9)...
Greg Ward
r15604:b8d85994 stable
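This commit fixes a regression from Mercurial 1.9 in localrepo._rollback(): self.destroyed() was reached only when the rollback removed the working-directory parent (the parentgone case), so a rollback that left the parents intact never told the repository that changesets had been destroyed, and stale caches (branch heads, tags) could survive. The hunk below moves the call (old line 823, new line 830) to the end of the method so it runs on every rollback. A minimal sketch of the control-flow change, in which restore_dirstate() and report_new_parents() are hypothetical stand-ins for the surrounding code:

    # Before (the 1.9 regression): destroyed() ran only when the old
    # working-directory parent no longer existed after the rollback.
    if parentgone:
        restore_dirstate()
        self.destroyed()      # skipped on the common rollback path

    # After this commit: destroyed(), and through it invalidatecaches(),
    # runs unconditionally at the end of _rollback().
    if parentgone:
        restore_dirstate()
    report_new_parents()
    self.destroyed()
    return 0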
@@ -1,2101 +1,2101 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=False):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 35 self.opener = scmutil.opener(self.path)
36 36 self.wopener = scmutil.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39
40 40 try:
41 41 self.ui.readconfig(self.join("hgrc"), self.root)
42 42 extensions.loadall(self.ui)
43 43 except IOError:
44 44 pass
45 45
46 46 if not os.path.isdir(self.path):
47 47 if create:
48 48 if not os.path.exists(path):
49 49 util.makedirs(path)
50 50 util.makedir(self.path, notindexed=True)
51 51 requirements = ["revlogv1"]
52 52 if self.ui.configbool('format', 'usestore', True):
53 53 os.mkdir(os.path.join(self.path, "store"))
54 54 requirements.append("store")
55 55 if self.ui.configbool('format', 'usefncache', True):
56 56 requirements.append("fncache")
57 57 if self.ui.configbool('format', 'dotencode', True):
58 58 requirements.append('dotencode')
59 59 # create an invalid changelog
60 60 self.opener.append(
61 61 "00changelog.i",
62 62 '\0\0\0\2' # represents revlogv2
63 63 ' dummy changelog to prevent using the old repo layout'
64 64 )
65 65 if self.ui.configbool('format', 'generaldelta', False):
66 66 requirements.append("generaldelta")
67 67 requirements = set(requirements)
68 68 else:
69 69 raise error.RepoError(_("repository %s not found") % path)
70 70 elif create:
71 71 raise error.RepoError(_("repository %s already exists") % path)
72 72 else:
73 73 try:
74 74 requirements = scmutil.readrequires(self.opener, self.supported)
75 75 except IOError, inst:
76 76 if inst.errno != errno.ENOENT:
77 77 raise
78 78 requirements = set()
79 79
80 80 self.sharedpath = self.path
81 81 try:
82 82 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
83 83 if not os.path.exists(s):
84 84 raise error.RepoError(
85 85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 86 self.sharedpath = s
87 87 except IOError, inst:
88 88 if inst.errno != errno.ENOENT:
89 89 raise
90 90
91 91 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
92 92 self.spath = self.store.path
93 93 self.sopener = self.store.opener
94 94 self.sjoin = self.store.join
95 95 self.opener.createmode = self.store.createmode
96 96 self._applyrequirements(requirements)
97 97 if create:
98 98 self._writerequirements()
99 99
100 100
101 101 self._branchcache = None
102 102 self._branchcachetip = None
103 103 self.filterpats = {}
104 104 self._datafilters = {}
105 105 self._transref = self._lockref = self._wlockref = None
106 106
107 107 # A cache for various files under .hg/ that tracks file changes
108 108 # (used by the filecache decorator)
109 109 #
110 110 # Maps a property name to its util.filecacheentry
111 111 self._filecache = {}
112 112
113 113 def _applyrequirements(self, requirements):
114 114 self.requirements = requirements
115 115 openerreqs = set(('revlogv1', 'generaldelta'))
116 116 self.sopener.options = dict((r, 1) for r in requirements
117 117 if r in openerreqs)
118 118
119 119 def _writerequirements(self):
120 120 reqfile = self.opener("requires", "w")
121 121 for r in self.requirements:
122 122 reqfile.write("%s\n" % r)
123 123 reqfile.close()
124 124
125 125 def _checknested(self, path):
126 126 """Determine if path is a legal nested repository."""
127 127 if not path.startswith(self.root):
128 128 return False
129 129 subpath = path[len(self.root) + 1:]
130 130
131 131 # XXX: Checking against the current working copy is wrong in
132 132 # the sense that it can reject things like
133 133 #
134 134 # $ hg cat -r 10 sub/x.txt
135 135 #
136 136 # if sub/ is no longer a subrepository in the working copy
137 137 # parent revision.
138 138 #
139 139 # However, it can of course also allow things that would have
140 140 # been rejected before, such as the above cat command if sub/
141 141 # is a subrepository now, but was a normal directory before.
142 142 # The old path auditor would have rejected by mistake since it
143 143 # panics when it sees sub/.hg/.
144 144 #
145 145 # All in all, checking against the working copy seems sensible
146 146 # since we want to prevent access to nested repositories on
147 147 # the filesystem *now*.
148 148 ctx = self[None]
149 149 parts = util.splitpath(subpath)
150 150 while parts:
151 151 prefix = os.sep.join(parts)
152 152 if prefix in ctx.substate:
153 153 if prefix == subpath:
154 154 return True
155 155 else:
156 156 sub = ctx.sub(prefix)
157 157 return sub.checknested(subpath[len(prefix) + 1:])
158 158 else:
159 159 parts.pop()
160 160 return False
161 161
162 162 @filecache('bookmarks')
163 163 def _bookmarks(self):
164 164 return bookmarks.read(self)
165 165
166 166 @filecache('bookmarks.current')
167 167 def _bookmarkcurrent(self):
168 168 return bookmarks.readcurrent(self)
169 169
170 170 def _writebookmarks(self, marks):
171 171 bookmarks.write(self)
172 172
173 173 @filecache('00changelog.i', True)
174 174 def changelog(self):
175 175 c = changelog.changelog(self.sopener)
176 176 if 'HG_PENDING' in os.environ:
177 177 p = os.environ['HG_PENDING']
178 178 if p.startswith(self.root):
179 179 c.readpending('00changelog.i.a')
180 180 return c
181 181
182 182 @filecache('00manifest.i', True)
183 183 def manifest(self):
184 184 return manifest.manifest(self.sopener)
185 185
186 186 @filecache('dirstate')
187 187 def dirstate(self):
188 188 warned = [0]
189 189 def validate(node):
190 190 try:
191 191 self.changelog.rev(node)
192 192 return node
193 193 except error.LookupError:
194 194 if not warned[0]:
195 195 warned[0] = True
196 196 self.ui.warn(_("warning: ignoring unknown"
197 197 " working parent %s!\n") % short(node))
198 198 return nullid
199 199
200 200 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
201 201
202 202 def __getitem__(self, changeid):
203 203 if changeid is None:
204 204 return context.workingctx(self)
205 205 return context.changectx(self, changeid)
206 206
207 207 def __contains__(self, changeid):
208 208 try:
209 209 return bool(self.lookup(changeid))
210 210 except error.RepoLookupError:
211 211 return False
212 212
213 213 def __nonzero__(self):
214 214 return True
215 215
216 216 def __len__(self):
217 217 return len(self.changelog)
218 218
219 219 def __iter__(self):
220 220 for i in xrange(len(self)):
221 221 yield i
222 222
223 223 def set(self, expr, *args):
224 224 '''
225 225 Yield a context for each matching revision, after doing arg
226 226 replacement via revset.formatspec
227 227 '''
228 228
229 229 expr = revset.formatspec(expr, *args)
230 230 m = revset.match(None, expr)
231 231 for r in m(self, range(len(self))):
232 232 yield self[r]
233 233
234 234 def url(self):
235 235 return 'file:' + self.root
236 236
237 237 def hook(self, name, throw=False, **args):
238 238 return hook.hook(self.ui, self, name, throw, **args)
239 239
240 240 tag_disallowed = ':\r\n'
241 241
242 242 def _tag(self, names, node, message, local, user, date, extra={}):
243 243 if isinstance(names, str):
244 244 allchars = names
245 245 names = (names,)
246 246 else:
247 247 allchars = ''.join(names)
248 248 for c in self.tag_disallowed:
249 249 if c in allchars:
250 250 raise util.Abort(_('%r cannot be used in a tag name') % c)
251 251
252 252 branches = self.branchmap()
253 253 for name in names:
254 254 self.hook('pretag', throw=True, node=hex(node), tag=name,
255 255 local=local)
256 256 if name in branches:
257 257 self.ui.warn(_("warning: tag %s conflicts with existing"
258 258 " branch name\n") % name)
259 259
260 260 def writetags(fp, names, munge, prevtags):
261 261 fp.seek(0, 2)
262 262 if prevtags and prevtags[-1] != '\n':
263 263 fp.write('\n')
264 264 for name in names:
265 265 m = munge and munge(name) or name
266 266 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
267 267 old = self.tags().get(name, nullid)
268 268 fp.write('%s %s\n' % (hex(old), m))
269 269 fp.write('%s %s\n' % (hex(node), m))
270 270 fp.close()
271 271
272 272 prevtags = ''
273 273 if local:
274 274 try:
275 275 fp = self.opener('localtags', 'r+')
276 276 except IOError:
277 277 fp = self.opener('localtags', 'a')
278 278 else:
279 279 prevtags = fp.read()
280 280
281 281 # local tags are stored in the current charset
282 282 writetags(fp, names, None, prevtags)
283 283 for name in names:
284 284 self.hook('tag', node=hex(node), tag=name, local=local)
285 285 return
286 286
287 287 try:
288 288 fp = self.wfile('.hgtags', 'rb+')
289 289 except IOError, e:
290 290 if e.errno != errno.ENOENT:
291 291 raise
292 292 fp = self.wfile('.hgtags', 'ab')
293 293 else:
294 294 prevtags = fp.read()
295 295
296 296 # committed tags are stored in UTF-8
297 297 writetags(fp, names, encoding.fromlocal, prevtags)
298 298
299 299 fp.close()
300 300
301 301 if '.hgtags' not in self.dirstate:
302 302 self[None].add(['.hgtags'])
303 303
304 304 m = matchmod.exact(self.root, '', ['.hgtags'])
305 305 tagnode = self.commit(message, user, date, extra=extra, match=m)
306 306
307 307 for name in names:
308 308 self.hook('tag', node=hex(node), tag=name, local=local)
309 309
310 310 return tagnode
311 311
312 312 def tag(self, names, node, message, local, user, date):
313 313 '''tag a revision with one or more symbolic names.
314 314
315 315 names is a list of strings or, when adding a single tag, names may be a
316 316 string.
317 317
318 318 if local is True, the tags are stored in a per-repository file.
319 319 otherwise, they are stored in the .hgtags file, and a new
320 320 changeset is committed with the change.
321 321
322 322 keyword arguments:
323 323
324 324 local: whether to store tags in non-version-controlled file
325 325 (default False)
326 326
327 327 message: commit message to use if committing
328 328
329 329 user: name of user to use if committing
330 330
331 331 date: date tuple to use if committing'''
332 332
333 333 if not local:
334 334 for x in self.status()[:5]:
335 335 if '.hgtags' in x:
336 336 raise util.Abort(_('working copy of .hgtags is changed '
337 337 '(please commit .hgtags manually)'))
338 338
339 339 self.tags() # instantiate the cache
340 340 self._tag(names, node, message, local, user, date)
341 341
342 342 @propertycache
343 343 def _tagscache(self):
344 344 '''Returns a tagscache object that contains various tag-related caches.'''
345 345
346 346 # This simplifies its cache management by having one decorated
347 347 # function (this one) and the rest simply fetch things from it.
348 348 class tagscache(object):
349 349 def __init__(self):
350 350 # These two define the set of tags for this repository. tags
351 351 # maps tag name to node; tagtypes maps tag name to 'global' or
352 352 # 'local'. (Global tags are defined by .hgtags across all
353 353 # heads, and local tags are defined in .hg/localtags.)
354 354 # They constitute the in-memory cache of tags.
355 355 self.tags = self.tagtypes = None
356 356
357 357 self.nodetagscache = self.tagslist = None
358 358
359 359 cache = tagscache()
360 360 cache.tags, cache.tagtypes = self._findtags()
361 361
362 362 return cache
363 363
364 364 def tags(self):
365 365 '''return a mapping of tag to node'''
366 366 return self._tagscache.tags
367 367
368 368 def _findtags(self):
369 369 '''Do the hard work of finding tags. Return a pair of dicts
370 370 (tags, tagtypes) where tags maps tag name to node, and tagtypes
371 371 maps tag name to a string like \'global\' or \'local\'.
372 372 Subclasses or extensions are free to add their own tags, but
373 373 should be aware that the returned dicts will be retained for the
374 374 duration of the localrepo object.'''
375 375
376 376 # XXX what tagtype should subclasses/extensions use? Currently
377 377 # mq and bookmarks add tags, but do not set the tagtype at all.
378 378 # Should each extension invent its own tag type? Should there
379 379 # be one tagtype for all such "virtual" tags? Or is the status
380 380 # quo fine?
381 381
382 382 alltags = {} # map tag name to (node, hist)
383 383 tagtypes = {}
384 384
385 385 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
386 386 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
387 387
388 388 # Build the return dicts. Have to re-encode tag names because
389 389 # the tags module always uses UTF-8 (in order not to lose info
390 390 # writing to the cache), but the rest of Mercurial wants them in
391 391 # local encoding.
392 392 tags = {}
393 393 for (name, (node, hist)) in alltags.iteritems():
394 394 if node != nullid:
395 395 try:
396 396 # ignore tags to unknown nodes
397 397 self.changelog.lookup(node)
398 398 tags[encoding.tolocal(name)] = node
399 399 except error.LookupError:
400 400 pass
401 401 tags['tip'] = self.changelog.tip()
402 402 tagtypes = dict([(encoding.tolocal(name), value)
403 403 for (name, value) in tagtypes.iteritems()])
404 404 return (tags, tagtypes)
405 405
406 406 def tagtype(self, tagname):
407 407 '''
408 408 return the type of the given tag. result can be:
409 409
410 410 'local' : a local tag
411 411 'global' : a global tag
412 412 None : tag does not exist
413 413 '''
414 414
415 415 return self._tagscache.tagtypes.get(tagname)
416 416
417 417 def tagslist(self):
418 418 '''return a list of tags ordered by revision'''
419 419 if not self._tagscache.tagslist:
420 420 l = []
421 421 for t, n in self.tags().iteritems():
422 422 r = self.changelog.rev(n)
423 423 l.append((r, t, n))
424 424 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
425 425
426 426 return self._tagscache.tagslist
427 427
428 428 def nodetags(self, node):
429 429 '''return the tags associated with a node'''
430 430 if not self._tagscache.nodetagscache:
431 431 nodetagscache = {}
432 432 for t, n in self.tags().iteritems():
433 433 nodetagscache.setdefault(n, []).append(t)
434 434 for tags in nodetagscache.itervalues():
435 435 tags.sort()
436 436 self._tagscache.nodetagscache = nodetagscache
437 437 return self._tagscache.nodetagscache.get(node, [])
438 438
439 439 def nodebookmarks(self, node):
440 440 marks = []
441 441 for bookmark, n in self._bookmarks.iteritems():
442 442 if n == node:
443 443 marks.append(bookmark)
444 444 return sorted(marks)
445 445
446 446 def _branchtags(self, partial, lrev):
447 447 # TODO: rename this function?
448 448 tiprev = len(self) - 1
449 449 if lrev != tiprev:
450 450 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
451 451 self._updatebranchcache(partial, ctxgen)
452 452 self._writebranchcache(partial, self.changelog.tip(), tiprev)
453 453
454 454 return partial
455 455
456 456 def updatebranchcache(self):
457 457 tip = self.changelog.tip()
458 458 if self._branchcache is not None and self._branchcachetip == tip:
459 459 return self._branchcache
460 460
461 461 oldtip = self._branchcachetip
462 462 self._branchcachetip = tip
463 463 if oldtip is None or oldtip not in self.changelog.nodemap:
464 464 partial, last, lrev = self._readbranchcache()
465 465 else:
466 466 lrev = self.changelog.rev(oldtip)
467 467 partial = self._branchcache
468 468
469 469 self._branchtags(partial, lrev)
470 470 # this private cache holds all heads (not just tips)
471 471 self._branchcache = partial
472 472
473 473 def branchmap(self):
474 474 '''returns a dictionary {branch: [branchheads]}'''
475 475 self.updatebranchcache()
476 476 return self._branchcache
477 477
478 478 def branchtags(self):
479 479 '''return a dict where branch names map to the tipmost head of
480 480 the branch; open heads come before closed ones'''
481 481 bt = {}
482 482 for bn, heads in self.branchmap().iteritems():
483 483 tip = heads[-1]
484 484 for h in reversed(heads):
485 485 if 'close' not in self.changelog.read(h)[5]:
486 486 tip = h
487 487 break
488 488 bt[bn] = tip
489 489 return bt
490 490
491 491 def _readbranchcache(self):
492 492 partial = {}
493 493 try:
494 494 f = self.opener("cache/branchheads")
495 495 lines = f.read().split('\n')
496 496 f.close()
497 497 except (IOError, OSError):
498 498 return {}, nullid, nullrev
499 499
500 500 try:
501 501 last, lrev = lines.pop(0).split(" ", 1)
502 502 last, lrev = bin(last), int(lrev)
503 503 if lrev >= len(self) or self[lrev].node() != last:
504 504 # invalidate the cache
505 505 raise ValueError('invalidating branch cache (tip differs)')
506 506 for l in lines:
507 507 if not l:
508 508 continue
509 509 node, label = l.split(" ", 1)
510 510 label = encoding.tolocal(label.strip())
511 511 partial.setdefault(label, []).append(bin(node))
512 512 except KeyboardInterrupt:
513 513 raise
514 514 except Exception, inst:
515 515 if self.ui.debugflag:
516 516 self.ui.warn(str(inst), '\n')
517 517 partial, last, lrev = {}, nullid, nullrev
518 518 return partial, last, lrev
519 519
520 520 def _writebranchcache(self, branches, tip, tiprev):
521 521 try:
522 522 f = self.opener("cache/branchheads", "w", atomictemp=True)
523 523 f.write("%s %s\n" % (hex(tip), tiprev))
524 524 for label, nodes in branches.iteritems():
525 525 for node in nodes:
526 526 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
527 527 f.close()
528 528 except (IOError, OSError):
529 529 pass
530 530
531 531 def _updatebranchcache(self, partial, ctxgen):
532 532 # collect new branch entries
533 533 newbranches = {}
534 534 for c in ctxgen:
535 535 newbranches.setdefault(c.branch(), []).append(c.node())
536 536 # if older branchheads are reachable from new ones, they aren't
537 537 # really branchheads. Note checking parents is insufficient:
538 538 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
539 539 for branch, newnodes in newbranches.iteritems():
540 540 bheads = partial.setdefault(branch, [])
541 541 bheads.extend(newnodes)
542 542 if len(bheads) <= 1:
543 543 continue
544 544 bheads = sorted(bheads, key=lambda x: self[x].rev())
545 545 # starting from tip means fewer passes over reachable
546 546 while newnodes:
547 547 latest = newnodes.pop()
548 548 if latest not in bheads:
549 549 continue
550 550 minbhrev = self[bheads[0]].node()
551 551 reachable = self.changelog.reachable(latest, minbhrev)
552 552 reachable.remove(latest)
553 553 if reachable:
554 554 bheads = [b for b in bheads if b not in reachable]
555 555 partial[branch] = bheads
556 556
557 557 def lookup(self, key):
558 558 if isinstance(key, int):
559 559 return self.changelog.node(key)
560 560 elif key == '.':
561 561 return self.dirstate.p1()
562 562 elif key == 'null':
563 563 return nullid
564 564 elif key == 'tip':
565 565 return self.changelog.tip()
566 566 n = self.changelog._match(key)
567 567 if n:
568 568 return n
569 569 if key in self._bookmarks:
570 570 return self._bookmarks[key]
571 571 if key in self.tags():
572 572 return self.tags()[key]
573 573 if key in self.branchtags():
574 574 return self.branchtags()[key]
575 575 n = self.changelog._partialmatch(key)
576 576 if n:
577 577 return n
578 578
579 579 # can't find key, check if it might have come from damaged dirstate
580 580 if key in self.dirstate.parents():
581 581 raise error.Abort(_("working directory has unknown parent '%s'!")
582 582 % short(key))
583 583 try:
584 584 if len(key) == 20:
585 585 key = hex(key)
586 586 except TypeError:
587 587 pass
588 588 raise error.RepoLookupError(_("unknown revision '%s'") % key)
589 589
590 590 def lookupbranch(self, key, remote=None):
591 591 repo = remote or self
592 592 if key in repo.branchmap():
593 593 return key
594 594
595 595 repo = (remote and remote.local()) and remote or self
596 596 return repo[key].branch()
597 597
598 598 def known(self, nodes):
599 599 nm = self.changelog.nodemap
600 600 return [(n in nm) for n in nodes]
601 601
602 602 def local(self):
603 603 return self
604 604
605 605 def join(self, f):
606 606 return os.path.join(self.path, f)
607 607
608 608 def wjoin(self, f):
609 609 return os.path.join(self.root, f)
610 610
611 611 def file(self, f):
612 612 if f[0] == '/':
613 613 f = f[1:]
614 614 return filelog.filelog(self.sopener, f)
615 615
616 616 def changectx(self, changeid):
617 617 return self[changeid]
618 618
619 619 def parents(self, changeid=None):
620 620 '''get list of changectxs for parents of changeid'''
621 621 return self[changeid].parents()
622 622
623 623 def filectx(self, path, changeid=None, fileid=None):
624 624 """changeid can be a changeset revision, node, or tag.
625 625 fileid can be a file revision or node."""
626 626 return context.filectx(self, path, changeid, fileid)
627 627
628 628 def getcwd(self):
629 629 return self.dirstate.getcwd()
630 630
631 631 def pathto(self, f, cwd=None):
632 632 return self.dirstate.pathto(f, cwd)
633 633
634 634 def wfile(self, f, mode='r'):
635 635 return self.wopener(f, mode)
636 636
637 637 def _link(self, f):
638 638 return os.path.islink(self.wjoin(f))
639 639
640 640 def _loadfilter(self, filter):
641 641 if filter not in self.filterpats:
642 642 l = []
643 643 for pat, cmd in self.ui.configitems(filter):
644 644 if cmd == '!':
645 645 continue
646 646 mf = matchmod.match(self.root, '', [pat])
647 647 fn = None
648 648 params = cmd
649 649 for name, filterfn in self._datafilters.iteritems():
650 650 if cmd.startswith(name):
651 651 fn = filterfn
652 652 params = cmd[len(name):].lstrip()
653 653 break
654 654 if not fn:
655 655 fn = lambda s, c, **kwargs: util.filter(s, c)
656 656 # Wrap old filters not supporting keyword arguments
657 657 if not inspect.getargspec(fn)[2]:
658 658 oldfn = fn
659 659 fn = lambda s, c, **kwargs: oldfn(s, c)
660 660 l.append((mf, fn, params))
661 661 self.filterpats[filter] = l
662 662 return self.filterpats[filter]
663 663
664 664 def _filter(self, filterpats, filename, data):
665 665 for mf, fn, cmd in filterpats:
666 666 if mf(filename):
667 667 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
668 668 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
669 669 break
670 670
671 671 return data
672 672
673 673 @propertycache
674 674 def _encodefilterpats(self):
675 675 return self._loadfilter('encode')
676 676
677 677 @propertycache
678 678 def _decodefilterpats(self):
679 679 return self._loadfilter('decode')
680 680
681 681 def adddatafilter(self, name, filter):
682 682 self._datafilters[name] = filter
683 683
684 684 def wread(self, filename):
685 685 if self._link(filename):
686 686 data = os.readlink(self.wjoin(filename))
687 687 else:
688 688 data = self.wopener.read(filename)
689 689 return self._filter(self._encodefilterpats, filename, data)
690 690
691 691 def wwrite(self, filename, data, flags):
692 692 data = self._filter(self._decodefilterpats, filename, data)
693 693 if 'l' in flags:
694 694 self.wopener.symlink(data, filename)
695 695 else:
696 696 self.wopener.write(filename, data)
697 697 if 'x' in flags:
698 698 util.setflags(self.wjoin(filename), False, True)
699 699
700 700 def wwritedata(self, filename, data):
701 701 return self._filter(self._decodefilterpats, filename, data)
702 702
703 703 def transaction(self, desc):
704 704 tr = self._transref and self._transref() or None
705 705 if tr and tr.running():
706 706 return tr.nest()
707 707
708 708 # abort here if the journal already exists
709 709 if os.path.exists(self.sjoin("journal")):
710 710 raise error.RepoError(
711 711 _("abandoned transaction found - run hg recover"))
712 712
713 713 journalfiles = self._writejournal(desc)
714 714 renames = [(x, undoname(x)) for x in journalfiles]
715 715
716 716 tr = transaction.transaction(self.ui.warn, self.sopener,
717 717 self.sjoin("journal"),
718 718 aftertrans(renames),
719 719 self.store.createmode)
720 720 self._transref = weakref.ref(tr)
721 721 return tr
722 722
723 723 def _writejournal(self, desc):
724 724 # save dirstate for rollback
725 725 try:
726 726 ds = self.opener.read("dirstate")
727 727 except IOError:
728 728 ds = ""
729 729 self.opener.write("journal.dirstate", ds)
730 730 self.opener.write("journal.branch",
731 731 encoding.fromlocal(self.dirstate.branch()))
732 732 self.opener.write("journal.desc",
733 733 "%d\n%s\n" % (len(self), desc))
734 734
735 735 bkname = self.join('bookmarks')
736 736 if os.path.exists(bkname):
737 737 util.copyfile(bkname, self.join('journal.bookmarks'))
738 738 else:
739 739 self.opener.write('journal.bookmarks', '')
740 740
741 741 return (self.sjoin('journal'), self.join('journal.dirstate'),
742 742 self.join('journal.branch'), self.join('journal.desc'),
743 743 self.join('journal.bookmarks'))
744 744
745 745 def recover(self):
746 746 lock = self.lock()
747 747 try:
748 748 if os.path.exists(self.sjoin("journal")):
749 749 self.ui.status(_("rolling back interrupted transaction\n"))
750 750 transaction.rollback(self.sopener, self.sjoin("journal"),
751 751 self.ui.warn)
752 752 self.invalidate()
753 753 return True
754 754 else:
755 755 self.ui.warn(_("no interrupted transaction available\n"))
756 756 return False
757 757 finally:
758 758 lock.release()
759 759
760 760 def rollback(self, dryrun=False, force=False):
761 761 wlock = lock = None
762 762 try:
763 763 wlock = self.wlock()
764 764 lock = self.lock()
765 765 if os.path.exists(self.sjoin("undo")):
766 766 return self._rollback(dryrun, force)
767 767 else:
768 768 self.ui.warn(_("no rollback information available\n"))
769 769 return 1
770 770 finally:
771 771 release(lock, wlock)
772 772
773 773 def _rollback(self, dryrun, force):
774 774 ui = self.ui
775 775 try:
776 776 args = self.opener.read('undo.desc').splitlines()
777 777 (oldlen, desc, detail) = (int(args[0]), args[1], None)
778 778 if len(args) >= 3:
779 779 detail = args[2]
780 780 oldtip = oldlen - 1
781 781
782 782 if detail and ui.verbose:
783 783 msg = (_('repository tip rolled back to revision %s'
784 784 ' (undo %s: %s)\n')
785 785 % (oldtip, desc, detail))
786 786 else:
787 787 msg = (_('repository tip rolled back to revision %s'
788 788 ' (undo %s)\n')
789 789 % (oldtip, desc))
790 790 except IOError:
791 791 msg = _('rolling back unknown transaction\n')
792 792 desc = None
793 793
794 794 if not force and self['.'] != self['tip'] and desc == 'commit':
795 795 raise util.Abort(
796 796 _('rollback of last commit while not checked out '
797 797 'may lose data'), hint=_('use -f to force'))
798 798
799 799 ui.status(msg)
800 800 if dryrun:
801 801 return 0
802 802
803 803 parents = self.dirstate.parents()
804 804 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
805 805 if os.path.exists(self.join('undo.bookmarks')):
806 806 util.rename(self.join('undo.bookmarks'),
807 807 self.join('bookmarks'))
808 808 self.invalidate()
809 809
810 810 parentgone = (parents[0] not in self.changelog.nodemap or
811 811 parents[1] not in self.changelog.nodemap)
812 812 if parentgone:
813 813 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
814 814 try:
815 815 branch = self.opener.read('undo.branch')
816 816 self.dirstate.setbranch(branch)
817 817 except IOError:
818 818 ui.warn(_('named branch could not be reset: '
819 819 'current branch is still \'%s\'\n')
820 820 % self.dirstate.branch())
821 821
822 822 self.dirstate.invalidate()
823 self.destroyed()
824 823 parents = tuple([p.rev() for p in self.parents()])
825 824 if len(parents) > 1:
826 825 ui.status(_('working directory now based on '
827 826 'revisions %d and %d\n') % parents)
828 827 else:
829 828 ui.status(_('working directory now based on '
830 829 'revision %d\n') % parents)
830 self.destroyed()
831 831 return 0
832 832
833 833 def invalidatecaches(self):
834 834 try:
835 835 delattr(self, '_tagscache')
836 836 except AttributeError:
837 837 pass
838 838
839 839 self._branchcache = None # in UTF-8
840 840 self._branchcachetip = None
841 841
842 842 def invalidatedirstate(self):
843 843 '''Invalidates the dirstate, causing the next call to dirstate
844 844 to check if it was modified since the last time it was read,
845 845 rereading it if it has.
846 846
847 847 This differs from dirstate.invalidate() in that it doesn't always
848 848 reread the dirstate. Use dirstate.invalidate() if you want to
849 849 explicitly reread the dirstate (i.e. to restore it to a previously
850 850 known good state).'''
851 851 try:
852 852 delattr(self, 'dirstate')
853 853 except AttributeError:
854 854 pass
855 855
856 856 def invalidate(self):
857 857 for k in self._filecache:
858 858 # dirstate is invalidated separately in invalidatedirstate()
859 859 if k == 'dirstate':
860 860 continue
861 861
862 862 try:
863 863 delattr(self, k)
864 864 except AttributeError:
865 865 pass
866 866 self.invalidatecaches()
867 867
868 868 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
869 869 try:
870 870 l = lock.lock(lockname, 0, releasefn, desc=desc)
871 871 except error.LockHeld, inst:
872 872 if not wait:
873 873 raise
874 874 self.ui.warn(_("waiting for lock on %s held by %r\n") %
875 875 (desc, inst.locker))
876 876 # default to 600 seconds timeout
877 877 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
878 878 releasefn, desc=desc)
879 879 if acquirefn:
880 880 acquirefn()
881 881 return l
882 882
883 883 def lock(self, wait=True):
884 884 '''Lock the repository store (.hg/store) and return a weak reference
885 885 to the lock. Use this before modifying the store (e.g. committing or
886 886 stripping). If you are opening a transaction, get a lock as well.'''
887 887 l = self._lockref and self._lockref()
888 888 if l is not None and l.held:
889 889 l.lock()
890 890 return l
891 891
892 892 def unlock():
893 893 self.store.write()
894 894 for k, ce in self._filecache.items():
895 895 if k == 'dirstate':
896 896 continue
897 897 ce.refresh()
898 898
899 899 l = self._lock(self.sjoin("lock"), wait, unlock,
900 900 self.invalidate, _('repository %s') % self.origroot)
901 901 self._lockref = weakref.ref(l)
902 902 return l
903 903
904 904 def wlock(self, wait=True):
905 905 '''Lock the non-store parts of the repository (everything under
906 906 .hg except .hg/store) and return a weak reference to the lock.
907 907 Use this before modifying files in .hg.'''
908 908 l = self._wlockref and self._wlockref()
909 909 if l is not None and l.held:
910 910 l.lock()
911 911 return l
912 912
913 913 def unlock():
914 914 self.dirstate.write()
915 915 ce = self._filecache.get('dirstate')
916 916 if ce:
917 917 ce.refresh()
918 918
919 919 l = self._lock(self.join("wlock"), wait, unlock,
920 920 self.invalidatedirstate, _('working directory of %s') %
921 921 self.origroot)
922 922 self._wlockref = weakref.ref(l)
923 923 return l
924 924
925 925 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
926 926 """
927 927 commit an individual file as part of a larger transaction
928 928 """
929 929
930 930 fname = fctx.path()
931 931 text = fctx.data()
932 932 flog = self.file(fname)
933 933 fparent1 = manifest1.get(fname, nullid)
934 934 fparent2 = fparent2o = manifest2.get(fname, nullid)
935 935
936 936 meta = {}
937 937 copy = fctx.renamed()
938 938 if copy and copy[0] != fname:
939 939 # Mark the new revision of this file as a copy of another
940 940 # file. This copy data will effectively act as a parent
941 941 # of this new revision. If this is a merge, the first
942 942 # parent will be the nullid (meaning "look up the copy data")
943 943 # and the second one will be the other parent. For example:
944 944 #
945 945 # 0 --- 1 --- 3 rev1 changes file foo
946 946 # \ / rev2 renames foo to bar and changes it
947 947 # \- 2 -/ rev3 should have bar with all changes and
948 948 # should record that bar descends from
949 949 # bar in rev2 and foo in rev1
950 950 #
951 951 # this allows this merge to succeed:
952 952 #
953 953 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
954 954 # \ / merging rev3 and rev4 should use bar@rev2
955 955 # \- 2 --- 4 as the merge base
956 956 #
957 957
958 958 cfname = copy[0]
959 959 crev = manifest1.get(cfname)
960 960 newfparent = fparent2
961 961
962 962 if manifest2: # branch merge
963 963 if fparent2 == nullid or crev is None: # copied on remote side
964 964 if cfname in manifest2:
965 965 crev = manifest2[cfname]
966 966 newfparent = fparent1
967 967
968 968 # find source in nearest ancestor if we've lost track
969 969 if not crev:
970 970 self.ui.debug(" %s: searching for copy revision for %s\n" %
971 971 (fname, cfname))
972 972 for ancestor in self[None].ancestors():
973 973 if cfname in ancestor:
974 974 crev = ancestor[cfname].filenode()
975 975 break
976 976
977 977 if crev:
978 978 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
979 979 meta["copy"] = cfname
980 980 meta["copyrev"] = hex(crev)
981 981 fparent1, fparent2 = nullid, newfparent
982 982 else:
983 983 self.ui.warn(_("warning: can't find ancestor for '%s' "
984 984 "copied from '%s'!\n") % (fname, cfname))
985 985
986 986 elif fparent2 != nullid:
987 987 # is one parent an ancestor of the other?
988 988 fparentancestor = flog.ancestor(fparent1, fparent2)
989 989 if fparentancestor == fparent1:
990 990 fparent1, fparent2 = fparent2, nullid
991 991 elif fparentancestor == fparent2:
992 992 fparent2 = nullid
993 993
994 994 # is the file changed?
995 995 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
996 996 changelist.append(fname)
997 997 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
998 998
999 999 # are just the flags changed during merge?
1000 1000 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1001 1001 changelist.append(fname)
1002 1002
1003 1003 return fparent1
1004 1004
1005 1005 def commit(self, text="", user=None, date=None, match=None, force=False,
1006 1006 editor=False, extra={}):
1007 1007 """Add a new revision to current repository.
1008 1008
1009 1009 Revision information is gathered from the working directory,
1010 1010 match can be used to filter the committed files. If editor is
1011 1011 supplied, it is called to get a commit message.
1012 1012 """
1013 1013
1014 1014 def fail(f, msg):
1015 1015 raise util.Abort('%s: %s' % (f, msg))
1016 1016
1017 1017 if not match:
1018 1018 match = matchmod.always(self.root, '')
1019 1019
1020 1020 if not force:
1021 1021 vdirs = []
1022 1022 match.dir = vdirs.append
1023 1023 match.bad = fail
1024 1024
1025 1025 wlock = self.wlock()
1026 1026 try:
1027 1027 wctx = self[None]
1028 1028 merge = len(wctx.parents()) > 1
1029 1029
1030 1030 if (not force and merge and match and
1031 1031 (match.files() or match.anypats())):
1032 1032 raise util.Abort(_('cannot partially commit a merge '
1033 1033 '(do not specify files or patterns)'))
1034 1034
1035 1035 changes = self.status(match=match, clean=force)
1036 1036 if force:
1037 1037 changes[0].extend(changes[6]) # mq may commit unchanged files
1038 1038
1039 1039 # check subrepos
1040 1040 subs = []
1041 1041 removedsubs = set()
1042 1042 if '.hgsub' in wctx:
1043 1043 # only manage subrepos and .hgsubstate if .hgsub is present
1044 1044 for p in wctx.parents():
1045 1045 removedsubs.update(s for s in p.substate if match(s))
1046 1046 for s in wctx.substate:
1047 1047 removedsubs.discard(s)
1048 1048 if match(s) and wctx.sub(s).dirty():
1049 1049 subs.append(s)
1050 1050 if (subs or removedsubs):
1051 1051 if (not match('.hgsub') and
1052 1052 '.hgsub' in (wctx.modified() + wctx.added())):
1053 1053 raise util.Abort(
1054 1054 _("can't commit subrepos without .hgsub"))
1055 1055 if '.hgsubstate' not in changes[0]:
1056 1056 changes[0].insert(0, '.hgsubstate')
1057 1057 if '.hgsubstate' in changes[2]:
1058 1058 changes[2].remove('.hgsubstate')
1059 1059 elif '.hgsub' in changes[2]:
1060 1060 # clean up .hgsubstate when .hgsub is removed
1061 1061 if ('.hgsubstate' in wctx and
1062 1062 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1063 1063 changes[2].insert(0, '.hgsubstate')
1064 1064
1065 1065 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1066 1066 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1067 1067 if changedsubs:
1068 1068 raise util.Abort(_("uncommitted changes in subrepo %s")
1069 1069 % changedsubs[0],
1070 1070 hint=_("use --subrepos for recursive commit"))
1071 1071
1072 1072 # make sure all explicit patterns are matched
1073 1073 if not force and match.files():
1074 1074 matched = set(changes[0] + changes[1] + changes[2])
1075 1075
1076 1076 for f in match.files():
1077 1077 if f == '.' or f in matched or f in wctx.substate:
1078 1078 continue
1079 1079 if f in changes[3]: # missing
1080 1080 fail(f, _('file not found!'))
1081 1081 if f in vdirs: # visited directory
1082 1082 d = f + '/'
1083 1083 for mf in matched:
1084 1084 if mf.startswith(d):
1085 1085 break
1086 1086 else:
1087 1087 fail(f, _("no match under directory!"))
1088 1088 elif f not in self.dirstate:
1089 1089 fail(f, _("file not tracked!"))
1090 1090
1091 1091 if (not force and not extra.get("close") and not merge
1092 1092 and not (changes[0] or changes[1] or changes[2])
1093 1093 and wctx.branch() == wctx.p1().branch()):
1094 1094 return None
1095 1095
1096 1096 ms = mergemod.mergestate(self)
1097 1097 for f in changes[0]:
1098 1098 if f in ms and ms[f] == 'u':
1099 1099 raise util.Abort(_("unresolved merge conflicts "
1100 1100 "(see hg help resolve)"))
1101 1101
1102 1102 cctx = context.workingctx(self, text, user, date, extra, changes)
1103 1103 if editor:
1104 1104 cctx._text = editor(self, cctx, subs)
1105 1105 edited = (text != cctx._text)
1106 1106
1107 1107 # commit subs
1108 1108 if subs or removedsubs:
1109 1109 state = wctx.substate.copy()
1110 1110 for s in sorted(subs):
1111 1111 sub = wctx.sub(s)
1112 1112 self.ui.status(_('committing subrepository %s\n') %
1113 1113 subrepo.subrelpath(sub))
1114 1114 sr = sub.commit(cctx._text, user, date)
1115 1115 state[s] = (state[s][0], sr)
1116 1116 subrepo.writestate(self, state)
1117 1117
1118 1118 # Save commit message in case this transaction gets rolled back
1119 1119 # (e.g. by a pretxncommit hook). Leave the content alone on
1120 1120 # the assumption that the user will use the same editor again.
1121 1121 msgfn = self.savecommitmessage(cctx._text)
1122 1122
1123 1123 p1, p2 = self.dirstate.parents()
1124 1124 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1125 1125 try:
1126 1126 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1127 1127 ret = self.commitctx(cctx, True)
1128 1128 except:
1129 1129 if edited:
1130 1130 self.ui.write(
1131 1131 _('note: commit message saved in %s\n') % msgfn)
1132 1132 raise
1133 1133
1134 1134 # update bookmarks, dirstate and mergestate
1135 1135 bookmarks.update(self, p1, ret)
1136 1136 for f in changes[0] + changes[1]:
1137 1137 self.dirstate.normal(f)
1138 1138 for f in changes[2]:
1139 1139 self.dirstate.drop(f)
1140 1140 self.dirstate.setparents(ret)
1141 1141 ms.reset()
1142 1142 finally:
1143 1143 wlock.release()
1144 1144
1145 1145 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1146 1146 return ret
1147 1147
1148 1148 def commitctx(self, ctx, error=False):
1149 1149 """Add a new revision to current repository.
1150 1150 Revision information is passed via the context argument.
1151 1151 """
1152 1152
1153 1153 tr = lock = None
1154 1154 removed = list(ctx.removed())
1155 1155 p1, p2 = ctx.p1(), ctx.p2()
1156 1156 user = ctx.user()
1157 1157
1158 1158 lock = self.lock()
1159 1159 try:
1160 1160 tr = self.transaction("commit")
1161 1161 trp = weakref.proxy(tr)
1162 1162
1163 1163 if ctx.files():
1164 1164 m1 = p1.manifest().copy()
1165 1165 m2 = p2.manifest()
1166 1166
1167 1167 # check in files
1168 1168 new = {}
1169 1169 changed = []
1170 1170 linkrev = len(self)
1171 1171 for f in sorted(ctx.modified() + ctx.added()):
1172 1172 self.ui.note(f + "\n")
1173 1173 try:
1174 1174 fctx = ctx[f]
1175 1175 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1176 1176 changed)
1177 1177 m1.set(f, fctx.flags())
1178 1178 except OSError, inst:
1179 1179 self.ui.warn(_("trouble committing %s!\n") % f)
1180 1180 raise
1181 1181 except IOError, inst:
1182 1182 errcode = getattr(inst, 'errno', errno.ENOENT)
1183 1183 if error or errcode and errcode != errno.ENOENT:
1184 1184 self.ui.warn(_("trouble committing %s!\n") % f)
1185 1185 raise
1186 1186 else:
1187 1187 removed.append(f)
1188 1188
1189 1189 # update manifest
1190 1190 m1.update(new)
1191 1191 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1192 1192 drop = [f for f in removed if f in m1]
1193 1193 for f in drop:
1194 1194 del m1[f]
1195 1195 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1196 1196 p2.manifestnode(), (new, drop))
1197 1197 files = changed + removed
1198 1198 else:
1199 1199 mn = p1.manifestnode()
1200 1200 files = []
1201 1201
1202 1202 # update changelog
1203 1203 self.changelog.delayupdate()
1204 1204 n = self.changelog.add(mn, files, ctx.description(),
1205 1205 trp, p1.node(), p2.node(),
1206 1206 user, ctx.date(), ctx.extra().copy())
1207 1207 p = lambda: self.changelog.writepending() and self.root or ""
1208 1208 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1209 1209 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1210 1210 parent2=xp2, pending=p)
1211 1211 self.changelog.finalize(trp)
1212 1212 tr.close()
1213 1213
1214 1214 if self._branchcache:
1215 1215 self.updatebranchcache()
1216 1216 return n
1217 1217 finally:
1218 1218 if tr:
1219 1219 tr.release()
1220 1220 lock.release()
1221 1221
1222 1222 def destroyed(self):
1223 1223 '''Inform the repository that nodes have been destroyed.
1224 1224 Intended for use by strip and rollback, so there's a common
1225 1225 place for anything that has to be done after destroying history.'''
1226 1226 # XXX it might be nice if we could take the list of destroyed
1227 1227 # nodes, but I don't see an easy way for rollback() to do that
1228 1228
1229 1229 # Ensure the persistent tag cache is updated. Doing it now
1230 1230 # means that the tag cache only has to worry about destroyed
1231 1231 # heads immediately after a strip/rollback. That in turn
1232 1232 # guarantees that "cachetip == currenttip" (comparing both rev
1233 1233 # and node) always means no nodes have been added or destroyed.
1234 1234
1235 1235 # XXX this is suboptimal when qrefresh'ing: we strip the current
1236 1236 # head, refresh the tag cache, then immediately add a new head.
1237 1237 # But I think doing it this way is necessary for the "instant
1238 1238 # tag cache retrieval" case to work.
1239 1239 self.invalidatecaches()
1240 1240
1241 1241 def walk(self, match, node=None):
1242 1242 '''
1243 1243 walk recursively through the directory tree or a given
1244 1244 changeset, finding all files matched by the match
1245 1245 function
1246 1246 '''
1247 1247 return self[node].walk(match)
1248 1248
1249 1249 def status(self, node1='.', node2=None, match=None,
1250 1250 ignored=False, clean=False, unknown=False,
1251 1251 listsubrepos=False):
1252 1252 """return status of files between two nodes or node and working directory
1253 1253
1254 1254 If node1 is None, use the first dirstate parent instead.
1255 1255 If node2 is None, compare node1 with working directory.
1256 1256 """
1257 1257
1258 1258 def mfmatches(ctx):
1259 1259 mf = ctx.manifest().copy()
1260 1260 for fn in mf.keys():
1261 1261 if not match(fn):
1262 1262 del mf[fn]
1263 1263 return mf
1264 1264
1265 1265 if isinstance(node1, context.changectx):
1266 1266 ctx1 = node1
1267 1267 else:
1268 1268 ctx1 = self[node1]
1269 1269 if isinstance(node2, context.changectx):
1270 1270 ctx2 = node2
1271 1271 else:
1272 1272 ctx2 = self[node2]
1273 1273
1274 1274 working = ctx2.rev() is None
1275 1275 parentworking = working and ctx1 == self['.']
1276 1276 match = match or matchmod.always(self.root, self.getcwd())
1277 1277 listignored, listclean, listunknown = ignored, clean, unknown
1278 1278
1279 1279 # load earliest manifest first for caching reasons
1280 1280 if not working and ctx2.rev() < ctx1.rev():
1281 1281 ctx2.manifest()
1282 1282
1283 1283 if not parentworking:
1284 1284 def bad(f, msg):
1285 1285 if f not in ctx1:
1286 1286 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1287 1287 match.bad = bad
1288 1288
1289 1289 if working: # we need to scan the working dir
1290 1290 subrepos = []
1291 1291 if '.hgsub' in self.dirstate:
1292 1292 subrepos = ctx2.substate.keys()
1293 1293 s = self.dirstate.status(match, subrepos, listignored,
1294 1294 listclean, listunknown)
1295 1295 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1296 1296
1297 1297 # check for any possibly clean files
1298 1298 if parentworking and cmp:
1299 1299 fixup = []
1300 1300 # do a full compare of any files that might have changed
1301 1301 for f in sorted(cmp):
1302 1302 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1303 1303 or ctx1[f].cmp(ctx2[f])):
1304 1304 modified.append(f)
1305 1305 else:
1306 1306 fixup.append(f)
1307 1307
1308 1308 # update dirstate for files that are actually clean
1309 1309 if fixup:
1310 1310 if listclean:
1311 1311 clean += fixup
1312 1312
1313 1313 try:
1314 1314 # updating the dirstate is optional
1315 1315 # so we don't wait on the lock
1316 1316 wlock = self.wlock(False)
1317 1317 try:
1318 1318 for f in fixup:
1319 1319 self.dirstate.normal(f)
1320 1320 finally:
1321 1321 wlock.release()
1322 1322 except error.LockError:
1323 1323 pass
1324 1324
1325 1325 if not parentworking:
1326 1326 mf1 = mfmatches(ctx1)
1327 1327 if working:
1328 1328 # we are comparing working dir against non-parent
1329 1329 # generate a pseudo-manifest for the working dir
1330 1330 mf2 = mfmatches(self['.'])
1331 1331 for f in cmp + modified + added:
1332 1332 mf2[f] = None
1333 1333 mf2.set(f, ctx2.flags(f))
1334 1334 for f in removed:
1335 1335 if f in mf2:
1336 1336 del mf2[f]
1337 1337 else:
1338 1338 # we are comparing two revisions
1339 1339 deleted, unknown, ignored = [], [], []
1340 1340 mf2 = mfmatches(ctx2)
1341 1341
1342 1342 modified, added, clean = [], [], []
1343 1343 for fn in mf2:
1344 1344 if fn in mf1:
1345 1345 if (fn not in deleted and
1346 1346 (mf1.flags(fn) != mf2.flags(fn) or
1347 1347 (mf1[fn] != mf2[fn] and
1348 1348 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1349 1349 modified.append(fn)
1350 1350 elif listclean:
1351 1351 clean.append(fn)
1352 1352 del mf1[fn]
1353 1353 elif fn not in deleted:
1354 1354 added.append(fn)
1355 1355 removed = mf1.keys()
1356 1356
1357 1357 if working and modified and not self.dirstate._checklink:
1358 1358 # Symlink placeholders may get non-symlink-like contents
1359 1359 # via user error or dereferencing by NFS or Samba servers,
1360 1360 # so we filter out any placeholders that don't look like a
1361 1361 # symlink
1362 1362 sane = []
1363 1363 for f in modified:
1364 1364 if ctx2.flags(f) == 'l':
1365 1365 d = ctx2[f].data()
1366 1366 if len(d) >= 1024 or '\n' in d or util.binary(d):
1367 1367 self.ui.debug('ignoring suspect symlink placeholder'
1368 1368 ' "%s"\n' % f)
1369 1369 continue
1370 1370 sane.append(f)
1371 1371 modified = sane
1372 1372
1373 1373 r = modified, added, removed, deleted, unknown, ignored, clean
1374 1374
1375 1375 if listsubrepos:
1376 1376 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1377 1377 if working:
1378 1378 rev2 = None
1379 1379 else:
1380 1380 rev2 = ctx2.substate[subpath][1]
1381 1381 try:
1382 1382 submatch = matchmod.narrowmatcher(subpath, match)
1383 1383 s = sub.status(rev2, match=submatch, ignored=listignored,
1384 1384 clean=listclean, unknown=listunknown,
1385 1385 listsubrepos=True)
1386 1386 for rfiles, sfiles in zip(r, s):
1387 1387 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1388 1388 except error.LookupError:
1389 1389 self.ui.status(_("skipping missing subrepository: %s\n")
1390 1390 % subpath)
1391 1391
1392 1392 for l in r:
1393 1393 l.sort()
1394 1394 return r
1395 1395
1396 1396 def heads(self, start=None):
1397 1397 heads = self.changelog.heads(start)
1398 1398 # sort the output in rev descending order
1399 1399 return sorted(heads, key=self.changelog.rev, reverse=True)
1400 1400
1401 1401 def branchheads(self, branch=None, start=None, closed=False):
1402 1402 '''return a (possibly filtered) list of heads for the given branch
1403 1403
1404 1404 Heads are returned in topological order, from newest to oldest.
1405 1405 If branch is None, use the dirstate branch.
1406 1406 If start is not None, return only heads reachable from start.
1407 1407 If closed is True, return heads that are marked as closed as well.
1408 1408 '''
1409 1409 if branch is None:
1410 1410 branch = self[None].branch()
1411 1411 branches = self.branchmap()
1412 1412 if branch not in branches:
1413 1413 return []
1414 1414 # the cache returns heads ordered lowest to highest
1415 1415 bheads = list(reversed(branches[branch]))
1416 1416 if start is not None:
1417 1417 # filter out the heads that cannot be reached from startrev
1418 1418 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1419 1419 bheads = [h for h in bheads if h in fbheads]
1420 1420 if not closed:
1421 1421 bheads = [h for h in bheads if
1422 1422 ('close' not in self.changelog.read(h)[5])]
1423 1423 return bheads
1424 1424
1425 1425 def branches(self, nodes):
1426 1426 if not nodes:
1427 1427 nodes = [self.changelog.tip()]
1428 1428 b = []
1429 1429 for n in nodes:
1430 1430 t = n
1431 1431 while True:
1432 1432 p = self.changelog.parents(n)
1433 1433 if p[1] != nullid or p[0] == nullid:
1434 1434 b.append((t, n, p[0], p[1]))
1435 1435 break
1436 1436 n = p[0]
1437 1437 return b
1438 1438
1439 1439 def between(self, pairs):
1440 1440 r = []
1441 1441
1442 1442 for top, bottom in pairs:
1443 1443 n, l, i = top, [], 0
1444 1444 f = 1
1445 1445
1446 1446 while n != bottom and n != nullid:
1447 1447 p = self.changelog.parents(n)[0]
1448 1448 if i == f:
1449 1449 l.append(n)
1450 1450 f = f * 2
1451 1451 n = p
1452 1452 i += 1
1453 1453
1454 1454 r.append(l)
1455 1455
1456 1456 return r
1457 1457
1458 1458 def pull(self, remote, heads=None, force=False):
1459 1459 lock = self.lock()
1460 1460 try:
1461 1461 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1462 1462 force=force)
1463 1463 common, fetch, rheads = tmp
1464 1464 if not fetch:
1465 1465 self.ui.status(_("no changes found\n"))
1466 1466 result = 0
1467 1467 else:
1468 1468 if heads is None and list(common) == [nullid]:
1469 1469 self.ui.status(_("requesting all changes\n"))
1470 1470 elif heads is None and remote.capable('changegroupsubset'):
1471 1471 # issue1320, avoid a race if remote changed after discovery
1472 1472 heads = rheads
1473 1473
1474 1474 if remote.capable('getbundle'):
1475 1475 cg = remote.getbundle('pull', common=common,
1476 1476 heads=heads or rheads)
1477 1477 elif heads is None:
1478 1478 cg = remote.changegroup(fetch, 'pull')
1479 1479 elif not remote.capable('changegroupsubset'):
1480 1480 raise util.Abort(_("partial pull cannot be done because "
1481 1481 "other repository doesn't support "
1482 1482 "changegroupsubset."))
1483 1483 else:
1484 1484 cg = remote.changegroupsubset(fetch, heads, 'pull')
1485 1485 result = self.addchangegroup(cg, 'pull', remote.url(),
1486 1486 lock=lock)
1487 1487 finally:
1488 1488 lock.release()
1489 1489
1490 1490 return result
1491 1491
1492 1492 def checkpush(self, force, revs):
1493 1493 """Extensions can override this function if additional checks have
1494 1494 to be performed before pushing, or call it if they override push
1495 1495 command.
1496 1496 """
1497 1497 pass
1498 1498
1499 1499 def push(self, remote, force=False, revs=None, newbranch=False):
1500 1500 '''Push outgoing changesets (limited by revs) from the current
1501 1501 repository to remote. Return an integer:
1502 1502 - 0 means HTTP error *or* nothing to push
1503 1503 - 1 means we pushed and remote head count is unchanged *or*
1504 1504 we have outgoing changesets but refused to push
1505 1505 - other values as described by addchangegroup()
1506 1506 '''
1507 1507 # there are two ways to push to remote repo:
1508 1508 #
1509 1509 # addchangegroup assumes local user can lock remote
1510 1510 # repo (local filesystem, old ssh servers).
1511 1511 #
1512 1512 # unbundle assumes local user cannot lock remote repo (new ssh
1513 1513 # servers, http servers).
1514 1514
1515 1515 self.checkpush(force, revs)
1516 1516 lock = None
1517 1517 unbundle = remote.capable('unbundle')
1518 1518 if not unbundle:
1519 1519 lock = remote.lock()
1520 1520 try:
1521 1521 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1522 1522 newbranch)
1523 1523 ret = remote_heads
1524 1524 if cg is not None:
1525 1525 if unbundle:
1526 1526 # local repo finds heads on server, finds out what
1527 1527 # revs it must push. once revs transferred, if server
1528 1528 # finds it has different heads (someone else won
1529 1529 # commit/push race), server aborts.
1530 1530 if force:
1531 1531 remote_heads = ['force']
1532 1532 # ssh: return remote's addchangegroup()
1533 1533 # http: return remote's addchangegroup() or 0 for error
1534 1534 ret = remote.unbundle(cg, remote_heads, 'push')
1535 1535 else:
1536 1536 # we return an integer indicating remote head count change
1537 1537 ret = remote.addchangegroup(cg, 'push', self.url(),
1538 1538 lock=lock)
1539 1539 finally:
1540 1540 if lock is not None:
1541 1541 lock.release()
1542 1542
1543 1543 self.ui.debug("checking for updated bookmarks\n")
1544 1544 rb = remote.listkeys('bookmarks')
1545 1545 for k in rb.keys():
1546 1546 if k in self._bookmarks:
1547 1547 nr, nl = rb[k], hex(self._bookmarks[k])
1548 1548 if nr in self:
1549 1549 cr = self[nr]
1550 1550 cl = self[nl]
1551 1551 if cl in cr.descendants():
1552 1552 r = remote.pushkey('bookmarks', k, nr, nl)
1553 1553 if r:
1554 1554 self.ui.status(_("updating bookmark %s\n") % k)
1555 1555 else:
1556 1556 self.ui.warn(_('updating bookmark %s'
1557 1557 ' failed!\n') % k)
1558 1558
1559 1559 return ret
1560 1560
1561 1561 def changegroupinfo(self, nodes, source):
1562 1562 if self.ui.verbose or source == 'bundle':
1563 1563 self.ui.status(_("%d changesets found\n") % len(nodes))
1564 1564 if self.ui.debugflag:
1565 1565 self.ui.debug("list of changesets:\n")
1566 1566 for node in nodes:
1567 1567 self.ui.debug("%s\n" % hex(node))
1568 1568
1569 1569 def changegroupsubset(self, bases, heads, source):
1570 1570 """Compute a changegroup consisting of all the nodes that are
1571 1571 descendants of any of the bases and ancestors of any of the heads.
1572 1572 Return a chunkbuffer object whose read() method will return
1573 1573 successive changegroup chunks.
1574 1574
1575 1575 It is fairly complex as determining which filenodes and which
1576 1576 manifest nodes need to be included for the changeset to be complete
1577 1577 is non-trivial.
1578 1578
1579 1579 Another wrinkle is doing the reverse, figuring out which changeset in
1580 1580 the changegroup a particular filenode or manifestnode belongs to.
1581 1581 """
1582 1582 cl = self.changelog
1583 1583 if not bases:
1584 1584 bases = [nullid]
1585 1585 csets, bases, heads = cl.nodesbetween(bases, heads)
1586 1586 # We assume that all ancestors of bases are known
1587 1587 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1588 1588 return self._changegroupsubset(common, csets, heads, source)
1589 1589
1590 1590 def getbundle(self, source, heads=None, common=None):
1591 1591 """Like changegroupsubset, but returns the set difference between the
1592 1592 ancestors of heads and the ancestors of common.
1593 1593
1594 1594 If heads is None, use the local heads. If common is None, use [nullid].
1595 1595
1596 1596 The nodes in common might not all be known locally due to the way the
1597 1597 current discovery protocol works.
1598 1598 """
1599 1599 cl = self.changelog
1600 1600 if common:
1601 1601 nm = cl.nodemap
1602 1602 common = [n for n in common if n in nm]
1603 1603 else:
1604 1604 common = [nullid]
1605 1605 if not heads:
1606 1606 heads = cl.heads()
1607 1607 common, missing = cl.findcommonmissing(common, heads)
1608 1608 if not missing:
1609 1609 return None
1610 1610 return self._changegroupsubset(common, missing, heads, source)
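
    # A minimal usage sketch with hypothetical names; a None result means
    # the other side already has everything:
    #
    #   cg = repo.getbundle('serve', heads=theirheads, common=commonnodes)
    #   if cg is None:
    #       pass   # nothing to send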

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

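        # Note: bundle10 calls lookup(revlog, node) for each node it emits
        # and expects back the changelog node that introduced it (the
        # "linknode"); the side effects above (filling mfs and fnodes,
        # updating progress) piggyback on that traversal.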
        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
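
        # The reorder knob read above comes from user configuration; a
        # sketch of the corresponding hgrc section:
        #
        #   [bundle]
        #   reorder = auto   # or a boolean accepted by util.parsebool
        #
        # 'auto' maps to None here, deferring the choice to the revlog.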

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to look up the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # the set of changed files is collected by lookup() as a side
            # effect of walking the changelog group below

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
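        # Interpreting that return value at a hypothetical call site:
        #
        #   ret = repo.addchangegroup(cg, 'pull', remote.url())
        #   if ret == 0:
        #       pass   # nothing changed
        #   elif ret > 1:
        #       pass   # ret - 1 new heads appeared
        #   elif ret == 1:
        #       pass   # changesets added, head count unchanged
        #   else:      # ret < 0
        #       pass   # -ret - 1 heads went away
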
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh
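            # Worked example: 2 heads before, 4 after, one of them newly
            # closed ('close' in its extra) gives dh = (4 - 2) - 1 = 1,
            # so htext is " (+1 heads)".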

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
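            # The rest of the stream, as parsed below:
            #   "<total_files> <total_bytes>\n", then for each file
            #   "<name>\0<size>\n" followed by <size> bytes of revlog data.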
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new
            # format-related requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
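
    # A minimal usage sketch (hypothetical 'repo' and 'remote'):
    #
    #   repo.clone(remote, stream=True)    # streams when the server allows
    #   repo.clone(remote, heads=[node])   # an explicit heads list forces pull()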

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret
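
    # push() above drives the remote side of this interface to advance a
    # bookmark, passing hex nodes as the old and new values:
    #
    #   remote.pushkey('bookmarks', name, oldhex, newhex)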

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
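
    # Likewise, the bookmark comparison in push() starts from:
    #
    #   rb = remote.listkeys('bookmarks')   # {bookmark name: hex node}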

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])
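
    # A minimal usage sketch: saving the message up front so it can be
    # recovered (from .hg/last-message.txt) if the commit later aborts:
    #
    #   msgfn = repo.savecommitmessage(message)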

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
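
# A sketch of the intended use: the returned callback is handed to a
# transaction to run once it closes, e.g. (hypothetical file names):
#
#   after = aftertrans([('journal', 'undo')])
#   after()   # renames 'journal' -> 'undo' via util.rename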

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
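
# For example: undoname('.hg/journal.dirstate') -> '.hg/undo.dirstate'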

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True