localrepo: decorate manifest() with filecache
Idan Kamara
r14934:019fe0b0 default
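This commit changes localrepository.manifest() from @propertycache to
@filecache('00manifest.i', True): the cached manifest object is now tied to
the on-disk state of 00manifest.i, so invalidate() no longer forces a full
reread when the file has not actually changed. Below is a minimal sketch of
the caching pattern, written against the repo API visible in this file
(join/sjoin/_filecache). It is illustrative only and is NOT Mercurial's
actual scmutil.filecache; the stat signature and names are simplified.

import os

class filecacheentry(object):
    """Record a stat signature for a path so staleness can be detected."""
    def __init__(self, path):
        self.path = path
        self.cachestat = self._stat(path)
        self.obj = None                        # the cached property value

    @staticmethod
    def _stat(path):
        try:
            st = os.stat(path)
            return (st.st_mtime, st.st_size)   # simplified signature
        except OSError:
            return None

    def changed(self):
        return self._stat(self.path) != self.cachestat

class filecache(object):
    """Non-data descriptor: compute once, cache on the instance, and
    recompute only when the tracked file's stat signature changes."""
    def __init__(self, path, instore=False):
        self.path = path        # file to watch, relative to .hg/ or the store
        self.instore = instore  # True: resolve via repo.sjoin, else repo.join

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        entry = obj._filecache.get(self.name)
        if entry is None:
            path = (obj.sjoin if self.instore else obj.join)(self.path)
            entry = filecacheentry(path)
            entry.obj = self.func(obj)
            obj._filecache[self.name] = entry
        elif entry.changed():
            entry.cachestat = entry._stat(entry.path)
            entry.obj = self.func(obj)
        # Cache on the instance: later lookups bypass __get__ entirely,
        # until invalidate() removes the attribute with delattr().
        obj.__dict__[self.name] = entry.obj
        return entry.obj

With this pattern, invalidate() (further down in the diff) can cheaply
delattr the changelog, manifest and bookmark attributes on every lock
acquisition: each one is rebuilt on next access only if its backing file
really changed.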
@@ -1,2037 +1,2037 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=False):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 35 self.opener = scmutil.opener(self.path)
36 36 self.wopener = scmutil.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39
40 40 try:
41 41 self.ui.readconfig(self.join("hgrc"), self.root)
42 42 extensions.loadall(self.ui)
43 43 except IOError:
44 44 pass
45 45
46 46 if not os.path.isdir(self.path):
47 47 if create:
48 48 if not os.path.exists(path):
49 49 util.makedirs(path)
50 50 util.makedir(self.path, notindexed=True)
51 51 requirements = ["revlogv1"]
52 52 if self.ui.configbool('format', 'usestore', True):
53 53 os.mkdir(os.path.join(self.path, "store"))
54 54 requirements.append("store")
55 55 if self.ui.configbool('format', 'usefncache', True):
56 56 requirements.append("fncache")
57 57 if self.ui.configbool('format', 'dotencode', True):
58 58 requirements.append('dotencode')
59 59 # create an invalid changelog
60 60 self.opener.append(
61 61 "00changelog.i",
62 62 '\0\0\0\2' # represents revlogv2
63 63 ' dummy changelog to prevent using the old repo layout'
64 64 )
65 65 if self.ui.configbool('format', 'generaldelta', False):
66 66 requirements.append("generaldelta")
67 67 requirements = set(requirements)
68 68 else:
69 69 raise error.RepoError(_("repository %s not found") % path)
70 70 elif create:
71 71 raise error.RepoError(_("repository %s already exists") % path)
72 72 else:
73 73 try:
74 74 requirements = scmutil.readrequires(self.opener, self.supported)
75 75 except IOError, inst:
76 76 if inst.errno != errno.ENOENT:
77 77 raise
78 78 requirements = set()
79 79
80 80 self.sharedpath = self.path
81 81 try:
82 82 s = os.path.realpath(self.opener.read("sharedpath"))
83 83 if not os.path.exists(s):
84 84 raise error.RepoError(
85 85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 86 self.sharedpath = s
87 87 except IOError, inst:
88 88 if inst.errno != errno.ENOENT:
89 89 raise
90 90
91 91 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
92 92 self.spath = self.store.path
93 93 self.sopener = self.store.opener
94 94 self.sjoin = self.store.join
95 95 self.opener.createmode = self.store.createmode
96 96 self._applyrequirements(requirements)
97 97 if create:
98 98 self._writerequirements()
99 99
100 100 # These two define the set of tags for this repository. _tags
101 101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 102 # 'local'. (Global tags are defined by .hgtags across all
103 103 # heads, and local tags are defined in .hg/localtags.) They
104 104 # constitute the in-memory cache of tags.
105 105 self._tags = None
106 106 self._tagtypes = None
107 107
108 108 self._branchcache = None
109 109 self._branchcachetip = None
110 110 self.nodetagscache = None
111 111 self.filterpats = {}
112 112 self._datafilters = {}
113 113 self._transref = self._lockref = self._wlockref = None
114 114
115 115 # A cache for various files under .hg/ that tracks file changes
116 116 # (used by the filecache decorator)
117 117 #
118 118 # Maps a property name to its util.filecacheentry
119 119 self._filecache = {}
120 120
121 121 def _applyrequirements(self, requirements):
122 122 self.requirements = requirements
123 123 openerreqs = set(('revlogv1', 'generaldelta'))
124 124 self.sopener.options = dict((r, 1) for r in requirements
125 125 if r in openerreqs)
126 126
127 127 def _writerequirements(self):
128 128 reqfile = self.opener("requires", "w")
129 129 for r in self.requirements:
130 130 reqfile.write("%s\n" % r)
131 131 reqfile.close()
132 132
133 133 def _checknested(self, path):
134 134 """Determine if path is a legal nested repository."""
135 135 if not path.startswith(self.root):
136 136 return False
137 137 subpath = path[len(self.root) + 1:]
138 138
139 139 # XXX: Checking against the current working copy is wrong in
140 140 # the sense that it can reject things like
141 141 #
142 142 # $ hg cat -r 10 sub/x.txt
143 143 #
144 144 # if sub/ is no longer a subrepository in the working copy
145 145 # parent revision.
146 146 #
147 147 # However, it can of course also allow things that would have
148 148 # been rejected before, such as the above cat command if sub/
149 149 # is a subrepository now, but was a normal directory before.
150 150 # The old path auditor would have rejected by mistake since it
151 151 # panics when it sees sub/.hg/.
152 152 #
153 153 # All in all, checking against the working copy seems sensible
154 154 # since we want to prevent access to nested repositories on
155 155 # the filesystem *now*.
156 156 ctx = self[None]
157 157 parts = util.splitpath(subpath)
158 158 while parts:
159 159 prefix = os.sep.join(parts)
160 160 if prefix in ctx.substate:
161 161 if prefix == subpath:
162 162 return True
163 163 else:
164 164 sub = ctx.sub(prefix)
165 165 return sub.checknested(subpath[len(prefix) + 1:])
166 166 else:
167 167 parts.pop()
168 168 return False
169 169
170 170 @filecache('bookmarks')
171 171 def _bookmarks(self):
172 172 return bookmarks.read(self)
173 173
174 174 @filecache('bookmarks.current')
175 175 def _bookmarkcurrent(self):
176 176 return bookmarks.readcurrent(self)
177 177
178 178 @filecache('00changelog.i', True)
179 179 def changelog(self):
180 180 c = changelog.changelog(self.sopener)
181 181 if 'HG_PENDING' in os.environ:
182 182 p = os.environ['HG_PENDING']
183 183 if p.startswith(self.root):
184 184 c.readpending('00changelog.i.a')
185 185 return c
186 186
187 @propertycache
187 @filecache('00manifest.i', True)
188 188 def manifest(self):
189 189 return manifest.manifest(self.sopener)
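# Note on the change above (behavior summary, not new code): manifest was
# previously a plain @propertycache, so the first
# manifest.manifest(self.sopener) instance lived until invalidate()
# dropped it and was then unconditionally rebuilt. With
# @filecache('00manifest.i', True) the cached object is also keyed to the
# stat of 00manifest.i (True selects the store path), so after an
# invalidate() the same object is reused unless the revlog actually
# changed on disk. Hypothetical illustration:
#
#   repo.manifest        # first access: opens 00manifest.i and caches
#   repo.invalidate()    # drops the attribute from repo.__dict__
#   repo.manifest        # same object back if 00manifest.i is unchanged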
190 190
191 191 @filecache('dirstate')
192 192 def dirstate(self):
193 193 warned = [0]
194 194 def validate(node):
195 195 try:
196 196 self.changelog.rev(node)
197 197 return node
198 198 except error.LookupError:
199 199 if not warned[0]:
200 200 warned[0] = True
201 201 self.ui.warn(_("warning: ignoring unknown"
202 202 " working parent %s!\n") % short(node))
203 203 return nullid
204 204
205 205 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
206 206
207 207 def __getitem__(self, changeid):
208 208 if changeid is None:
209 209 return context.workingctx(self)
210 210 return context.changectx(self, changeid)
211 211
212 212 def __contains__(self, changeid):
213 213 try:
214 214 return bool(self.lookup(changeid))
215 215 except error.RepoLookupError:
216 216 return False
217 217
218 218 def __nonzero__(self):
219 219 return True
220 220
221 221 def __len__(self):
222 222 return len(self.changelog)
223 223
224 224 def __iter__(self):
225 225 for i in xrange(len(self)):
226 226 yield i
227 227
228 228 def set(self, expr, *args):
229 229 '''
230 230 Yield a context for each matching revision, after doing arg
231 231 replacement via revset.formatspec
232 232 '''
233 233
234 234 expr = revset.formatspec(expr, *args)
235 235 m = revset.match(None, expr)
236 236 for r in m(self, range(len(self))):
237 237 yield self[r]
238 238
239 239 def url(self):
240 240 return 'file:' + self.root
241 241
242 242 def hook(self, name, throw=False, **args):
243 243 return hook.hook(self.ui, self, name, throw, **args)
244 244
245 245 tag_disallowed = ':\r\n'
246 246
247 247 def _tag(self, names, node, message, local, user, date, extra={}):
248 248 if isinstance(names, str):
249 249 allchars = names
250 250 names = (names,)
251 251 else:
252 252 allchars = ''.join(names)
253 253 for c in self.tag_disallowed:
254 254 if c in allchars:
255 255 raise util.Abort(_('%r cannot be used in a tag name') % c)
256 256
257 257 branches = self.branchmap()
258 258 for name in names:
259 259 self.hook('pretag', throw=True, node=hex(node), tag=name,
260 260 local=local)
261 261 if name in branches:
262 262 self.ui.warn(_("warning: tag %s conflicts with existing"
263 263 " branch name\n") % name)
264 264
265 265 def writetags(fp, names, munge, prevtags):
266 266 fp.seek(0, 2)
267 267 if prevtags and prevtags[-1] != '\n':
268 268 fp.write('\n')
269 269 for name in names:
270 270 m = munge and munge(name) or name
271 271 if self._tagtypes and name in self._tagtypes:
272 272 old = self._tags.get(name, nullid)
273 273 fp.write('%s %s\n' % (hex(old), m))
274 274 fp.write('%s %s\n' % (hex(node), m))
275 275 fp.close()
276 276
277 277 prevtags = ''
278 278 if local:
279 279 try:
280 280 fp = self.opener('localtags', 'r+')
281 281 except IOError:
282 282 fp = self.opener('localtags', 'a')
283 283 else:
284 284 prevtags = fp.read()
285 285
286 286 # local tags are stored in the current charset
287 287 writetags(fp, names, None, prevtags)
288 288 for name in names:
289 289 self.hook('tag', node=hex(node), tag=name, local=local)
290 290 return
291 291
292 292 try:
293 293 fp = self.wfile('.hgtags', 'rb+')
294 294 except IOError, e:
295 295 if e.errno != errno.ENOENT:
296 296 raise
297 297 fp = self.wfile('.hgtags', 'ab')
298 298 else:
299 299 prevtags = fp.read()
300 300
301 301 # committed tags are stored in UTF-8
302 302 writetags(fp, names, encoding.fromlocal, prevtags)
303 303
304 304 fp.close()
305 305
306 306 if '.hgtags' not in self.dirstate:
307 307 self[None].add(['.hgtags'])
308 308
309 309 m = matchmod.exact(self.root, '', ['.hgtags'])
310 310 tagnode = self.commit(message, user, date, extra=extra, match=m)
311 311
312 312 for name in names:
313 313 self.hook('tag', node=hex(node), tag=name, local=local)
314 314
315 315 return tagnode
316 316
317 317 def tag(self, names, node, message, local, user, date):
318 318 '''tag a revision with one or more symbolic names.
319 319
320 320 names is a list of strings or, when adding a single tag, names may be a
321 321 string.
322 322
323 323 if local is True, the tags are stored in a per-repository file.
324 324 otherwise, they are stored in the .hgtags file, and a new
325 325 changeset is committed with the change.
326 326
327 327 keyword arguments:
328 328
329 329 local: whether to store tags in non-version-controlled file
330 330 (default False)
331 331
332 332 message: commit message to use if committing
333 333
334 334 user: name of user to use if committing
335 335
336 336 date: date tuple to use if committing'''
337 337
338 338 if not local:
339 339 for x in self.status()[:5]:
340 340 if '.hgtags' in x:
341 341 raise util.Abort(_('working copy of .hgtags is changed '
342 342 '(please commit .hgtags manually)'))
343 343
344 344 self.tags() # instantiate the cache
345 345 self._tag(names, node, message, local, user, date)
346 346
347 347 def tags(self):
348 348 '''return a mapping of tag to node'''
349 349 if self._tags is None:
350 350 (self._tags, self._tagtypes) = self._findtags()
351 351
352 352 return self._tags
353 353
354 354 def _findtags(self):
355 355 '''Do the hard work of finding tags. Return a pair of dicts
356 356 (tags, tagtypes) where tags maps tag name to node, and tagtypes
357 357 maps tag name to a string like \'global\' or \'local\'.
358 358 Subclasses or extensions are free to add their own tags, but
359 359 should be aware that the returned dicts will be retained for the
360 360 duration of the localrepo object.'''
361 361
362 362 # XXX what tagtype should subclasses/extensions use? Currently
363 363 # mq and bookmarks add tags, but do not set the tagtype at all.
364 364 # Should each extension invent its own tag type? Should there
365 365 # be one tagtype for all such "virtual" tags? Or is the status
366 366 # quo fine?
367 367
368 368 alltags = {} # map tag name to (node, hist)
369 369 tagtypes = {}
370 370
371 371 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
372 372 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
373 373
374 374 # Build the return dicts. Have to re-encode tag names because
375 375 # the tags module always uses UTF-8 (in order not to lose info
376 376 # writing to the cache), but the rest of Mercurial wants them in
377 377 # local encoding.
378 378 tags = {}
379 379 for (name, (node, hist)) in alltags.iteritems():
380 380 if node != nullid:
381 381 try:
382 382 # ignore tags to unknown nodes
383 383 self.changelog.lookup(node)
384 384 tags[encoding.tolocal(name)] = node
385 385 except error.LookupError:
386 386 pass
387 387 tags['tip'] = self.changelog.tip()
388 388 tagtypes = dict([(encoding.tolocal(name), value)
389 389 for (name, value) in tagtypes.iteritems()])
390 390 return (tags, tagtypes)
391 391
392 392 def tagtype(self, tagname):
393 393 '''
394 394 return the type of the given tag. result can be:
395 395
396 396 'local' : a local tag
397 397 'global' : a global tag
398 398 None : tag does not exist
399 399 '''
400 400
401 401 self.tags()
402 402
403 403 return self._tagtypes.get(tagname)
404 404
405 405 def tagslist(self):
406 406 '''return a list of tags ordered by revision'''
407 407 l = []
408 408 for t, n in self.tags().iteritems():
409 409 r = self.changelog.rev(n)
410 410 l.append((r, t, n))
411 411 return [(t, n) for r, t, n in sorted(l)]
412 412
413 413 def nodetags(self, node):
414 414 '''return the tags associated with a node'''
415 415 if not self.nodetagscache:
416 416 self.nodetagscache = {}
417 417 for t, n in self.tags().iteritems():
418 418 self.nodetagscache.setdefault(n, []).append(t)
419 419 for tags in self.nodetagscache.itervalues():
420 420 tags.sort()
421 421 return self.nodetagscache.get(node, [])
422 422
423 423 def nodebookmarks(self, node):
424 424 marks = []
425 425 for bookmark, n in self._bookmarks.iteritems():
426 426 if n == node:
427 427 marks.append(bookmark)
428 428 return sorted(marks)
429 429
430 430 def _branchtags(self, partial, lrev):
431 431 # TODO: rename this function?
432 432 tiprev = len(self) - 1
433 433 if lrev != tiprev:
434 434 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
435 435 self._updatebranchcache(partial, ctxgen)
436 436 self._writebranchcache(partial, self.changelog.tip(), tiprev)
437 437
438 438 return partial
439 439
440 440 def updatebranchcache(self):
441 441 tip = self.changelog.tip()
442 442 if self._branchcache is not None and self._branchcachetip == tip:
443 443 return self._branchcache
444 444
445 445 oldtip = self._branchcachetip
446 446 self._branchcachetip = tip
447 447 if oldtip is None or oldtip not in self.changelog.nodemap:
448 448 partial, last, lrev = self._readbranchcache()
449 449 else:
450 450 lrev = self.changelog.rev(oldtip)
451 451 partial = self._branchcache
452 452
453 453 self._branchtags(partial, lrev)
454 454 # this private cache holds all heads (not just tips)
455 455 self._branchcache = partial
456 456
457 457 def branchmap(self):
458 458 '''returns a dictionary {branch: [branchheads]}'''
459 459 self.updatebranchcache()
460 460 return self._branchcache
461 461
462 462 def branchtags(self):
463 463 '''return a dict where branch names map to the tipmost head of
464 464 the branch; open heads come before closed'''
465 465 bt = {}
466 466 for bn, heads in self.branchmap().iteritems():
467 467 tip = heads[-1]
468 468 for h in reversed(heads):
469 469 if 'close' not in self.changelog.read(h)[5]:
470 470 tip = h
471 471 break
472 472 bt[bn] = tip
473 473 return bt
474 474
475 475 def _readbranchcache(self):
476 476 partial = {}
477 477 try:
478 478 f = self.opener("cache/branchheads")
479 479 lines = f.read().split('\n')
480 480 f.close()
481 481 except (IOError, OSError):
482 482 return {}, nullid, nullrev
483 483
484 484 try:
485 485 last, lrev = lines.pop(0).split(" ", 1)
486 486 last, lrev = bin(last), int(lrev)
487 487 if lrev >= len(self) or self[lrev].node() != last:
488 488 # invalidate the cache
489 489 raise ValueError('invalidating branch cache (tip differs)')
490 490 for l in lines:
491 491 if not l:
492 492 continue
493 493 node, label = l.split(" ", 1)
494 494 label = encoding.tolocal(label.strip())
495 495 partial.setdefault(label, []).append(bin(node))
496 496 except KeyboardInterrupt:
497 497 raise
498 498 except Exception, inst:
499 499 if self.ui.debugflag:
500 500 self.ui.warn(str(inst), '\n')
501 501 partial, last, lrev = {}, nullid, nullrev
502 502 return partial, last, lrev
503 503
504 504 def _writebranchcache(self, branches, tip, tiprev):
505 505 try:
506 506 f = self.opener("cache/branchheads", "w", atomictemp=True)
507 507 f.write("%s %s\n" % (hex(tip), tiprev))
508 508 for label, nodes in branches.iteritems():
509 509 for node in nodes:
510 510 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
511 511 f.rename()
512 512 except (IOError, OSError):
513 513 pass
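# Cache file format written by _writebranchcache() above, with
# hypothetical values:
#
#   1de3...a27b 4200        <- "tipnode tiprev": validation line
#   9f1c...77ab default     <- one "node label" line per branch head
#   66aa...0c3d stable
#
# _readbranchcache() inverts this: if the first line no longer matches the
# current tip it discards the cache, otherwise it rebuilds the
# {label: [node, ...]} mapping from the remaining lines.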
514 514
515 515 def _updatebranchcache(self, partial, ctxgen):
516 516 # collect new branch entries
517 517 newbranches = {}
518 518 for c in ctxgen:
519 519 newbranches.setdefault(c.branch(), []).append(c.node())
520 520 # if older branchheads are reachable from new ones, they aren't
521 521 # really branchheads. Note checking parents is insufficient:
522 522 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
523 523 for branch, newnodes in newbranches.iteritems():
524 524 bheads = partial.setdefault(branch, [])
525 525 bheads.extend(newnodes)
526 526 if len(bheads) <= 1:
527 527 continue
528 528 bheads = sorted(bheads, key=lambda x: self[x].rev())
529 529 # starting from tip means fewer passes over reachable
530 530 while newnodes:
531 531 latest = newnodes.pop()
532 532 if latest not in bheads:
533 533 continue
534 534 minbhrev = self[bheads[0]].node()
535 535 reachable = self.changelog.reachable(latest, minbhrev)
536 536 reachable.remove(latest)
537 537 if reachable:
538 538 bheads = [b for b in bheads if b not in reachable]
539 539 partial[branch] = bheads
540 540
541 541 def lookup(self, key):
542 542 if isinstance(key, int):
543 543 return self.changelog.node(key)
544 544 elif key == '.':
545 545 return self.dirstate.p1()
546 546 elif key == 'null':
547 547 return nullid
548 548 elif key == 'tip':
549 549 return self.changelog.tip()
550 550 n = self.changelog._match(key)
551 551 if n:
552 552 return n
553 553 if key in self._bookmarks:
554 554 return self._bookmarks[key]
555 555 if key in self.tags():
556 556 return self.tags()[key]
557 557 if key in self.branchtags():
558 558 return self.branchtags()[key]
559 559 n = self.changelog._partialmatch(key)
560 560 if n:
561 561 return n
562 562
563 563 # can't find key, check if it might have come from damaged dirstate
564 564 if key in self.dirstate.parents():
565 565 raise error.Abort(_("working directory has unknown parent '%s'!")
566 566 % short(key))
567 567 try:
568 568 if len(key) == 20:
569 569 key = hex(key)
570 570 except TypeError:
571 571 pass
572 572 raise error.RepoLookupError(_("unknown revision '%s'") % key)
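# Resolution order implemented by lookup() above: integer revision; the
# special names '.', 'null' and 'tip'; an exact changelog match; bookmarks;
# tags; branch names (via branchtags()); and finally an unambiguous partial
# hex prefix. Anything unresolved raises RepoLookupError, with a special
# error if the key is an unknown dirstate parent (damaged dirstate).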
573 573
574 574 def lookupbranch(self, key, remote=None):
575 575 repo = remote or self
576 576 if key in repo.branchmap():
577 577 return key
578 578
579 579 repo = (remote and remote.local()) and remote or self
580 580 return repo[key].branch()
581 581
582 582 def known(self, nodes):
583 583 nm = self.changelog.nodemap
584 584 return [(n in nm) for n in nodes]
585 585
586 586 def local(self):
587 587 return self
588 588
589 589 def join(self, f):
590 590 return os.path.join(self.path, f)
591 591
592 592 def wjoin(self, f):
593 593 return os.path.join(self.root, f)
594 594
595 595 def file(self, f):
596 596 if f[0] == '/':
597 597 f = f[1:]
598 598 return filelog.filelog(self.sopener, f)
599 599
600 600 def changectx(self, changeid):
601 601 return self[changeid]
602 602
603 603 def parents(self, changeid=None):
604 604 '''get list of changectxs for parents of changeid'''
605 605 return self[changeid].parents()
606 606
607 607 def filectx(self, path, changeid=None, fileid=None):
608 608 """changeid can be a changeset revision, node, or tag.
609 609 fileid can be a file revision or node."""
610 610 return context.filectx(self, path, changeid, fileid)
611 611
612 612 def getcwd(self):
613 613 return self.dirstate.getcwd()
614 614
615 615 def pathto(self, f, cwd=None):
616 616 return self.dirstate.pathto(f, cwd)
617 617
618 618 def wfile(self, f, mode='r'):
619 619 return self.wopener(f, mode)
620 620
621 621 def _link(self, f):
622 622 return os.path.islink(self.wjoin(f))
623 623
624 624 def _loadfilter(self, filter):
625 625 if filter not in self.filterpats:
626 626 l = []
627 627 for pat, cmd in self.ui.configitems(filter):
628 628 if cmd == '!':
629 629 continue
630 630 mf = matchmod.match(self.root, '', [pat])
631 631 fn = None
632 632 params = cmd
633 633 for name, filterfn in self._datafilters.iteritems():
634 634 if cmd.startswith(name):
635 635 fn = filterfn
636 636 params = cmd[len(name):].lstrip()
637 637 break
638 638 if not fn:
639 639 fn = lambda s, c, **kwargs: util.filter(s, c)
640 640 # Wrap old filters not supporting keyword arguments
641 641 if not inspect.getargspec(fn)[2]:
642 642 oldfn = fn
643 643 fn = lambda s, c, **kwargs: oldfn(s, c)
644 644 l.append((mf, fn, params))
645 645 self.filterpats[filter] = l
646 646 return self.filterpats[filter]
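# The pattern/command pairs come from an hgrc section named after the
# filter map ('encode' or 'decode'); a hypothetical example:
#
#   [encode]
#   **.txt = tempfile: unix2dos INFILE OUTFILE
#   **.jpg = !
#
# A command of '!' disables the pattern. A command prefix matching a name
# registered via adddatafilter() selects that filter function; otherwise
# the fallback wraps util.filter, which runs the command externally.
# (Section values above are illustrative only.)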
647 647
648 648 def _filter(self, filterpats, filename, data):
649 649 for mf, fn, cmd in filterpats:
650 650 if mf(filename):
651 651 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
652 652 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
653 653 break
654 654
655 655 return data
656 656
657 657 @propertycache
658 658 def _encodefilterpats(self):
659 659 return self._loadfilter('encode')
660 660
661 661 @propertycache
662 662 def _decodefilterpats(self):
663 663 return self._loadfilter('decode')
664 664
665 665 def adddatafilter(self, name, filter):
666 666 self._datafilters[name] = filter
667 667
668 668 def wread(self, filename):
669 669 if self._link(filename):
670 670 data = os.readlink(self.wjoin(filename))
671 671 else:
672 672 data = self.wopener.read(filename)
673 673 return self._filter(self._encodefilterpats, filename, data)
674 674
675 675 def wwrite(self, filename, data, flags):
676 676 data = self._filter(self._decodefilterpats, filename, data)
677 677 if 'l' in flags:
678 678 self.wopener.symlink(data, filename)
679 679 else:
680 680 self.wopener.write(filename, data)
681 681 if 'x' in flags:
682 682 util.setflags(self.wjoin(filename), False, True)
683 683
684 684 def wwritedata(self, filename, data):
685 685 return self._filter(self._decodefilterpats, filename, data)
686 686
687 687 def transaction(self, desc):
688 688 tr = self._transref and self._transref() or None
689 689 if tr and tr.running():
690 690 return tr.nest()
691 691
692 692 # abort here if the journal already exists
693 693 if os.path.exists(self.sjoin("journal")):
694 694 raise error.RepoError(
695 695 _("abandoned transaction found - run hg recover"))
696 696
697 697 journalfiles = self._writejournal(desc)
698 698 renames = [(x, undoname(x)) for x in journalfiles]
699 699
700 700 tr = transaction.transaction(self.ui.warn, self.sopener,
701 701 self.sjoin("journal"),
702 702 aftertrans(renames),
703 703 self.store.createmode)
704 704 self._transref = weakref.ref(tr)
705 705 return tr
706 706
707 707 def _writejournal(self, desc):
708 708 # save dirstate for rollback
709 709 try:
710 710 ds = self.opener.read("dirstate")
711 711 except IOError:
712 712 ds = ""
713 713 self.opener.write("journal.dirstate", ds)
714 714 self.opener.write("journal.branch",
715 715 encoding.fromlocal(self.dirstate.branch()))
716 716 self.opener.write("journal.desc",
717 717 "%d\n%s\n" % (len(self), desc))
718 718
719 719 bkname = self.join('bookmarks')
720 720 if os.path.exists(bkname):
721 721 util.copyfile(bkname, self.join('journal.bookmarks'))
722 722 else:
723 723 self.opener.write('journal.bookmarks', '')
724 724
725 725 return (self.sjoin('journal'), self.join('journal.dirstate'),
726 726 self.join('journal.branch'), self.join('journal.desc'),
727 727 self.join('journal.bookmarks'))
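# _writejournal() snapshots everything rollback() needs. For example,
# journal.desc holds two lines (values hypothetical):
#
#   42        <- len(self): number of revisions before the transaction
#   commit    <- the desc string passed to transaction()
#
# After the transaction closes, these files are renamed to undo.* (see the
# aftertrans(renames) hookup in transaction() above), which is exactly
# what rollback() reads back as undo.desc, undo.dirstate and friends.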
728 728
729 729 def recover(self):
730 730 lock = self.lock()
731 731 try:
732 732 if os.path.exists(self.sjoin("journal")):
733 733 self.ui.status(_("rolling back interrupted transaction\n"))
734 734 transaction.rollback(self.sopener, self.sjoin("journal"),
735 735 self.ui.warn)
736 736 self.invalidate()
737 737 return True
738 738 else:
739 739 self.ui.warn(_("no interrupted transaction available\n"))
740 740 return False
741 741 finally:
742 742 lock.release()
743 743
744 744 def rollback(self, dryrun=False):
745 745 wlock = lock = None
746 746 try:
747 747 wlock = self.wlock()
748 748 lock = self.lock()
749 749 if os.path.exists(self.sjoin("undo")):
750 750 try:
751 751 args = self.opener.read("undo.desc").splitlines()
752 752 if len(args) >= 3 and self.ui.verbose:
753 753 desc = _("repository tip rolled back to revision %s"
754 754 " (undo %s: %s)\n") % (
755 755 int(args[0]) - 1, args[1], args[2])
756 756 elif len(args) >= 2:
757 757 desc = _("repository tip rolled back to revision %s"
758 758 " (undo %s)\n") % (
759 759 int(args[0]) - 1, args[1])
760 760 except IOError:
761 761 desc = _("rolling back unknown transaction\n")
762 762 self.ui.status(desc)
763 763 if dryrun:
764 764 return
765 765 transaction.rollback(self.sopener, self.sjoin("undo"),
766 766 self.ui.warn)
767 767 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
768 768 if os.path.exists(self.join('undo.bookmarks')):
769 769 util.rename(self.join('undo.bookmarks'),
770 770 self.join('bookmarks'))
771 771 try:
772 772 branch = self.opener.read("undo.branch")
773 773 self.dirstate.setbranch(branch)
774 774 except IOError:
775 775 self.ui.warn(_("named branch could not be reset, "
776 776 "current branch is still: %s\n")
777 777 % self.dirstate.branch())
778 778 self.invalidate()
779 779 self.dirstate.invalidate()
780 780 self.destroyed()
781 781 parents = tuple([p.rev() for p in self.parents()])
782 782 if len(parents) > 1:
783 783 self.ui.status(_("working directory now based on "
784 784 "revisions %d and %d\n") % parents)
785 785 else:
786 786 self.ui.status(_("working directory now based on "
787 787 "revision %d\n") % parents)
788 788 else:
789 789 self.ui.warn(_("no rollback information available\n"))
790 790 return 1
791 791 finally:
792 792 release(lock, wlock)
793 793
794 794 def invalidatecaches(self):
795 795 self._tags = None
796 796 self._tagtypes = None
797 797 self.nodetagscache = None
798 798 self._branchcache = None # in UTF-8
799 799 self._branchcachetip = None
800 800
801 801 def invalidatedirstate(self):
802 802 '''Invalidates the dirstate, causing the next call to dirstate
803 803 to check if it was modified since the last time it was read,
804 804 rereading it if it has.
805 805
806 806 This is different from dirstate.invalidate() in that it doesn't
807 807 always reread the dirstate. Use dirstate.invalidate() if you want
808 808 to explicitly read the dirstate again (i.e. restoring it to a
809 809 previous known good state).'''
810 810 try:
811 811 delattr(self, 'dirstate')
812 812 except AttributeError:
813 813 pass
814 814
815 815 def invalidate(self):
816 816 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
817 817 if a in self.__dict__:
818 818 delattr(self, a)
819 819 self.invalidatecaches()
820 820
821 821 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
822 822 try:
823 823 l = lock.lock(lockname, 0, releasefn, desc=desc)
824 824 except error.LockHeld, inst:
825 825 if not wait:
826 826 raise
827 827 self.ui.warn(_("waiting for lock on %s held by %r\n") %
828 828 (desc, inst.locker))
829 829 # default to 600 seconds timeout
830 830 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
831 831 releasefn, desc=desc)
832 832 if acquirefn:
833 833 acquirefn()
834 834 return l
835 835
836 836 def lock(self, wait=True):
837 837 '''Lock the repository store (.hg/store) and return a weak reference
838 838 to the lock. Use this before modifying the store (e.g. committing or
839 839 stripping). If you are opening a transaction, get a lock as well.'''
840 840 l = self._lockref and self._lockref()
841 841 if l is not None and l.held:
842 842 l.lock()
843 843 return l
844 844
845 845 def unlock():
846 846 self.store.write()
847 847 for k, ce in self._filecache.items():
848 848 if k == 'dirstate':
849 849 continue
850 850 ce.refresh()
851 851
852 852 l = self._lock(self.sjoin("lock"), wait, unlock,
853 853 self.invalidate, _('repository %s') % self.origroot)
854 854 self._lockref = weakref.ref(l)
855 855 return l
856 856
857 857 def wlock(self, wait=True):
858 858 '''Lock the non-store parts of the repository (everything under
859 859 .hg except .hg/store) and return a weak reference to the lock.
860 860 Use this before modifying files in .hg.'''
861 861 l = self._wlockref and self._wlockref()
862 862 if l is not None and l.held:
863 863 l.lock()
864 864 return l
865 865
866 866 def unlock():
867 867 self.dirstate.write()
868 868 ce = self._filecache.get('dirstate')
869 869 if ce:
870 870 ce.refresh()
871 871
872 872 l = self._lock(self.join("wlock"), wait, unlock,
873 873 self.invalidatedirstate, _('working directory of %s') %
874 874 self.origroot)
875 875 self._wlockref = weakref.ref(l)
876 876 return l
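# Why the unlock() callbacks above call ce.refresh(): while a lock is held
# this process may rewrite files under .hg/ (or .hg/store), so on release
# the recorded stat data is brought up to date. Otherwise the next
# filecache access would see a "changed" file and needlessly rebuild an
# object this process itself just wrote. The store lock skips the
# 'dirstate' entry because that one is written out and refreshed by
# wlock's own unlock() callback.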
877 877
878 878 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
879 879 """
880 880 commit an individual file as part of a larger transaction
881 881 """
882 882
883 883 fname = fctx.path()
884 884 text = fctx.data()
885 885 flog = self.file(fname)
886 886 fparent1 = manifest1.get(fname, nullid)
887 887 fparent2 = fparent2o = manifest2.get(fname, nullid)
888 888
889 889 meta = {}
890 890 copy = fctx.renamed()
891 891 if copy and copy[0] != fname:
892 892 # Mark the new revision of this file as a copy of another
893 893 # file. This copy data will effectively act as a parent
894 894 # of this new revision. If this is a merge, the first
895 895 # parent will be the nullid (meaning "look up the copy data")
896 896 # and the second one will be the other parent. For example:
897 897 #
898 898 # 0 --- 1 --- 3 rev1 changes file foo
899 899 # \ / rev2 renames foo to bar and changes it
900 900 # \- 2 -/ rev3 should have bar with all changes and
901 901 # should record that bar descends from
902 902 # bar in rev2 and foo in rev1
903 903 #
904 904 # this allows this merge to succeed:
905 905 #
906 906 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
907 907 # \ / merging rev3 and rev4 should use bar@rev2
908 908 # \- 2 --- 4 as the merge base
909 909 #
910 910
911 911 cfname = copy[0]
912 912 crev = manifest1.get(cfname)
913 913 newfparent = fparent2
914 914
915 915 if manifest2: # branch merge
916 916 if fparent2 == nullid or crev is None: # copied on remote side
917 917 if cfname in manifest2:
918 918 crev = manifest2[cfname]
919 919 newfparent = fparent1
920 920
921 921 # find source in nearest ancestor if we've lost track
922 922 if not crev:
923 923 self.ui.debug(" %s: searching for copy revision for %s\n" %
924 924 (fname, cfname))
925 925 for ancestor in self[None].ancestors():
926 926 if cfname in ancestor:
927 927 crev = ancestor[cfname].filenode()
928 928 break
929 929
930 930 if crev:
931 931 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
932 932 meta["copy"] = cfname
933 933 meta["copyrev"] = hex(crev)
934 934 fparent1, fparent2 = nullid, newfparent
935 935 else:
936 936 self.ui.warn(_("warning: can't find ancestor for '%s' "
937 937 "copied from '%s'!\n") % (fname, cfname))
938 938
939 939 elif fparent2 != nullid:
940 940 # is one parent an ancestor of the other?
941 941 fparentancestor = flog.ancestor(fparent1, fparent2)
942 942 if fparentancestor == fparent1:
943 943 fparent1, fparent2 = fparent2, nullid
944 944 elif fparentancestor == fparent2:
945 945 fparent2 = nullid
946 946
947 947 # is the file changed?
948 948 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
949 949 changelist.append(fname)
950 950 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
951 951
952 952 # are just the flags changed during merge?
953 953 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
954 954 changelist.append(fname)
955 955
956 956 return fparent1
957 957
958 958 def commit(self, text="", user=None, date=None, match=None, force=False,
959 959 editor=False, extra={}):
960 960 """Add a new revision to current repository.
961 961
962 962 Revision information is gathered from the working directory,
963 963 match can be used to filter the committed files. If editor is
964 964 supplied, it is called to get a commit message.
965 965 """
966 966
967 967 def fail(f, msg):
968 968 raise util.Abort('%s: %s' % (f, msg))
969 969
970 970 if not match:
971 971 match = matchmod.always(self.root, '')
972 972
973 973 if not force:
974 974 vdirs = []
975 975 match.dir = vdirs.append
976 976 match.bad = fail
977 977
978 978 wlock = self.wlock()
979 979 try:
980 980 wctx = self[None]
981 981 merge = len(wctx.parents()) > 1
982 982
983 983 if (not force and merge and match and
984 984 (match.files() or match.anypats())):
985 985 raise util.Abort(_('cannot partially commit a merge '
986 986 '(do not specify files or patterns)'))
987 987
988 988 changes = self.status(match=match, clean=force)
989 989 if force:
990 990 changes[0].extend(changes[6]) # mq may commit unchanged files
991 991
992 992 # check subrepos
993 993 subs = []
994 994 removedsubs = set()
995 995 if '.hgsub' in wctx:
996 996 # only manage subrepos and .hgsubstate if .hgsub is present
997 997 for p in wctx.parents():
998 998 removedsubs.update(s for s in p.substate if match(s))
999 999 for s in wctx.substate:
1000 1000 removedsubs.discard(s)
1001 1001 if match(s) and wctx.sub(s).dirty():
1002 1002 subs.append(s)
1003 1003 if (subs or removedsubs):
1004 1004 if (not match('.hgsub') and
1005 1005 '.hgsub' in (wctx.modified() + wctx.added())):
1006 1006 raise util.Abort(
1007 1007 _("can't commit subrepos without .hgsub"))
1008 1008 if '.hgsubstate' not in changes[0]:
1009 1009 changes[0].insert(0, '.hgsubstate')
1010 1010 if '.hgsubstate' in changes[2]:
1011 1011 changes[2].remove('.hgsubstate')
1012 1012 elif '.hgsub' in changes[2]:
1013 1013 # clean up .hgsubstate when .hgsub is removed
1014 1014 if ('.hgsubstate' in wctx and
1015 1015 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1016 1016 changes[2].insert(0, '.hgsubstate')
1017 1017
1018 1018 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
1019 1019 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1020 1020 if changedsubs:
1021 1021 raise util.Abort(_("uncommitted changes in subrepo %s")
1022 1022 % changedsubs[0])
1023 1023
1024 1024 # make sure all explicit patterns are matched
1025 1025 if not force and match.files():
1026 1026 matched = set(changes[0] + changes[1] + changes[2])
1027 1027
1028 1028 for f in match.files():
1029 1029 if f == '.' or f in matched or f in wctx.substate:
1030 1030 continue
1031 1031 if f in changes[3]: # missing
1032 1032 fail(f, _('file not found!'))
1033 1033 if f in vdirs: # visited directory
1034 1034 d = f + '/'
1035 1035 for mf in matched:
1036 1036 if mf.startswith(d):
1037 1037 break
1038 1038 else:
1039 1039 fail(f, _("no match under directory!"))
1040 1040 elif f not in self.dirstate:
1041 1041 fail(f, _("file not tracked!"))
1042 1042
1043 1043 if (not force and not extra.get("close") and not merge
1044 1044 and not (changes[0] or changes[1] or changes[2])
1045 1045 and wctx.branch() == wctx.p1().branch()):
1046 1046 return None
1047 1047
1048 1048 ms = mergemod.mergestate(self)
1049 1049 for f in changes[0]:
1050 1050 if f in ms and ms[f] == 'u':
1051 1051 raise util.Abort(_("unresolved merge conflicts "
1052 1052 "(see hg help resolve)"))
1053 1053
1054 1054 cctx = context.workingctx(self, text, user, date, extra, changes)
1055 1055 if editor:
1056 1056 cctx._text = editor(self, cctx, subs)
1057 1057 edited = (text != cctx._text)
1058 1058
1059 1059 # commit subs
1060 1060 if subs or removedsubs:
1061 1061 state = wctx.substate.copy()
1062 1062 for s in sorted(subs):
1063 1063 sub = wctx.sub(s)
1064 1064 self.ui.status(_('committing subrepository %s\n') %
1065 1065 subrepo.subrelpath(sub))
1066 1066 sr = sub.commit(cctx._text, user, date)
1067 1067 state[s] = (state[s][0], sr)
1068 1068 subrepo.writestate(self, state)
1069 1069
1070 1070 # Save commit message in case this transaction gets rolled back
1071 1071 # (e.g. by a pretxncommit hook). Leave the content alone on
1072 1072 # the assumption that the user will use the same editor again.
1073 1073 msgfn = self.savecommitmessage(cctx._text)
1074 1074
1075 1075 p1, p2 = self.dirstate.parents()
1076 1076 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1077 1077 try:
1078 1078 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1079 1079 ret = self.commitctx(cctx, True)
1080 1080 except:
1081 1081 if edited:
1082 1082 self.ui.write(
1083 1083 _('note: commit message saved in %s\n') % msgfn)
1084 1084 raise
1085 1085
1086 1086 # update bookmarks, dirstate and mergestate
1087 1087 bookmarks.update(self, p1, ret)
1088 1088 for f in changes[0] + changes[1]:
1089 1089 self.dirstate.normal(f)
1090 1090 for f in changes[2]:
1091 1091 self.dirstate.drop(f)
1092 1092 self.dirstate.setparents(ret)
1093 1093 ms.reset()
1094 1094 finally:
1095 1095 wlock.release()
1096 1096
1097 1097 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1098 1098 return ret
1099 1099
1100 1100 def commitctx(self, ctx, error=False):
1101 1101 """Add a new revision to current repository.
1102 1102 Revision information is passed via the context argument.
1103 1103 """
1104 1104
1105 1105 tr = lock = None
1106 1106 removed = list(ctx.removed())
1107 1107 p1, p2 = ctx.p1(), ctx.p2()
1108 1108 user = ctx.user()
1109 1109
1110 1110 lock = self.lock()
1111 1111 try:
1112 1112 tr = self.transaction("commit")
1113 1113 trp = weakref.proxy(tr)
1114 1114
1115 1115 if ctx.files():
1116 1116 m1 = p1.manifest().copy()
1117 1117 m2 = p2.manifest()
1118 1118
1119 1119 # check in files
1120 1120 new = {}
1121 1121 changed = []
1122 1122 linkrev = len(self)
1123 1123 for f in sorted(ctx.modified() + ctx.added()):
1124 1124 self.ui.note(f + "\n")
1125 1125 try:
1126 1126 fctx = ctx[f]
1127 1127 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1128 1128 changed)
1129 1129 m1.set(f, fctx.flags())
1130 1130 except OSError, inst:
1131 1131 self.ui.warn(_("trouble committing %s!\n") % f)
1132 1132 raise
1133 1133 except IOError, inst:
1134 1134 errcode = getattr(inst, 'errno', errno.ENOENT)
1135 1135 if error or errcode and errcode != errno.ENOENT:
1136 1136 self.ui.warn(_("trouble committing %s!\n") % f)
1137 1137 raise
1138 1138 else:
1139 1139 removed.append(f)
1140 1140
1141 1141 # update manifest
1142 1142 m1.update(new)
1143 1143 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1144 1144 drop = [f for f in removed if f in m1]
1145 1145 for f in drop:
1146 1146 del m1[f]
1147 1147 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1148 1148 p2.manifestnode(), (new, drop))
1149 1149 files = changed + removed
1150 1150 else:
1151 1151 mn = p1.manifestnode()
1152 1152 files = []
1153 1153
1154 1154 # update changelog
1155 1155 self.changelog.delayupdate()
1156 1156 n = self.changelog.add(mn, files, ctx.description(),
1157 1157 trp, p1.node(), p2.node(),
1158 1158 user, ctx.date(), ctx.extra().copy())
1159 1159 p = lambda: self.changelog.writepending() and self.root or ""
1160 1160 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1161 1161 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1162 1162 parent2=xp2, pending=p)
1163 1163 self.changelog.finalize(trp)
1164 1164 tr.close()
1165 1165
1166 1166 if self._branchcache:
1167 1167 self.updatebranchcache()
1168 1168 return n
1169 1169 finally:
1170 1170 if tr:
1171 1171 tr.release()
1172 1172 lock.release()
1173 1173
1174 1174 def destroyed(self):
1175 1175 '''Inform the repository that nodes have been destroyed.
1176 1176 Intended for use by strip and rollback, so there's a common
1177 1177 place for anything that has to be done after destroying history.'''
1178 1178 # XXX it might be nice if we could take the list of destroyed
1179 1179 # nodes, but I don't see an easy way for rollback() to do that
1180 1180
1181 1181 # Ensure the persistent tag cache is updated. Doing it now
1182 1182 # means that the tag cache only has to worry about destroyed
1183 1183 # heads immediately after a strip/rollback. That in turn
1184 1184 # guarantees that "cachetip == currenttip" (comparing both rev
1185 1185 # and node) always means no nodes have been added or destroyed.
1186 1186
1187 1187 # XXX this is suboptimal when qrefresh'ing: we strip the current
1188 1188 # head, refresh the tag cache, then immediately add a new head.
1189 1189 # But I think doing it this way is necessary for the "instant
1190 1190 # tag cache retrieval" case to work.
1191 1191 self.invalidatecaches()
1192 1192
1193 1193 def walk(self, match, node=None):
1194 1194 '''
1195 1195 walk recursively through the directory tree or a given
1196 1196 changeset, finding all files matched by the match
1197 1197 function
1198 1198 '''
1199 1199 return self[node].walk(match)
1200 1200
1201 1201 def status(self, node1='.', node2=None, match=None,
1202 1202 ignored=False, clean=False, unknown=False,
1203 1203 listsubrepos=False):
1204 1204 """return status of files between two nodes or node and working directory
1205 1205
1206 1206 If node1 is None, use the first dirstate parent instead.
1207 1207 If node2 is None, compare node1 with working directory.
1208 1208 """
1209 1209
1210 1210 def mfmatches(ctx):
1211 1211 mf = ctx.manifest().copy()
1212 1212 for fn in mf.keys():
1213 1213 if not match(fn):
1214 1214 del mf[fn]
1215 1215 return mf
1216 1216
1217 1217 if isinstance(node1, context.changectx):
1218 1218 ctx1 = node1
1219 1219 else:
1220 1220 ctx1 = self[node1]
1221 1221 if isinstance(node2, context.changectx):
1222 1222 ctx2 = node2
1223 1223 else:
1224 1224 ctx2 = self[node2]
1225 1225
1226 1226 working = ctx2.rev() is None
1227 1227 parentworking = working and ctx1 == self['.']
1228 1228 match = match or matchmod.always(self.root, self.getcwd())
1229 1229 listignored, listclean, listunknown = ignored, clean, unknown
1230 1230
1231 1231 # load earliest manifest first for caching reasons
1232 1232 if not working and ctx2.rev() < ctx1.rev():
1233 1233 ctx2.manifest()
1234 1234
1235 1235 if not parentworking:
1236 1236 def bad(f, msg):
1237 1237 if f not in ctx1:
1238 1238 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1239 1239 match.bad = bad
1240 1240
1241 1241 if working: # we need to scan the working dir
1242 1242 subrepos = []
1243 1243 if '.hgsub' in self.dirstate:
1244 1244 subrepos = ctx2.substate.keys()
1245 1245 s = self.dirstate.status(match, subrepos, listignored,
1246 1246 listclean, listunknown)
1247 1247 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1248 1248
1249 1249 # check for any possibly clean files
1250 1250 if parentworking and cmp:
1251 1251 fixup = []
1252 1252 # do a full compare of any files that might have changed
1253 1253 for f in sorted(cmp):
1254 1254 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1255 1255 or ctx1[f].cmp(ctx2[f])):
1256 1256 modified.append(f)
1257 1257 else:
1258 1258 fixup.append(f)
1259 1259
1260 1260 # update dirstate for files that are actually clean
1261 1261 if fixup:
1262 1262 if listclean:
1263 1263 clean += fixup
1264 1264
1265 1265 try:
1266 1266 # updating the dirstate is optional
1267 1267 # so we don't wait on the lock
1268 1268 wlock = self.wlock(False)
1269 1269 try:
1270 1270 for f in fixup:
1271 1271 self.dirstate.normal(f)
1272 1272 finally:
1273 1273 wlock.release()
1274 1274 except error.LockError:
1275 1275 pass
1276 1276
1277 1277 if not parentworking:
1278 1278 mf1 = mfmatches(ctx1)
1279 1279 if working:
1280 1280 # we are comparing working dir against non-parent
1281 1281 # generate a pseudo-manifest for the working dir
1282 1282 mf2 = mfmatches(self['.'])
1283 1283 for f in cmp + modified + added:
1284 1284 mf2[f] = None
1285 1285 mf2.set(f, ctx2.flags(f))
1286 1286 for f in removed:
1287 1287 if f in mf2:
1288 1288 del mf2[f]
1289 1289 else:
1290 1290 # we are comparing two revisions
1291 1291 deleted, unknown, ignored = [], [], []
1292 1292 mf2 = mfmatches(ctx2)
1293 1293
1294 1294 modified, added, clean = [], [], []
1295 1295 for fn in mf2:
1296 1296 if fn in mf1:
1297 1297 if (fn not in deleted and
1298 1298 (mf1.flags(fn) != mf2.flags(fn) or
1299 1299 (mf1[fn] != mf2[fn] and
1300 1300 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1301 1301 modified.append(fn)
1302 1302 elif listclean:
1303 1303 clean.append(fn)
1304 1304 del mf1[fn]
1305 1305 elif fn not in deleted:
1306 1306 added.append(fn)
1307 1307 removed = mf1.keys()
1308 1308
1309 1309 r = modified, added, removed, deleted, unknown, ignored, clean
1310 1310
1311 1311 if listsubrepos:
1312 1312 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1313 1313 if working:
1314 1314 rev2 = None
1315 1315 else:
1316 1316 rev2 = ctx2.substate[subpath][1]
1317 1317 try:
1318 1318 submatch = matchmod.narrowmatcher(subpath, match)
1319 1319 s = sub.status(rev2, match=submatch, ignored=listignored,
1320 1320 clean=listclean, unknown=listunknown,
1321 1321 listsubrepos=True)
1322 1322 for rfiles, sfiles in zip(r, s):
1323 1323 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1324 1324 except error.LookupError:
1325 1325 self.ui.status(_("skipping missing subrepository: %s\n")
1326 1326 % subpath)
1327 1327
1328 1328 for l in r:
1329 1329 l.sort()
1330 1330 return r
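# status() returns a 7-tuple of sorted lists, in this order:
#   (modified, added, removed, deleted, unknown, ignored, clean)
# so e.g. repo.status()[0] lists modified files; tag() above relies on
# this layout when it scans self.status()[:5] for a dirty .hgtags.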
1331 1331
1332 1332 def heads(self, start=None):
1333 1333 heads = self.changelog.heads(start)
1334 1334 # sort the output in rev descending order
1335 1335 return sorted(heads, key=self.changelog.rev, reverse=True)
1336 1336
1337 1337 def branchheads(self, branch=None, start=None, closed=False):
1338 1338 '''return a (possibly filtered) list of heads for the given branch
1339 1339
1340 1340 Heads are returned in topological order, from newest to oldest.
1341 1341 If branch is None, use the dirstate branch.
1342 1342 If start is not None, return only heads reachable from start.
1343 1343 If closed is True, return heads that are marked as closed as well.
1344 1344 '''
1345 1345 if branch is None:
1346 1346 branch = self[None].branch()
1347 1347 branches = self.branchmap()
1348 1348 if branch not in branches:
1349 1349 return []
1350 1350 # the cache returns heads ordered lowest to highest
1351 1351 bheads = list(reversed(branches[branch]))
1352 1352 if start is not None:
1353 1353 # filter out the heads that cannot be reached from startrev
1354 1354 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1355 1355 bheads = [h for h in bheads if h in fbheads]
1356 1356 if not closed:
1357 1357 bheads = [h for h in bheads if
1358 1358 ('close' not in self.changelog.read(h)[5])]
1359 1359 return bheads
1360 1360
1361 1361 def branches(self, nodes):
1362 1362 if not nodes:
1363 1363 nodes = [self.changelog.tip()]
1364 1364 b = []
1365 1365 for n in nodes:
1366 1366 t = n
1367 1367 while True:
1368 1368 p = self.changelog.parents(n)
1369 1369 if p[1] != nullid or p[0] == nullid:
1370 1370 b.append((t, n, p[0], p[1]))
1371 1371 break
1372 1372 n = p[0]
1373 1373 return b
1374 1374
1375 1375 def between(self, pairs):
1376 1376 r = []
1377 1377
1378 1378 for top, bottom in pairs:
1379 1379 n, l, i = top, [], 0
1380 1380 f = 1
1381 1381
1382 1382 while n != bottom and n != nullid:
1383 1383 p = self.changelog.parents(n)[0]
1384 1384 if i == f:
1385 1385 l.append(n)
1386 1386 f = f * 2
1387 1387 n = p
1388 1388 i += 1
1389 1389
1390 1390 r.append(l)
1391 1391
1392 1392 return r
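# between() walks first parents from each top toward bottom, recording
# nodes at exponentially growing distances (1, 2, 4, 8, ... steps away).
# For a linear chain this yields a logarithmic-size sample of the range,
# which the older wire-protocol discovery uses to narrow down the common
# nodes without transferring the whole history graph.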
1393 1393
1394 1394 def pull(self, remote, heads=None, force=False):
1395 1395 lock = self.lock()
1396 1396 try:
1397 1397 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1398 1398 force=force)
1399 1399 common, fetch, rheads = tmp
1400 1400 if not fetch:
1401 1401 self.ui.status(_("no changes found\n"))
1402 1402 result = 0
1403 1403 else:
1404 1404 if heads is None and list(common) == [nullid]:
1405 1405 self.ui.status(_("requesting all changes\n"))
1406 1406 elif heads is None and remote.capable('changegroupsubset'):
1407 1407 # issue1320, avoid a race if remote changed after discovery
1408 1408 heads = rheads
1409 1409
1410 1410 if remote.capable('getbundle'):
1411 1411 cg = remote.getbundle('pull', common=common,
1412 1412 heads=heads or rheads)
1413 1413 elif heads is None:
1414 1414 cg = remote.changegroup(fetch, 'pull')
1415 1415 elif not remote.capable('changegroupsubset'):
1416 1416 raise util.Abort(_("partial pull cannot be done because "
1417 1417 "other repository doesn't support "
1418 1418 "changegroupsubset."))
1419 1419 else:
1420 1420 cg = remote.changegroupsubset(fetch, heads, 'pull')
1421 1421 result = self.addchangegroup(cg, 'pull', remote.url(),
1422 1422 lock=lock)
1423 1423 finally:
1424 1424 lock.release()
1425 1425
1426 1426 return result
1427 1427
1428 1428 def checkpush(self, force, revs):
1429 1429 """Extensions can override this function if additional checks have
1430 1430 to be performed before pushing, or call it if they override push
1431 1431 command.
1432 1432 """
1433 1433 pass
1434 1434
1435 1435 def push(self, remote, force=False, revs=None, newbranch=False):
1436 1436 '''Push outgoing changesets (limited by revs) from the current
1437 1437 repository to remote. Return an integer:
1438 1438 - 0 means HTTP error *or* nothing to push
1439 1439 - 1 means we pushed and remote head count is unchanged *or*
1440 1440 we have outgoing changesets but refused to push
1441 1441 - other values as described by addchangegroup()
1442 1442 '''
1443 1443 # there are two ways to push to remote repo:
1444 1444 #
1445 1445 # addchangegroup assumes local user can lock remote
1446 1446 # repo (local filesystem, old ssh servers).
1447 1447 #
1448 1448 # unbundle assumes local user cannot lock remote repo (new ssh
1449 1449 # servers, http servers).
1450 1450
1451 1451 self.checkpush(force, revs)
1452 1452 lock = None
1453 1453 unbundle = remote.capable('unbundle')
1454 1454 if not unbundle:
1455 1455 lock = remote.lock()
1456 1456 try:
1457 1457 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1458 1458 newbranch)
1459 1459 ret = remote_heads
1460 1460 if cg is not None:
1461 1461 if unbundle:
1462 1462 # local repo finds heads on server, finds out what
1463 1463 # revs it must push. once revs transferred, if server
1464 1464 # finds it has different heads (someone else won
1465 1465 # commit/push race), server aborts.
1466 1466 if force:
1467 1467 remote_heads = ['force']
1468 1468 # ssh: return remote's addchangegroup()
1469 1469 # http: return remote's addchangegroup() or 0 for error
1470 1470 ret = remote.unbundle(cg, remote_heads, 'push')
1471 1471 else:
1472 1472 # we return an integer indicating remote head count change
1473 1473 ret = remote.addchangegroup(cg, 'push', self.url(),
1474 1474 lock=lock)
1475 1475 finally:
1476 1476 if lock is not None:
1477 1477 lock.release()
1478 1478
1479 1479 self.ui.debug("checking for updated bookmarks\n")
1480 1480 rb = remote.listkeys('bookmarks')
1481 1481 for k in rb.keys():
1482 1482 if k in self._bookmarks:
1483 1483 nr, nl = rb[k], hex(self._bookmarks[k])
1484 1484 if nr in self:
1485 1485 cr = self[nr]
1486 1486 cl = self[nl]
1487 1487 if cl in cr.descendants():
1488 1488 r = remote.pushkey('bookmarks', k, nr, nl)
1489 1489 if r:
1490 1490 self.ui.status(_("updating bookmark %s\n") % k)
1491 1491 else:
1492 1492 self.ui.warn(_('updating bookmark %s'
1493 1493 ' failed!\n') % k)
1494 1494
1495 1495 return ret
1496 1496
1497 1497 def changegroupinfo(self, nodes, source):
1498 1498 if self.ui.verbose or source == 'bundle':
1499 1499 self.ui.status(_("%d changesets found\n") % len(nodes))
1500 1500 if self.ui.debugflag:
1501 1501 self.ui.debug("list of changesets:\n")
1502 1502 for node in nodes:
1503 1503 self.ui.debug("%s\n" % hex(node))
1504 1504
1505 1505 def changegroupsubset(self, bases, heads, source):
1506 1506 """Compute a changegroup consisting of all the nodes that are
1507 1507 descendants of any of the bases and ancestors of any of the heads.
1508 1508 Return a chunkbuffer object whose read() method will return
1509 1509 successive changegroup chunks.
1510 1510
1511 1511 It is fairly complex as determining which filenodes and which
1512 1512 manifest nodes need to be included for the changeset to be complete
1513 1513 is non-trivial.
1514 1514
1515 1515 Another wrinkle is doing the reverse, figuring out which changeset in
1516 1516 the changegroup a particular filenode or manifestnode belongs to.
1517 1517 """
1518 1518 cl = self.changelog
1519 1519 if not bases:
1520 1520 bases = [nullid]
1521 1521 csets, bases, heads = cl.nodesbetween(bases, heads)
1522 1522 # We assume that all ancestors of bases are known
1523 1523 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1524 1524 return self._changegroupsubset(common, csets, heads, source)
1525 1525
1526 1526 def getbundle(self, source, heads=None, common=None):
1527 1527 """Like changegroupsubset, but returns the set difference between the
1528 1528 ancestors of heads and the ancestors of common.
1529 1529
1530 1530 If heads is None, use the local heads. If common is None, use [nullid].
1531 1531
1532 1532 The nodes in common might not all be known locally due to the way the
1533 1533 current discovery protocol works.
1534 1534 """
1535 1535 cl = self.changelog
1536 1536 if common:
1537 1537 nm = cl.nodemap
1538 1538 common = [n for n in common if n in nm]
1539 1539 else:
1540 1540 common = [nullid]
1541 1541 if not heads:
1542 1542 heads = cl.heads()
1543 1543 common, missing = cl.findcommonmissing(common, heads)
1544 1544 if not missing:
1545 1545 return None
1546 1546 return self._changegroupsubset(common, missing, heads, source)
1547 1547
1548 1548 def _changegroupsubset(self, commonrevs, csets, heads, source):
1549 1549
1550 1550 cl = self.changelog
1551 1551 mf = self.manifest
1552 1552 mfs = {} # needed manifests
1553 1553 fnodes = {} # needed file nodes
1554 1554 changedfiles = set()
1555 1555 fstate = ['', {}]
1556 1556 count = [0]
1557 1557
1558 1558 # can we go through the fast path ?
1559 1559 heads.sort()
1560 1560 if heads == sorted(self.heads()):
1561 1561 return self._changegroup(csets, source)
1562 1562
1563 1563 # slow path
1564 1564 self.hook('preoutgoing', throw=True, source=source)
1565 1565 self.changegroupinfo(csets, source)
1566 1566
1567 1567 # filter any nodes that claim to be part of the known set
1568 1568 def prune(revlog, missing):
1569 1569 return [n for n in missing
1570 1570 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1571 1571
1572 1572 def lookup(revlog, x):
1573 1573 if revlog == cl:
1574 1574 c = cl.read(x)
1575 1575 changedfiles.update(c[3])
1576 1576 mfs.setdefault(c[0], x)
1577 1577 count[0] += 1
1578 1578 self.ui.progress(_('bundling'), count[0],
1579 1579 unit=_('changesets'), total=len(csets))
1580 1580 return x
1581 1581 elif revlog == mf:
1582 1582 clnode = mfs[x]
1583 1583 mdata = mf.readfast(x)
1584 1584 for f in changedfiles:
1585 1585 if f in mdata:
1586 1586 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1587 1587 count[0] += 1
1588 1588 self.ui.progress(_('bundling'), count[0],
1589 1589 unit=_('manifests'), total=len(mfs))
1590 1590 return mfs[x]
1591 1591 else:
1592 1592 self.ui.progress(
1593 1593 _('bundling'), count[0], item=fstate[0],
1594 1594 unit=_('files'), total=len(changedfiles))
1595 1595 return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
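        # When bundle.reorder is 'auto' (None here) the revlog layer decides
        # whether to reorder nodes for better deltas; an explicit boolean
        # forces or forbids reordering.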

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

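        # Wrap the generated chunks in a chunkbuffer so callers get a
        # read()-able stream; 'UN' marks the payload as uncompressed.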
        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]
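        # A revlog revision belongs in the bundle exactly when its linkrev
        # points at one of the outgoing changesets.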

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))
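        # Unlike the subset case, the owning changeset can be recovered
        # directly from the linkrev, so no per-file bookkeeping is needed.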

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
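        # For example, a pull that adds two new heads returns 3 (dh=2 plus
        # one), while one that eliminates a head returns -2 (dh=-1 minus one).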
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr
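            # pr is invoked by the bundle source as chunks are read; its
            # step/count/total attributes are rebound below as the
            # changeset, manifest and file phases go by.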

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
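                # writepending() flushes the delayed changelog data to a
                # pending file; handing the repo root to the hook machinery
                # lets pretxnchangegroup hooks see the incoming changesets
                # before the transaction commits.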
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
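            # Each entry on the wire is a header line of the form
            # 'name\0size' followed by exactly size bytes of raw store data.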
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

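        # Streaming is attempted only for full clones (no explicit heads);
        # if the server's stream requirements are unsupported, fall back to
        # a regular pull below.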
        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
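        # Persist the message in .hg/last-message.txt so it can be recovered
        # later (e.g. after a failed commit); the caller gets back the path
        # relative to the current working directory.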
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

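# undoname maps a journal file written during a transaction to the undo
# file consulted by 'hg rollback' once the transaction has closed.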
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True