windows: use normalized path to check repository nesting...
FUJIWARA Katsunori
r15722:417127af stable
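
On Windows, os.sep is '\', but Mercurial stores subrepository paths (the
keys of ctx.substate, taken from .hgsub) with '/' separators. Before this
change, _checknested() joined path components with os.sep and compared the
result directly against substate keys and against subpath, so on Windows
the comparison never matched. The fix joins prefixes with '/' and compares
the full prefix against a normalized copy of the subpath (util.pconvert
converts os.sep to '/'). Below is a minimal sketch of the mismatch;
pconvert is a simplified stand-in for Mercurial's util.pconvert, and the
substate contents are hypothetical:

    def pconvert(path, sep='\\'):
        # Simplified stand-in for Mercurial's util.pconvert: on Windows it
        # converts os.sep ('\\') to '/'; on POSIX it is a no-op. The Windows
        # separator is hard-coded here so the sketch behaves the same on
        # every platform.
        return path.replace(sep, '/')

    # Hypothetical substate: keys always use '/' (they come from .hgsub)
    substate = {'sub/nested': ('../nested', 'f' * 40, 'hg')}

    # A path derived from the Windows filesystem arrives with backslashes
    subpath = 'sub\\nested'

    print subpath in substate              # False: '\' vs '/' mismatch
    print pconvert(subpath) in substate    # True after normalization

Because prefixes are now built with '/'.join(parts), the final equality
test must also use the normalized path, which is why the loop compares
prefix == normsubpath instead of prefix == subpath.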
@@ -1,2101 +1,2102 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=False):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 35 self.opener = scmutil.opener(self.path)
36 36 self.wopener = scmutil.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39
40 40 try:
41 41 self.ui.readconfig(self.join("hgrc"), self.root)
42 42 extensions.loadall(self.ui)
43 43 except IOError:
44 44 pass
45 45
46 46 if not os.path.isdir(self.path):
47 47 if create:
48 48 if not os.path.exists(path):
49 49 util.makedirs(path)
50 50 util.makedir(self.path, notindexed=True)
51 51 requirements = ["revlogv1"]
52 52 if self.ui.configbool('format', 'usestore', True):
53 53 os.mkdir(os.path.join(self.path, "store"))
54 54 requirements.append("store")
55 55 if self.ui.configbool('format', 'usefncache', True):
56 56 requirements.append("fncache")
57 57 if self.ui.configbool('format', 'dotencode', True):
58 58 requirements.append('dotencode')
59 59 # create an invalid changelog
60 60 self.opener.append(
61 61 "00changelog.i",
62 62 '\0\0\0\2' # represents revlogv2
63 63 ' dummy changelog to prevent using the old repo layout'
64 64 )
65 65 if self.ui.configbool('format', 'generaldelta', False):
66 66 requirements.append("generaldelta")
67 67 requirements = set(requirements)
68 68 else:
69 69 raise error.RepoError(_("repository %s not found") % path)
70 70 elif create:
71 71 raise error.RepoError(_("repository %s already exists") % path)
72 72 else:
73 73 try:
74 74 requirements = scmutil.readrequires(self.opener, self.supported)
75 75 except IOError, inst:
76 76 if inst.errno != errno.ENOENT:
77 77 raise
78 78 requirements = set()
79 79
80 80 self.sharedpath = self.path
81 81 try:
82 82 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
83 83 if not os.path.exists(s):
84 84 raise error.RepoError(
85 85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 86 self.sharedpath = s
87 87 except IOError, inst:
88 88 if inst.errno != errno.ENOENT:
89 89 raise
90 90
91 91 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
92 92 self.spath = self.store.path
93 93 self.sopener = self.store.opener
94 94 self.sjoin = self.store.join
95 95 self.opener.createmode = self.store.createmode
96 96 self._applyrequirements(requirements)
97 97 if create:
98 98 self._writerequirements()
99 99
100 100
101 101 self._branchcache = None
102 102 self._branchcachetip = None
103 103 self.filterpats = {}
104 104 self._datafilters = {}
105 105 self._transref = self._lockref = self._wlockref = None
106 106
107 107 # A cache for various files under .hg/ that tracks file changes,
108 108 # (used by the filecache decorator)
109 109 #
110 110 # Maps a property name to its util.filecacheentry
111 111 self._filecache = {}
112 112
113 113 def _applyrequirements(self, requirements):
114 114 self.requirements = requirements
115 115 openerreqs = set(('revlogv1', 'generaldelta'))
116 116 self.sopener.options = dict((r, 1) for r in requirements
117 117 if r in openerreqs)
118 118
119 119 def _writerequirements(self):
120 120 reqfile = self.opener("requires", "w")
121 121 for r in self.requirements:
122 122 reqfile.write("%s\n" % r)
123 123 reqfile.close()
124 124
125 125 def _checknested(self, path):
126 126 """Determine if path is a legal nested repository."""
127 127 if not path.startswith(self.root):
128 128 return False
129 129 subpath = path[len(self.root) + 1:]
130 normsubpath = util.pconvert(subpath)
130 131
131 132 # XXX: Checking against the current working copy is wrong in
132 133 # the sense that it can reject things like
133 134 #
134 135 # $ hg cat -r 10 sub/x.txt
135 136 #
136 137 # if sub/ is no longer a subrepository in the working copy
137 138 # parent revision.
138 139 #
139 140 # However, it can of course also allow things that would have
140 141 # been rejected before, such as the above cat command if sub/
141 142 # is a subrepository now, but was a normal directory before.
142 143 # The old path auditor would have rejected by mistake since it
143 144 # panics when it sees sub/.hg/.
144 145 #
145 146 # All in all, checking against the working copy seems sensible
146 147 # since we want to prevent access to nested repositories on
147 148 # the filesystem *now*.
148 149 ctx = self[None]
149 150 parts = util.splitpath(subpath)
150 151 while parts:
151 prefix = os.sep.join(parts)
152 prefix = '/'.join(parts)
152 153 if prefix in ctx.substate:
153 if prefix == subpath:
154 if prefix == normsubpath:
154 155 return True
155 156 else:
156 157 sub = ctx.sub(prefix)
157 158 return sub.checknested(subpath[len(prefix) + 1:])
158 159 else:
159 160 parts.pop()
160 161 return False
161 162
162 163 @filecache('bookmarks')
163 164 def _bookmarks(self):
164 165 return bookmarks.read(self)
165 166
166 167 @filecache('bookmarks.current')
167 168 def _bookmarkcurrent(self):
168 169 return bookmarks.readcurrent(self)
169 170
170 171 def _writebookmarks(self, marks):
171 172 bookmarks.write(self)
172 173
173 174 @filecache('00changelog.i', True)
174 175 def changelog(self):
175 176 c = changelog.changelog(self.sopener)
176 177 if 'HG_PENDING' in os.environ:
177 178 p = os.environ['HG_PENDING']
178 179 if p.startswith(self.root):
179 180 c.readpending('00changelog.i.a')
180 181 return c
181 182
182 183 @filecache('00manifest.i', True)
183 184 def manifest(self):
184 185 return manifest.manifest(self.sopener)
185 186
186 187 @filecache('dirstate')
187 188 def dirstate(self):
188 189 warned = [0]
189 190 def validate(node):
190 191 try:
191 192 self.changelog.rev(node)
192 193 return node
193 194 except error.LookupError:
194 195 if not warned[0]:
195 196 warned[0] = True
196 197 self.ui.warn(_("warning: ignoring unknown"
197 198 " working parent %s!\n") % short(node))
198 199 return nullid
199 200
200 201 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
201 202
202 203 def __getitem__(self, changeid):
203 204 if changeid is None:
204 205 return context.workingctx(self)
205 206 return context.changectx(self, changeid)
206 207
207 208 def __contains__(self, changeid):
208 209 try:
209 210 return bool(self.lookup(changeid))
210 211 except error.RepoLookupError:
211 212 return False
212 213
213 214 def __nonzero__(self):
214 215 return True
215 216
216 217 def __len__(self):
217 218 return len(self.changelog)
218 219
219 220 def __iter__(self):
220 221 for i in xrange(len(self)):
221 222 yield i
222 223
223 224 def set(self, expr, *args):
224 225 '''
225 226 Yield a context for each matching revision, after doing arg
226 227 replacement via revset.formatspec
227 228 '''
228 229
229 230 expr = revset.formatspec(expr, *args)
230 231 m = revset.match(None, expr)
231 232 for r in m(self, range(len(self))):
232 233 yield self[r]
233 234
234 235 def url(self):
235 236 return 'file:' + self.root
236 237
237 238 def hook(self, name, throw=False, **args):
238 239 return hook.hook(self.ui, self, name, throw, **args)
239 240
240 241 tag_disallowed = ':\r\n'
241 242
242 243 def _tag(self, names, node, message, local, user, date, extra={}):
243 244 if isinstance(names, str):
244 245 allchars = names
245 246 names = (names,)
246 247 else:
247 248 allchars = ''.join(names)
248 249 for c in self.tag_disallowed:
249 250 if c in allchars:
250 251 raise util.Abort(_('%r cannot be used in a tag name') % c)
251 252
252 253 branches = self.branchmap()
253 254 for name in names:
254 255 self.hook('pretag', throw=True, node=hex(node), tag=name,
255 256 local=local)
256 257 if name in branches:
257 258 self.ui.warn(_("warning: tag %s conflicts with existing"
258 259 " branch name\n") % name)
259 260
260 261 def writetags(fp, names, munge, prevtags):
261 262 fp.seek(0, 2)
262 263 if prevtags and prevtags[-1] != '\n':
263 264 fp.write('\n')
264 265 for name in names:
265 266 m = munge and munge(name) or name
266 267 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
267 268 old = self.tags().get(name, nullid)
268 269 fp.write('%s %s\n' % (hex(old), m))
269 270 fp.write('%s %s\n' % (hex(node), m))
270 271 fp.close()
271 272
272 273 prevtags = ''
273 274 if local:
274 275 try:
275 276 fp = self.opener('localtags', 'r+')
276 277 except IOError:
277 278 fp = self.opener('localtags', 'a')
278 279 else:
279 280 prevtags = fp.read()
280 281
281 282 # local tags are stored in the current charset
282 283 writetags(fp, names, None, prevtags)
283 284 for name in names:
284 285 self.hook('tag', node=hex(node), tag=name, local=local)
285 286 return
286 287
287 288 try:
288 289 fp = self.wfile('.hgtags', 'rb+')
289 290 except IOError, e:
290 291 if e.errno != errno.ENOENT:
291 292 raise
292 293 fp = self.wfile('.hgtags', 'ab')
293 294 else:
294 295 prevtags = fp.read()
295 296
296 297 # committed tags are stored in UTF-8
297 298 writetags(fp, names, encoding.fromlocal, prevtags)
298 299
299 300 fp.close()
300 301
301 302 if '.hgtags' not in self.dirstate:
302 303 self[None].add(['.hgtags'])
303 304
304 305 m = matchmod.exact(self.root, '', ['.hgtags'])
305 306 tagnode = self.commit(message, user, date, extra=extra, match=m)
306 307
307 308 for name in names:
308 309 self.hook('tag', node=hex(node), tag=name, local=local)
309 310
310 311 return tagnode
311 312
312 313 def tag(self, names, node, message, local, user, date):
313 314 '''tag a revision with one or more symbolic names.
314 315
315 316 names is a list of strings or, when adding a single tag, names may be a
316 317 string.
317 318
318 319 if local is True, the tags are stored in a per-repository file.
319 320 otherwise, they are stored in the .hgtags file, and a new
320 321 changeset is committed with the change.
321 322
322 323 keyword arguments:
323 324
324 325 local: whether to store tags in non-version-controlled file
325 326 (default False)
326 327
327 328 message: commit message to use if committing
328 329
329 330 user: name of user to use if committing
330 331
331 332 date: date tuple to use if committing'''
332 333
333 334 if not local:
334 335 for x in self.status()[:5]:
335 336 if '.hgtags' in x:
336 337 raise util.Abort(_('working copy of .hgtags is changed '
337 338 '(please commit .hgtags manually)'))
338 339
339 340 self.tags() # instantiate the cache
340 341 self._tag(names, node, message, local, user, date)
341 342
342 343 @propertycache
343 344 def _tagscache(self):
344 345 '''Returns a tagscache object that contains various tags-related caches.'''
345 346
346 347 # This simplifies its cache management by having one decorated
347 348 # function (this one) and the rest simply fetch things from it.
348 349 class tagscache(object):
349 350 def __init__(self):
350 351 # These two define the set of tags for this repository. tags
351 352 # maps tag name to node; tagtypes maps tag name to 'global' or
352 353 # 'local'. (Global tags are defined by .hgtags across all
353 354 # heads, and local tags are defined in .hg/localtags.)
354 355 # They constitute the in-memory cache of tags.
355 356 self.tags = self.tagtypes = None
356 357
357 358 self.nodetagscache = self.tagslist = None
358 359
359 360 cache = tagscache()
360 361 cache.tags, cache.tagtypes = self._findtags()
361 362
362 363 return cache
363 364
364 365 def tags(self):
365 366 '''return a mapping of tag to node'''
366 367 return self._tagscache.tags
367 368
368 369 def _findtags(self):
369 370 '''Do the hard work of finding tags. Return a pair of dicts
370 371 (tags, tagtypes) where tags maps tag name to node, and tagtypes
371 372 maps tag name to a string like \'global\' or \'local\'.
372 373 Subclasses or extensions are free to add their own tags, but
373 374 should be aware that the returned dicts will be retained for the
374 375 duration of the localrepo object.'''
375 376
376 377 # XXX what tagtype should subclasses/extensions use? Currently
377 378 # mq and bookmarks add tags, but do not set the tagtype at all.
378 379 # Should each extension invent its own tag type? Should there
379 380 # be one tagtype for all such "virtual" tags? Or is the status
380 381 # quo fine?
381 382
382 383 alltags = {} # map tag name to (node, hist)
383 384 tagtypes = {}
384 385
385 386 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
386 387 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
387 388
388 389 # Build the return dicts. Have to re-encode tag names because
389 390 # the tags module always uses UTF-8 (in order not to lose info
390 391 # writing to the cache), but the rest of Mercurial wants them in
391 392 # local encoding.
392 393 tags = {}
393 394 for (name, (node, hist)) in alltags.iteritems():
394 395 if node != nullid:
395 396 try:
396 397 # ignore tags to unknown nodes
397 398 self.changelog.lookup(node)
398 399 tags[encoding.tolocal(name)] = node
399 400 except error.LookupError:
400 401 pass
401 402 tags['tip'] = self.changelog.tip()
402 403 tagtypes = dict([(encoding.tolocal(name), value)
403 404 for (name, value) in tagtypes.iteritems()])
404 405 return (tags, tagtypes)
405 406
406 407 def tagtype(self, tagname):
407 408 '''
408 409 return the type of the given tag. result can be:
409 410
410 411 'local' : a local tag
411 412 'global' : a global tag
412 413 None : tag does not exist
413 414 '''
414 415
415 416 return self._tagscache.tagtypes.get(tagname)
416 417
417 418 def tagslist(self):
418 419 '''return a list of tags ordered by revision'''
419 420 if not self._tagscache.tagslist:
420 421 l = []
421 422 for t, n in self.tags().iteritems():
422 423 r = self.changelog.rev(n)
423 424 l.append((r, t, n))
424 425 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
425 426
426 427 return self._tagscache.tagslist
427 428
428 429 def nodetags(self, node):
429 430 '''return the tags associated with a node'''
430 431 if not self._tagscache.nodetagscache:
431 432 nodetagscache = {}
432 433 for t, n in self.tags().iteritems():
433 434 nodetagscache.setdefault(n, []).append(t)
434 435 for tags in nodetagscache.itervalues():
435 436 tags.sort()
436 437 self._tagscache.nodetagscache = nodetagscache
437 438 return self._tagscache.nodetagscache.get(node, [])
438 439
439 440 def nodebookmarks(self, node):
440 441 marks = []
441 442 for bookmark, n in self._bookmarks.iteritems():
442 443 if n == node:
443 444 marks.append(bookmark)
444 445 return sorted(marks)
445 446
446 447 def _branchtags(self, partial, lrev):
447 448 # TODO: rename this function?
448 449 tiprev = len(self) - 1
449 450 if lrev != tiprev:
450 451 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
451 452 self._updatebranchcache(partial, ctxgen)
452 453 self._writebranchcache(partial, self.changelog.tip(), tiprev)
453 454
454 455 return partial
455 456
456 457 def updatebranchcache(self):
457 458 tip = self.changelog.tip()
458 459 if self._branchcache is not None and self._branchcachetip == tip:
459 460 return self._branchcache
460 461
461 462 oldtip = self._branchcachetip
462 463 self._branchcachetip = tip
463 464 if oldtip is None or oldtip not in self.changelog.nodemap:
464 465 partial, last, lrev = self._readbranchcache()
465 466 else:
466 467 lrev = self.changelog.rev(oldtip)
467 468 partial = self._branchcache
468 469
469 470 self._branchtags(partial, lrev)
470 471 # this private cache holds all heads (not just tips)
471 472 self._branchcache = partial
472 473
473 474 def branchmap(self):
474 475 '''returns a dictionary {branch: [branchheads]}'''
475 476 self.updatebranchcache()
476 477 return self._branchcache
477 478
478 479 def branchtags(self):
479 480 '''return a dict where branch names map to the tipmost head of
480 481 the branch; open heads come before closed'''
481 482 bt = {}
482 483 for bn, heads in self.branchmap().iteritems():
483 484 tip = heads[-1]
484 485 for h in reversed(heads):
485 486 if 'close' not in self.changelog.read(h)[5]:
486 487 tip = h
487 488 break
488 489 bt[bn] = tip
489 490 return bt
490 491
491 492 def _readbranchcache(self):
492 493 partial = {}
493 494 try:
494 495 f = self.opener("cache/branchheads")
495 496 lines = f.read().split('\n')
496 497 f.close()
497 498 except (IOError, OSError):
498 499 return {}, nullid, nullrev
499 500
500 501 try:
501 502 last, lrev = lines.pop(0).split(" ", 1)
502 503 last, lrev = bin(last), int(lrev)
503 504 if lrev >= len(self) or self[lrev].node() != last:
504 505 # invalidate the cache
505 506 raise ValueError('invalidating branch cache (tip differs)')
506 507 for l in lines:
507 508 if not l:
508 509 continue
509 510 node, label = l.split(" ", 1)
510 511 label = encoding.tolocal(label.strip())
511 512 partial.setdefault(label, []).append(bin(node))
512 513 except KeyboardInterrupt:
513 514 raise
514 515 except Exception, inst:
515 516 if self.ui.debugflag:
516 517 self.ui.warn(str(inst), '\n')
517 518 partial, last, lrev = {}, nullid, nullrev
518 519 return partial, last, lrev
519 520
520 521 def _writebranchcache(self, branches, tip, tiprev):
521 522 try:
522 523 f = self.opener("cache/branchheads", "w", atomictemp=True)
523 524 f.write("%s %s\n" % (hex(tip), tiprev))
524 525 for label, nodes in branches.iteritems():
525 526 for node in nodes:
526 527 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
527 528 f.close()
528 529 except (IOError, OSError):
529 530 pass
530 531
531 532 def _updatebranchcache(self, partial, ctxgen):
532 533 # collect new branch entries
533 534 newbranches = {}
534 535 for c in ctxgen:
535 536 newbranches.setdefault(c.branch(), []).append(c.node())
536 537 # if older branchheads are reachable from new ones, they aren't
537 538 # really branchheads. Note checking parents is insufficient:
538 539 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
539 540 for branch, newnodes in newbranches.iteritems():
540 541 bheads = partial.setdefault(branch, [])
541 542 bheads.extend(newnodes)
542 543 if len(bheads) <= 1:
543 544 continue
544 545 bheads = sorted(bheads, key=lambda x: self[x].rev())
545 546 # starting from tip means fewer passes over reachable
546 547 while newnodes:
547 548 latest = newnodes.pop()
548 549 if latest not in bheads:
549 550 continue
550 551 minbhrev = self[bheads[0]].node()
551 552 reachable = self.changelog.reachable(latest, minbhrev)
552 553 reachable.remove(latest)
553 554 if reachable:
554 555 bheads = [b for b in bheads if b not in reachable]
555 556 partial[branch] = bheads
556 557
557 558 def lookup(self, key):
558 559 if isinstance(key, int):
559 560 return self.changelog.node(key)
560 561 elif key == '.':
561 562 return self.dirstate.p1()
562 563 elif key == 'null':
563 564 return nullid
564 565 elif key == 'tip':
565 566 return self.changelog.tip()
566 567 n = self.changelog._match(key)
567 568 if n:
568 569 return n
569 570 if key in self._bookmarks:
570 571 return self._bookmarks[key]
571 572 if key in self.tags():
572 573 return self.tags()[key]
573 574 if key in self.branchtags():
574 575 return self.branchtags()[key]
575 576 n = self.changelog._partialmatch(key)
576 577 if n:
577 578 return n
578 579
579 580 # can't find key, check if it might have come from damaged dirstate
580 581 if key in self.dirstate.parents():
581 582 raise error.Abort(_("working directory has unknown parent '%s'!")
582 583 % short(key))
583 584 try:
584 585 if len(key) == 20:
585 586 key = hex(key)
586 587 except TypeError:
587 588 pass
588 589 raise error.RepoLookupError(_("unknown revision '%s'") % key)
589 590
590 591 def lookupbranch(self, key, remote=None):
591 592 repo = remote or self
592 593 if key in repo.branchmap():
593 594 return key
594 595
595 596 repo = (remote and remote.local()) and remote or self
596 597 return repo[key].branch()
597 598
598 599 def known(self, nodes):
599 600 nm = self.changelog.nodemap
600 601 return [(n in nm) for n in nodes]
601 602
602 603 def local(self):
603 604 return self
604 605
605 606 def join(self, f):
606 607 return os.path.join(self.path, f)
607 608
608 609 def wjoin(self, f):
609 610 return os.path.join(self.root, f)
610 611
611 612 def file(self, f):
612 613 if f[0] == '/':
613 614 f = f[1:]
614 615 return filelog.filelog(self.sopener, f)
615 616
616 617 def changectx(self, changeid):
617 618 return self[changeid]
618 619
619 620 def parents(self, changeid=None):
620 621 '''get list of changectxs for parents of changeid'''
621 622 return self[changeid].parents()
622 623
623 624 def filectx(self, path, changeid=None, fileid=None):
624 625 """changeid can be a changeset revision, node, or tag.
625 626 fileid can be a file revision or node."""
626 627 return context.filectx(self, path, changeid, fileid)
627 628
628 629 def getcwd(self):
629 630 return self.dirstate.getcwd()
630 631
631 632 def pathto(self, f, cwd=None):
632 633 return self.dirstate.pathto(f, cwd)
633 634
634 635 def wfile(self, f, mode='r'):
635 636 return self.wopener(f, mode)
636 637
637 638 def _link(self, f):
638 639 return os.path.islink(self.wjoin(f))
639 640
640 641 def _loadfilter(self, filter):
641 642 if filter not in self.filterpats:
642 643 l = []
643 644 for pat, cmd in self.ui.configitems(filter):
644 645 if cmd == '!':
645 646 continue
646 647 mf = matchmod.match(self.root, '', [pat])
647 648 fn = None
648 649 params = cmd
649 650 for name, filterfn in self._datafilters.iteritems():
650 651 if cmd.startswith(name):
651 652 fn = filterfn
652 653 params = cmd[len(name):].lstrip()
653 654 break
654 655 if not fn:
655 656 fn = lambda s, c, **kwargs: util.filter(s, c)
656 657 # Wrap old filters not supporting keyword arguments
657 658 if not inspect.getargspec(fn)[2]:
658 659 oldfn = fn
659 660 fn = lambda s, c, **kwargs: oldfn(s, c)
660 661 l.append((mf, fn, params))
661 662 self.filterpats[filter] = l
662 663 return self.filterpats[filter]
663 664
664 665 def _filter(self, filterpats, filename, data):
665 666 for mf, fn, cmd in filterpats:
666 667 if mf(filename):
667 668 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
668 669 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
669 670 break
670 671
671 672 return data
672 673
673 674 @propertycache
674 675 def _encodefilterpats(self):
675 676 return self._loadfilter('encode')
676 677
677 678 @propertycache
678 679 def _decodefilterpats(self):
679 680 return self._loadfilter('decode')
680 681
681 682 def adddatafilter(self, name, filter):
682 683 self._datafilters[name] = filter
683 684
684 685 def wread(self, filename):
685 686 if self._link(filename):
686 687 data = os.readlink(self.wjoin(filename))
687 688 else:
688 689 data = self.wopener.read(filename)
689 690 return self._filter(self._encodefilterpats, filename, data)
690 691
691 692 def wwrite(self, filename, data, flags):
692 693 data = self._filter(self._decodefilterpats, filename, data)
693 694 if 'l' in flags:
694 695 self.wopener.symlink(data, filename)
695 696 else:
696 697 self.wopener.write(filename, data)
697 698 if 'x' in flags:
698 699 util.setflags(self.wjoin(filename), False, True)
699 700
700 701 def wwritedata(self, filename, data):
701 702 return self._filter(self._decodefilterpats, filename, data)
702 703
703 704 def transaction(self, desc):
704 705 tr = self._transref and self._transref() or None
705 706 if tr and tr.running():
706 707 return tr.nest()
707 708
708 709 # abort here if the journal already exists
709 710 if os.path.exists(self.sjoin("journal")):
710 711 raise error.RepoError(
711 712 _("abandoned transaction found - run hg recover"))
712 713
713 714 journalfiles = self._writejournal(desc)
714 715 renames = [(x, undoname(x)) for x in journalfiles]
715 716
716 717 tr = transaction.transaction(self.ui.warn, self.sopener,
717 718 self.sjoin("journal"),
718 719 aftertrans(renames),
719 720 self.store.createmode)
720 721 self._transref = weakref.ref(tr)
721 722 return tr
722 723
723 724 def _writejournal(self, desc):
724 725 # save dirstate for rollback
725 726 try:
726 727 ds = self.opener.read("dirstate")
727 728 except IOError:
728 729 ds = ""
729 730 self.opener.write("journal.dirstate", ds)
730 731 self.opener.write("journal.branch",
731 732 encoding.fromlocal(self.dirstate.branch()))
732 733 self.opener.write("journal.desc",
733 734 "%d\n%s\n" % (len(self), desc))
734 735
735 736 bkname = self.join('bookmarks')
736 737 if os.path.exists(bkname):
737 738 util.copyfile(bkname, self.join('journal.bookmarks'))
738 739 else:
739 740 self.opener.write('journal.bookmarks', '')
740 741
741 742 return (self.sjoin('journal'), self.join('journal.dirstate'),
742 743 self.join('journal.branch'), self.join('journal.desc'),
743 744 self.join('journal.bookmarks'))
744 745
745 746 def recover(self):
746 747 lock = self.lock()
747 748 try:
748 749 if os.path.exists(self.sjoin("journal")):
749 750 self.ui.status(_("rolling back interrupted transaction\n"))
750 751 transaction.rollback(self.sopener, self.sjoin("journal"),
751 752 self.ui.warn)
752 753 self.invalidate()
753 754 return True
754 755 else:
755 756 self.ui.warn(_("no interrupted transaction available\n"))
756 757 return False
757 758 finally:
758 759 lock.release()
759 760
760 761 def rollback(self, dryrun=False, force=False):
761 762 wlock = lock = None
762 763 try:
763 764 wlock = self.wlock()
764 765 lock = self.lock()
765 766 if os.path.exists(self.sjoin("undo")):
766 767 return self._rollback(dryrun, force)
767 768 else:
768 769 self.ui.warn(_("no rollback information available\n"))
769 770 return 1
770 771 finally:
771 772 release(lock, wlock)
772 773
773 774 def _rollback(self, dryrun, force):
774 775 ui = self.ui
775 776 try:
776 777 args = self.opener.read('undo.desc').splitlines()
777 778 (oldlen, desc, detail) = (int(args[0]), args[1], None)
778 779 if len(args) >= 3:
779 780 detail = args[2]
780 781 oldtip = oldlen - 1
781 782
782 783 if detail and ui.verbose:
783 784 msg = (_('repository tip rolled back to revision %s'
784 785 ' (undo %s: %s)\n')
785 786 % (oldtip, desc, detail))
786 787 else:
787 788 msg = (_('repository tip rolled back to revision %s'
788 789 ' (undo %s)\n')
789 790 % (oldtip, desc))
790 791 except IOError:
791 792 msg = _('rolling back unknown transaction\n')
792 793 desc = None
793 794
794 795 if not force and self['.'] != self['tip'] and desc == 'commit':
795 796 raise util.Abort(
796 797 _('rollback of last commit while not checked out '
797 798 'may lose data'), hint=_('use -f to force'))
798 799
799 800 ui.status(msg)
800 801 if dryrun:
801 802 return 0
802 803
803 804 parents = self.dirstate.parents()
804 805 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
805 806 if os.path.exists(self.join('undo.bookmarks')):
806 807 util.rename(self.join('undo.bookmarks'),
807 808 self.join('bookmarks'))
808 809 self.invalidate()
809 810
810 811 parentgone = (parents[0] not in self.changelog.nodemap or
811 812 parents[1] not in self.changelog.nodemap)
812 813 if parentgone:
813 814 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
814 815 try:
815 816 branch = self.opener.read('undo.branch')
816 817 self.dirstate.setbranch(branch)
817 818 except IOError:
818 819 ui.warn(_('named branch could not be reset: '
819 820 'current branch is still \'%s\'\n')
820 821 % self.dirstate.branch())
821 822
822 823 self.dirstate.invalidate()
823 824 parents = tuple([p.rev() for p in self.parents()])
824 825 if len(parents) > 1:
825 826 ui.status(_('working directory now based on '
826 827 'revisions %d and %d\n') % parents)
827 828 else:
828 829 ui.status(_('working directory now based on '
829 830 'revision %d\n') % parents)
830 831 self.destroyed()
831 832 return 0
832 833
833 834 def invalidatecaches(self):
834 835 try:
835 836 delattr(self, '_tagscache')
836 837 except AttributeError:
837 838 pass
838 839
839 840 self._branchcache = None # in UTF-8
840 841 self._branchcachetip = None
841 842
842 843 def invalidatedirstate(self):
843 844 '''Invalidates the dirstate, causing the next call to dirstate
844 845 to check if it was modified since the last time it was read,
845 846 rereading it if it has.
846 847
847 848 This is different from dirstate.invalidate() in that it doesn't
848 849 always reread the dirstate. Use dirstate.invalidate() if you want to
849 850 explicitly read the dirstate again (i.e. restoring it to a previous
850 851 known good state).'''
851 852 try:
852 853 delattr(self, 'dirstate')
853 854 except AttributeError:
854 855 pass
855 856
856 857 def invalidate(self):
857 858 for k in self._filecache:
858 859 # dirstate is invalidated separately in invalidatedirstate()
859 860 if k == 'dirstate':
860 861 continue
861 862
862 863 try:
863 864 delattr(self, k)
864 865 except AttributeError:
865 866 pass
866 867 self.invalidatecaches()
867 868
868 869 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
869 870 try:
870 871 l = lock.lock(lockname, 0, releasefn, desc=desc)
871 872 except error.LockHeld, inst:
872 873 if not wait:
873 874 raise
874 875 self.ui.warn(_("waiting for lock on %s held by %r\n") %
875 876 (desc, inst.locker))
876 877 # default to 600 seconds timeout
877 878 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
878 879 releasefn, desc=desc)
879 880 if acquirefn:
880 881 acquirefn()
881 882 return l
882 883
883 884 def lock(self, wait=True):
884 885 '''Lock the repository store (.hg/store) and return a weak reference
885 886 to the lock. Use this before modifying the store (e.g. committing or
886 887 stripping). If you are opening a transaction, get a lock as well.'''
887 888 l = self._lockref and self._lockref()
888 889 if l is not None and l.held:
889 890 l.lock()
890 891 return l
891 892
892 893 def unlock():
893 894 self.store.write()
894 895 for k, ce in self._filecache.items():
895 896 if k == 'dirstate':
896 897 continue
897 898 ce.refresh()
898 899
899 900 l = self._lock(self.sjoin("lock"), wait, unlock,
900 901 self.invalidate, _('repository %s') % self.origroot)
901 902 self._lockref = weakref.ref(l)
902 903 return l
903 904
904 905 def wlock(self, wait=True):
905 906 '''Lock the non-store parts of the repository (everything under
906 907 .hg except .hg/store) and return a weak reference to the lock.
907 908 Use this before modifying files in .hg.'''
908 909 l = self._wlockref and self._wlockref()
909 910 if l is not None and l.held:
910 911 l.lock()
911 912 return l
912 913
913 914 def unlock():
914 915 self.dirstate.write()
915 916 ce = self._filecache.get('dirstate')
916 917 if ce:
917 918 ce.refresh()
918 919
919 920 l = self._lock(self.join("wlock"), wait, unlock,
920 921 self.invalidatedirstate, _('working directory of %s') %
921 922 self.origroot)
922 923 self._wlockref = weakref.ref(l)
923 924 return l
924 925
925 926 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
926 927 """
927 928 commit an individual file as part of a larger transaction
928 929 """
929 930
930 931 fname = fctx.path()
931 932 text = fctx.data()
932 933 flog = self.file(fname)
933 934 fparent1 = manifest1.get(fname, nullid)
934 935 fparent2 = fparent2o = manifest2.get(fname, nullid)
935 936
936 937 meta = {}
937 938 copy = fctx.renamed()
938 939 if copy and copy[0] != fname:
939 940 # Mark the new revision of this file as a copy of another
940 941 # file. This copy data will effectively act as a parent
941 942 # of this new revision. If this is a merge, the first
942 943 # parent will be the nullid (meaning "look up the copy data")
943 944 # and the second one will be the other parent. For example:
944 945 #
945 946 # 0 --- 1 --- 3 rev1 changes file foo
946 947 # \ / rev2 renames foo to bar and changes it
947 948 # \- 2 -/ rev3 should have bar with all changes and
948 949 # should record that bar descends from
949 950 # bar in rev2 and foo in rev1
950 951 #
951 952 # this allows this merge to succeed:
952 953 #
953 954 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
954 955 # \ / merging rev3 and rev4 should use bar@rev2
955 956 # \- 2 --- 4 as the merge base
956 957 #
957 958
958 959 cfname = copy[0]
959 960 crev = manifest1.get(cfname)
960 961 newfparent = fparent2
961 962
962 963 if manifest2: # branch merge
963 964 if fparent2 == nullid or crev is None: # copied on remote side
964 965 if cfname in manifest2:
965 966 crev = manifest2[cfname]
966 967 newfparent = fparent1
967 968
968 969 # find source in nearest ancestor if we've lost track
969 970 if not crev:
970 971 self.ui.debug(" %s: searching for copy revision for %s\n" %
971 972 (fname, cfname))
972 973 for ancestor in self[None].ancestors():
973 974 if cfname in ancestor:
974 975 crev = ancestor[cfname].filenode()
975 976 break
976 977
977 978 if crev:
978 979 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
979 980 meta["copy"] = cfname
980 981 meta["copyrev"] = hex(crev)
981 982 fparent1, fparent2 = nullid, newfparent
982 983 else:
983 984 self.ui.warn(_("warning: can't find ancestor for '%s' "
984 985 "copied from '%s'!\n") % (fname, cfname))
985 986
986 987 elif fparent2 != nullid:
987 988 # is one parent an ancestor of the other?
988 989 fparentancestor = flog.ancestor(fparent1, fparent2)
989 990 if fparentancestor == fparent1:
990 991 fparent1, fparent2 = fparent2, nullid
991 992 elif fparentancestor == fparent2:
992 993 fparent2 = nullid
993 994
994 995 # is the file changed?
995 996 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
996 997 changelist.append(fname)
997 998 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
998 999
999 1000 # are just the flags changed during merge?
1000 1001 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1001 1002 changelist.append(fname)
1002 1003
1003 1004 return fparent1
1004 1005
1005 1006 def commit(self, text="", user=None, date=None, match=None, force=False,
1006 1007 editor=False, extra={}):
1007 1008 """Add a new revision to current repository.
1008 1009
1009 1010 Revision information is gathered from the working directory,
1010 1011 match can be used to filter the committed files. If editor is
1011 1012 supplied, it is called to get a commit message.
1012 1013 """
1013 1014
1014 1015 def fail(f, msg):
1015 1016 raise util.Abort('%s: %s' % (f, msg))
1016 1017
1017 1018 if not match:
1018 1019 match = matchmod.always(self.root, '')
1019 1020
1020 1021 if not force:
1021 1022 vdirs = []
1022 1023 match.dir = vdirs.append
1023 1024 match.bad = fail
1024 1025
1025 1026 wlock = self.wlock()
1026 1027 try:
1027 1028 wctx = self[None]
1028 1029 merge = len(wctx.parents()) > 1
1029 1030
1030 1031 if (not force and merge and match and
1031 1032 (match.files() or match.anypats())):
1032 1033 raise util.Abort(_('cannot partially commit a merge '
1033 1034 '(do not specify files or patterns)'))
1034 1035
1035 1036 changes = self.status(match=match, clean=force)
1036 1037 if force:
1037 1038 changes[0].extend(changes[6]) # mq may commit unchanged files
1038 1039
1039 1040 # check subrepos
1040 1041 subs = []
1041 1042 removedsubs = set()
1042 1043 if '.hgsub' in wctx:
1043 1044 # only manage subrepos and .hgsubstate if .hgsub is present
1044 1045 for p in wctx.parents():
1045 1046 removedsubs.update(s for s in p.substate if match(s))
1046 1047 for s in wctx.substate:
1047 1048 removedsubs.discard(s)
1048 1049 if match(s) and wctx.sub(s).dirty():
1049 1050 subs.append(s)
1050 1051 if (subs or removedsubs):
1051 1052 if (not match('.hgsub') and
1052 1053 '.hgsub' in (wctx.modified() + wctx.added())):
1053 1054 raise util.Abort(
1054 1055 _("can't commit subrepos without .hgsub"))
1055 1056 if '.hgsubstate' not in changes[0]:
1056 1057 changes[0].insert(0, '.hgsubstate')
1057 1058 if '.hgsubstate' in changes[2]:
1058 1059 changes[2].remove('.hgsubstate')
1059 1060 elif '.hgsub' in changes[2]:
1060 1061 # clean up .hgsubstate when .hgsub is removed
1061 1062 if ('.hgsubstate' in wctx and
1062 1063 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1063 1064 changes[2].insert(0, '.hgsubstate')
1064 1065
1065 1066 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1066 1067 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1067 1068 if changedsubs:
1068 1069 raise util.Abort(_("uncommitted changes in subrepo %s")
1069 1070 % changedsubs[0],
1070 1071 hint=_("use --subrepos for recursive commit"))
1071 1072
1072 1073 # make sure all explicit patterns are matched
1073 1074 if not force and match.files():
1074 1075 matched = set(changes[0] + changes[1] + changes[2])
1075 1076
1076 1077 for f in match.files():
1077 1078 if f == '.' or f in matched or f in wctx.substate:
1078 1079 continue
1079 1080 if f in changes[3]: # missing
1080 1081 fail(f, _('file not found!'))
1081 1082 if f in vdirs: # visited directory
1082 1083 d = f + '/'
1083 1084 for mf in matched:
1084 1085 if mf.startswith(d):
1085 1086 break
1086 1087 else:
1087 1088 fail(f, _("no match under directory!"))
1088 1089 elif f not in self.dirstate:
1089 1090 fail(f, _("file not tracked!"))
1090 1091
1091 1092 if (not force and not extra.get("close") and not merge
1092 1093 and not (changes[0] or changes[1] or changes[2])
1093 1094 and wctx.branch() == wctx.p1().branch()):
1094 1095 return None
1095 1096
1096 1097 ms = mergemod.mergestate(self)
1097 1098 for f in changes[0]:
1098 1099 if f in ms and ms[f] == 'u':
1099 1100 raise util.Abort(_("unresolved merge conflicts "
1100 1101 "(see hg help resolve)"))
1101 1102
1102 1103 cctx = context.workingctx(self, text, user, date, extra, changes)
1103 1104 if editor:
1104 1105 cctx._text = editor(self, cctx, subs)
1105 1106 edited = (text != cctx._text)
1106 1107
1107 1108 # commit subs
1108 1109 if subs or removedsubs:
1109 1110 state = wctx.substate.copy()
1110 1111 for s in sorted(subs):
1111 1112 sub = wctx.sub(s)
1112 1113 self.ui.status(_('committing subrepository %s\n') %
1113 1114 subrepo.subrelpath(sub))
1114 1115 sr = sub.commit(cctx._text, user, date)
1115 1116 state[s] = (state[s][0], sr)
1116 1117 subrepo.writestate(self, state)
1117 1118
1118 1119 # Save commit message in case this transaction gets rolled back
1119 1120 # (e.g. by a pretxncommit hook). Leave the content alone on
1120 1121 # the assumption that the user will use the same editor again.
1121 1122 msgfn = self.savecommitmessage(cctx._text)
1122 1123
1123 1124 p1, p2 = self.dirstate.parents()
1124 1125 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1125 1126 try:
1126 1127 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1127 1128 ret = self.commitctx(cctx, True)
1128 1129 except:
1129 1130 if edited:
1130 1131 self.ui.write(
1131 1132 _('note: commit message saved in %s\n') % msgfn)
1132 1133 raise
1133 1134
1134 1135 # update bookmarks, dirstate and mergestate
1135 1136 bookmarks.update(self, p1, ret)
1136 1137 for f in changes[0] + changes[1]:
1137 1138 self.dirstate.normal(f)
1138 1139 for f in changes[2]:
1139 1140 self.dirstate.drop(f)
1140 1141 self.dirstate.setparents(ret)
1141 1142 ms.reset()
1142 1143 finally:
1143 1144 wlock.release()
1144 1145
1145 1146 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1146 1147 return ret
1147 1148
1148 1149 def commitctx(self, ctx, error=False):
1149 1150 """Add a new revision to current repository.
1150 1151 Revision information is passed via the context argument.
1151 1152 """
1152 1153
1153 1154 tr = lock = None
1154 1155 removed = list(ctx.removed())
1155 1156 p1, p2 = ctx.p1(), ctx.p2()
1156 1157 user = ctx.user()
1157 1158
1158 1159 lock = self.lock()
1159 1160 try:
1160 1161 tr = self.transaction("commit")
1161 1162 trp = weakref.proxy(tr)
1162 1163
1163 1164 if ctx.files():
1164 1165 m1 = p1.manifest().copy()
1165 1166 m2 = p2.manifest()
1166 1167
1167 1168 # check in files
1168 1169 new = {}
1169 1170 changed = []
1170 1171 linkrev = len(self)
1171 1172 for f in sorted(ctx.modified() + ctx.added()):
1172 1173 self.ui.note(f + "\n")
1173 1174 try:
1174 1175 fctx = ctx[f]
1175 1176 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1176 1177 changed)
1177 1178 m1.set(f, fctx.flags())
1178 1179 except OSError, inst:
1179 1180 self.ui.warn(_("trouble committing %s!\n") % f)
1180 1181 raise
1181 1182 except IOError, inst:
1182 1183 errcode = getattr(inst, 'errno', errno.ENOENT)
1183 1184 if error or errcode and errcode != errno.ENOENT:
1184 1185 self.ui.warn(_("trouble committing %s!\n") % f)
1185 1186 raise
1186 1187 else:
1187 1188 removed.append(f)
1188 1189
1189 1190 # update manifest
1190 1191 m1.update(new)
1191 1192 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1192 1193 drop = [f for f in removed if f in m1]
1193 1194 for f in drop:
1194 1195 del m1[f]
1195 1196 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1196 1197 p2.manifestnode(), (new, drop))
1197 1198 files = changed + removed
1198 1199 else:
1199 1200 mn = p1.manifestnode()
1200 1201 files = []
1201 1202
1202 1203 # update changelog
1203 1204 self.changelog.delayupdate()
1204 1205 n = self.changelog.add(mn, files, ctx.description(),
1205 1206 trp, p1.node(), p2.node(),
1206 1207 user, ctx.date(), ctx.extra().copy())
1207 1208 p = lambda: self.changelog.writepending() and self.root or ""
1208 1209 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1209 1210 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1210 1211 parent2=xp2, pending=p)
1211 1212 self.changelog.finalize(trp)
1212 1213 tr.close()
1213 1214
1214 1215 if self._branchcache:
1215 1216 self.updatebranchcache()
1216 1217 return n
1217 1218 finally:
1218 1219 if tr:
1219 1220 tr.release()
1220 1221 lock.release()
1221 1222
1222 1223 def destroyed(self):
1223 1224 '''Inform the repository that nodes have been destroyed.
1224 1225 Intended for use by strip and rollback, so there's a common
1225 1226 place for anything that has to be done after destroying history.'''
1226 1227 # XXX it might be nice if we could take the list of destroyed
1227 1228 # nodes, but I don't see an easy way for rollback() to do that
1228 1229
1229 1230 # Ensure the persistent tag cache is updated. Doing it now
1230 1231 # means that the tag cache only has to worry about destroyed
1231 1232 # heads immediately after a strip/rollback. That in turn
1232 1233 # guarantees that "cachetip == currenttip" (comparing both rev
1233 1234 # and node) always means no nodes have been added or destroyed.
1234 1235
1235 1236 # XXX this is suboptimal when qrefresh'ing: we strip the current
1236 1237 # head, refresh the tag cache, then immediately add a new head.
1237 1238 # But I think doing it this way is necessary for the "instant
1238 1239 # tag cache retrieval" case to work.
1239 1240 self.invalidatecaches()
1240 1241
1241 1242 def walk(self, match, node=None):
1242 1243 '''
1243 1244 walk recursively through the directory tree or a given
1244 1245 changeset, finding all files matched by the match
1245 1246 function
1246 1247 '''
1247 1248 return self[node].walk(match)
1248 1249
1249 1250 def status(self, node1='.', node2=None, match=None,
1250 1251 ignored=False, clean=False, unknown=False,
1251 1252 listsubrepos=False):
1252 1253 """return status of files between two nodes or node and working directory
1253 1254
1254 1255 If node1 is None, use the first dirstate parent instead.
1255 1256 If node2 is None, compare node1 with working directory.
1256 1257 """
1257 1258
1258 1259 def mfmatches(ctx):
1259 1260 mf = ctx.manifest().copy()
1260 1261 for fn in mf.keys():
1261 1262 if not match(fn):
1262 1263 del mf[fn]
1263 1264 return mf
1264 1265
1265 1266 if isinstance(node1, context.changectx):
1266 1267 ctx1 = node1
1267 1268 else:
1268 1269 ctx1 = self[node1]
1269 1270 if isinstance(node2, context.changectx):
1270 1271 ctx2 = node2
1271 1272 else:
1272 1273 ctx2 = self[node2]
1273 1274
1274 1275 working = ctx2.rev() is None
1275 1276 parentworking = working and ctx1 == self['.']
1276 1277 match = match or matchmod.always(self.root, self.getcwd())
1277 1278 listignored, listclean, listunknown = ignored, clean, unknown
1278 1279
1279 1280 # load earliest manifest first for caching reasons
1280 1281 if not working and ctx2.rev() < ctx1.rev():
1281 1282 ctx2.manifest()
1282 1283
1283 1284 if not parentworking:
1284 1285 def bad(f, msg):
1285 1286 if f not in ctx1:
1286 1287 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1287 1288 match.bad = bad
1288 1289
1289 1290 if working: # we need to scan the working dir
1290 1291 subrepos = []
1291 1292 if '.hgsub' in self.dirstate:
1292 1293 subrepos = ctx2.substate.keys()
1293 1294 s = self.dirstate.status(match, subrepos, listignored,
1294 1295 listclean, listunknown)
1295 1296 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1296 1297
1297 1298 # check for any possibly clean files
1298 1299 if parentworking and cmp:
1299 1300 fixup = []
1300 1301 # do a full compare of any files that might have changed
1301 1302 for f in sorted(cmp):
1302 1303 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1303 1304 or ctx1[f].cmp(ctx2[f])):
1304 1305 modified.append(f)
1305 1306 else:
1306 1307 fixup.append(f)
1307 1308
1308 1309 # update dirstate for files that are actually clean
1309 1310 if fixup:
1310 1311 if listclean:
1311 1312 clean += fixup
1312 1313
1313 1314 try:
1314 1315 # updating the dirstate is optional
1315 1316 # so we don't wait on the lock
1316 1317 wlock = self.wlock(False)
1317 1318 try:
1318 1319 for f in fixup:
1319 1320 self.dirstate.normal(f)
1320 1321 finally:
1321 1322 wlock.release()
1322 1323 except error.LockError:
1323 1324 pass
1324 1325
1325 1326 if not parentworking:
1326 1327 mf1 = mfmatches(ctx1)
1327 1328 if working:
1328 1329 # we are comparing working dir against non-parent
1329 1330 # generate a pseudo-manifest for the working dir
1330 1331 mf2 = mfmatches(self['.'])
1331 1332 for f in cmp + modified + added:
1332 1333 mf2[f] = None
1333 1334 mf2.set(f, ctx2.flags(f))
1334 1335 for f in removed:
1335 1336 if f in mf2:
1336 1337 del mf2[f]
1337 1338 else:
1338 1339 # we are comparing two revisions
1339 1340 deleted, unknown, ignored = [], [], []
1340 1341 mf2 = mfmatches(ctx2)
1341 1342
1342 1343 modified, added, clean = [], [], []
1343 1344 for fn in mf2:
1344 1345 if fn in mf1:
1345 1346 if (fn not in deleted and
1346 1347 (mf1.flags(fn) != mf2.flags(fn) or
1347 1348 (mf1[fn] != mf2[fn] and
1348 1349 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1349 1350 modified.append(fn)
1350 1351 elif listclean:
1351 1352 clean.append(fn)
1352 1353 del mf1[fn]
1353 1354 elif fn not in deleted:
1354 1355 added.append(fn)
1355 1356 removed = mf1.keys()
1356 1357
1357 1358 if working and modified and not self.dirstate._checklink:
1358 1359 # Symlink placeholders may get non-symlink-like contents
1359 1360 # via user error or dereferencing by NFS or Samba servers,
1360 1361 # so we filter out any placeholders that don't look like a
1361 1362 # symlink
1362 1363 sane = []
1363 1364 for f in modified:
1364 1365 if ctx2.flags(f) == 'l':
1365 1366 d = ctx2[f].data()
1366 1367 if len(d) >= 1024 or '\n' in d or util.binary(d):
1367 1368 self.ui.debug('ignoring suspect symlink placeholder'
1368 1369 ' "%s"\n' % f)
1369 1370 continue
1370 1371 sane.append(f)
1371 1372 modified = sane
1372 1373
1373 1374 r = modified, added, removed, deleted, unknown, ignored, clean
1374 1375
1375 1376 if listsubrepos:
1376 1377 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1377 1378 if working:
1378 1379 rev2 = None
1379 1380 else:
1380 1381 rev2 = ctx2.substate[subpath][1]
1381 1382 try:
1382 1383 submatch = matchmod.narrowmatcher(subpath, match)
1383 1384 s = sub.status(rev2, match=submatch, ignored=listignored,
1384 1385 clean=listclean, unknown=listunknown,
1385 1386 listsubrepos=True)
1386 1387 for rfiles, sfiles in zip(r, s):
1387 1388 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1388 1389 except error.LookupError:
1389 1390 self.ui.status(_("skipping missing subrepository: %s\n")
1390 1391 % subpath)
1391 1392
1392 1393 for l in r:
1393 1394 l.sort()
1394 1395 return r
1395 1396
1396 1397 def heads(self, start=None):
1397 1398 heads = self.changelog.heads(start)
1398 1399 # sort the output in rev descending order
1399 1400 return sorted(heads, key=self.changelog.rev, reverse=True)
1400 1401
1401 1402 def branchheads(self, branch=None, start=None, closed=False):
1402 1403 '''return a (possibly filtered) list of heads for the given branch
1403 1404
1404 1405 Heads are returned in topological order, from newest to oldest.
1405 1406 If branch is None, use the dirstate branch.
1406 1407 If start is not None, return only heads reachable from start.
1407 1408 If closed is True, return heads that are marked as closed as well.
1408 1409 '''
1409 1410 if branch is None:
1410 1411 branch = self[None].branch()
1411 1412 branches = self.branchmap()
1412 1413 if branch not in branches:
1413 1414 return []
1414 1415 # the cache returns heads ordered lowest to highest
1415 1416 bheads = list(reversed(branches[branch]))
1416 1417 if start is not None:
1417 1418 # filter out the heads that cannot be reached from startrev
1418 1419 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1419 1420 bheads = [h for h in bheads if h in fbheads]
1420 1421 if not closed:
1421 1422 bheads = [h for h in bheads if
1422 1423 ('close' not in self.changelog.read(h)[5])]
1423 1424 return bheads
1424 1425
1425 1426 def branches(self, nodes):
1426 1427 if not nodes:
1427 1428 nodes = [self.changelog.tip()]
1428 1429 b = []
1429 1430 for n in nodes:
1430 1431 t = n
1431 1432 while True:
1432 1433 p = self.changelog.parents(n)
1433 1434 if p[1] != nullid or p[0] == nullid:
1434 1435 b.append((t, n, p[0], p[1]))
1435 1436 break
1436 1437 n = p[0]
1437 1438 return b
1438 1439
1439 1440 def between(self, pairs):
1440 1441 r = []
1441 1442
1442 1443 for top, bottom in pairs:
1443 1444 n, l, i = top, [], 0
1444 1445 f = 1
1445 1446
1446 1447 while n != bottom and n != nullid:
1447 1448 p = self.changelog.parents(n)[0]
1448 1449 if i == f:
1449 1450 l.append(n)
1450 1451 f = f * 2
1451 1452 n = p
1452 1453 i += 1
1453 1454
1454 1455 r.append(l)
1455 1456
1456 1457 return r
1457 1458
1458 1459 def pull(self, remote, heads=None, force=False):
1459 1460 lock = self.lock()
1460 1461 try:
1461 1462 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1462 1463 force=force)
1463 1464 common, fetch, rheads = tmp
1464 1465 if not fetch:
1465 1466 self.ui.status(_("no changes found\n"))
1466 1467 result = 0
1467 1468 else:
1468 1469 if heads is None and list(common) == [nullid]:
1469 1470 self.ui.status(_("requesting all changes\n"))
1470 1471 elif heads is None and remote.capable('changegroupsubset'):
1471 1472 # issue1320, avoid a race if remote changed after discovery
1472 1473 heads = rheads
1473 1474
1474 1475 if remote.capable('getbundle'):
1475 1476 cg = remote.getbundle('pull', common=common,
1476 1477 heads=heads or rheads)
1477 1478 elif heads is None:
1478 1479 cg = remote.changegroup(fetch, 'pull')
1479 1480 elif not remote.capable('changegroupsubset'):
1480 1481 raise util.Abort(_("partial pull cannot be done because "
1481 1482 "other repository doesn't support "
1482 1483 "changegroupsubset."))
1483 1484 else:
1484 1485 cg = remote.changegroupsubset(fetch, heads, 'pull')
1485 1486 result = self.addchangegroup(cg, 'pull', remote.url(),
1486 1487 lock=lock)
1487 1488 finally:
1488 1489 lock.release()
1489 1490
1490 1491 return result
1491 1492
1492 1493 def checkpush(self, force, revs):
1493 1494 """Extensions can override this function if additional checks have
1494 1495 to be performed before pushing, or call it if they override push
1495 1496 command.
1496 1497 """
1497 1498 pass
1498 1499
1499 1500 def push(self, remote, force=False, revs=None, newbranch=False):
1500 1501 '''Push outgoing changesets (limited by revs) from the current
1501 1502 repository to remote. Return an integer:
1502 1503 - 0 means HTTP error *or* nothing to push
1503 1504 - 1 means we pushed and remote head count is unchanged *or*
1504 1505 we have outgoing changesets but refused to push
1505 1506 - other values as described by addchangegroup()
1506 1507 '''
1507 1508 # there are two ways to push to remote repo:
1508 1509 #
1509 1510 # addchangegroup assumes local user can lock remote
1510 1511 # repo (local filesystem, old ssh servers).
1511 1512 #
1512 1513 # unbundle assumes local user cannot lock remote repo (new ssh
1513 1514 # servers, http servers).
1514 1515
1515 1516 self.checkpush(force, revs)
1516 1517 lock = None
1517 1518 unbundle = remote.capable('unbundle')
1518 1519 if not unbundle:
1519 1520 lock = remote.lock()
1520 1521 try:
1521 1522 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1522 1523 newbranch)
1523 1524 ret = remote_heads
1524 1525 if cg is not None:
1525 1526 if unbundle:
1526 1527 # local repo finds heads on server, finds out what
1527 1528 # revs it must push. once revs transferred, if server
1528 1529 # finds it has different heads (someone else won
1529 1530 # commit/push race), server aborts.
1530 1531 if force:
1531 1532 remote_heads = ['force']
1532 1533 # ssh: return remote's addchangegroup()
1533 1534 # http: return remote's addchangegroup() or 0 for error
1534 1535 ret = remote.unbundle(cg, remote_heads, 'push')
1535 1536 else:
1536 1537 # we return an integer indicating remote head count change
1537 1538 ret = remote.addchangegroup(cg, 'push', self.url(),
1538 1539 lock=lock)
1539 1540 finally:
1540 1541 if lock is not None:
1541 1542 lock.release()
1542 1543
1543 1544 self.ui.debug("checking for updated bookmarks\n")
1544 1545 rb = remote.listkeys('bookmarks')
1545 1546 for k in rb.keys():
1546 1547 if k in self._bookmarks:
1547 1548 nr, nl = rb[k], hex(self._bookmarks[k])
1548 1549 if nr in self:
1549 1550 cr = self[nr]
1550 1551 cl = self[nl]
1551 1552 if cl in cr.descendants():
1552 1553 r = remote.pushkey('bookmarks', k, nr, nl)
1553 1554 if r:
1554 1555 self.ui.status(_("updating bookmark %s\n") % k)
1555 1556 else:
1556 1557 self.ui.warn(_('updating bookmark %s'
1557 1558 ' failed!\n') % k)
1558 1559
1559 1560 return ret
1560 1561
1561 1562 def changegroupinfo(self, nodes, source):
1562 1563 if self.ui.verbose or source == 'bundle':
1563 1564 self.ui.status(_("%d changesets found\n") % len(nodes))
1564 1565 if self.ui.debugflag:
1565 1566 self.ui.debug("list of changesets:\n")
1566 1567 for node in nodes:
1567 1568 self.ui.debug("%s\n" % hex(node))
1568 1569
1569 1570 def changegroupsubset(self, bases, heads, source):
1570 1571 """Compute a changegroup consisting of all the nodes that are
1571 1572 descendants of any of the bases and ancestors of any of the heads.
1572 1573 Return a chunkbuffer object whose read() method will return
1573 1574 successive changegroup chunks.
1574 1575
1575 1576 It is fairly complex as determining which filenodes and which
1576 1577 manifest nodes need to be included for the changeset to be complete
1577 1578 is non-trivial.
1578 1579
1579 1580 Another wrinkle is doing the reverse, figuring out which changeset in
1580 1581 the changegroup a particular filenode or manifestnode belongs to.
1581 1582 """
1582 1583 cl = self.changelog
1583 1584 if not bases:
1584 1585 bases = [nullid]
1585 1586 csets, bases, heads = cl.nodesbetween(bases, heads)
1586 1587 # We assume that all ancestors of bases are known
1587 1588 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1588 1589 return self._changegroupsubset(common, csets, heads, source)
1589 1590
1590 1591 def getbundle(self, source, heads=None, common=None):
1591 1592 """Like changegroupsubset, but returns the set difference between the
1592 1593 ancestors of heads and the ancestors of common.
1593 1594
1594 1595 If heads is None, use the local heads. If common is None, use [nullid].
1595 1596
1596 1597 The nodes in common might not all be known locally due to the way the
1597 1598 current discovery protocol works.
1598 1599 """
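        # Hypothetical example, serving a pull: send everything the
        # client did not report as common
        #   cg = repo.getbundle('serve', heads=None, common=clientnodes)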
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]
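        # bundle10 calls lookup() once per node to map it back to the
        # changeset that introduced it; the side effects above also
        # collect the manifest and file nodes needed by later phases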
        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
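        # 'auto' (None) leaves the reordering decision to each revlog;
        # an explicit true/false in the [bundle] section forces it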

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

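        # 'UN' tags the changegroup stream as uncompressed; compression,
        # if any, is applied by the bundle or wire layer on top of this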
        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
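        # e.g. a pull that adds one head returns 2 (dh=1, dh+1), one that
        # removes a head returns -2 (dh=-1, dh-1), and one that leaves
        # the head count unchanged returns 1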
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

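            # anything left in needfiles is a file revision that some
            # manifest referenced but the incoming stream never delivered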
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
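            # mirror addchangegroup()'s convention of never returning 0
            # for a successful operation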
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
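        # A hypothetical caller, streaming everything from a peer that
        # was opened elsewhere:
        #   destrepo.clone(remotepeer, stream=True)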

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
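        # note: namespaces are pluggable via the pushkey module; the push
        # path above uses 'bookmarks', whose values are hex changeset nodes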

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
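# the renamed files are the undo.* copies that make 'hg rollback'
# possible; undoname() below computes the journal -> undo mapping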

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True